diff --git a/4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-60f75f53-189b-4910-bc91-de9064bb67731759002168004-2025_09_27-21.42.49.485/source.csv b/4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-60f75f53-189b-4910-bc91-de9064bb67731759002168004-2025_09_27-21.42.49.485/source.csv new file mode 100644 index 0000000000000000000000000000000000000000..aa95e977794c1adbc17fb50112d9976ac32032aa --- /dev/null +++ b/4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-60f75f53-189b-4910-bc91-de9064bb67731759002168004-2025_09_27-21.42.49.485/source.csv @@ -0,0 +1,33 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +2,44,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"9:42:49 PM [info] Activating crowd-code\n9:42:49 PM [info] Recording started\n9:42:49 PM [info] Initializing git provider using file system watchers...\n9:42:49 PM [info] Git repository found\n9:42:49 PM [info] Git provider initialized successfully\n",Log,tab +3,64,"extension-output-pdoom-org.crowd-code-#1-crowd-code",245,0,"9:42:49 PM [info] Initial git state: [object Object]\n",Log,content +4,14146,"src/makefile",0,0,"# Copyright 2021 Manna Harbour\n# https://github.com/manna-harbour/miryoku\n\nsource := $(wildcard *.kbd.cpp)\n\ntargets := $(source:%.kbd.cpp=build/%.kbd)\n\nall: $(targets)\n\nbuild/%.kbd: %.kbd.cpp FORCE\n\tcpp -P $(OPT_DEFS) $< | \\n\tsed \\n\t -e ""s/U_QUOT/'/g"" \\n\t -e 's/U_DQUO/""/g' \\n\t -e 's/U_COMM/,/g' \\n\t -e 's/U_LPRN/\\(/g' \\n\t -e 's/U_RPRN/\\)/g' \\n\t -e 's/U_PIPE/|/g' \\n\t -e 's/[ ]*U_LF[ ]*/\n/g' \\n\t -e 's/[ ]*U_TAB[ ]*/\t/g' \\n\t > $@\n\nFORCE: ;\n\ntest: build/miryoku_kmonad.kbd\n\tkmonad -d $<\n\ninclude custom_rules.mk\n\ninclude post_rules.mk\n",makefile,tab +5,15208,"src/makefile",231,0,"",makefile,selection_mouse +6,15218,"src/makefile",230,0,"",makefile,selection_command +7,19878,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab +8,20509,"src/makefile",0,0,"",makefile,tab +9,895935,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab +10,897761,"TERMINAL",0,0,"",,terminal_focus +11,897769,"src/makefile",0,0,"",makefile,tab +12,912730,"TERMINAL",0,0,"cd src",,terminal_command +13,912734,"TERMINAL",0,0,"]633;C% \r \r",,terminal_output +14,923821,"TERMINAL",0,0,"clang -E -P -x c++ -DMIRYOKU_ALPHAS_QWERTY -DMIRYOKU_NAV_VI -DMIRYOKU_CLIPBOARD_MAC -DMIRYOKU_KMONAD_OS_MAC miryoku_kmonad.kbd.cpp | sed -e ""s/U_QUOT/'/g"" -e 's/U_DQUO/""/g' -e 's/U_COMM/,/g' -e 's/U_LPRN/\\(/g' -e 's/U_RPRN/\\)/g' -e 's/U_PIPE/|/g' -e 's/[ ]*U_LF[ ]*/\n/g' -e 's/[ ]*U_TAB[ ]*/\t/g' > build/miryoku_kmonad.kbd && sed -n '1,60p' build/miryoku_kmonad.kbd | cat",,terminal_command +15,923848,"TERMINAL",0,0,"]633;C;; Copyright 2021 Manna Harbour\r\n;; github.com/manna-harbour/miryoku\r\n\r\n\r\n(defcfg\r\n input (iokit-name )\r\n output (kext)\r\n fallthrough false\r\n)\r\n(defsrc\r\n 2 3 4 5 6 8 9 0 - =\r\n q w e r t i o p [ ]\r\n caps a s d f k l ; ' ent\r\n x c v , . /\r\n)\r\n(deflayer U_BASE\r\nq\tw\te\tr\tt\ty\tu\ti\to\tp\r\n(tap-hold-next-release 200 a met)\t(tap-hold-next-release 200 s alt)\t(tap-hold-next-release 200 d ctl)\t(tap-hold-next-release 200 f sft)\tg\th\t(tap-hold-next-release 200 j sft)\t(tap-hold-next-release 200 k ctl)\t(tap-hold-next-release 200 l alt)\t(tap-hold-next-release 200 ' met)\r\n(tap-hold-next-release 200 z (layer-toggle U_BUTTON))\t(tap-hold-next-release 200 x ralt)\tc\tv\tb\tn\tm\t,\t(tap-hold-next-release 200 . 
ralt)\t(tap-hold-next-release 200 / (layer-toggle U_BUTTON))\r\n\t\t(tap-hold-next-release 200 esc (layer-toggle U_MEDIA))\t(tap-hold-next-release 200 spc (layer-toggle U_NAV))\t(tap-hold-next-release 200 tab (layer-toggle U_MOUSE))\t(tap-hold-next-release 200 ent (layer-toggle U_SYM))\t(tap-hold-next-release 200 bspc (layer-toggle U_NUM))\t(tap-hold-next-release 200 del (layer-toggle U_FUN))\r\n)\r\n(deflayer U_EXTRA\r\nq\tw\te\tr\tt\ty\tu\ti\to\tp\r\n(tap-hold-next-release 200 a met)\t(tap-hold-next-release 200 s alt)\t(tap-hold-next-release 200 d ctl)\t(tap-hold-next-release 200 f sft)\tg\th\t(tap-hold-next-release 200 j sft)\t(tap-hold-next-release 200 k ctl)\t(tap-hold-next-release 200 l alt)\t(tap-hold-next-release 200 ' met)\r\n(tap-hold-next-release 200 z (layer-toggle U_BUTTON))\t(tap-hold-next-release 200 x ralt)\tc\tv\tb\tn\tm\t,\t(tap-hold-next-release 200 . ralt)\t(tap-hold-next-release 200 / (layer-toggle U_BUTTON))\r\n\t\t(tap-hold-next-release 200 esc (layer-toggle U_MEDIA))\t(tap-hold-next-release 200 spc (layer-toggle U_NAV))\t(tap-hold-next-release 200 tab (layer-toggle U_MOUSE))\t(tap-hold-next-release 200 ent (layer-toggle U_SYM))\t(tap-hold-next-release 200 bspc (layer-toggle U_NUM))\t(tap-hold-next-release 200 del (layer-toggle U_FUN))\r\n)\r\n(deflayer U_TAP\r\nq\tw\tf\tp\tb\tj\tl\tu\ty\t'\r\na\tr\ts\tt\tg\tm\tn\te\ti\to\r\nz\tx\tc\td\tv\tk\th\t,\t.\t/\r\n\t\tesc\tspc\ttab\tent\tbspc\tdel\r\n)\r\n(deflayer U_BUTTON\r\nM-z\tM-x\tM-c\tM-v\tS-M-z\tS-M-z\tM-v\tM-c\tM-x\tM-z\r\nmet\talt\tctl\tsft\tXX\tXX\tsft\tctl\talt\tmet\r\nM-z\tM-x\tM-c\tM-v\tS-M-z\tS-M-z\tM-v\tM-c\tM-x\tM-z\r\n\t\t#(kp* kp5)\t#(kp/ kp5)\t#(kp- kp5)\t#(kp- kp5)\t#(kp/ kp5)\t#(kp* kp5)\r\n)\r\n(deflayer U_NAV\r\nXX\t(multi-tap 200 XX (layer-switch U_TAP))\t(multi-tap 200 XX (layer-switch U_EXTRA))\t(multi-tap 200 XX (layer-switch U_BASE))\tXX\tS-M-z\tM-v\tM-c\tM-x\tM-z\r\nmet\talt\tctl\tsft\tXX\tleft\tdown\tup\tright\tcaps\r\nXX\tralt\t(multi-tap 200 XX (layer-switch U_NUM))\t(multi-tap 200 XX (layer-switch U_NAV))\tXX\thome\tpgdn\tpgup\tend\tins\r\n\t\tXX\tXX\tXX\tent\tbspc\tdel\r\n)\r\n(deflayer U_MOUSE\r\nXX\t(multi-tap 200 XX (layer-switch U_TAP))\t(multi-tap 200 XX (layer-switch U_EXTRA))\t(multi-tap 200 XX (layer-switch U_BASE))\tXX\tS-M-z\tM-v\tM-c\tM-x\tM-z\r\nmet\talt\tctl\tsft\tXX\tkp4\tkp2\tkp8\tkp6\tXX\r\nXX\tralt\t(multi-tap 200 XX (layer-switch U_SYM))\t(multi-tap 200 XX (layer-switch U_MOUSE))\tXX\tXX\tXX\tXX\tXX\tXX\r\n\t\tXX\tXX\tXX\t#(kp- kp5)\t#(kp/ kp5)\t#(kp* kp5)\r\n)\r\n(deflayer U_MEDIA\r\nXX\t(multi-tap 200 XX (layer-switch U_TAP))\t(multi-tap 200 XX (layer-switch U_EXTRA))\t(multi-tap 200 XX (layer-switch U_BASE))\tXX\tXX\tXX\tXX\tXX\tXX\r\nmet\talt\tctl\tsft\tXX\tprevioussong\tvold\tvolu\tnextsong\tXX\r\nXX\tralt\t(multi-tap 200 XX (layer-switch U_FUN))\t(multi-tap 200 XX (layer-switch U_MEDIA))\tXX\tXX\tXX\tXX\tXX\tXX\r\n\t\tXX\tXX\tXX\tstopcd\tplaypause\tmute\r\n)\r\n(deflayer U_NUM\r\n[\t7\t8\t9\t]\tXX\t(multi-tap 200 XX (layer-switch U_BASE))\t(multi-tap 200 XX (layer-switch U_EXTRA))\t(multi-tap 200 XX (layer-switch U_TAP))\tXX\r\n;\t4\t5\t6\t=\tXX\tsft\tctl\talt\tmet\r\n% \r \r",,terminal_output +16,11141236,"src/custom_config.h",0,0,"// Copyright 2021 Manna Harbour\n// https://github.com/manna-harbour/miryoku\n\n#pragma once\n\n// Display brightness controls for macOS (KMonad kext)\n// Adjust these if your KMonad version uses different names\n#define U_BRIGHT_DN display_brightness_dec\n#define U_BRIGHT_UP display_brightness_inc\n",cpp,tab 
+17,11141252,"src/custom_config.h",91,0,"",cpp,selection_command +18,11588969,"src/custom_config.h",90,202,"",cpp,content +19,11589018,"src/custom_config.h",90,0,"// Display brightness controls for macOS (KMonad kext)\n// Adjust these if your KMonad version uses different names\n#define U_BRIGHT_DN display_brightness_decrement\n#define U_BRIGHT_UP display_brightness_increment\n\n// Override Media layer to place brightness on 'u' and 'i' positions (top row, right-hand 1st and 2nd)\n#if !defined(MIRYOKU_LAYER_MEDIA)\n #if defined (MIRYOKU_NAV_INVERTEDT)\n #define MIRYOKU_LAYER_MEDIA MIRYOKU_ALTERNATIVES_MEDIA_INVERTEDT\n #elif defined (MIRYOKU_NAV_VI)\n // Row 1: set K05 ('u') = U_BRIGHT_DN, K06 ('i') = U_BRIGHT_UP\n #define MIRYOKU_LAYER_MEDIA \\n U_NA, U_DF(U_TAP), U_DF(U_EXTRA), U_DF(U_BASE), U_NA, U_BRIGHT_DN, U_BRIGHT_UP, U_NU, U_NU, U_NU, \\n met, alt, ctl, sft, U_NA, previoussong, vold, volu, nextsong, U_NU, \\n U_NA, ralt, U_DF(U_FUN), U_DF(U_MEDIA), U_NA, U_NU, U_NU, U_NU, U_NU, U_NU, \\n U_NP, U_NP, U_NA, U_NA, U_NA, stopcd, playpause, mute, U_NP, U_NP\n #else\n #define MIRYOKU_LAYER_MEDIA MIRYOKU_ALTERNATIVES_MEDIA\n #endif\n#endif\n\n",cpp,content +20,11589020,"src/custom_config.h",1539,1,"",cpp,content +21,11589020,"src/custom_config.h",90,0,"\n",cpp,content +22,12139484,"src/custom_config.h",90,1450,"",cpp,content +23,12139511,"src/custom_config.h",90,0,"// Display brightness controls for macOS (KMonad kext)\n// Adjust these if your KMonad version uses different names\n#define U_BRIGHT_DN display_brightness_dec\n#define U_BRIGHT_UP display_brightness_inc\n\n// Override Media layer to place brightness on 'u' and 'i' positions (top row, right-hand 1st and 2nd)\n#if !defined(MIRYOKU_LAYER_MEDIA)\n #if defined (MIRYOKU_NAV_INVERTEDT)\n #define MIRYOKU_LAYER_MEDIA MIRYOKU_ALTERNATIVES_MEDIA_INVERTEDT\n #elif defined (MIRYOKU_NAV_VI)\n // Place brightness on 'u' (K15) and 'i' (K16) positions of the Media layer\n #define MIRYOKU_LAYER_MEDIA \\n U_NA, U_DF(U_TAP), U_DF(U_EXTRA), U_DF(U_BASE), U_NA, U_NU, U_NU, U_NU, U_NU, U_NU, \\n met, alt, ctl, sft, U_NA, U_BRIGHT_DN, U_BRIGHT_UP, volu, nextsong, U_NU, \\n U_NA, ralt, U_DF(U_FUN), U_DF(U_MEDIA), U_NA, U_NU, U_NU, U_NU, U_NU, U_NU, \\n U_NP, U_NP, U_NA, U_NA, U_NA, stopcd, playpause, mute, U_NP, U_NP\n #else\n #define MIRYOKU_LAYER_MEDIA MIRYOKU_ALTERNATIVES_MEDIA\n #endif\n#endif\n\n",cpp,content +24,12139513,"src/custom_config.h",1542,1,"",cpp,content +25,12139513,"src/custom_config.h",90,0,"\n",cpp,content +26,12298380,"src/custom_config.h",90,1453,"",cpp,content +27,12298424,"src/custom_config.h",90,0,"// Display brightness controls for macOS (KMonad kext)\n// Adjust these if your KMonad version uses different names\n#define U_BRIGHT_DN display_brightness_dec\n#define U_BRIGHT_UP display_brightness_inc\n\n// Override Media layer to place brightness on 'u' and 'i' positions (top row, right-hand 1st and 2nd)\n#if !defined(MIRYOKU_LAYER_MEDIA)\n #if defined (MIRYOKU_NAV_INVERTEDT)\n #define MIRYOKU_LAYER_MEDIA MIRYOKU_ALTERNATIVES_MEDIA_INVERTEDT\n #elif defined (MIRYOKU_NAV_VI)\n // Place brightness on 'u' (K06) and 'i' (K07) positions of the Media layer\n #define MIRYOKU_LAYER_MEDIA \\n U_NA, U_DF(U_TAP), U_DF(U_EXTRA), U_DF(U_BASE), U_NA, U_NU, U_BRIGHT_DN, U_BRIGHT_UP, U_NU, U_NU, \\n met, alt, ctl, sft, U_NA, previoussong, vold, volu, nextsong, U_NU, \\n U_NA, ralt, U_DF(U_FUN), U_DF(U_MEDIA), U_NA, U_NU, U_NU, U_NU, U_NU, U_NU, \\n U_NP, U_NP, U_NA, U_NA, U_NA, stopcd, playpause, mute, U_NP, U_NP\n #else\n #define 
MIRYOKU_LAYER_MEDIA MIRYOKU_ALTERNATIVES_MEDIA\n #endif\n#endif\n\n",cpp,content +28,12298428,"src/custom_config.h",1542,1,"",cpp,content +29,12298428,"src/custom_config.h",90,0,"\n",cpp,content +30,12723380,"TERMINAL",0,0,"clang -E -P -x c++ -DMIRYOKU_ALPHAS_QWERTY -DMIRYOKU_NAV_VI -DMIRYOKU_CLIPBOARD_MAC -DMIRYOKU_KMONAD_OS_MAC miryoku_kmonad.kbd.cpp | sed -e ""s/U_QUOT/'/g"" -e 's/U_DQUO/""/g' -e 's/U_COMM/,/g' -e 's/U_LPRN/\\(/g' -e 's/U_RPRN/\\)/g' -e 's/U_PIPE/|/g' -e 's/[ ]*U_LF[ ]*/\n/g' -e 's/[ ]*U_TAB[ ]*/\t/g' > build/miryoku_kmonad.kbd",,terminal_command +31,12723432,"TERMINAL",0,0,"]633;C",,terminal_output +32,12723506,"TERMINAL",0,0,"% \r \r",,terminal_output +33,12784545,"src/custom_config.h",90,1453,"",cpp,content diff --git a/4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-67e49b73-4378-4d9b-aa07-eb22704d83ae1750992411736-2025_06_26-19.46.53.239/source.csv b/4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-67e49b73-4378-4d9b-aa07-eb22704d83ae1750992411736-2025_06_26-19.46.53.239/source.csv new file mode 100644 index 0000000000000000000000000000000000000000..9f5157fbacaa00da96335a8fbe348ef6194e0c24 --- /dev/null +++ b/4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-67e49b73-4378-4d9b-aa07-eb22704d83ae1750992411736-2025_06_26-19.46.53.239/source.csv @@ -0,0 +1,241 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +1,2,"train_tokenizer.py",0,0,"from dataclasses import dataclass\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = 
outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(entity=args.entity, project=args.project, group=""debug"", config=args)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer__\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n 
os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log :\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log({""loss"": loss, ""step"": step, **metrics})\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication. \n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +2,27,"tasks",0,0,"",Log,tab +3,28,"train_tokenizer.py",0,0,"",python,tab +4,59,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab +5,70,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"7:46:53 PM [info] Activating crowd-code\n7:46:53 PM [info] Recording started\n7:46:53 PM [info] Initializing git provider using file system watchers...\n7:46:53 PM [info] Git repository found\n7:46:53 PM [info] Git provider initialized successfully\n",Log,content +6,82,"extension-output-pdoom-org.crowd-code-#1-crowd-code",245,0,"7:46:53 PM [info] Initial git state: [object Object]\n",Log,content +7,6647,"train_tokenizer.py",0,0,"",python,tab +8,10063,"train_tokenizer.py",0,0,"Switched from branch 'fix-multiprocess-image-logging' to 'main'",python,git_branch_checkout +9,20323,"utils/dataloader.py",0,0,"from cgi import test\nimport functools\nimport jax\n\nimport tensorflow as tf\n\n\n# --- TensorFlow function for processing: slicing, normalization ---\ndef _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The 
width of each frame.\n image_c: The number of channels in each frame.\n Returns:\n A TensorFlow tensor representing the processed video sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef get_dataloader(\n tfrecord_paths: list[str], # List of TFRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 1000,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n cache_processed_data: bool = True,\n seed: int = 42,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert global_batch_size % num_processes == 0, ""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n dataset = tf.data.TFRecordDataset(\n tfrecord_paths, num_parallel_reads=tf.data.AUTOTUNE\n )\n \n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n\n # (f.srambical) NOTE: For TFRecords, it's often good to have a large shuffle buffer.\n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, reshuffle_each_iteration=True\n )\n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.cache() if cache_processed_data else dataset\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n )\n dataset = dataset.map(tf_process_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.repeat(None)\n dataset = dataset.batch(per_process_batch_size, drop_remainder=True)\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset.as_numpy_iterator()\n",python,tab +10,22376,"train_tokenizer.py",0,0,"from dataclasses import dataclass\nimport os\nimport 
time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n 
per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(entity=args.entity, project=args.project, group=""debug"", config=args)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer__\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log :\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log({""loss"": loss, ""step"": step, **metrics})\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication. 
\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +11,24879,"utils/dataloader.py",0,0,"",python,tab +12,26164,"utils/dataloader.py",900,0,"",python,selection_command +13,26410,"utils/dataloader.py",1591,0,"",python,selection_command +14,26446,"utils/dataloader.py",2494,0,"",python,selection_command +15,26473,"utils/dataloader.py",3222,0,"",python,selection_command +16,26506,"utils/dataloader.py",4041,0,"",python,selection_command +17,26540,"utils/dataloader.py",4367,0,"",python,selection_command +18,27279,"utils/dataloader.py",0,0,"",python,selection_command +19,27700,"utils/dataloader.py",0,21,"",python,content +20,28587,"train_tokenizer.py",0,0,"",python,tab +21,30438,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"7:46:53 PM [info] Activating crowd-code\n7:46:53 PM [info] Recording started\n7:46:53 PM [info] Initializing git provider using file system watchers...\n7:46:53 PM [info] Git repository found\n7:46:53 PM [info] Git provider initialized successfully\n7:46:53 PM [info] Initial git state: [object Object]\n7:47:03 PM [info] Branch checkout detected: fix-multiprocess-image-logging -> main\n7:47:03 PM [info] Recording git checkout: Switched from branch 'fix-multiprocess-image-logging' to 'main'\n7:47:03 PM [info] Resetting file cache due to branch checkout\n",Log,tab +22,31032,"train_tokenizer.py",0,0,"",python,tab +23,34437,"train_tokenizer.py",0,7825,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, 
state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args\n )\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = 
Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer__\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n start_time = time.time()\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n ""step_time_ms"": elapsed_time,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n",python,content +24,59345,"train_tokenizer.py",7447,0,"",python,selection_mouse +25,60087,"train_tokenizer.py",0,0,"Switched from branch 'main' to 'proper-seeding-dataloader'",python,git_branch_checkout +26,82111,"train_tokenizer.py",532,0,"",python,selection_command +27,82341,"train_tokenizer.py",6425,0,"",python,selection_command +28,83082,"train_tokenizer.py",6458,0,"",python,selection_command +29,83330,"train_tokenizer.py",6517,0,"",python,selection_command +30,83364,"train_tokenizer.py",6582,0,"",python,selection_command +31,83395,"train_tokenizer.py",6606,0,"",python,selection_command +32,83424,"train_tokenizer.py",6628,0,"",python,selection_command +33,83582,"train_tokenizer.py",6653,0,"",python,selection_command +34,83834,"train_tokenizer.py",6657,0,"\n ",python,content +35,84049,"train_tokenizer.py",6666,0,"s",python,content +36,84051,"train_tokenizer.py",6667,0,"",python,selection_keyboard +37,84131,"train_tokenizer.py",6667,0,"e",python,content +38,84132,"train_tokenizer.py",6668,0,"",python,selection_keyboard +39,84292,"train_tokenizer.py",6668,0,"e",python,content +40,84294,"train_tokenizer.py",6669,0,"",python,selection_keyboard +41,84404,"train_tokenizer.py",6669,0,"d",python,content 
+42,84405,"train_tokenizer.py",6670,0,"",python,selection_keyboard +43,84562,"train_tokenizer.py",6670,0,"=",python,content +44,84565,"train_tokenizer.py",6671,0,"",python,selection_keyboard +45,84827,"train_tokenizer.py",6671,0,"s",python,content +46,84830,"train_tokenizer.py",6672,0,"",python,selection_keyboard +47,84894,"train_tokenizer.py",6672,0,"e",python,content +48,84896,"train_tokenizer.py",6673,0,"",python,selection_keyboard +49,85077,"train_tokenizer.py",6673,0,"e",python,content +50,85079,"train_tokenizer.py",6674,0,"",python,selection_keyboard +51,85148,"train_tokenizer.py",6674,0,"d",python,content +52,85151,"train_tokenizer.py",6675,0,"",python,selection_keyboard +53,85506,"train_tokenizer.py",6674,1,"",python,content +54,85675,"train_tokenizer.py",6673,1,"",python,content +55,85813,"train_tokenizer.py",6672,1,"",python,content +56,85954,"train_tokenizer.py",6671,1,"",python,content +57,86027,"train_tokenizer.py",6671,0,"a",python,content +58,86028,"train_tokenizer.py",6672,0,"",python,selection_keyboard +59,86496,"train_tokenizer.py",6672,0,"r",python,content +60,86501,"train_tokenizer.py",6673,0,"",python,selection_keyboard +61,86659,"train_tokenizer.py",6673,0,"g",python,content +62,86662,"train_tokenizer.py",6674,0,"",python,selection_keyboard +63,86712,"train_tokenizer.py",6674,0,"s",python,content +64,86718,"train_tokenizer.py",6675,0,"",python,selection_keyboard +65,86790,"train_tokenizer.py",6675,0,".",python,content +66,86795,"train_tokenizer.py",6676,0,"",python,selection_keyboard +67,86928,"train_tokenizer.py",6676,0,"s",python,content +68,86931,"train_tokenizer.py",6677,0,"",python,selection_keyboard +69,87007,"train_tokenizer.py",6677,0,"e",python,content +70,87013,"train_tokenizer.py",6678,0,"",python,selection_keyboard +71,87196,"train_tokenizer.py",6678,0,"e",python,content +72,87202,"train_tokenizer.py",6679,0,"",python,selection_keyboard +73,87295,"train_tokenizer.py",6679,0,"d",python,content +74,87300,"train_tokenizer.py",6680,0,"",python,selection_keyboard +75,87429,"train_tokenizer.py",6680,0,",",python,content +76,87432,"train_tokenizer.py",6681,0,"",python,selection_keyboard +77,87560,"train_tokenizer.py",6680,0,"",python,selection_command +78,88768,"train_tokenizer.py",6680,1,"",python,content +79,88775,"train_tokenizer.py",6679,0,"",python,selection_command +80,89370,"train_tokenizer.py",6680,0,"",python,selection_command +81,89528,"train_tokenizer.py",6680,0,",",python,content +82,89530,"train_tokenizer.py",6681,0,"",python,selection_keyboard +83,89641,"train_tokenizer.py",6680,0,"",python,selection_command +84,96963,"utils/dataloader.py",0,0,"import functools\nimport jax\n\nimport tensorflow as tf\n\n# reserve GPU memory for JAX only if tensorflow is built with GPU support\ntf.config.experimental.set_visible_devices([], ""GPU"")\n\n\n# --- TensorFlow function for processing: slicing, normalization ---\ndef _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The width of each frame.\n image_c: The number of channels in each frame.\n Returns:\n A TensorFlow tensor representing the processed video 
sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef get_dataloader(\n tfrecord_paths: list[str], # List of TFRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 1000,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n seed: int = 42,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert (\n global_batch_size % num_processes == 0\n ), ""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n dataset = tf.data.TFRecordDataset(\n tfrecord_paths, num_parallel_reads=tf.data.AUTOTUNE\n )\n\n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n\n # (f.srambical) NOTE: For TFRecords, it's often good to have a large shuffle buffer.\n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, reshuffle_each_iteration=True\n )\n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n )\n dataset = dataset.map(tf_process_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.repeat(None)\n dataset = dataset.batch(per_process_batch_size, drop_remainder=True)\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset.as_numpy_iterator()\n",python,tab +85,101346,"utils/dataloader.py",0,0,"",python,selection_command +86,103097,"utils/dataloader.py",258,0,"",python,selection_command +87,103665,"utils/dataloader.py",329,0,"",python,selection_command +88,103932,"utils/dataloader.py",328,0,"",python,selection_command +89,104129,"utils/dataloader.py",327,0,"",python,selection_command 
+90,104547,"utils/dataloader.py",327,0," ",python,content +91,104551,"utils/dataloader.py",328,0,"",python,selection_keyboard +92,104823,"utils/dataloader.py",327,1,"",python,content +93,105013,"utils/dataloader.py",327,0,",",python,content +94,105015,"utils/dataloader.py",328,0,"",python,selection_keyboard +95,105094,"utils/dataloader.py",328,0," ",python,content +96,105095,"utils/dataloader.py",329,0,"",python,selection_keyboard +97,105158,"utils/dataloader.py",329,0,"s",python,content +98,105161,"utils/dataloader.py",330,0,"",python,selection_keyboard +99,105221,"utils/dataloader.py",330,0,"e",python,content +100,105226,"utils/dataloader.py",331,0,"",python,selection_keyboard +101,105395,"utils/dataloader.py",331,0,"e",python,content +102,105400,"utils/dataloader.py",332,0,"",python,selection_keyboard +103,105472,"utils/dataloader.py",332,0,"d",python,content +104,105477,"utils/dataloader.py",333,0,"",python,selection_keyboard +105,107401,"utils/dataloader.py",332,0,"",python,selection_command +106,107747,"utils/dataloader.py",342,0,"",python,selection_command +107,107995,"utils/dataloader.py",392,0,"",python,selection_command +108,108025,"utils/dataloader.py",465,0,"",python,selection_command +109,108057,"utils/dataloader.py",475,0,"",python,selection_command +110,108090,"utils/dataloader.py",554,0,"",python,selection_command +111,108122,"utils/dataloader.py",635,0,"",python,selection_command +112,108156,"utils/dataloader.py",712,0,"",python,selection_command +113,108190,"utils/dataloader.py",780,0,"",python,selection_command +114,108226,"utils/dataloader.py",823,0,"",python,selection_command +115,108532,"utils/dataloader.py",865,0,"",python,selection_command +116,108694,"utils/dataloader.py",920,0,"",python,selection_command +117,108845,"utils/dataloader.py",933,0,"",python,selection_command +118,108993,"utils/dataloader.py",1004,0,"",python,selection_command +119,109161,"utils/dataloader.py",933,0,"",python,selection_command +120,109317,"utils/dataloader.py",920,0,"",python,selection_command +121,109885,"utils/dataloader.py",921,0,"\n ",python,content +122,110783,"utils/dataloader.py",930,0,"seed: The seed for the random number generator.",python,content +123,111064,"utils/dataloader.py",976,0,"",python,selection_command +124,117529,"utils/dataloader.py",989,0,"",python,selection_command +125,117778,"utils/dataloader.py",1045,0,"",python,selection_command +126,117811,"utils/dataloader.py",1112,0,"",python,selection_command +127,117840,"utils/dataloader.py",1164,0,"",python,selection_command +128,117873,"utils/dataloader.py",1172,0,"",python,selection_command +129,117907,"utils/dataloader.py",1226,0,"",python,selection_command +130,117943,"utils/dataloader.py",1228,0,"",python,selection_command +131,117974,"utils/dataloader.py",1277,0,"",python,selection_command +132,118006,"utils/dataloader.py",1279,0,"",python,selection_command +133,118041,"utils/dataloader.py",1313,0,"",python,selection_command +134,118212,"utils/dataloader.py",1369,0,"",python,selection_command +135,118426,"utils/dataloader.py",1388,0,"",python,selection_command +136,118612,"utils/dataloader.py",1390,0,"",python,selection_command +137,118761,"utils/dataloader.py",1388,0,"",python,selection_command +138,118943,"utils/dataloader.py",1369,0,"",python,selection_command +139,119216,"utils/dataloader.py",1383,0,"",python,selection_command +140,119396,"utils/dataloader.py",1383,0,",",python,content +141,119402,"utils/dataloader.py",1384,0,"",python,selection_keyboard +142,119527,"utils/dataloader.py",1384,0," 
",python,content +143,119531,"utils/dataloader.py",1385,0,"",python,selection_keyboard +144,119718,"utils/dataloader.py",1385,0,"seed=seed",python,content +145,119947,"utils/dataloader.py",1393,0,"",python,selection_command +146,139247,"utils/dataloader.py",1711,0,"",python,selection_command +147,139494,"utils/dataloader.py",1810,0,"",python,selection_command +148,139831,"utils/dataloader.py",2286,0,"",python,selection_command +149,140416,"utils/dataloader.py",2515,0,"",python,selection_command +150,140813,"utils/dataloader.py",2703,0,"",python,selection_command +151,141584,"utils/dataloader.py",3869,0,"",python,selection_command +152,148097,"utils/dataloader.py",3923,0,"",python,selection_command +153,148260,"utils/dataloader.py",3953,0,"",python,selection_command +154,151496,"utils/dataloader.py",3923,0,"",python,selection_command +155,174924,"utils/dataloader.py",3953,0,"",python,selection_command +156,175063,"utils/dataloader.py",4000,0,"",python,selection_command +157,175208,"utils/dataloader.py",4029,0,"",python,selection_command +158,175345,"utils/dataloader.py",4067,0,"",python,selection_command +159,175627,"utils/dataloader.py",4029,0,"",python,selection_command +160,175878,"utils/dataloader.py",4000,0,"",python,selection_command +161,175911,"utils/dataloader.py",3953,0,"",python,selection_command +162,176019,"utils/dataloader.py",3923,0,"",python,selection_command +163,176192,"utils/dataloader.py",3869,0,"",python,selection_command +164,182197,"utils/dataloader.py",3923,0,"",python,selection_command +165,182443,"utils/dataloader.py",3953,0,"",python,selection_command +166,182474,"utils/dataloader.py",4000,0,"",python,selection_command +167,182506,"utils/dataloader.py",4029,0,"",python,selection_command +168,182542,"utils/dataloader.py",4067,0,"",python,selection_command +169,182575,"utils/dataloader.py",4092,0,"",python,selection_command +170,182609,"utils/dataloader.py",4117,0,"",python,selection_command +171,182642,"utils/dataloader.py",4142,0,"",python,selection_command +172,182675,"utils/dataloader.py",4167,0,"",python,selection_command +173,182708,"utils/dataloader.py",4173,0,"",python,selection_command +174,182742,"utils/dataloader.py",4203,0,"",python,selection_command +175,182905,"utils/dataloader.py",4255,0,"",python,selection_command +176,183278,"utils/dataloader.py",4203,0,"",python,selection_command +177,184182,"utils/dataloader.py",4173,0,"",python,selection_command +178,184437,"utils/dataloader.py",4167,0,"",python,selection_command +179,184464,"utils/dataloader.py",4142,0,"",python,selection_command +180,184630,"utils/dataloader.py",4167,0,"",python,selection_command +181,184793,"utils/dataloader.py",4173,0,"",python,selection_command +182,185078,"utils/dataloader.py",4167,0,"",python,selection_command +183,185298,"utils/dataloader.py",4168,0,"",python,selection_command +184,185462,"utils/dataloader.py",4168,0,"\n ",python,content +185,186233,"utils/dataloader.py",4177,0,"seed=seed,",python,content +186,186506,"utils/dataloader.py",4186,0,"",python,selection_command +187,198474,"utils/dataloader.py",4161,0,"",python,selection_command +188,198726,"utils/dataloader.py",4136,0,"",python,selection_command +189,198760,"utils/dataloader.py",4111,0,"",python,selection_command +190,198869,"utils/dataloader.py",4086,0,"",python,selection_command +191,199047,"utils/dataloader.py",4057,0,"",python,selection_command +192,200749,"utils/dataloader.py",4057,0,"",python,selection_command +193,202978,"utils/dataloader.py",276,0,"",python,selection_command 
+194,203226,"utils/dataloader.py",277,0,"",python,selection_command +195,203260,"utils/dataloader.py",291,0,"",python,selection_command +196,203290,"utils/dataloader.py",293,0,"",python,selection_command +197,203322,"utils/dataloader.py",300,0,"",python,selection_command +198,203357,"utils/dataloader.py",302,0,"",python,selection_command +199,203391,"utils/dataloader.py",309,0,"",python,selection_command +200,203424,"utils/dataloader.py",311,0,"",python,selection_command +201,203706,"utils/dataloader.py",318,0,"",python,selection_command +202,203900,"utils/dataloader.py",320,0,"",python,selection_command +203,204081,"utils/dataloader.py",327,0,"",python,selection_command +204,204709,"utils/dataloader.py",329,0,"",python,selection_command +205,205294,"utils/dataloader.py",930,0,"",python,selection_command +206,205878,"utils/dataloader.py",940,0,"",python,selection_command +207,206129,"utils/dataloader.py",1385,0,"",python,selection_command +208,206376,"utils/dataloader.py",1390,0,"",python,selection_command +209,206845,"utils/dataloader.py",2842,0,"",python,selection_command +210,235602,"utils/dataloader.py",0,0,"",python,tab +211,235615,"utils/dataloader.py",253,0,"",python,selection_command +212,236452,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, 
recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args\n )\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer__\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in 
os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n seed=args.seed,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n start_time = time.time()\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n ""step_time_ms"": elapsed_time,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +213,236464,"train_tokenizer.py",6658,0,"",python,selection_command +214,239808,"utils/dataloader.py",0,0,"",python,tab +215,239819,"utils/dataloader.py",253,0,"",python,selection_command +216,288398,"utils/dataloader.py",0,0,"",python,tab +217,290847,"utils/dataloader.py",253,3935,"def _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The width of each frame.\n image_c: The number of channels in each frame.\n Returns:\n A TensorFlow tensor representing the processed video sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = 
tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef get_dataloader(\n tfrecord_paths: list[str], # List of TFRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 1000,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n seed: int = 42,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert (\n global_batch_size % num_processes == 0\n ), ""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n dataset = tf.data.TFRecordDataset(\n tfrecord_paths, num_parallel_reads=tf.data.AUTOTUNE\n )\n\n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n\n # (f.srambical) NOTE: For TFRecords, it's often good to have a large shuffle buffer.\n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, reshuffle_each_iteration=True\n )\n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n",python,content +218,295225,"utils/dataloader.py",0,0,"Switched from branch 'proper-seeding-dataloader' to 'main'",python,git_branch_checkout +219,303254,"utils/dataloader.py",2716,0,"",python,selection_mouse +220,303260,"utils/dataloader.py",2715,0,"",python,selection_command +221,305241,"utils/dataloader.py",0,0,"Switched from branch 'main' to 'dataloader-reproducibility-test'",python,git_branch_checkout +222,380349,"tests/data/generate_dummy_data.py",0,0,"",python,tab +223,389063,"tests/data/generate_dummy_data.py",0,0,"import tyro\nimport tensorflow as tf\nimport numpy as np\nfrom pathlib import Path\nfrom dataclasses import dataclass\n\n@dataclass\nclass Args:\n data_dir: str = ""data_tfrecords/dummy""\n num_episodes: int = 5\n episode_length: int = 16\n\n\n\ndef _bytes_feature(value):\n """"""Returns a bytes_list from a string / byte.""""""\n if isinstance(value, type(tf.constant(0))):\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n return 
tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _int64_feature(value):\n """"""Returns an int64_list from a bool / enum / int / uint.""""""\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef create_tfrecord_example(episode_numpy_array):\n """"""Creates a TFRecord example from a numpy array video.""""""\n feature = {\n ""height"": _int64_feature(episode_numpy_array.shape[1]),\n ""width"": _int64_feature(episode_numpy_array.shape[2]),\n ""channels"": _int64_feature(episode_numpy_array.shape[3]),\n ""sequence_length"": _int64_feature(episode_numpy_array.shape[0]),\n ""raw_video"": _bytes_feature(episode_numpy_array.tobytes()),\n }\n return tf.train.Example(features=tf.train.Features(feature=feature))\n\n\ndef generate_dummy_tfrecord(\n output_path, num_episodes=5, episode_length=16, height=90, width=160, channels=3\n):\n """"""Generates a dummy TFRecord file with synthetic video data.""""""\n print(f""Generating dummy TFRecord file at {output_path}"")\n with tf.io.TFRecordWriter(str(output_path)) as writer:\n for i in range(num_episodes):\n np.random.seed(i) # Seed per episode for some variation, but deterministic\n dummy_video = np.random.randint(\n 0, 256, size=(episode_length, height, width, channels), dtype=np.uint8\n )\n tf_example = create_tfrecord_example(dummy_video)\n writer.write(tf_example.SerializeToString())\n print(""Dummy TFRecord generation complete."")\n\n\nif __name__ == ""__main__"":\n args = tyro.cli(Args)\n temp_dir = Path(args.data_dir)\n temp_dir.mkdir(parents=True, exist_ok=True)\n dummy_file = temp_dir / ""dummy_test_shard.tfrecord""\n generate_dummy_tfrecord(dummy_file, num_episodes=args.num_episodes, episode_length=args.episode_length)\n print(f""Generated dummy file: {dummy_file}"")",python,content +224,389406,"tests/data/generate_dummy_data.py",2337,0,"",python,selection_command +225,390350,"tests/data/generate_dummy_data.py",0,0,"",python,selection_command +226,400299,"tests/test_dataloader.py",0,0,"",python,tab +227,406857,"tests/test_dataloader.py",0,0,"import unittest\nimport numpy as np\nimport tensorflow as tf\nimport tempfile\nfrom pathlib import Path\n\nfrom utils.dataloader import get_dataloader\nfrom tests.data.generate_dummy_tfrecord import generate_dummy_tfrecord\n\n\nclass DataloaderReproducibilityTest(unittest.TestCase):\n\n def setUp(self):\n super().setUp()\n self._temp_dir_manager = tempfile.TemporaryDirectory()\n self.test_data_dir = Path(self._temp_dir_manager.name)\n self.addCleanup(self._temp_dir_manager.cleanup)\n self.dummy_tfrecord_path = self.test_data_dir / ""dummy_test_shard.tfrecord""\n\n self.num_episodes = 5\n self.episode_length = 16\n self.image_height = 64\n self.image_width = 64\n self.image_channels = 3\n generate_dummy_tfrecord(\n self.dummy_tfrecord_path,\n num_episodes=self.num_episodes,\n episode_length=self.episode_length,\n height=self.image_height,\n width=self.image_width,\n channels=self.image_channels,\n )\n self.tfrecord_files = [str(self.dummy_tfrecord_path)]\n\n self.fixed_seed = 42\n\n def test_dataloader_yields_reproducible_batches(self):\n seq_len = 8\n batch_size = 2\n\n dataloader1 = get_dataloader(\n self.tfrecord_files,\n seq_len,\n batch_size,\n self.image_height,\n self.image_width,\n self.image_channels,\n seed=self.fixed_seed,\n )\n batches1 = [next(dataloader1) for _ in range(3)]\n\n dataloader2 = get_dataloader(\n self.tfrecord_files,\n seq_len,\n batch_size,\n self.image_height,\n self.image_width,\n self.image_channels,\n seed=self.fixed_seed,\n )\n 
batches2 = [next(dataloader2) for _ in range(3)]\n\n for i, (b1, b2) in enumerate(zip(batches1, batches2)):\n np.testing.assert_array_equal(b1, b2, err_msg=f""Batch {i} is not reproducible"") # type: ignore\n\n\nif __name__ == ""__main__"":\n unittest.main()",python,content +228,407175,"tests/test_dataloader.py",2070,0,"",python,selection_command +229,407617,"tests/test_dataloader.py",0,0,"",python,selection_command +230,410557,"tests/test_dataloader.py",16,0,"",python,selection_command +231,410809,"tests/test_dataloader.py",35,0,"",python,selection_command +232,410841,"tests/test_dataloader.py",59,0,"",python,selection_command +233,410873,"tests/test_dataloader.py",75,0,"",python,selection_command +234,410907,"tests/test_dataloader.py",100,0,"",python,selection_command +235,410940,"tests/test_dataloader.py",101,0,"",python,selection_command +236,411050,"tests/test_dataloader.py",145,0,"",python,selection_command +237,411208,"tests/test_dataloader.py",216,0,"",python,selection_command +238,411358,"tests/test_dataloader.py",217,0,"",python,selection_command +239,413643,"tests/data/generate_dummy_data.py",0,0,"",python,tab +240,418693,"tests/test_dataloader.py",0,0,"",python,tab diff --git a/4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-88e23d98-00ad-4d5b-8d4d-1f239e211eb71763045757922-2025_11_13-15.56.09.849/source.csv b/4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-88e23d98-00ad-4d5b-8d4d-1f239e211eb71763045757922-2025_11_13-15.56.09.849/source.csv new file mode 100644 index 0000000000000000000000000000000000000000..133d779223573f58ade40c530982949c38e57ea1 --- /dev/null +++ b/4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-88e23d98-00ad-4d5b-8d4d-1f239e211eb71763045757922-2025_11_13-15.56.09.849/source.csv @@ -0,0 +1,17 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +1,3,"crowd-pilot/crowd-pilot/serialization_utils.py",0,0,"#!/usr/bin/env python3\n""""""\nCommon utilities for dataset serialization scripts.\n""""""\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import List, Optional, Tuple, Dict\n\nimport difflib\nimport re\nimport pandas as pd\nfrom datasets import Dataset, load_dataset\n\n\n_ANSI_CSI_RE = re.compile(r""\x1b\[[0-9;?]*[ -/]*[@-~]"")\n_ANSI_OSC_TERMINATED_RE = re.compile(r""\x1b\][\s\S]*?(?:\x07|\x1b\\)"")\n_ANSI_OSC_LINE_FALLBACK_RE = re.compile(r""\x1b\][^\n]*$"")\n_BRACKETED_PASTE_ENABLE = ""\x1b[?2004h""\n_BRACKETED_PASTE_DISABLE = ""\x1b[?2004l""\n_OSC_633 = ""\x1b]633;""\n_OSC_0 = ""\x1b]0;""\n\n\n@dataclass\nclass SerializeConfig:\n output_dir: str\n shard_size: int\n target_chars: int\n overlap_chars: int\n min_session_chars: int\n max_docs: Optional[int]\n long_pause_threshold_ms: int\n csv_root: Optional[str]\n val_ratio: float\n arrayrecord_group_size: Optional[int] = None\n\n\ndef _clean_text(text: str) -> str:\n # Normalize line endings and strip trailing spaces; preserve tabs/newlines.\n return text.replace(""\r\n"", ""\n"").replace(""\r"", ""\n"").rstrip()\n\n\ndef _fenced_block(path: str, language: Optional[str], content: str) -> str:\n lang = (language or """").lower()\n return f""```{lang}\n{content}\n```\n""\n\n\ndef _apply_change(content: str, offset: int, length: int, new_text: str) -> str:\n # Mirrors crowd_code_player.replay_file.apply_change\n base = str(content)\n text = str(new_text) if pd.notna(new_text) else """"\n text = text.replace(""\\n"", ""\n"").replace(""\\r"", 
""\r"")\n if offset > len(base):\n base = base + ("" "" * (offset - len(base)))\n return base[:offset] + text + base[offset + length:]\n\n\ndef _apply_backspaces(text: str) -> str:\n out: List[str] = []\n for ch in text:\n if ch == ""\b"": # \x08\n if out:\n out.pop()\n else:\n out.append(ch)\n return """".join(out)\n\n\ndef _normalize_terminal_output(raw: str) -> str:\n """"""\n Normalize PTY/terminal output for training:\n - Apply backspaces (\x08)\n - Strip OSC (window title/shell integration) first, keeping BEL/ST terminators intact\n - Resolve carriage returns (\r) by keeping the last rewrite per line\n - Strip CSI (coloring etc.)\n - Finally drop any remaining BEL (\x07)\n """"""\n if not raw:\n return raw\n s = _apply_backspaces(raw)\n # Remove OSC sequences that are properly terminated (BEL or ST)\n s = _ANSI_OSC_TERMINATED_RE.sub("""", s)\n # Fallback: drop any unterminated OSC up to end-of-line only\n s = ""\n"".join(_ANSI_OSC_LINE_FALLBACK_RE.sub("""", line) for line in s.split(""\n""))\n # Resolve carriage returns per line:\n # - If there are multiple rewrites, keep the last non-empty chunk\n # - If it's CRLF (ending with '\r' before '\n'), keep the content before '\r'\n resolved_lines: List[str] = []\n for seg in s.split(""\n""):\n parts = seg.split(""\r"")\n chosen = """"\n # pick last non-empty part if available; else last part\n for p in reversed(parts):\n if p != """":\n chosen = p\n break\n if chosen == """" and parts:\n chosen = parts[-1]\n resolved_lines.append(chosen)\n s = ""\n"".join(resolved_lines)\n # Strip ANSI escape sequences\n s = _ANSI_CSI_RE.sub("""", s)\n # Remove any remaining BEL beeps\n s = s.replace(""\x07"", """")\n return s\n\n\ndef _line_numbered_output(content: str, start_line: Optional[int] = None, end_line: Optional[int] = None) -> str:\n # TODO (f.srambical): check whether this corresponds **exactly** to the output of cat -n {file_path} | sed -n '{vstart},{vend}p'\n lines = content.splitlines()\n total = len(lines)\n if total == 0:\n return """"\n s = 1 if start_line is None else max(1, min(start_line, total))\n e = total if end_line is None else max(1, min(end_line, total))\n if e < s:\n # FIXME (f.srambical): If this does not happen, remove the condition\n raise ValueError(""This should never happen!"")\n e = s\n buf: List[str] = []\n for idx in range(s, e + 1):\n buf.append(f""{idx:6}\t{lines[idx - 1]}"")\n return ""\n"".join(buf)\n\n\ndef _compute_viewport(total_lines: int, center_line: int, radius: int) -> Tuple[int, int]:\n if total_lines <= 0:\n return (1, 0)\n start = max(1, center_line - radius)\n end = min(total_lines, center_line + radius)\n if end < start:\n # FIXME (f.srambical): If this does not happen, remove the condition\n raise ValueError(""This should never happen!"")\n return (start, end)\n\n\ndef _escape_single_quotes_for_sed(text: str) -> str:\n # Close quote, add an escaped single quote, reopen quote: '""'""'\n return text.replace(""'"", ""'\""'\""'"")\n\n\ndef _compute_changed_block_lines(before: str, after: str) -> Tuple[int, int, List[str]]:\n """"""\n Return 1-based start and end line numbers in 'before' that should be replaced,\n and the replacement lines from 'after'.\n For pure deletions, the replacement list may be empty.\n """"""\n before_lines = before.splitlines()\n after_lines = after.splitlines()\n sm = difflib.SequenceMatcher(a=before_lines, b=after_lines, autojunk=False)\n opcodes = [op for op in sm.get_opcodes() if op[0] != ""equal""]\n if not opcodes:\n # FIXME (f.srambical): clean this up\n raise 
ValueError(""No diff opcodes found for content change"")\n # No visible change; choose a safe single-line replace at end of file\n start_line = max(1, len(before_lines))\n end_line = start_line\n repl = after_lines[start_line - 1:start_line] if after_lines else [""""]\n return (start_line, end_line, repl)\n\n first = opcodes[0]\n last = opcodes[-1]\n # i1/i2 refer to 'before' indices, j1/j2 to 'after'\n start_line = (first[1] + 1) if (first[1] + 1) > 0 else 1\n end_line = last[2] # no increment since we go from 'exclusive' to 'inclusive' indexing\n replacement_lines = after_lines[first[3]:last[4]]\n return (start_line, end_line, replacement_lines)\n\n\ndef _session_to_transcript(\n df: pd.DataFrame,\n long_pause_threshold_ms: int,\n) -> str:\n\n file_states: Dict[str, str] = {}\n terminal_state: str = """"\n per_file_event_counts: Dict[str, int] = {}\n per_file_cursor_positions: Dict[str, Tuple[int, int]] = {} # (offset, length) for each file\n last_time_ms: Optional[int] = None\n\n parts: List[str] = []\n\n for i in range(len(df)):\n row = df.iloc[i]\n file_path: str = row[""File""]\n event_time: int = row[""Time""]\n language: Optional[str] = row[""Language""]\n\n # Long pause detection\n if last_time_ms is not None:\n delta = event_time - last_time_ms\n if delta > long_pause_threshold_ms:\n # TODO (f.srambical): think about whether we want to emit this as an observation or not\n parts.append(f"""")\n last_time_ms = event_time\n\n event_type = row[""Type""]\n\n match event_type:\n case ""tab"":\n # File switch event\n parts.append(f"""")\n \n # If Text is present, this is the first time opening the file\n # and the entire file content is captured\n text = row[""Text""]\n if pd.notna(text):\n file_content = str(text).replace(""\\n"", ""\n"").replace(""\\r"", ""\r"")\n file_states[file_path] = file_content\n parts.append(f""// observation: file={file_path}"")\n parts.append(_fenced_block(file_path, language, _clean_text(file_content)))\n\n case ""terminal_command"":\n # Terminal command execution\n command = row[""Text""]\n command_str = str(command).replace(""\\n"", ""\n"").replace(""\\r"", ""\r"")\n parts.append(f"""")\n parts.append(_fenced_block(file_path, ""bash"", _clean_text(command_str)))\n\n case ""terminal_output"":\n # Terminal output capture\n output = row[""Text""]\n output_str = str(output).replace(""\\n"", ""\n"").replace(""\\r"", ""\r"")\n parts.append(f"""")\n parts.append(_fenced_block(file_path, None, _clean_text(output_str)))\n\n case ""terminal_focus"":\n # Terminal focus event\n parts.append(f"""")\n\n case ""git_branch_checkout"":\n # Git branch checkout event\n branch_info = row[""Text""]\n branch_str = str(branch_info).replace(""\\n"", ""\n"").replace(""\\r"", ""\r"")\n parts.append(f"""")\n parts.append(f""// git: {_clean_text(branch_str)}"")\n\n case ""selection_command"" | ""selection_mouse"" | ""selection_keyboard"":\n # Handle cursor movement\n offset = row[""RangeOffset""]\n length = row[""RangeLength""]\n old_cursor = per_file_cursor_positions.get(file_path, (0, 0))\n new_cursor = (offset, length)\n per_file_cursor_positions[file_path] = new_cursor\n \n # Emit cursor movement observation if position changed\n if old_cursor != new_cursor:\n parts.append(f"""")\n\n case ""content"":\n # Handle file edit events\n offset = row[""RangeOffset""]\n length = row[""RangeLength""]\n new_text = row[""Text""]\n new_text_str = str(new_text) if pd.notna(new_text) else """"\n\n operation = ""noop""\n if length == 0 and new_text_str:\n operation = ""insert""\n elif length > 
0 and not new_text_str:\n operation = ""delete""\n elif length > 0 and new_text_str:\n operation = ""replace""\n\n parts.append(f"""")\n\n if new_text_str and (operation == ""insert"" or operation == ""replace""):\n parts.append(_fenced_block(file_path, language, _clean_text(new_text_str)))\n\n before = file_states.get(file_path, """")\n after = _apply_change(before, offset, length, new_text)\n file_states[file_path] = after\n per_file_event_counts[file_path] = per_file_event_counts.get(file_path, 0) + 1\n\n # Update cursor position after edit (cursor moves to end of inserted/replaced text)\n per_file_cursor_positions[file_path] = (offset + len(new_text_str), 0)\n\n case _:\n raise ValueError(f""Unknown event type: {event_type}"")\n\n return ""\n"".join(parts).strip()\n\n\ndef session_to_bash_formatted_transcript(\n df: pd.DataFrame,\n viewport_radius: int = 10,\n normalize_terminal_output: bool = True,\n) -> str:\n r""""""\n Serialize a session to a bash-like transcript comprised of:\n - Commands (bash fenced blocks): cat -n, sed -i 'S,Ec\...' && cat -n | sed -n 'VSTART,VENDp'\n - Outputs (...) that reflect the file state after each action\n Tracks per-file state and a per-file viewport. Viewport only shifts when selection moves out of bounds\n or when first initialized.\n """"""\n file_states: Dict[str, str] = {}\n per_file_viewport: Dict[str, Optional[Tuple[int, int]]] = {}\n\n parts: List[str] = []\n terminal_output_buffer: List[str] = []\n pending_edits_before: Dict[str, Optional[str]] = {}\n\n def _flush_terminal_output_buffer() -> None:\n if not terminal_output_buffer:\n return\n aggregated = """".join(terminal_output_buffer)\n out = aggregated\n if normalize_terminal_output:\n out = _normalize_terminal_output(out)\n cleaned = _clean_text(out)\n if cleaned.strip():\n parts.append(f""\n{cleaned}\n"")\n terminal_output_buffer.clear()\n\n def _flush_pending_edit_for_file(target_file: str) -> None:\n before_snapshot = pending_edits_before.get(target_file)\n if before_snapshot is None:\n return\n after_state = file_states.get(target_file, """")\n try:\n start_line, end_line, repl_lines = _compute_changed_block_lines(before_snapshot, after_state)\n except ValueError:\n pending_edits_before[target_file] = None\n return\n before_total_lines = len(before_snapshot.splitlines())\n if end_line < start_line:\n escaped_lines = [_escape_single_quotes_for_sed(line) for line in repl_lines]\n sed_payload = ""\n"".join(escaped_lines)\n if start_line <= max(1, before_total_lines):\n sed_cmd = f""sed -i '{start_line}i\\\n{sed_payload}' {target_file}""\n else:\n sed_cmd = f""sed -i '$a\\\n{sed_payload}' {target_file}""\n elif not repl_lines:\n sed_cmd = f""sed -i '{start_line},{end_line}d' {target_file}""\n else:\n escaped_lines = [_escape_single_quotes_for_sed(line) for line in repl_lines]\n sed_payload = ""\n"".join(escaped_lines)\n sed_cmd = f""sed -i '{start_line},{end_line}c\\\n{sed_payload}' {target_file}""\n total_lines = len(after_state.splitlines())\n center = (start_line + end_line) // 2\n vp = _compute_viewport(total_lines, center, viewport_radius)\n per_file_viewport[target_file] = vp\n vstart, vend = vp\n chained_cmd = f""{sed_cmd} && cat -n {target_file} | sed -n '{vstart},{vend}p'""\n parts.append(_fenced_block(target_file, ""bash"", _clean_text(chained_cmd)))\n viewport_output = _line_numbered_output(after_state, vstart, vend)\n parts.append(f""\n{viewport_output}\n"")\n pending_edits_before[target_file] = None\n\n def _flush_all_pending_edits() -> None:\n for fname in 
list(pending_edits_before.keys()):\n _flush_pending_edit_for_file(fname)\n\n for i in range(len(df)):\n row = df.iloc[i]\n file_path: str = row[""File""]\n event_type = row[""Type""]\n\n if i % 100 == 0:\n breakpoint()\n \n match event_type:\n case ""tab"":\n _flush_all_pending_edits()\n _flush_terminal_output_buffer()\n text = row[""Text""]\n if pd.notna(text):\n content = str(text).replace(""\\n"", ""\n"").replace(""\\r"", ""\r"")\n file_states[file_path] = content\n # First open with full file capture\n cmd = f""cat -n {file_path}""\n parts.append(_fenced_block(file_path, ""bash"", _clean_text(cmd)))\n output = _line_numbered_output(content)\n parts.append(f""\n{output}\n"")\n else:\n # File switch without content snapshot: show current viewport only\n content = file_states.get(file_path, """")\n total_lines = len(content.splitlines())\n vp = per_file_viewport.get(file_path)\n if not vp or vp[1] == 0:\n vp = _compute_viewport(total_lines, 1, viewport_radius)\n per_file_viewport[file_path] = vp\n if vp:\n vstart, vend = vp\n cmd = f""cat -n {file_path} | sed -n '{vstart},{vend}p'""\n parts.append(_fenced_block(file_path, ""bash"", _clean_text(cmd)))\n viewport_output = _line_numbered_output(content, vstart, vend)\n parts.append(f""\n{viewport_output}\n"")\n\n case ""content"":\n _flush_terminal_output_buffer()\n offset = int(row[""RangeOffset""])\n length = int(row[""RangeLength""])\n new_text = row[""Text""]\n before = file_states.get(file_path, """")\n after = _apply_change(before, offset, length, new_text)\n if pending_edits_before.get(file_path) is None:\n pending_edits_before[file_path] = before\n file_states[file_path] = after\n\n case ""selection_command"" | ""selection_mouse"" | ""selection_keyboard"":\n # During an edit burst (pending edits), suppress flush and viewport emissions\n if pending_edits_before.get(file_path) is None:\n _flush_terminal_output_buffer()\n else:\n # Skip emitting viewport while edits are pending to avoid per-keystroke sed/cat spam\n break\n offset = int(row[""RangeOffset""])\n content = file_states.get(file_path, """")\n total_lines = len(content.splitlines())\n target_line = content[:offset].count(""\n"") + 1\n vp = per_file_viewport.get(file_path)\n should_emit = False\n if not vp or vp[1] == 0:\n vp = _compute_viewport(total_lines, target_line, viewport_radius)\n per_file_viewport[file_path] = vp\n should_emit = True\n else:\n vstart, vend = vp\n if target_line < vstart or target_line > vend:\n vp = _compute_viewport(total_lines, target_line, viewport_radius)\n per_file_viewport[file_path] = vp\n should_emit = True\n if should_emit and vp:\n vstart, vend = vp\n cmd = f""cat -n {file_path} | sed -n '{vstart},{vend}p'""\n parts.append(_fenced_block(file_path, ""bash"", _clean_text(cmd)))\n viewport_output = _line_numbered_output(content, vstart, vend)\n parts.append(f""\n{viewport_output}\n"")\n\n case ""terminal_command"":\n _flush_all_pending_edits()\n _flush_terminal_output_buffer()\n command = row[""Text""]\n command_str = str(command).replace(""\\n"", ""\n"").replace(""\\r"", ""\r"")\n parts.append(_fenced_block(file_path, ""bash"", _clean_text(command_str)))\n\n case ""terminal_output"":\n output = row[""Text""]\n raw_output = str(output).replace(""\\n"", ""\n"").replace(""\\r"", ""\r"")\n terminal_output_buffer.append(raw_output)\n\n case ""terminal_focus"" | ""git_branch_checkout"":\n _flush_all_pending_edits()\n _flush_terminal_output_buffer()\n # FIXME (f.srambical): handle these events \n pass\n\n case _:\n _flush_all_pending_edits()\n 
_flush_terminal_output_buffer()\n raise ValueError(f""Unknown event type: {event_type}"")\n\n _flush_all_pending_edits()\n _flush_terminal_output_buffer()\n return ""\n"".join(parts).strip()\n\ndef load_hf_csv(hf_path: str, split: str) -> Dataset:\n loaded = load_dataset(hf_path, split=split)\n\n assert isinstance(loaded, Dataset), ""Expected a Dataset from load_dataset""\n return loaded\n\n\ndef _discover_local_sessions(root: Path) -> List[Path]:\n # Recursively find all CSV files\n paths: List[Path] = []\n for p in root.rglob(""*.csv""):\n if p.is_file():\n paths.append(p)\n paths.sort()\n return paths\n\n\ndef _chunk_text(text: str, target_chars: int, overlap_chars: int) -> List[str]:\n """"""Split a long text into overlapping chunks near target length.""""""\n if target_chars <= 0:\n return [text]\n n = len(text)\n if n <= target_chars:\n return [text]\n\n chunks: List[str] = []\n start = 0\n # Ensure sane overlap\n overlap = max(0, min(overlap_chars, target_chars // 2))\n while start < n:\n end_target = min(start + target_chars, n)\n if end_target < n:\n end = end_target\n else:\n end = n\n chunk = text[start:end].strip()\n chunks.append(chunk)\n if end == n:\n break\n # advance with overlap\n start = max(0, end - overlap)\n if start >= n:\n break\n return chunks\n\n\n",python,tab +2,318,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"3:56:09 PM [info] Activating crowd-code\n3:56:09 PM [info] Recording started\n3:56:09 PM [info] Initializing git provider using file system watchers...\n",Log,tab +3,550,"extension-output-pdoom-org.crowd-code-#1-crowd-code",150,0,"3:56:10 PM [info] Git repository found\n3:56:10 PM [info] Git provider initialized successfully\n3:56:10 PM [info] Initial git state: [object Object]\n",Log,content +4,230600,"TERMINAL",0,0,"",,terminal_focus +5,230602,"crowd-pilot/crowd-pilot/serialization_utils.py",0,0,"",python,tab +6,266711,"TERMINAL",0,0,"source /home/franz.srambical/crowd-pilot/.venv/bin/activate",,terminal_command +7,474880,"slurm/dev/franz/berlin/crowd-pilot/generate_array_record_dataset_bash_version.sh",0,0,"#!/bin/bash\n\nset -uex\n\nOUTPUT_DIR=""/fast/project/HFMI_SynergyUnit/jafar_ws/data/crowd-pilot/crowd-code-0.1/bash_format_array_record/""\nCSV_ROOT=""/fast/project/HFMI_SynergyUnit/jafar_ws/data/crowd-pilot/crowd-code-0.1/csv/""\n\nuv run crowd-pilot/serialize_dataset_array_record.py --csv_root=$CSV_ROOT --output_dir=$OUTPUT_DIR",shellscript,tab +8,567093,"slurm/dev/franz/berlin/crowd-pilot/generate_array_record_dataset_bash_version.sh",321,0,"",shellscript,selection_mouse +9,567133,"slurm/dev/franz/berlin/crowd-pilot/generate_array_record_dataset_bash_version.sh",320,0,"",shellscript,selection_command +10,571032,"slurm/dev/franz/berlin/crowd-pilot/generate_array_record_dataset_bash_version.sh",321,0,"",shellscript,selection_mouse +11,571033,"slurm/dev/franz/berlin/crowd-pilot/generate_array_record_dataset_bash_version.sh",320,0,"",shellscript,selection_command +12,578088,"slurm/dev/franz/berlin/crowd-pilot/generate_array_record_dataset_bash_version.sh",321,0,"",shellscript,selection_mouse +13,578089,"slurm/dev/franz/berlin/crowd-pilot/generate_array_record_dataset_bash_version.sh",320,0,"",shellscript,selection_command +14,598813,"slurm/dev/franz/berlin/crowd-pilot/qwen_0_6/convert_checkpoint_from_hf.sh",0,0,"#!/usr/bin/env bash\nset -uex\n\nexport XLA_PJRT_GPU_HOST_MEMORY_LIMIT_GB=400\ncd maxtext\nsource .venv/bin/activate\n\nexport 
HF_HOME=""$TMPDIR/.cache/huggingface""\n\nMODEL_NAME=""qwen3-0.6b""\nOUTPUT_DIR=""/fast/project/HFMI_SynergyUnit/jafar_ws/data/crowd-pilot/checkpoint_pretrained_maxtext/Qwen/Qwen3-0.6B""\n\n# Convert using the general conversion framework\npython3 -m MaxText.utils.ckpt_conversion.to_maxtext \\n src/MaxText/configs/base.yml \\n model_name=${MODEL_NAME} \\n base_output_directory=${OUTPUT_DIR} \\n use_multimodal=false \\n scan_layers=true \\n run_name=qwen3_0_6b_conversion",shellscript,tab +15,599422,"slurm/dev/franz/berlin/crowd-pilot/qwen_0_6/convert_checkpoint_to_hf.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --gres=gpu:1\n#SBATCH --time=02:00:00\n#SBATCH --cpus-per-task=4\n#SBATCH --output=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/crowd-pilot/maxtext/%x_%j.log\n#SBATCH --error=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/crowd-pilot/maxtext/%x_%j.log\n#SBATCH --job-name=convert_maxtext_to_hf\n#SBATCH --requeue\n\n# Usage:\n# sbatch convert_checkpoint_to_hf.sh \\n# /path/to/maxtext/checkpoints/0/items \\n# qwen3-0.6b \\n# /path/to/output/hf_dir \\n# \n\nset -euo pipefail\n\ncat $0\n\nif [ $# -lt 3 ]; then\n echo ""Usage: $0 [HF_ACCESS_TOKEN]"" >&2\n exit 1\nfi\n\nCHECKPOINT_PATH=""$1""\nMODEL_NAME=""$2""\nHF_OUT_DIR=""$3""\nHF_ACCESS_TOKEN=""${4-}""\n\ncd /home/franz.srambical/crowd-pilot/maxtext\nsource .venv/bin/activate\n\nmkdir -p ""$HF_OUT_DIR""\n\npython3 -m MaxText.utils.ckpt_conversion.to_huggingface src/MaxText/configs/base.yml \\n model_name=$MODEL_NAME \\n load_parameters_path=$CHECKPOINT_PATH \\n base_output_directory=$HF_OUT_DIR \\n scan_layers=false \\n use_multimodal=false \\n hf_access_token=$HF_ACCESS_TOKEN\n\necho ""Converted HF model saved to: $HF_OUT_DIR""\n\n\n",shellscript,tab +16,600006,"slurm/dev/franz/berlin/crowd-pilot/qwen_0_6/maxtext_decode.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --gres=gpu:1\n#SBATCH --time=24:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/crowd-pilot/maxtext/%x_%j.log\n#SBATCH --error=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/crowd-pilot/maxtext/%x_%j.log\n#SBATCH --job-name=crowd-pilot_qwen3-0.6b_maxtext_decode\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# Log the sbatch script\ncat $0\n\ncd maxtext\nsource .venv/bin/activate\n\nCHECKPOINT_PATH=/fast/project/HFMI_SynergyUnit/jafar_ws/data/crowd-pilot/outputs/crowd-pilot_qwen3-0.6b_batch_size_4_32858/checkpoints/0/items\nPROMPT=$(cat << 'EOF'\n\n// observation\nEOF\n)\nMODEL_NAME=qwen3-0.6b\nPER_DEVICE_BATCH_SIZE=1\nMAX_TARGET_LENGTH=128\n\npython3 -m MaxText.decode src/MaxText/configs/base.yml\\n load_parameters_path=$CHECKPOINT_PATH\\n tokenizer_path=src/MaxText/assets/qwen3-tokenizer\\n prompt=""$PROMPT""\\n model_name=$MODEL_NAME\\n per_device_batch_size=$PER_DEVICE_BATCH_SIZE\\n max_target_length=$MAX_TARGET_LENGTH\\n skip_jax_distributed_system=True",shellscript,tab diff --git a/4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-afb1f1b7-0bba-414b-b08e-fc18851671de1764452457464-2025_11_29-22.41.01.611/source.csv b/4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-afb1f1b7-0bba-414b-b08e-fc18851671de1764452457464-2025_11_29-22.41.01.611/source.csv new file mode 100644 index 0000000000000000000000000000000000000000..2edf4205aa7ef47a7fc767c0465401655eec719f --- /dev/null +++ 
b/4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-afb1f1b7-0bba-414b-b08e-fc18851671de1764452457464-2025_11_29-22.41.01.611/source.csv @@ -0,0 +1,107 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +1,12,"tasks",0,0,"",Log,tab +2,27,"Untitled-1",0,0,"",plaintext,tab +3,133,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"10:41:01 PM [info] Activating crowd-code\n10:41:01 PM [info] Recording started\n10:41:01 PM [info] Initializing git provider using file system watchers...\n10:41:01 PM [info] No workspace folder found\n",Log,tab +4,850,"Untitled-1",0,0,"",plaintext,tab +5,4554,"Untitled-1",0,0,"\n",plaintext,content +6,4726,"Untitled-1",1,0,"\n",plaintext,content +7,5245,"Untitled-1",1,0,"",plaintext,selection_command +8,5326,"Untitled-1",0,0,"",plaintext,selection_command +9,6484,"Untitled-1",0,0,"\n",plaintext,content +10,6953,"Untitled-1",0,0,"",plaintext,selection_command +11,8161,"Untitled-1",1,0,"",plaintext,selection_command +12,8262,"Untitled-1",2,0,"",plaintext,selection_command +13,9027,"Untitled-1",1,0,"",plaintext,selection_command +14,9619,"TERMINAL",0,0,"Test",,terminal_focus +15,9622,"Untitled-1",3,0,"/* crowd-pilot: insert start */\nline A\nline B\n/* crowd-pilot: insert end */\n",plaintext,content +16,14490,"Untitled-1",0,0,"",plaintext,selection_command +17,15128,"Untitled-1",1,0,"",plaintext,selection_command +18,15294,"Untitled-1",2,0,"",plaintext,selection_command +19,15649,"Untitled-1",3,0,"",plaintext,selection_command +20,15826,"Untitled-1",35,0,"",plaintext,selection_command +21,16210,"Untitled-1",3,0,"",plaintext,selection_command +22,16628,"Untitled-1",35,0,"",plaintext,selection_command +23,17204,"Untitled-1",3,0,"",plaintext,selection_command +24,17610,"Untitled-1",35,0,"",plaintext,selection_command +25,17827,"Untitled-1",3,0,"",plaintext,selection_command +26,18045,"Untitled-1",2,0,"",plaintext,selection_command +27,18367,"Untitled-1",1,0,"",plaintext,selection_command +28,18814,"Untitled-1",0,0,"",plaintext,selection_command +29,19896,"Untitled-1",1,0,"",plaintext,selection_command +30,20049,"Untitled-1",2,0,"",plaintext,selection_command +31,20209,"Untitled-1",3,0,"",plaintext,selection_command +32,20326,"Untitled-1",35,0,"",plaintext,selection_command +33,20715,"Untitled-1",3,0,"",plaintext,selection_command +34,20879,"Untitled-1",2,0,"",plaintext,selection_command +35,21024,"Untitled-1",1,0,"",plaintext,selection_command +36,21178,"Untitled-1",0,0,"",plaintext,selection_command +37,21610,"Untitled-1",1,0,"",plaintext,selection_command +38,21765,"Untitled-1",2,0,"",plaintext,selection_command +39,21913,"Untitled-1",3,0,"",plaintext,selection_command +40,22041,"Untitled-1",35,0,"",plaintext,selection_command +41,22434,"Untitled-1",3,0,"",plaintext,selection_command +42,22756,"Untitled-1",2,0,"",plaintext,selection_command +43,22914,"Untitled-1",1,0,"",plaintext,selection_command +44,23065,"Untitled-1",0,0,"",plaintext,selection_command +45,23529,"Untitled-1",1,0,"",plaintext,selection_command +46,23681,"Untitled-1",2,0,"",plaintext,selection_command +47,23844,"Untitled-1",3,0,"",plaintext,selection_command +48,23984,"Untitled-1",35,0,"",plaintext,selection_command +49,24371,"Untitled-1",3,0,"",plaintext,selection_command +50,24525,"Untitled-1",2,0,"",plaintext,selection_command +51,24678,"Untitled-1",1,0,"",plaintext,selection_command +52,24809,"Untitled-1",0,0,"",plaintext,selection_command +53,25830,"Untitled-1",1,0,"",plaintext,selection_command 
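The selection_command rows in this session store the cursor as a byte offset only; the serializer elsewhere in this diff recovers a 1-based line number by counting newlines before the offset (content[:offset].count("\n") + 1, as in the viewport logic of serialization_utils.py). A small standalone sketch; the buffer reuses the marker text inserted at row 15 but omits the recording's leading blank lines, so the offsets are illustrative rather than the recorded ones:

def offset_to_line(content: str, offset: int) -> int:
    # 1-based line number containing the byte offset, as used by the
    # viewport logic in serialization_utils.py.
    return content[:offset].count("\n") + 1

doc = "/* crowd-pilot: insert start */\nline A\nline B\n/* crowd-pilot: insert end */\n"
assert offset_to_line(doc, 0) == 1    # marker line
assert offset_to_line(doc, 35) == 2   # inside "line A"
assert offset_to_line(doc, 39) == 3   # inside "line B"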
+54,26422,"Untitled-1",0,0,"",plaintext,selection_command +55,26778,"Untitled-1",1,0,"",plaintext,selection_command +56,29610,"Untitled-1",2,0,"",plaintext,selection_command +57,30065,"Untitled-1",1,0,"",plaintext,selection_command +58,31595,"Untitled-1",35,13,"/* crowd-pilot: replacement */\nREPLACED LINE 1\nREPLACED LINE 2",plaintext,content +59,34818,"Untitled-1",2,0,"",plaintext,selection_command +60,34827,"Untitled-1",1,0,"",plaintext,selection_command +61,35649,"Untitled-1",98,30,"",plaintext,content +62,37420,"Untitled-1",2,0,"",plaintext,selection_command +63,37821,"Untitled-1",3,0,"",plaintext,selection_command +64,40049,"Untitled-1",35,0,"",plaintext,selection_command +65,40375,"Untitled-1",66,0,"",plaintext,selection_command +66,40562,"Untitled-1",82,0,"",plaintext,selection_command +67,40700,"Untitled-1",98,0,"",plaintext,selection_command +68,41073,"Untitled-1",82,0,"",plaintext,selection_command +69,41326,"Untitled-1",66,0,"",plaintext,selection_command +70,41356,"Untitled-1",35,0,"",plaintext,selection_command +71,41387,"Untitled-1",3,0,"",plaintext,selection_command +72,41420,"Untitled-1",2,0,"",plaintext,selection_command +73,41646,"Untitled-1",1,0,"",plaintext,selection_command +74,42128,"Untitled-1",2,0,"",plaintext,selection_command +75,42512,"Untitled-1",3,0,"",plaintext,selection_command +76,42745,"Untitled-1",35,0,"",plaintext,selection_command +77,42881,"Untitled-1",66,0,"",plaintext,selection_command +78,43047,"Untitled-1",82,0,"",plaintext,selection_command +79,43229,"Untitled-1",66,0,"",plaintext,selection_command +80,43395,"Untitled-1",35,0,"",plaintext,selection_command +81,43545,"Untitled-1",3,0,"",plaintext,selection_command +82,43691,"Untitled-1",2,0,"",plaintext,selection_command +83,43876,"Untitled-1",1,0,"",plaintext,selection_command +84,85576,"TERMINAL",0,0,"echo ""Hello World""",,terminal_command +85,85577,"TERMINAL",0,0,"]633;CHello World\r\n% \r \r",,terminal_output +86,86091,"Untitled-1",98,0,"",plaintext,selection_command +87,91930,"Untitled-1",82,0,"",plaintext,selection_command +88,92000,"Untitled-1",98,0,"",plaintext,selection_command +89,92527,"Untitled-1",82,0,"",plaintext,selection_command +90,93508,"Untitled-1",98,0,"",plaintext,selection_command +91,98548,"Untitled-1",82,0,"",plaintext,selection_command +92,98591,"Untitled-1",98,0,"",plaintext,selection_command +93,99618,"Untitled-1",82,0,"",plaintext,selection_command +94,99729,"Untitled-1",66,0,"",plaintext,selection_command +95,185047,"Untitled-1",98,0,"/* crowd-pilot: insert start */\nline A\nline B\n/* crowd-pilot: insert end */\n",plaintext,content +96,194794,"Untitled-1",130,13,"/* crowd-pilot: replacement */\nREPLACED LINE 1\nREPLACED LINE 2",plaintext,content +97,195409,"Untitled-1",193,30,"",plaintext,content +98,196586,"TERMINAL",0,0,"echo ""Hello World""",,terminal_command +99,196586,"TERMINAL",0,0,"]633;CHello World\r\n% \r \r",,terminal_output +100,197243,"Untitled-1",193,0,"",plaintext,selection_command +101,197828,"Untitled-1",177,0,"",plaintext,selection_command +102,197957,"Untitled-1",161,0,"",plaintext,selection_command +103,198771,"Untitled-1",130,0,"",plaintext,selection_command +104,217653,"Untitled-1",177,0,"/* crowd-pilot: insert start */\nline A\nline B\n/* crowd-pilot: insert end */\n",plaintext,content +105,241275,"Untitled-1",209,13,"/* crowd-pilot: replacement */\nREPLACED LINE 1\nREPLACED LINE 2",plaintext,content +106,242140,"Untitled-1",272,46,"",css,content diff --git 
a/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-05016444-b54b-4934-b340-97e6db49021a1753717457401-2025_07_28-17.45.12.572/source.csv b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-05016444-b54b-4934-b340-97e6db49021a1753717457401-2025_07_28-17.45.12.572/source.csv new file mode 100644 index 0000000000000000000000000000000000000000..fa39fd3d19482a248e3fcdb1fc9e75fd0b11a92a --- /dev/null +++ b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-05016444-b54b-4934-b340-97e6db49021a1753717457401-2025_07_28-17.45.12.572/source.csv @@ -0,0 +1,2609 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +2,952,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"5:45:12 PM [info] Activating crowd-code\n5:45:12 PM [info] Recording started\n5:45:12 PM [info] Initializing git provider using file system watchers...\n5:45:12 PM [info] Git repository found\n5:45:12 PM [info] Git provider initialized successfully\n",Log,tab +3,1325,"extension-output-pdoom-org.crowd-code-#1-crowd-code",245,0,"5:45:13 PM [info] Initial git state: [object Object]\n",Log,content +4,16552,"TERMINAL",0,0,"queue",,terminal_command +5,16608,"TERMINAL",0,0,"]633;E;2025-07-28 17:45:29 queue;252991b4-29d1-4b67-8848-3bd7bccaeb47]633;C",,terminal_output +6,16723,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1993.localdomain: Mon Jul 28 17:45:29 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3379590 accelerat train_dy tum_cte0 PD\t0:00\t 3 (Priority)3371238 accelerat train_dy tum_cte0 R56:05\t 2 hkn[0509,0532]3377693 accelerat train_dy tum_cte0 R 23:12:56\t 8 hkn[0426,0436,0510,0524,0603,0627,0805,0812]3373408 accelerat train_dy tum_cte0 R 1-04:55:24\t 8 hkn[0417-0419,0422,0527,0621,0625,0628]3380061 accelerat interact tum_cte0 R 1:04:06\t 1 hkn0724",,terminal_output +7,17707,"TERMINAL",0,0,"306757",,terminal_output +8,18737,"TERMINAL",0,0,"17868",,terminal_output +9,19840,"TERMINAL",0,0,"28979",,terminal_output +10,20153,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +11,21119,"TERMINAL",0,0,"bash",,terminal_focus +12,23356,"TERMINAL",0,0,"bash",,terminal_focus +13,56623,"sample.py",0,0,"from dataclasses import dataclass\nfrom typing import Optional\nimport time\nimport os\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nfrom flax.training.train_state import TrainState\nimport grain\nimport orbax.checkpoint as ocp\nimport optax\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n checkpoint_step: Optional[int] = None\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_co_train: bool = 
True\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\nrng = jax.random.PRNGKey(args.seed)\n\n# --- Load Genie checkpoint ---\ngenie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=args.lam_co_train,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n use_maskgit=False,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n # sampling_fn = jax.jit(nn.apply(_sampling_wrapper, genie)) \n sampling_fn = nn.apply(_sampling_wrapper, genie)\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=vid, latent_actions=action_batch, rng=_rng)\n generated_vid = sampling_fn(params, batch)\n return generated_vid\n\ndef _get_dataloader_iterator():\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n 
num_workers=0,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n return grain_iterator\n\n# --- Get video + latent actions ---\ngrain_iterator = _get_dataloader_iterator()\nvideo_batch = next(grain_iterator)\n# video_batch = np.load(""overfit_dir/single_sample_corner.npy"")\n# video_batch = np.load(""overfit_dir/oai_sample_seed69_1.npy"") # *255.\n\nvideo_batch = jnp.array(video_batch)\nprint(video_batch.dtype)\nvideo_batch = video_batch.astype(args.dtype) # / 255.0\nprint(video_batch.dtype)\nvideo_batch = video_batch / 255.0\nprint(video_batch.dtype)\n# Get latent actions for all videos in the batch\nbatch = dict(videos=video_batch[:,:args.seq_len])\naction_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\naction_batch = action_batch.reshape(video_batch.shape[0], args.seq_len - 1, 1)\n\n# --- Sample + evaluate video ---\nprint(""autoreg sampling..."")\nvid = _autoreg_sample(rng, video_batch, action_batch)\nprint(""autoreg sampling done. calculating ssim and saving video"")\ngt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\nrecon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\nssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\nprint(f""SSIM: {ssim}"")\n\n# --- Construct video ---\ntrue_videos = (video_batch * 255).astype(np.uint8)\npred_videos = (vid * 255).astype(np.uint8)\nvideo_comparison = np.zeros((2, *vid.shape), dtype=np.uint8)\nvideo_comparison[0] = true_videos[:, : args.seq_len]\nvideo_comparison[1] = pred_videos\nframes = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n# --- Save video ---\nimgs = [Image.fromarray(img) for img in frames]\n# Write actions on each frame, on each row (i.e., for each video in the batch, on the GT row)\nfor t, img in enumerate(imgs[1:]):\n d = ImageDraw.Draw(img)\n for row in range(action_batch.shape[0]):\n action = action_batch[row, t, 0]\n y_offset = row * video_batch.shape[2] + 2\n d.text((2, y_offset), f""{action}"", fill=255)\nimgs[0].save(\n f""generation_{time.time()}.gif"",\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n)\n",python,tab +14,61694,"genie.py",0,0,"from typing import Dict, Any\n\nimport optax\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nfrom flax.training.train_state import TrainState\nimport orbax.checkpoint as ocp\n\nfrom models.dynamics import DynamicsMaskGIT, DynamicsAutoregressive\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\nimport grain\n\n\nclass Genie(nn.Module):\n """"""Genie model""""""\n\n # --- Tokenizer ---\n in_dim: int\n tokenizer_dim: int\n tokenizer_ffn_dim: int\n latent_patch_dim: int\n num_patch_latents: int\n patch_size: int\n tokenizer_num_blocks: int\n tokenizer_num_heads: int\n # --- LAM ---\n lam_dim: int\n lam_ffn_dim: int\n latent_action_dim: int\n num_latent_actions: int\n lam_patch_size: int\n lam_num_blocks: int\n lam_num_heads: int\n lam_co_train: bool\n # --- Dynamics ---\n dyna_dim: int\n dyna_ffn_dim: int\n dyna_num_blocks: int\n dyna_num_heads: int\n use_maskgit: bool\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n dropout: float = 0.0\n mask_limit: float = 0.0\n\n def setup(self):\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n ffn_dim=self.tokenizer_ffn_dim,\n latent_dim=self.latent_patch_dim,\n 
num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n ffn_dim=self.lam_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n\n if self.use_maskgit:\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n else:\n self.dynamics = DynamicsAutoregressive(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\n lam_outputs = self.lam.vq_encode(batch[""videos""], training=False)\n latent_actions = jax.lax.cond(\n self.lam_co_train,\n lambda: lam_outputs[""z_q""],\n lambda: jax.lax.stop_gradient(lam_outputs[""z_q""]),\n )\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(tokenizer_outputs[""indices""]),\n latent_actions=latent_actions,\n )\n outputs[""mask_rng""] = batch[""mask_rng""]\n dyna_outputs = self.dynamics(outputs, training)\n outputs.update(dyna_outputs)\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )\n outputs[""lam_indices""] = lam_outputs[""indices""]\n return outputs\n\n def sample_causal(\n self,\n batch: Dict[str, Any],\n seq_len: int,\n temperature: float = 1,\n sample_argmax: bool = False,\n ):\n """"""\n Autoregressively samples up to `seq_len` future frames using the causal transformer backend.\n\n - Input frames are tokenized once.\n - Future frames are generated one at a time, each conditioned on all previous frames.\n - All frames are detokenized in a single pass at the end.\n\n Args:\n batch: Dict with at least ""videos"" (B, T, H, W, C)\n seq_len: total number of frames to generate (including context)\n temperature: sampling temperature\n sample_argmax: if True, use argmax instead of sampling\n\n Returns:\n Generated video frames (B, seq_len, H, W, C)\n """"""\n # --- Encode context frames ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""] # (B, T, N)\n B, T, N = token_idxs.shape\n\n # jax.debug.print(""token_idxs shape: {}"", token_idxs.shape)\n # --- Prepare initial token sequence ---\n # Pad with zeros for future frames\n pad_shape = (B, seq_len - T, N)\n token_idxs_full = jnp.concatenate(\n [token_idxs, jnp.zeros(pad_shape, dtype=token_idxs.dtype)], axis=1\n ) # (B, seq_len, N)\n\n # --- Prepare latent actions 
---\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""]) # (B, S-1, )\n # --- Autoregressive generation loop ---\n rng = batch[""rng""]\n for t in range(T, seq_len):\n for n in range(N):\n jax.debug.print(""Sampling token {} from frame {}"", n, t)\n dyna_inputs = {\n ""video_tokens"": token_idxs_full,\n ""latent_actions"": action_tokens,\n }\n # jax.debug.print(""token_idxs_full 0: {}"", token_idxs_full[0,:,0])\n dyna_outputs = self.dynamics(dyna_inputs, training=False)\n # # dyna_outputs[""token_logits""]: (B, t, N, vocab_size)\n # # We want the logits for the last time step (frame t-1 predicting t)\n # jax.debug.breakpoint()\n next_token_logits = dyna_outputs[""token_logits""][:, t, n, :].astype(\n jnp.float32\n ) # (B, 1, vocab_size)\n\n # Sample or argmax for each patch\n if sample_argmax:\n next_token = jnp.argmax(next_token_logits, axis=-1) # (B, 1)\n else:\n rng, step_rng = jax.random.split(rng)\n next_token = jax.random.categorical(\n step_rng, next_token_logits / temperature, axis=-1\n ) # (B, 1)\n \n # Insert the generated tokens into the sequence\n token_idxs_full = token_idxs_full.at[:, t, n].set(next_token)\n # FIXME @mihir: HACK\n # token_idxs_full = jnp.argmax(dyna_outputs[""token_logits""].astype(jnp.float32) , axis=-1)\n # break\n\n # --- Decode all tokens at once at the end ---\n final_frames = self.tokenizer.decode(\n token_idxs_full, video_hw=batch[""videos""].shape[2:4]\n )\n return final_frames\n\n @nn.compact\n def sample_maskgit(\n self,\n batch: Dict[str, Any],\n seq_len: int,\n steps: int = 25,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> Any:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by\n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: patches per frame\n S: sequence length\n A: action space\n D: model latent dimension\n """"""\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""] # (B, T, N)\n B, T, N = token_idxs.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs.dtype)\n token_idxs = jnp.concatenate([token_idxs, pad], axis=1) # (B, S, N)\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n\n def generation_step_fn(carry, step_t):\n rng, current_token_idxs = carry\n rng, step_rng = jax.random.split(rng)\n\n # Mask current and future frames (i.e., t >= step_t)\n mask = jnp.arange(seq_len) >= step_t # (S,)\n mask = jnp.broadcast_to(mask[None, :, None], (B, seq_len, N)) # (B, S, N)\n mask = mask.astype(bool)\n masked_token_idxs = current_token_idxs * 
~mask\n\n # --- Initialize and run MaskGIT loop ---\n init_carry_maskgit = (\n step_rng,\n masked_token_idxs,\n mask,\n action_tokens,\n )\n final_carry_maskgit, _ = loop_fn(init_carry_maskgit, jnp.arange(steps))\n updated_token_idxs = final_carry_maskgit[1]\n new_carry = (rng, updated_token_idxs)\n return new_carry, None\n\n # --- Run the autoregressive generation using scan ---\n initial_carry = (batch[""rng""], token_idxs)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry, _ = jax.lax.scan(\n generation_step_fn, initial_carry, timesteps_to_scan\n )\n final_token_idxs = final_carry[1]\n\n # --- Decode all tokens at once at the end ---\n final_frames = self.tokenizer.decode(\n final_token_idxs,\n video_hw=batch[""videos""].shape[2:4],\n )\n return final_frames\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, token_idxs, mask, action_tokens = carry\n step = x\n N = token_idxs.shape[2]\n\n # --- Construct + encode video ---\n vid_embed = self.dynamics.patch_embed(token_idxs) # (B, S, N, D)\n mask_token = self.dynamics.mask_token # (1, 1, 1, D,)\n mask_expanded = mask[..., None] # (B, S, N, 1)\n vid_embed = jnp.where(mask_expanded, mask_token, vid_embed)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed) / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jax.random.categorical(_rng, final_logits)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n token_idxs = jnp.where(mask, sampled_token_idxs, token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, token_idxs, new_mask, action_tokens)\n return new_carry, None\n\n\ndef restore_genie_components(\n train_state: TrainState,\n sharding: jax.sharding.NamedSharding,\n inputs: Dict[str, jax.Array],\n rng: jax.Array,\n args,\n):\n """"""Restore pre-trained Genie components""""""\n rng, _rng = jax.random.split(rng)\n\n # dummy values since we only use tx to initialize the dummy train states\n dummy_tx = optax.adamw(\n learning_rate=optax.constant_schedule(args.max_lr),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n )\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n 
tokenizer_checkpoint_manager = ocp.CheckpointManager(\n directory=args.tokenizer_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n ffn_dim=args.tokenizer_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n num_blocks=args.tokenizer_num_blocks,\n num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n tokenizer_init_params = dummy_tokenizer.init(_rng, inputs)\n dummy_tokenizer_train_state = TrainState.create(\n apply_fn=dummy_tokenizer.apply, params=tokenizer_init_params, tx=dummy_tx\n )\n abstract_sharded_tokenizer_state = _create_abstract_sharded_pytree(\n dummy_tokenizer_train_state, sharding\n )\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n step=tokenizer_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_sharded_tokenizer_state),\n ),\n )[""model_state""]\n restored_tokenizer_params = restored_tokenizer.params[""params""]\n train_state.params[""params""][""tokenizer""].update(restored_tokenizer_params)\n tokenizer_checkpoint_manager.close()\n\n if args.lam_checkpoint:\n lam_checkpoint_manager = ocp.CheckpointManager(\n directory=args.lam_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n ffn_dim=args.lam_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n lam_init_params = dummy_lam.init(_rng, inputs)\n dummy_lam_train_state = TrainState.create(\n apply_fn=dummy_lam.apply, params=lam_init_params, tx=dummy_tx\n )\n abstract_sharded_lam_state = _create_abstract_sharded_pytree(\n dummy_lam_train_state, sharding\n )\n restored_lam = lam_checkpoint_manager.restore(\n step=lam_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_sharded_lam_state),\n ),\n )[""model_state""]\n restored_lam_params = restored_lam.params[""params""]\n # Genie does not initialize all LAM modules, thus we omit those extra modules during restoration\n # (f.srambical) FIXME: Currently, this is a small HBM memory crunch since the LAM's decoder is loaded into HBM and immediately dicarded.\n # A workaround would be to restore to host memory first, and only move the weights to HBM after pruning the decoder\n restored_lam_params = {\n k: v\n for k, v in restored_lam_params.items()\n if k in train_state.params[""params""][""lam""]\n }\n train_state.params[""params""][""lam""].update(restored_lam_params)\n lam_checkpoint_manager.close()\n\n return train_state\n\n\ndef _create_abstract_sharded_pytree(pytree_template, sharding_spec):\n """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, ""shape"") and hasattr(leaf_template, ""dtype""):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return 
leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)\n",python,tab +15,62296,"TERMINAL",0,0,"bash",,terminal_focus +16,63028,"TERMINAL",0,0,"bash",,terminal_focus +17,113524,"TERMINAL",0,0,"bash",,terminal_focus +18,114094,"TERMINAL",0,0,"bash",,terminal_focus +19,115203,"genie.py",0,0,"",python,tab +20,116516,"genie.py",5844,0,"",python,selection_mouse +21,117144,"genie.py",6103,0,"",python,selection_mouse +22,117781,"genie.py",6102,0,"",python,selection_mouse +23,117959,"genie.py",6096,12,"video_tokens",python,selection_mouse +24,118560,"genie.py",6067,0,"",python,selection_mouse +25,118727,"genie.py",6059,11,"dyna_inputs",python,selection_mouse +26,121243,"genie.py",6060,0,"",python,selection_mouse +27,123026,"genie.py",5987,0,"",python,selection_mouse +28,138645,"genie.py",6355,0,"",python,selection_mouse +29,138680,"genie.py",6354,0,"",python,selection_command +30,218736,"genie.py",0,0,"",python,tab +31,222563,"TERMINAL",0,0,"bash",,terminal_focus +32,224723,"TERMINAL",0,0,"queue",,terminal_command +33,224806,"TERMINAL",0,0,"]633;E;2025-07-28 17:48:57 queue;adbf53fe-397b-40d3-9339-94ea79afad56]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1993.localdomain: Mon Jul 28 17:48:57 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3379590 accelerat train_dy tum_cte0 PD\t0:00\t 3 (Priority)3371238 accelerat train_dy tum_cte0 R59:33\t 2 hkn[0509,0532]3377693 accelerat train_dy tum_cte0 R 23:16:24\t 8 hkn[0426,0436,0510,0524,0603,0627,0805,0812]3373408 accelerat train_dy tum_cte0 R 1-04:58:52\t 8 hkn[0417-0419,0422,0527,0621,0625,0628]3380061 accelerat interact tum_cte0 R 1:07:34\t 1 hkn0724",,terminal_output +34,226037,"TERMINAL",0,0,"84535",,terminal_output +35,226926,"TERMINAL",0,0,"95646",,terminal_output +36,229407,"TERMINAL",0,0,"scancel 3380061",,terminal_command +37,229446,"TERMINAL",0,0,"]633;E;2025-07-28 17:49:01 scancel 3380061;adbf53fe-397b-40d3-9339-94ea79afad56]633;C",,terminal_output +38,232173,"TERMINAL",0,0,"idling",,terminal_command +39,232245,"TERMINAL",0,0,"]633;E;2025-07-28 17:49:04 idling;adbf53fe-397b-40d3-9339-94ea79afad56]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1993.localdomain: Mon Jul 28 17:49:04 2025Partition dev_cpuonly: 12 nodes idle\rPartition cpuonly: 222 nodes idle\rPartition dev_accelerated:\t 0 nodes idle\rPartition accelerated:\t 0 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 1 nodes idle\rPartition large:\t 8 nodes idle",,terminal_output +40,233317,"TERMINAL",0,0,"5\t",,terminal_output +41,233948,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar",,terminal_output +42,250059,"TERMINAL",0,0,"cursor ../jafar_jobs",,terminal_command +43,250146,"TERMINAL",0,0,"]633;E;2025-07-28 17:49:22 cursor ../jafar_jobs;adbf53fe-397b-40d3-9339-94ea79afad56]633;C",,terminal_output +44,250415,"TERMINAL",0,0,"]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +45,307659,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5",,terminal_command +46,307738,"TERMINAL",0,0,"]633;E;2025-07-28 17:50:20 salloc --time=01:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5;adbf53fe-397b-40d3-9339-94ea79afad56]633;Csalloc: Pending job allocation 3380176\r\nsalloc: job 3380176 queued and waiting for resources\r\n",,terminal_output +47,309234,"TERMINAL",0,0,"bash",,terminal_focus 
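A minimal, self-contained sketch of the confidence-based remasking that MaskGITStep (defined in genie.py above) performs on each iteration: every still-masked token is sampled, then the least confident fraction, given by the cosine schedule cos(pi*(k+1)/(2K)), is re-masked for the next pass. All names below are illustrative, not the repository's API.

```python
import jax
import jax.numpy as jnp

def maskgit_step(rng, logits, token_idxs, mask, step, total_steps, temperature=1.0):
    """One refinement pass over N tokens. logits: (N, V); mask: True = still masked."""
    masked_ratio = jnp.cos(jnp.pi * (step + 1) / (2 * total_steps))
    rng, _rng = jax.random.split(rng)
    sampled = jax.random.categorical(_rng, logits / temperature, axis=-1)        # (N,)
    conf = jnp.take_along_axis(jax.nn.softmax(logits), sampled[:, None], axis=-1)[:, 0]
    conf = jnp.where(mask, conf, jnp.inf)             # committed tokens never re-mask
    tokens = jnp.where(mask, sampled, token_idxs)     # update masked positions only
    num_to_mask = jnp.round(mask.shape[0] * masked_ratio).astype(jnp.int32)
    ranks = jnp.argsort(jnp.argsort(conf))            # rank 0 = least confident
    new_mask = ranks < num_to_mask                    # low-confidence tokens stay masked
    return rng, tokens, new_mask
```

sample_maskgit wraps the repository's version of this step in nn.scan (`steps` iterations per frame) and only advances to the next frame afterwards, which is how temporal causality is preserved during generation.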
+48,311063,"TERMINAL",0,0,"idling",,terminal_command +49,311121,"TERMINAL",0,0,"]633;E;2025-07-28 17:50:23 idling;54c1b5ea-730a-4f9f-ba91-1c5dfbf41461]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1993.localdomain: Mon Jul 28 17:50:23 2025Partition dev_cpuonly: 12 nodes idle\rPartition cpuonly: 222 nodes idle\rPartition dev_accelerated:\t 0 nodes idle\rPartition accelerated:\t 0 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 1 nodes idle\rPartition large:\t 8 nodes idle",,terminal_output +50,312097,"TERMINAL",0,0,"4\t",,terminal_output +51,313231,"TERMINAL",0,0,"5\t",,terminal_output +52,314187,"TERMINAL",0,0,"6\t",,terminal_output +53,315256,"TERMINAL",0,0,"7\t",,terminal_output +54,315463,"TERMINAL",0,0,"salloc",,terminal_focus +55,316269,"TERMINAL",0,0,"8\t",,terminal_output +56,317318,"TERMINAL",0,0,"9\t",,terminal_output +57,318377,"TERMINAL",0,0,"30\t",,terminal_output +58,319402,"TERMINAL",0,0,"1\t",,terminal_output +59,320475,"TERMINAL",0,0,"2\t",,terminal_output +60,321498,"TERMINAL",0,0,"3\t",,terminal_output +61,322532,"TERMINAL",0,0,"4\t",,terminal_output +62,323571,"TERMINAL",0,0,"6\t",,terminal_output +63,324615,"TERMINAL",0,0,"7\t",,terminal_output +64,325668,"TERMINAL",0,0,"8\t",,terminal_output +65,326753,"TERMINAL",0,0,"9\t",,terminal_output +66,327748,"TERMINAL",0,0,"40\t",,terminal_output +67,328810,"TERMINAL",0,0,"1\t",,terminal_output +68,329836,"TERMINAL",0,0,"2\t",,terminal_output +69,330871,"TERMINAL",0,0,"3\t",,terminal_output +70,331930,"TERMINAL",0,0,"4\t",,terminal_output +71,332954,"TERMINAL",0,0,"5\t",,terminal_output +72,334004,"TERMINAL",0,0,"6\t",,terminal_output +73,335071,"TERMINAL",0,0,"7\t",,terminal_output +74,336095,"TERMINAL",0,0,"8\t",,terminal_output +75,337294,"TERMINAL",0,0,"9\t",,terminal_output +76,338444,"TERMINAL",0,0,"50\t",,terminal_output +77,339246,"TERMINAL",0,0,"1\t",,terminal_output +78,340294,"TERMINAL",0,0,"2\t",,terminal_output +79,341307,"TERMINAL",0,0,"3\t",,terminal_output +80,342335,"TERMINAL",0,0,"4\t",,terminal_output +81,343370,"TERMINAL",0,0,"5\t",,terminal_output +82,345341,"TERMINAL",0,0,"6\t",,terminal_output +83,345614,"TERMINAL",0,0,"7\t",,terminal_output +84,346562,"TERMINAL",0,0,"8\t",,terminal_output +85,347546,"TERMINAL",0,0,"9\t",,terminal_output +86,348594,"TERMINAL",0,0,"1:01\t",,terminal_output +87,349648,"TERMINAL",0,0,"2\t",,terminal_output +88,350691,"TERMINAL",0,0,"3\t",,terminal_output +89,351744,"TERMINAL",0,0,"4\t",,terminal_output +90,355183,"TERMINAL",0,0,"5\t",,terminal_output +91,356619,"TERMINAL",0,0,"6\t",,terminal_output +92,358350,"TERMINAL",0,0,"7\t8\t",,terminal_output +93,358839,"TERMINAL",0,0,"9\t",,terminal_output +94,359413,"TERMINAL",0,0,"10\t",,terminal_output +95,359764,"TERMINAL",0,0,"1\t",,terminal_output +96,360105,"TERMINAL",0,0,"2\t",,terminal_output +97,361107,"TERMINAL",0,0,"3\t",,terminal_output +98,362141,"TERMINAL",0,0,"4\t",,terminal_output +99,363177,"TERMINAL",0,0,"5\t",,terminal_output +100,364269,"TERMINAL",0,0,"6\t",,terminal_output +101,365264,"TERMINAL",0,0,"7\t",,terminal_output +102,366302,"TERMINAL",0,0,"8\t",,terminal_output +103,367346,"TERMINAL",0,0,"9\t",,terminal_output +104,368381,"TERMINAL",0,0,"20\t",,terminal_output +105,369422,"TERMINAL",0,0,"1\t",,terminal_output +106,370471,"TERMINAL",0,0,"2\t",,terminal_output +107,371527,"TERMINAL",0,0,"3\t",,terminal_output +108,372558,"TERMINAL",0,0,"4\t",,terminal_output +109,373702,"TERMINAL",0,0,"6\t",,terminal_output +110,374640,"TERMINAL",0,0,"7\t",,terminal_output 
+111,376349,"TERMINAL",0,0,"8\t",,terminal_output +112,377486,"TERMINAL",0,0,"9\t",,terminal_output +113,378604,"TERMINAL",0,0,"30\t",,terminal_output +114,380675,"TERMINAL",0,0,"1\t",,terminal_output +115,381406,"TERMINAL",0,0,"2\t3\t",,terminal_output +116,382320,"TERMINAL",0,0,"4\t",,terminal_output +117,383505,"TERMINAL",0,0,"5\t",,terminal_output +118,383533,"TERMINAL",0,0,"salloc: job 3380176 has been allocated resources\r\nsalloc: Granted job allocation 3380176\r\nsalloc: Waiting for resource configuration\r\n",,terminal_output +119,384408,"TERMINAL",0,0,"6\t",,terminal_output +120,385457,"TERMINAL",0,0,"7\t",,terminal_output +121,386500,"TERMINAL",0,0,"8\t",,terminal_output +122,387498,"TERMINAL",0,0,"9\t",,terminal_output +123,388550,"TERMINAL",0,0,"40\t",,terminal_output +124,389669,"TERMINAL",0,0,"2\t",,terminal_output +125,390630,"TERMINAL",0,0,"3\t",,terminal_output +126,391716,"TERMINAL",0,0,"4\t",,terminal_output +127,392744,"TERMINAL",0,0,"5\t",,terminal_output +128,393867,"TERMINAL",0,0,"6\t",,terminal_output +129,394997,"TERMINAL",0,0,"7\t",,terminal_output +130,395824,"TERMINAL",0,0,"8\t",,terminal_output +131,396948,"TERMINAL",0,0,"9\t",,terminal_output +132,397970,"TERMINAL",0,0,"50\t",,terminal_output +133,398990,"TERMINAL",0,0,"1\t",,terminal_output +134,400012,"TERMINAL",0,0,"2\t",,terminal_output +135,401072,"TERMINAL",0,0,"3\t",,terminal_output +136,402163,"TERMINAL",0,0,"4\t",,terminal_output +137,403192,"TERMINAL",0,0,"5\t",,terminal_output +138,404223,"TERMINAL",0,0,"6\t",,terminal_output +139,405339,"TERMINAL",0,0,"7\t",,terminal_output +140,406362,"TERMINAL",0,0,"83",,terminal_output +141,407332,"TERMINAL",0,0,"9\t",,terminal_output +142,408433,"TERMINAL",0,0,"2:00\t",,terminal_output +143,409385,"TERMINAL",0,0,"1\t",,terminal_output +144,410417,"TERMINAL",0,0,"2\t",,terminal_output +145,410613,"TERMINAL",0,0,"salloc: Nodes hkn0706 are ready for job\r\n",,terminal_output +146,411469,"TERMINAL",0,0,"3\t",,terminal_output +147,412496,"TERMINAL",0,0,"4\t",,terminal_output +148,412568,"TERMINAL",0,0,"]0;tum_cte0515@hkn0706:~/Projects/jafar[?2004h[tum_cte0515@hkn0706 jafar]$ ",,terminal_output +149,413514,"TERMINAL",0,0,"5\t",,terminal_output +150,414562,"TERMINAL",0,0,"6\t",,terminal_output +151,415591,"TERMINAL",0,0,"8\t",,terminal_output +152,416621,"TERMINAL",0,0,"9\t",,terminal_output +153,417678,"TERMINAL",0,0,"10\t",,terminal_output +154,418752,"TERMINAL",0,0,"1\t",,terminal_output +155,419776,"TERMINAL",0,0,"2\t",,terminal_output +156,420799,"TERMINAL",0,0,"3\t",,terminal_output +157,421870,"TERMINAL",0,0,"4\t",,terminal_output +158,422961,"TERMINAL",0,0,"5\t",,terminal_output +159,423978,"TERMINAL",0,0,"6\t",,terminal_output +160,425011,"TERMINAL",0,0,"7\t",,terminal_output +161,426018,"TERMINAL",0,0,"8\t",,terminal_output +162,427048,"TERMINAL",0,0,"9\t",,terminal_output +163,428072,"TERMINAL",0,0,"20\t",,terminal_output +164,429344,"TERMINAL",0,0,"1\t",,terminal_output +165,430148,"TERMINAL",0,0,"2\t",,terminal_output +166,431390,"TERMINAL",0,0,"3\t",,terminal_output +167,432273,"TERMINAL",0,0,"4\t",,terminal_output +168,433326,"TERMINAL",0,0,"5\t",,terminal_output +169,434322,"TERMINAL",0,0,"6\t",,terminal_output +170,435378,"TERMINAL",0,0,"7\t",,terminal_output +171,436400,"TERMINAL",0,0,"8\t",,terminal_output +172,437435,"TERMINAL",0,0,"9\t",,terminal_output +173,438477,"TERMINAL",0,0,"30\t",,terminal_output +174,439518,"TERMINAL",0,0,"1\t",,terminal_output +175,440563,"TERMINAL",0,0,"3\t",,terminal_output 
+176,441692,"TERMINAL",0,0,"4\t",,terminal_output +177,442657,"TERMINAL",0,0,"5\t",,terminal_output +178,444157,"TERMINAL",0,0,"6\t",,terminal_output +179,447167,"TERMINAL",0,0,"7\t8\t",,terminal_output +180,447794,"TERMINAL",0,0,"9\t",,terminal_output +181,448446,"TERMINAL",0,0,"40\t",,terminal_output +182,449078,"TERMINAL",0,0,"1\t",,terminal_output +183,449933,"TERMINAL",0,0,"2\t",,terminal_output +184,451109,"TERMINAL",0,0,"3\t",,terminal_output +185,452033,"TERMINAL",0,0,"4\t",,terminal_output +186,453159,"TERMINAL",0,0,"5\t",,terminal_output +187,454184,"TERMINAL",0,0,"6\t",,terminal_output +188,455207,"TERMINAL",0,0,"7\t",,terminal_output +189,456174,"TERMINAL",0,0,"8\t",,terminal_output +190,457223,"TERMINAL",0,0,"9\t",,terminal_output +191,458326,"TERMINAL",0,0,"50\t",,terminal_output +192,459356,"TERMINAL",0,0,"1\t",,terminal_output +193,460364,"TERMINAL",0,0,"2\t",,terminal_output +194,461404,"TERMINAL",0,0,"3\t",,terminal_output +195,462448,"TERMINAL",0,0,"4\t",,terminal_output +196,463480,"TERMINAL",0,0,"5\t",,terminal_output +197,464538,"TERMINAL",0,0,"6\t",,terminal_output +198,465567,"TERMINAL",0,0,"8\t",,terminal_output +199,466605,"TERMINAL",0,0,"9\t",,terminal_output +200,467660,"TERMINAL",0,0,"3:00\t",,terminal_output +201,467747,"models/dynamics.py",0,0,"from typing import Dict, Any\n\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport einops\n\nfrom utils.nn import STTransformer\n\n\nclass DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n ffn_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n spatial_bert=True,\n use_flash_attention=self.use_flash_attention,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n batch_size = vid_embed.shape[0]\n _rng_prob, *_rngs_mask = jax.random.split(batch[""mask_rng""], batch_size + 1)\n mask_prob = jax.random.uniform(\n _rng_prob, shape=(batch_size,), minval=self.mask_limit\n )\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(jnp.asarray(_rngs_mask), mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:\n mask = None\n\n # --- Predict transition ---\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n logits = self.dynamics(vid_embed)\n return dict(token_logits=logits, mask=mask)\n\n\nclass DynamicsAutoregressive(nn.Module):\n """"""Autoregressive (causal) dynamics model""""""\n\n model_dim: int\n ffn_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n def setup(self):\n self.dynamics = 
STTransformer(\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n self.use_flash_attention,\n spatial_bert=False,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.action_up = nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n vid_embed = self.patch_embed(batch[""video_tokens""])\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n # vid_embed_padded = jnp.pad(vid_embed, ((0, 0), (1, 0), (1, 0), (0, 0)))\n # logits = self.dynamics(vid_embed_padded)[:, :-1, :-1]\n vid_embed_padded = jnp.pad(vid_embed, ((0, 0), (0, 0), (1, 0), (0, 0)))\n\n # FIXME mihir: HACK\n # rng1, _rng = jax.random.split(batch[""mask_rng""])\n # noise = jax.random.normal(_rng, vid_embed_padded.shape)\n # logits = self.dynamics(noise)[:, :, :-1]\n\n # rng1, _rng = jax.random.split(batch[""mask_rng""])\n # noise = 0.25 * jax.random.normal(_rng, vid_embed_padded.shape)\n # logits = self.dynamics(vid_embed_padded + noise)[:, :, :-1]\n\n logits = self.dynamics(vid_embed_padded)[:, :, :-1]\n\n mask = jnp.ones(vid_embed.shape[:-1])\n return dict(token_logits=logits, mask=mask)\n",python,tab +202,468742,"TERMINAL",0,0,"1\t",,terminal_output +203,469855,"TERMINAL",0,0,"2\t",,terminal_output +204,470573,"models/dynamics.py",3505,0,"",python,selection_mouse +205,470780,"TERMINAL",0,0,"3\t",,terminal_output +206,471876,"TERMINAL",0,0,"4\t",,terminal_output +207,472977,"TERMINAL",0,0,"5\t",,terminal_output +208,473955,"TERMINAL",0,0,"6\t",,terminal_output +209,475070,"TERMINAL",0,0,"7\t",,terminal_output +210,476100,"TERMINAL",0,0,"8\t",,terminal_output +211,477051,"TERMINAL",0,0,"9\t",,terminal_output +212,478141,"TERMINAL",0,0,"10\t",,terminal_output +213,479174,"TERMINAL",0,0,"1\t",,terminal_output +214,480181,"TERMINAL",0,0,"2\t",,terminal_output +215,481216,"TERMINAL",0,0,"3\t",,terminal_output +216,482342,"TERMINAL",0,0,"4\t",,terminal_output +217,483312,"TERMINAL",0,0,"5\t",,terminal_output +218,484348,"TERMINAL",0,0,"6\t",,terminal_output +219,485419,"TERMINAL",0,0,"7\t",,terminal_output +220,486431,"TERMINAL",0,0,"8\t",,terminal_output +221,487469,"TERMINAL",0,0,"9\t",,terminal_output +222,488523,"TERMINAL",0,0,"20\t",,terminal_output +223,489561,"TERMINAL",0,0,"1\t",,terminal_output +224,490606,"TERMINAL",0,0,"3\t",,terminal_output +225,491768,"TERMINAL",0,0,"4\t",,terminal_output +226,492678,"TERMINAL",0,0,"5\t",,terminal_output +227,493817,"TERMINAL",0,0,"6\t",,terminal_output +228,494833,"TERMINAL",0,0,"7\t",,terminal_output +229,495861,"TERMINAL",0,0,"8\t",,terminal_output +230,496844,"TERMINAL",0,0,"9\t",,terminal_output +231,497910,"TERMINAL",0,0,"30\t",,terminal_output +232,498910,"TERMINAL",0,0,"1\t",,terminal_output +233,500057,"TERMINAL",0,0,"2\t",,terminal_output +234,500996,"TERMINAL",0,0,"3\t",,terminal_output +235,502105,"TERMINAL",0,0,"4\t",,terminal_output +236,503128,"TERMINAL",0,0,"5\t",,terminal_output +237,504358,"TERMINAL",0,0,"60",,terminal_output +238,505487,"TERMINAL",0,0,"7\t",,terminal_output +239,506443,"TERMINAL",0,0,"8\t",,terminal_output +240,507487,"TERMINAL",0,0,"9\t",,terminal_output +241,508541,"TERMINAL",0,0,"40\t",,terminal_output +242,509584,"TERMINAL",0,0,"2\t",,terminal_output +243,510710,"TERMINAL",0,0,"3\t",,terminal_output +244,511666,"TERMINAL",0,0,"4\t",,terminal_output 
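A shape-only sketch of the right-shift trick in DynamicsAutoregressive above: padding one zero patch at the front of the spatial axis and dropping the last output position aligns the prediction at patch n with target token n while conditioning only on tokens 0..n-1. Values are illustrative, and an identity stands in for the ST-transformer.

```python
import jax.numpy as jnp

B, T, N, D = 1, 2, 4, 8
vid_embed = jnp.zeros((B, T, N, D))                             # action-conditioned embeddings

shifted = jnp.pad(vid_embed, ((0, 0), (0, 0), (1, 0), (0, 0)))  # (B, T, N + 1, D)

# dynamics(shifted) would return (B, T, N + 1, V); slicing off the last position
# leaves logits[:, :, n] depending only on patches 0..n-1 (plus the zero start),
# so the model never attends to the token it is asked to predict.
logits = shifted[:, :, :-1]
assert logits.shape == (B, T, N, D)
```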
+245,512756,"TERMINAL",0,0,"5\t",,terminal_output +246,513778,"TERMINAL",0,0,"6\t",,terminal_output +247,514802,"TERMINAL",0,0,"7\t",,terminal_output +248,515930,"TERMINAL",0,0,"8\t",,terminal_output +249,516864,"TERMINAL",0,0,"9\t",,terminal_output +250,517922,"TERMINAL",0,0,"50\t",,terminal_output +251,519103,"TERMINAL",0,0,"1\t",,terminal_output +252,520083,"TERMINAL",0,0,"2\t",,terminal_output +253,521152,"TERMINAL",0,0,"3\t",,terminal_output +254,522184,"TERMINAL",0,0,"4\t",,terminal_output +255,523207,"TERMINAL",0,0,"5\t",,terminal_output +256,524240,"TERMINAL",0,0,"6\t",,terminal_output +257,525459,"TERMINAL",0,0,"7\t",,terminal_output +258,526379,"TERMINAL",0,0,"8\t",,terminal_output +259,527373,"TERMINAL",0,0,"9\t",,terminal_output +260,528385,"TERMINAL",0,0,"4:00\t",,terminal_output +261,529438,"TERMINAL",0,0,"1\t",,terminal_output +262,530498,"TERMINAL",0,0,"26",,terminal_output +263,531502,"TERMINAL",0,0,"3\t",,terminal_output +264,532554,"TERMINAL",0,0,"4\t",,terminal_output +265,533589,"TERMINAL",0,0,"6\t",,terminal_output +266,534645,"TERMINAL",0,0,"7\t",,terminal_output +267,535795,"TERMINAL",0,0,"8\t",,terminal_output +268,536867,"TERMINAL",0,0,"9\t",,terminal_output +269,537788,"TERMINAL",0,0,"10\t",,terminal_output +270,538856,"TERMINAL",0,0,"1\t",,terminal_output +271,539890,"TERMINAL",0,0,"2\t",,terminal_output +272,541027,"TERMINAL",0,0,"3\t",,terminal_output +273,542050,"TERMINAL",0,0,"4\t",,terminal_output +274,543067,"TERMINAL",0,0,"5\t",,terminal_output +275,544092,"TERMINAL",0,0,"6\t",,terminal_output +276,545121,"TERMINAL",0,0,"7\t",,terminal_output +277,546155,"TERMINAL",0,0,"8\t",,terminal_output +278,547265,"TERMINAL",0,0,"9\t",,terminal_output +279,548431,"TERMINAL",0,0,"20\t",,terminal_output +280,549319,"TERMINAL",0,0,"1\t",,terminal_output +281,550384,"TERMINAL",0,0,"2\t",,terminal_output +282,551469,"TERMINAL",0,0,"3\t",,terminal_output +283,552411,"TERMINAL",0,0,"4\t",,terminal_output +284,553459,"TERMINAL",0,0,"5\t",,terminal_output +285,554491,"TERMINAL",0,0,"6\t",,terminal_output +286,555545,"TERMINAL",0,0,"7\t",,terminal_output +287,556586,"TERMINAL",0,0,"9\t",,terminal_output +288,557638,"TERMINAL",0,0,"30\t",,terminal_output +289,558684,"TERMINAL",0,0,"1\t",,terminal_output +290,559761,"TERMINAL",0,0,"2\t",,terminal_output +291,560745,"TERMINAL",0,0,"3\t",,terminal_output +292,561792,"TERMINAL",0,0,"4\t",,terminal_output +293,563077,"TERMINAL",0,0,"5\t",,terminal_output +294,563930,"TERMINAL",0,0,"6\t",,terminal_output +295,564979,"TERMINAL",0,0,"7\t",,terminal_output +296,566019,"TERMINAL",0,0,"8\t",,terminal_output +297,567014,"TERMINAL",0,0,"9\t",,terminal_output +298,568158,"TERMINAL",0,0,"40\t",,terminal_output +299,569114,"TERMINAL",0,0,"17",,terminal_output +300,570168,"TERMINAL",0,0,"2\t",,terminal_output +301,571229,"TERMINAL",0,0,"3\t",,terminal_output +302,572293,"TERMINAL",0,0,"4\t",,terminal_output +303,573481,"TERMINAL",0,0,"5\t",,terminal_output +304,574311,"TERMINAL",0,0,"6\t",,terminal_output +305,575417,"TERMINAL",0,0,"7\t",,terminal_output +306,576451,"TERMINAL",0,0,"8\t",,terminal_output +307,577434,"TERMINAL",0,0,"9\t",,terminal_output +308,578482,"TERMINAL",0,0,"50\t",,terminal_output +309,579534,"TERMINAL",0,0,"1\t",,terminal_output +310,580571,"TERMINAL",0,0,"3\t",,terminal_output +311,581616,"TERMINAL",0,0,"4\t",,terminal_output +312,582661,"TERMINAL",0,0,"5\t",,terminal_output +313,583706,"TERMINAL",0,0,"6\t",,terminal_output +314,584846,"TERMINAL",0,0,"7\t",,terminal_output 
+315,585971,"TERMINAL",0,0,"8\t",,terminal_output +316,586838,"TERMINAL",0,0,"9\t",,terminal_output +317,587914,"TERMINAL",0,0,"5:00\t",,terminal_output +318,588945,"TERMINAL",0,0,"1\t",,terminal_output +319,590140,"TERMINAL",0,0,"2\t",,terminal_output +320,591055,"TERMINAL",0,0,"3\t",,terminal_output +321,592034,"TERMINAL",0,0,"4\t",,terminal_output +322,593139,"TERMINAL",0,0,"5\t",,terminal_output +323,594164,"TERMINAL",0,0,"6\t",,terminal_output +324,595169,"TERMINAL",0,0,"7\t",,terminal_output +325,596325,"TERMINAL",0,0,"8\t",,terminal_output +326,597254,"TERMINAL",0,0,"9\t",,terminal_output +327,598395,"TERMINAL",0,0,"10\t",,terminal_output +328,599334,"TERMINAL",0,0,"1\t",,terminal_output +329,600401,"TERMINAL",0,0,"2\t",,terminal_output +330,601441,"TERMINAL",0,0,"3\t",,terminal_output +331,602451,"TERMINAL",0,0,"4\t",,terminal_output +332,603487,"TERMINAL",0,0,"5\t",,terminal_output +333,604531,"TERMINAL",0,0,"6\t",,terminal_output +334,605584,"TERMINAL",0,0,"8\t",,terminal_output +335,606617,"TERMINAL",0,0,"9\t",,terminal_output +336,607657,"TERMINAL",0,0,"20\t",,terminal_output +337,608807,"TERMINAL",0,0,"1\t",,terminal_output +338,609743,"TERMINAL",0,0,"2\t",,terminal_output +339,610030,"models/dynamics.py",3762,0,"",python,selection_mouse +340,610829,"TERMINAL",0,0,"3\t",,terminal_output +341,611888,"TERMINAL",0,0,"4\t",,terminal_output +342,612022,"TERMINAL",0,0,"[?25lso[?25h",,terminal_output +343,612084,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +344,612234,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +345,612352,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +346,612446,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +347,612685,"TERMINAL",0,0,"[?25le[?25h[?25l [?25h",,terminal_output +348,612758,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +349,612825,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +350,612923,"TERMINAL",0,0,"5\t",,terminal_output +351,613057,"TERMINAL",0,0,"env/",,terminal_output +352,613182,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +353,613286,"TERMINAL",0,0,"in/",,terminal_output +354,613478,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +355,613585,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +356,613677,"TERMINAL",0,0,"tivate",,terminal_output +357,613939,"TERMINAL",0,0,"[?25l[?2004l\r[?25h]0;tum_cte0515@hkn0706:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0706 jafar]$ ",,terminal_output +358,613939,"TERMINAL",0,0,"6\t",,terminal_output +359,615155,"TERMINAL",0,0,"7\t",,terminal_output +360,616009,"TERMINAL",0,0,"8\t",,terminal_output +361,617213,"TERMINAL",0,0,"9\t",,terminal_output +362,618061,"TERMINAL",0,0,"30\t",,terminal_output +363,618967,"TERMINAL",0,0,"[?25lcd[?25h",,terminal_output +364,619030,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +365,619140,"TERMINAL",0,0,"1\t",,terminal_output +366,619208,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +367,619877,"TERMINAL",0,0,"",,terminal_output +368,620041,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +369,620173,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +370,620206,"TERMINAL",0,0,"2\t",,terminal_output +371,620272,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +372,620409,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +373,620480,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +374,620534,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +375,620596,"TERMINAL",0,0,"[?25l[?2004l\r]0;tum_cte0515@hkn0706:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ [?25h",,terminal_output +376,621189,"TERMINAL",0,0,"3\t",,terminal_output +377,622236,"TERMINAL",0,0,"4\t",,terminal_output 
+378,623354,"TERMINAL",0,0,"5\t",,terminal_output +379,624324,"TERMINAL",0,0,"6\t",,terminal_output +380,625445,"TERMINAL",0,0,"75",,terminal_output +381,625598,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n# source .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\ndynamics_ckpt_dir=$1\necho $dynamics_ckpt_dir\n\nenv | grep SLURM\n\nsrun python sample.py \\n --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --seed=69 \\n --batch_size=10 \\n --start_frame=0 \\n --data_dir $array_records_dir\n\n# srun python sample.py \\n # --checkpoint $dynamics_ckpt_dir \\n # --start_frame=0 \\n # --batch_size=12 \\n # --seq_len=2 \\n # --data_dir $array_records_dir\n",shellscript,tab +382,626448,"TERMINAL",0,0,"8\t",,terminal_output +383,627454,"TERMINAL",0,0,"9\t",,terminal_output +384,628502,"TERMINAL",0,0,"40\t",,terminal_output +385,629529,"TERMINAL",0,0,"1\t",,terminal_output +386,630575,"TERMINAL",0,0,"3\t",,terminal_output +387,631621,"TERMINAL",0,0,"4\t",,terminal_output +388,632638,"TERMINAL",0,0,"5\t",,terminal_output +389,633703,"TERMINAL",0,0,"6\t",,terminal_output +390,634827,"TERMINAL",0,0,"7\t",,terminal_output +391,635782,"TERMINAL",0,0,"8\t",,terminal_output +392,636236,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",787,0,"",shellscript,selection_mouse +393,636815,"TERMINAL",0,0,"9\t",,terminal_output +394,637864,"TERMINAL",0,0,"50\t",,terminal_output +395,638919,"TERMINAL",0,0,"1\t",,terminal_output +396,639940,"TERMINAL",0,0,"2\t",,terminal_output +397,640168,"TERMINAL",0,0,"[?25lvi[?25h",,terminal_output +398,640270,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +399,640445,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +400,640921,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +401,641018,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +402,641019,"TERMINAL",0,0,"3\t",,terminal_output +403,641427,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +404,641529,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +405,641596,"TERMINAL",0,0,"urm",,terminal_output +406,642047,"TERMINAL",0,0,"4\t",,terminal_output +407,642127,"TERMINAL",0,0,"",,terminal_output +408,642835,"TERMINAL",0,0,"[?25l/[?25h",,terminal_output +409,643049,"TERMINAL",0,0,"",,terminal_output +410,643119,"TERMINAL",0,0,"5\t",,terminal_output +411,643220,"TERMINAL",0,0,"\r\ncommon/ dev/ .git/ jobs/ README.md templates/ utils/ \r\n(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ vim slurm/",,terminal_output +412,644157,"TERMINAL",0,0,"6\t",,terminal_output +413,644726,"TERMINAL",0,0,"d",,terminal_output +414,644833,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +415,645042,"TERMINAL",0,0,"v/",,terminal_output +416,645172,"TERMINAL",0,0,"7\t",,terminal_output +417,646248,"TERMINAL",0,0,"[?25lj[?25h",,terminal_output +418,646248,"TERMINAL",0,0,"8\t",,terminal_output +419,646345,"TERMINAL",0,0,"",,terminal_output +420,647313,"TERMINAL",0,0,"9\t",,terminal_output +421,648319,"TERMINAL",0,0,"6:00\t",,terminal_output +422,648451,"TERMINAL",0,0,"",,terminal_output 
+423,648587,"TERMINAL",0,0,"\r\nalfred/ franz/ mihir/ \r\n(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ vim slurm/dev/",,terminal_output +424,649157,"TERMINAL",0,0,"m",,terminal_output +425,649260,"TERMINAL",0,0,"ihir/",,terminal_output +426,649339,"TERMINAL",0,0,"1\t",,terminal_output +427,649923,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +428,650038,"TERMINAL",0,0,"oreka/",,terminal_output +429,650403,"TERMINAL",0,0,"2\t",,terminal_output +430,650568,"TERMINAL",0,0,"",,terminal_output +431,650741,"TERMINAL",0,0,"\r\npreprocess_dataset.sbatch train_dynamics.sh train_tokenizer_coinrun.sbatch \r\nsync_runner.sh train_lam.sh train_tokenizer.sh \r\n(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ vim slurm/dev/mihir/horeka/",,terminal_output +432,651408,"TERMINAL",0,0,"3\t",,terminal_output +433,652450,"TERMINAL",0,0,"4\t",,terminal_output +434,653502,"TERMINAL",0,0,"5\t",,terminal_output +435,654541,"TERMINAL",0,0,"6\t",,terminal_output +436,655577,"TERMINAL",0,0,"8\t",,terminal_output +437,656606,"TERMINAL",0,0,"9\t",,terminal_output +438,657657,"TERMINAL",0,0,"10\t",,terminal_output +439,658703,"TERMINAL",0,0,"1\t",,terminal_output +440,659780,"TERMINAL",0,0,"2\t",,terminal_output +441,660892,"TERMINAL",0,0,"3\t",,terminal_output +442,661867,"TERMINAL",0,0,"4\t",,terminal_output +443,662314,"TERMINAL",0,0,"",,terminal_output +444,662530,"TERMINAL",0,0,"",,terminal_output +445,662977,"TERMINAL",0,0,"5\t",,terminal_output +446,664105,"TERMINAL",0,0,"6\t",,terminal_output +447,665038,"TERMINAL",0,0,"7\t",,terminal_output +448,665269,"TERMINAL",0,0,"bash",,terminal_focus +449,666052,"TERMINAL",0,0,"8\t",,terminal_output +450,667076,"TERMINAL",0,0,"9\t",,terminal_output +451,668334,"TERMINAL",0,0,"20\t",,terminal_output +452,669142,"TERMINAL",0,0,"1\t",,terminal_output +453,670355,"TERMINAL",0,0,"2\t",,terminal_output +454,671410,"TERMINAL",0,0,"3\t",,terminal_output +455,672295,"TERMINAL",0,0,"4\t",,terminal_output +456,673321,"TERMINAL",0,0,"56",,terminal_output +457,674365,"TERMINAL",0,0,"6\t",,terminal_output +458,675412,"TERMINAL",0,0,"7\t",,terminal_output +459,676497,"TERMINAL",0,0,"8\t",,terminal_output +460,677485,"TERMINAL",0,0,"9\t",,terminal_output +461,678501,"TERMINAL",0,0,"30\t",,terminal_output +462,679535,"TERMINAL",0,0,"1\t",,terminal_output +463,680612,"TERMINAL",0,0,"3\t",,terminal_output +464,681615,"TERMINAL",0,0,"4\t",,terminal_output +465,682663,"TERMINAL",0,0,"5\t",,terminal_output +466,683719,"TERMINAL",0,0,"6\t",,terminal_output +467,684792,"TERMINAL",0,0,"7\t",,terminal_output +468,685786,"TERMINAL",0,0,"8\t",,terminal_output +469,686785,"TERMINAL",0,0,"srun",,terminal_focus +470,686865,"TERMINAL",0,0,"9\t",,terminal_output +471,687475,"TERMINAL",0,0,"bash",,terminal_focus +472,687879,"TERMINAL",0,0,"40\t",,terminal_output +473,688934,"TERMINAL",0,0,"1\t",,terminal_output +474,690002,"TERMINAL",0,0,"2\t",,terminal_output +475,691019,"TERMINAL",0,0,"3\t",,terminal_output +476,692059,"TERMINAL",0,0,"4\t",,terminal_output +477,693099,"TERMINAL",0,0,"5\t",,terminal_output +478,694209,"TERMINAL",0,0,"6\t",,terminal_output +479,695189,"TERMINAL",0,0,"7\t",,terminal_output +480,696264,"TERMINAL",0,0,"8\t",,terminal_output +481,697319,"TERMINAL",0,0,"9\t",,terminal_output +482,698440,"TERMINAL",0,0,"50\t",,terminal_output +483,699371,"TERMINAL",0,0,"1\t",,terminal_output +484,700421,"TERMINAL",0,0,"2\t",,terminal_output +485,701452,"TERMINAL",0,0,"3\t",,terminal_output +486,702504,"TERMINAL",0,0,"4\t",,terminal_output +487,703546,"TERMINAL",0,0,"5\t",,terminal_output 
+488,704619,"TERMINAL",0,0,"7\t",,terminal_output +489,705631,"TERMINAL",0,0,"8\t",,terminal_output +490,706688,"TERMINAL",0,0,"9\t",,terminal_output +491,707745,"TERMINAL",0,0,"7:00\t",,terminal_output +492,708796,"TERMINAL",0,0,"1\t",,terminal_output +493,709810,"TERMINAL",0,0,"2\t",,terminal_output +494,710906,"TERMINAL",0,0,"3\t",,terminal_output +495,711872,"TERMINAL",0,0,"4\t",,terminal_output +496,713058,"TERMINAL",0,0,"5\t",,terminal_output +497,713965,"TERMINAL",0,0,"6\t",,terminal_output +498,715029,"TERMINAL",0,0,"7\t",,terminal_output +499,715844,"TERMINAL",0,0,"cp -r slurm/dev/mihir/horeka/* ../jafar_jobs/slurm/dev/mihir/horeka/",,terminal_command +500,715881,"TERMINAL",0,0,"]633;E;2025-07-28 17:57:08 cp -r slurm/dev/mihir/horeka/* ../jafar_jobs/slurm/dev/mihir/horeka/;252991b4-29d1-4b67-8848-3bd7bccaeb47]633;C",,terminal_output +501,716054,"TERMINAL",0,0,"]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +502,716068,"TERMINAL",0,0,"8\t",,terminal_output +503,717093,"TERMINAL",0,0,"9\t",,terminal_output +504,717975,"TERMINAL",0,0,"srun",,terminal_focus +505,718134,"TERMINAL",0,0,"10\t",,terminal_output +506,718882,"TERMINAL",0,0,"l",,terminal_output +507,718995,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +508,719144,"TERMINAL",0,0,"[?25l[?2004l\r[?25h",,terminal_output +509,719185,"TERMINAL",0,0,"1\t",,terminal_output +510,719205,"TERMINAL",0,0,"debug generate_dataset.py local-logs README.md sample.py_bak slurm-3359334.out train_tokenizer.py\r\ndiff.diff generation_1753196800.0453017.gif logs read_tf_record.py scripts_cremers slurm-3359338.out utils\r\ndiff.log genie.py models requeuer.log scripts_horeka tests wandb\r\nframe-knoms.png gifs overfit_dir requirements-franz.txt slurm train_dynamics.py weekend-job-requeuer.sh\r\nframe.png input_pipeline overfit_dir.zip requirements.txt slurm-3309772.out train_lam.py weekend-job-starter.sh\r\nframes LICENSE __pycache__ sample.py slurm-3359333.out train_tokenizer_bak.py\r\n]0;tum_cte0515@hkn0706:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ ",,terminal_output +511,720260,"TERMINAL",0,0,"2\t",,terminal_output +512,721342,"TERMINAL",0,0,"3\t",,terminal_output +513,722299,"TERMINAL",0,0,"4\t",,terminal_output +514,723344,"TERMINAL",0,0,"5\t",,terminal_output +515,723504,"TERMINAL",0,0,"ls",,terminal_output +516,724157,"TERMINAL",0,0,"runner",,terminal_output +517,724426,"TERMINAL",0,0,"6\t",,terminal_output +518,724620,"TERMINAL",0,0,"ls",,terminal_output +519,724777,"TERMINAL",0,0,"",,terminal_output +520,724945,"TERMINAL",0,0,"",,terminal_output +521,725442,"TERMINAL",0,0,"7\t",,terminal_output +522,726476,"TERMINAL",0,0,"8\t",,terminal_output +523,727000,"TERMINAL",0,0,"vim slurm/dev/mihir/horeka/",,terminal_output +524,727516,"TERMINAL",0,0,"9\t",,terminal_output +525,727849,"TERMINAL",0,0,"vim slurm/dev/mihir/horeka/",,terminal_output +526,727902,"TERMINAL",0,0,"\r\ncausal_fit_modelsizes/ overfit_sample/ sync_runner.sh train_tokenizer_coinrun.sbatch \r\noverfit_batch/ overfit_sample_tiny/ train_dynamics.sh train_tokenizer.sh \r\noverfit_batch_tiny/ preprocess_dataset.sbatch train_lam.sh yolo-runs/ \r\n(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ vim slurm/dev/mihir/horeka/",,terminal_output +527,728552,"TERMINAL",0,0,"20\t",,terminal_output +528,729590,"TERMINAL",0,0,"2\t",,terminal_output +529,730622,"TERMINAL",0,0,"3\t",,terminal_output +530,731134,"TERMINAL",0,0,"y",,terminal_output +531,731234,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output 
+532,731402,"TERMINAL",0,0,"lo-runs/",,terminal_output +533,731665,"TERMINAL",0,0,"4\t",,terminal_output +534,732124,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +535,732508,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +536,732577,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +537,732678,"TERMINAL",0,0,"pling",,terminal_output +538,732742,"TERMINAL",0,0,"5\t",,terminal_output +539,733398,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +540,733773,"TERMINAL",0,0,"6\t",,terminal_output +541,733849,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +542,733988,"TERMINAL",0,0,"ev.sh ",,terminal_output +543,734520,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h[>4;2m[?1h=[?2004h[?1004h[?12h[?12l",,terminal_output +544,734626,"TERMINAL",0,0,"[?25l""slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh"" 40L, 1040B▽ Pzz\[0%m [>c]10;?]11;?# Log the sbatch scriptcat $0module unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --seq_len=2 \\r\n --seed=69 \1,0-1Top[?25h",,terminal_output +545,734717,"TERMINAL",0,0,"P+q436f\P+q6b75\P+q6b64\P+q6b72\P+q6b6c\P+q2332\P+q2334\P+q2569\P+q2a37\P+q6b31\[?12$p[?25l/3333/3333 [?25h[?25l/f6f6/e3e3 [?25h",,terminal_output +546,734794,"TERMINAL",0,0,"7\t",,terminal_output +547,735217,"TERMINAL",0,0,"[?25lj 2,1 [?25h",,terminal_output +548,735795,"TERMINAL",0,0,"[?25lj 3[?25h[?25lj 4,0-1[?25h[?25lj 5,1 [?25h[?25lj 6[?25h[?25lj 7[?25h",,terminal_output +549,735897,"TERMINAL",0,0,"8\t",,terminal_output +550,735898,"TERMINAL",0,0,"[?25lj 8,0-1[?25h[?25lj 9,1 [?25h[?25l10,0-1[?25h[?25lj 1,1 [?25h",,terminal_output +551,736006,"TERMINAL",0,0,"[?25lj 2[?25h[?25lj 3,0-1[?25h[?25lj 4,1 [?25h",,terminal_output +552,736079,"TERMINAL",0,0,"[?25lj 5[?25h[?25lj 6,0-1[?25h",,terminal_output +553,736198,"TERMINAL",0,0,"[?25l7,1 [?25h[?25lj 8[?25h[?25lj 9[?25h[?25lj 20,0-1[?25h[?25lj 1,1 [?25h",,terminal_output +554,736322,"TERMINAL",0,0,"[?25lj 2,0-1[?25h[?25lj 3,1 [?25h[?25lj 4[?25h",,terminal_output +555,736403,"TERMINAL",0,0,"[?25lj 5[?25h[?25lj \r\n--batch_size=10 \26,110%[?25h[?25lj \r\n--start_frame=0 \27,120%[?25h",,terminal_output +556,736539,"TERMINAL",0,0,"[?25lj \r\n--data_dir $array_records_dir28,130%[?25h[?25lj \r\n29,140%[?25h[?25lj \r\n# srun python sample.py \30,150%[?25h[?25lj \r\n# --checkpoint $dynamics_ckpt_dir \31,160%[?25h[?25lj \r\n# --start_frame=0 \32,170%[?25h",,terminal_output +557,736616,"TERMINAL",0,0,"[?25lj \r\n# --batch_size=12 \33,180%[?25h[?25lj \r\n# --seq_len=2 \34,0-190%[?25h",,terminal_output +558,736698,"TERMINAL",0,0,"[?25lj \r\n# --data_dir $array_records_dir35,1Bot[?25h[?25lj 6[?25h",,terminal_output +559,736894,"TERMINAL",0,0,"9\t",,terminal_output +560,737098,"TERMINAL",0,0,"[?25lk 5[?25h",,terminal_output +561,737373,"TERMINAL",0,0,"[?25lk 4,0-1[?25h",,terminal_output +562,737480,"TERMINAL",0,0,"[?25lk 3,1 [?25h",,terminal_output 
+563,737613,"TERMINAL",0,0,"[?25lk 2[?25h",,terminal_output +564,737787,"TERMINAL",0,0,"[?25lk 1[?25h",,terminal_output +565,737957,"TERMINAL",0,0,"[?25lk 0[?25h",,terminal_output +566,737957,"TERMINAL",0,0,"30\t",,terminal_output +567,738869,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +568,738972,"TERMINAL",0,0,"17",,terminal_output +569,739026,"TERMINAL",0,0,"[?25ld \r\n~ 30,5Bot[?25h",,terminal_output +570,739935,"TERMINAL",0,0,"[?25l^[",,terminal_output +571,740010,"TERMINAL",0,0," ^[ [?25h",,terminal_output +572,740011,"TERMINAL",0,0,"2\t",,terminal_output +573,741112,"TERMINAL",0,0,"3\t",,terminal_output +574,741188,"TERMINAL",0,0,"[?25l::[?25h",,terminal_output +575,741380,"TERMINAL",0,0,"w",,terminal_output +576,741473,"TERMINAL",0,0,"q",,terminal_output +577,741788,"TERMINAL",0,0,"\r[?25l[?2004l[>4;m""slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh"" 39L, 1024B written",,terminal_output +578,741894,"TERMINAL",0,0,"\r\r\r\n[?1004l[?2004l[?1l>[?25h[>4;m[?1049l]0;tum_cte0515@hkn0706:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ ",,terminal_output +579,742082,"TERMINAL",0,0,"4\t",,terminal_output +580,743176,"TERMINAL",0,0,"5\t",,terminal_output +581,743789,"TERMINAL",0,0,"vim slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh ",,terminal_output +582,744146,"TERMINAL",0,0,"\r",,terminal_output +583,744156,"TERMINAL",0,0,"6\t",,terminal_output +584,744789,"TERMINAL",0,0,"vim slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh ",,terminal_output +585,745230,"TERMINAL",0,0,"7\t",,terminal_output +586,746234,"TERMINAL",0,0,"8\t",,terminal_output +587,746836,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +588,747005,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +589,747313,"TERMINAL",0,0,"9\t",,terminal_output +590,747506,"TERMINAL",0,0,"[1@s",,terminal_output +591,747578,"TERMINAL",0,0,"[1@h",,terminal_output +592,748133,"TERMINAL",0,0,"[?25lsh[?25h",,terminal_output +593,748312,"TERMINAL",0,0,"",,terminal_output +594,748347,"TERMINAL",0,0,"40\t",,terminal_output +595,748483,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +596,748551,"TERMINAL",0,0,"[?25l [1@s[?25h",,terminal_output +597,748618,"TERMINAL",0,0,"[1@h",,terminal_output +598,748827,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --seq_len=2 \\r\n --batch_size=10 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
+599,749026,"TERMINAL",0,0,"\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=653563\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0706\r\nSLURM_JOB_START_TIME=1753717895\r\nSLURM_STEP_NODELIST=hkn0706\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753721495\r\nSLURM_PMI2_SRUN_PORT=35695\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3380176\r\nSLURM_PTY_PORT=33197\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=31\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e8.hkn0706\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0706\r\nSLURM_SRUN_COMM_PORT=44649\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3380176\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0706\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=44649\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0706\r\n",,terminal_output +600,749170,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +601,749362,"TERMINAL",0,0,"1\t",,terminal_output +602,750504,"TERMINAL",0,0,"2\t",,terminal_output +603,751556,"TERMINAL",0,0,"3\t",,terminal_output +604,752475,"TERMINAL",0,0,"4\t",,terminal_output +605,753512,"TERMINAL",0,0,"5\t",,terminal_output +606,754552,"TERMINAL",0,0,"6\t",,terminal_output +607,755606,"TERMINAL",0,0,"8\t",,terminal_output +608,756647,"TERMINAL",0,0,"9\t",,terminal_output +609,757703,"TERMINAL",0,0,"50\t",,terminal_output +610,758822,"TERMINAL",0,0,"1\t",,terminal_output +611,759795,"TERMINAL",0,0,"2\t",,terminal_output +612,760872,"TERMINAL",0,0,"3\t",,terminal_output +613,761897,"TERMINAL",0,0,"4\t",,terminal_output +614,762686,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n╭─ Parsing error ────────────────────────────╮\r\n│ Argument --checkpoint: expected 1 argument │\r\n│ ────────────────────────────────────────── │\r\n│ For full helptext, run sample.py --help │\r\n╰────────────────────────────────────────────╯\r\n",,terminal_output +615,762742,"TERMINAL",0,0,"srun: error: hkn0706: task 0: Exited with exit code 2\r\n]0;tum_cte0515@hkn0706:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ ",,terminal_output +616,762944,"TERMINAL",0,0,"5\t",,terminal_output +617,764149,"TERMINAL",0,0,"6\t",,terminal_output +618,765075,"TERMINAL",0,0,"7\t",,terminal_output +619,766074,"TERMINAL",0,0,"8\t",,terminal_output +620,767126,"TERMINAL",0,0,"9\t",,terminal_output +621,768354,"TERMINAL",0,0,"8:00\t",,terminal_output +622,769277,"TERMINAL",0,0,"1\t",,terminal_output +623,770398,"TERMINAL",0,0,"2\t",,terminal_output +624,771282,"TERMINAL",0,0,"3\t",,terminal_output +625,772319,"TERMINAL",0,0,"4\t",,terminal_output +626,773438,"TERMINAL",0,0,"5\t",,terminal_output +627,774425,"TERMINAL",0,0,"6\t",,terminal_output +628,775515,"TERMINAL",0,0,"7\t",,terminal_output +629,776484,"TERMINAL",0,0,"8\t",,terminal_output +630,777535,"TERMINAL",0,0,"9\t",,terminal_output +631,778576,"TERMINAL",0,0,"11\t",,terminal_output +632,779612,"TERMINAL",0,0,"2\t",,terminal_output +633,780662,"TERMINAL",0,0,"3\t",,terminal_output +634,781707,"TERMINAL",0,0,"4\t",,terminal_output +635,782749,"TERMINAL",0,0,"5\t",,terminal_output +636,783815,"TERMINAL",0,0,"6\t",,terminal_output +637,784834,"TERMINAL",0,0,"7\t",,terminal_output +638,785868,"TERMINAL",0,0,"8\t",,terminal_output +639,787004,"TERMINAL",0,0,"91",,terminal_output +640,788011,"TERMINAL",0,0,"20\t",,terminal_output +641,789033,"TERMINAL",0,0,"1\t",,terminal_output +642,790270,"TERMINAL",0,0,"2\t",,terminal_output +643,791290,"TERMINAL",0,0,"3\t",,terminal_output +644,792145,"TERMINAL",0,0,"4\t",,terminal_output +645,793235,"TERMINAL",0,0,"5\t",,terminal_output +646,794308,"TERMINAL",0,0,"6\t",,terminal_output +647,795296,"TERMINAL",0,0,"7\t",,terminal_output +648,796408,"TERMINAL",0,0,"8\t",,terminal_output +649,797381,"TERMINAL",0,0,"9\t",,terminal_output +650,798457,"TERMINAL",0,0,"30\t",,terminal_output +651,799495,"TERMINAL",0,0,"1\t",,terminal_output +652,800518,"TERMINAL",0,0,"2\t",,terminal_output +653,801566,"TERMINAL",0,0,"3\t",,terminal_output +654,802594,"TERMINAL",0,0,"5\t",,terminal_output +655,803638,"TERMINAL",0,0,"6\t",,terminal_output +656,804691,"TERMINAL",0,0,"7\t",,terminal_output +657,805749,"TERMINAL",0,0,"8\t",,terminal_output +658,806762,"TERMINAL",0,0,"9\t",,terminal_output +659,807776,"TERMINAL",0,0,"40\t",,terminal_output +660,808822,"TERMINAL",0,0,"1\t",,terminal_output +661,809867,"TERMINAL",0,0,"2\t",,terminal_output +662,810916,"TERMINAL",0,0,"3\t",,terminal_output +663,811949,"TERMINAL",0,0,"4\t",,terminal_output +664,812985,"TERMINAL",0,0,"5\t",,terminal_output +665,814033,"TERMINAL",0,0,"6\t",,terminal_output +666,815146,"TERMINAL",0,0,"7\t",,terminal_output +667,816169,"TERMINAL",0,0,"81",,terminal_output +668,817301,"TERMINAL",0,0,"9\t",,terminal_output +669,818203,"TERMINAL",0,0,"50\t",,terminal_output +670,819242,"TERMINAL",0,0,"1\t",,terminal_output +671,820283,"TERMINAL",0,0,"2\t",,terminal_output +672,821344,"TERMINAL",0,0,"3\t",,terminal_output +673,822369,"TERMINAL",0,0,"4\t",,terminal_output +674,823399,"TERMINAL",0,0,"5\t",,terminal_output 
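The boxed tyro error above is a direct consequence of invoking the script without its positional argument (`sh ... sampling_dev.sh` with no path): `$dynamics_ckpt_dir` expands to nothing, so `--checkpoint` is passed without a value. A hedged sketch of how sample.py's flags plausibly map onto a tyro-parsed dataclass; tyro itself is visible in the traceback, but the actual Args class in sample.py may differ.

```python
from dataclasses import dataclass
import tyro

@dataclass
class Args:
    checkpoint: str              # required: source of "--checkpoint: expected 1 argument"
    data_dir: str
    dyna_dim: int = 1024
    dyna_num_blocks: int = 16
    dyna_num_heads: int = 16
    dyna_ffn_dim: int = 4096
    seq_len: int = 2
    batch_size: int = 10
    start_frame: int = 0
    seed: int = 69

args = tyro.cli(Args)            # prints the boxed parsing error and exits on bad input
```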
+675,824524,"TERMINAL",0,0,"6\t",,terminal_output +676,825482,"TERMINAL",0,0,"7\t",,terminal_output +677,826515,"TERMINAL",0,0,"8\t",,terminal_output +678,827144,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab +679,827566,"TERMINAL",0,0,"9\t",,terminal_output +680,828606,"TERMINAL",0,0,"9:01\t",,terminal_output +681,829655,"TERMINAL",0,0,"2\t",,terminal_output +682,830702,"TERMINAL",0,0,"3\t",,terminal_output +683,831746,"TERMINAL",0,0,"4\t",,terminal_output +684,832464,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",542,0,"",shellscript,selection_mouse +685,832791,"TERMINAL",0,0,"5\t",,terminal_output +686,833886,"TERMINAL",0,0,"6\t",,terminal_output +687,834907,"TERMINAL",0,0,"7\t",,terminal_output +688,835937,"TERMINAL",0,0,"8\t",,terminal_output +689,836956,"TERMINAL",0,0,"watch",,terminal_focus +690,836989,"TERMINAL",0,0,"9\t",,terminal_output +691,837971,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +692,840403,"TERMINAL",0,0,"cd $ws_dir",,terminal_command +693,840420,"TERMINAL",0,0,"]633;E;2025-07-28 17:59:12 cd $ws_dir;54c1b5ea-730a-4f9f-ba91-1c5dfbf41461]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared]633;D;0",,terminal_output +694,842042,"TERMINAL",0,0,"cd checkpoints/",,terminal_command +695,842198,"TERMINAL",0,0,"ls",,terminal_command +696,842248,"TERMINAL",0,0,"]633;E;2025-07-28 17:59:14 ls;54c1b5ea-730a-4f9f-ba91-1c5dfbf41461]633;C",,terminal_output +697,842348,"TERMINAL",0,0,"0000 3290440 3292333 3294603 3297577 3299062 3300233 3301031 3311672 causal lam_main_test train_dyn_causal_500M\r\n3290283 3291405 3292334 3296502 3297578 3299063 3300290 3306801 3313562 checkpoints_alfred maskgit-maskprob-fix train_dyn_new_arch-bugfixed-spatial-shift\r\n3290284 3292213 3292335 3296540 3297582 3299065 3300658 3307618 3313563 coinrun tokenizer train_dyn_new_arch-bugfixed-temporal-shift\r\n3290295 3292221 3292336 3296571 3297586 3299066 3300663 3307619 3313564 debug tokenizer_ckpt_dir train_dyn_yolorun_new_arch\r\n3290296 3292258 3292337 3296573 3297606 3299068 3300672 3309662 3313565 dyn train_dynamics_lr_schedule_const train_lam_minecraft_overfit_sample\r\n3290366 3292328 3292338 3296574 3297671 3299069 3301025 3309663 3313570 dynamics_ckpt_dir train_dynamics_lr_schedule_cos train_tokenizer_batch_size_scaling_16_node\r\n3290367 3292329 3292339 3296575 3297693 3299258 3301026 3309699 3313571 interactive train_dynamics_lr_schedule_wsd train_tokenizer_minecraft_overfit_sample\r\n3290391 3292330 3294600 3297569 3297706 3299259 3301027 3310436 3313572 lam train_dyn_causal_180M wrap\r\n3290392 3292331 3294601 3297575 3297727 3299272 3301029 3310437 3316022 lam-1-action train_dyn_causal_255M\r\n3290439 3292332 3294602 3297576 3299016 3299579 3301030 3311671 big-runs lam_ckpt_dir train_dyn_causal_356M\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints]633;D;0",,terminal_output +698,844194,"TERMINAL",0,0,"cd maskgit-maskprob-fix/",,terminal_command +699,844568,"TERMINAL",0,0,"ls",,terminal_command +700,844625,"TERMINAL",0,0,"]633;E;2025-07-28 17:59:17 ls;54c1b5ea-730a-4f9f-ba91-1c5dfbf41461]633;Cinteractive train_dynamics_maskprob_fix_2_node train_dynamics_maskprob_fix_2_node_80M train_dynamics_maskprob_fix_8_node train_dynamics_maskprob_fix_8_node_80M\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix]633;D;0",,terminal_output +701,847627,"TERMINAL",0,0,"cd 
train_dynamics_maskprob_fix_8_node",,terminal_command +702,848077,"TERMINAL",0,0,"ls",,terminal_command +703,848122,"TERMINAL",0,0,"]633;E;2025-07-28 17:59:20 ls;54c1b5ea-730a-4f9f-ba91-1c5dfbf41461]633;C3370788 3371237\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node]633;D;0",,terminal_output +704,852811,"TERMINAL",0,0,"cd 3371237/",,terminal_command +705,852851,"TERMINAL",0,0,"]633;E;2025-07-28 17:59:25 cd 3371237/;54c1b5ea-730a-4f9f-ba91-1c5dfbf41461]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237]633;D;0",,terminal_output +706,853231,"TERMINAL",0,0,"ls",,terminal_command +707,853291,"TERMINAL",0,0,"]633;E;2025-07-28 17:59:25 ls;54c1b5ea-730a-4f9f-ba91-1c5dfbf41461]633;C",,terminal_output +708,853402,"TERMINAL",0,0,"020000 040000 053000 054000 055000\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237]633;D;0",,terminal_output +709,854900,"TERMINAL",0,0,"pwd",,terminal_command +710,858297,"TERMINAL",0,0,"srun",,terminal_focus +711,859408,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh ",,terminal_output +712,860634,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237",,terminal_output +713,862357,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --seq_len=2 \\r\n --batch_size=10 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
+714,862472,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=653563\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0706\r\nSLURM_JOB_START_TIME=1753717895\r\nSLURM_STEP_NODELIST=hkn0706\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753721495\r\nSLURM_PMI2_SRUN_PORT=35695\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3380176\r\nSLURM_PTY_PORT=33197\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=31\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e8.hkn0706\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0706\r\nSLURM_SRUN_COMM_PORT=44649\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3380176\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0706\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=44649\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0706\r\n",,terminal_output +715,862612,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +716,863615,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +717,866419,"TERMINAL",0,0,"2025-07-28 17:59:38.881900: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +718,880594,"TERMINAL",0,0,"2025-07-28 17:59:53.070871: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +719,904121,"sample.py",0,0,"",python,tab +720,904457,"TERMINAL",0,0,"2025-07-28 18:00:16.911538: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +721,906115,"sample.py",5742,0,"",python,selection_mouse +722,906154,"sample.py",5741,0,"",python,selection_command +723,906877,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 101, in \r\n ckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\r\n restored = self._restore(directory, args=ckpt_args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\r\n return self._handler.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 803, in restore\r\n structure, use_zarr3_metadata = self._get_internal_metadata(directory)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 959, in _get_internal_metadata\r\n raise FileNotFoundError(\r\nFileNotFoundError: No structure could be identified for the checkpoint at /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237.\r\n",,terminal_output +724,906909,"sample.py",5718,24,"print(video_batch.dtype)",python,selection_command +725,907131,"sample.py",5684,58,"video_batch = video_batch / 255.0\nprint(video_batch.dtype)",python,selection_command +726,907281,"sample.py",5659,83,"print(video_batch.dtype)\nvideo_batch = video_batch / 255.0\nprint(video_batch.dtype)",python,selection_command +727,907413,"sample.py",5604,138,"video_batch = video_batch.astype(args.dtype) # / 255.0\nprint(video_batch.dtype)\nvideo_batch = video_batch / 255.0\nprint(video_batch.dtype)",python,selection_command +728,907560,"sample.py",5579,163,"print(video_batch.dtype)\nvideo_batch = video_batch.astype(args.dtype) # / 255.0\nprint(video_batch.dtype)\nvideo_batch = video_batch / 255.0\nprint(video_batch.dtype)",python,selection_command +729,907665,"sample.py",5542,200,"video_batch = jnp.array(video_batch)\nprint(video_batch.dtype)\nvideo_batch = video_batch.astype(args.dtype) # / 255.0\nprint(video_batch.dtype)\nvideo_batch = video_batch / 255.0\nprint(video_batch.dtype)",python,selection_command +730,907793,"sample.py",5542,0,"",python,selection_command +731,908091,"TERMINAL",0,0,"srun: error: hkn0706: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0706:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ ",,terminal_output +732,928635,"sample.py",5542,36,"video_batch = 
jnp.array(video_batch)",python,selection_command +733,929549,"sample.py",5542,61,"video_batch = jnp.array(video_batch)\nprint(video_batch.dtype)",python,selection_command +734,929733,"sample.py",5542,116,"video_batch = jnp.array(video_batch)\nprint(video_batch.dtype)\nvideo_batch = video_batch.astype(args.dtype) # / 255.0",python,selection_command +735,929946,"sample.py",5542,141,"video_batch = jnp.array(video_batch)\nprint(video_batch.dtype)\nvideo_batch = video_batch.astype(args.dtype) # / 255.0\nprint(video_batch.dtype)",python,selection_command +736,930094,"sample.py",5542,175,"video_batch = jnp.array(video_batch)\nprint(video_batch.dtype)\nvideo_batch = video_batch.astype(args.dtype) # / 255.0\nprint(video_batch.dtype)\nvideo_batch = video_batch / 255.0",python,selection_command +737,930613,"sample.py",5542,200,"video_batch = jnp.array(video_batch)\nprint(video_batch.dtype)\nvideo_batch = video_batch.astype(args.dtype) # / 255.0\nprint(video_batch.dtype)\nvideo_batch = video_batch / 255.0\nprint(video_batch.dtype)",python,selection_command +738,952439,"sample.py",5742,0,"",python,selection_mouse +739,952444,"sample.py",5741,0,"",python,selection_command +740,974021,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237",,terminal_output +741,977658,"TERMINAL",0,0,"/",,terminal_output +742,978218,"TERMINAL",0,0,"0",,terminal_output +743,978306,"TERMINAL",0,0,"",,terminal_output +744,978833,"TERMINAL",0,0,"\r\n020000/ 040000/ 053000/ 054000/ 055000/ \r\n(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/0",,terminal_output +745,979929,"TERMINAL",0,0,"",,terminal_output +746,1040363,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int 
= 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n use_maskgit: bool = False\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n outputs[""recon""] = outputs[""recon""].astype(jnp.float32)\n logits = outputs[""token_logits""]\n targets = outputs[""video_tokens""]\n\n # if not args.use_maskgit:\n # logits = outputs[""token_logits""][:, :, :-1]\n # targets = outputs[""video_tokens""][:, :, 1:]\n # mask = outputs[""mask""][:, :, 1:]\n\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(logits, targets)\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = logits.argmax(-1) == targets\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(logits)\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=logits.max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if 
args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n use_maskgit=args.use_maskgit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=args.dtype\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", 
grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n # Restore full dynamics model\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n train_state = restore_genie_components(\n train_state, replicated_sharding, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n while step < args.num_steps:\n for videos in dataloader:\n # for i in range(videos.shape[0]):\n # video_i = videos[i:i+1] # shape (1, T, H, W, C)\n # np.save(f""overfit_dir/oai_sample_seed69_{i}.npy"", video_i)\n # jax.debug.breakpoint()\n # videos = np.load(""overfit_dir/oai_sample_seed69_1.npy"") # *255.\n # videos = videos.astype(np.uint8)\n # videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n # while True:\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n 
wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +747,1041253,"sample.py",0,0,"",python,tab +748,1046521,"sample.py",3900,0,"",python,selection_mouse +749,1046527,"sample.py",3899,0,"",python,selection_command +750,1046701,"sample.py",3899,1,"s",python,selection_mouse +751,1046702,"sample.py",3900,0,"",python,selection_command +752,1046734,"sample.py",3853,47,"el_state""]\nparams = restored_train_state.params",python,selection_mouse +753,1046735,"sample.py",3814,86,"\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +754,1046754,"sample.py",3738,162,"(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +755,1046771,"sample.py",3608,292,"\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +756,1046796,"sample.py",3504,396,"\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +757,1046824,"sample.py",3405,495,"s=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +758,1046839,"sample.py",3235,665,"_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +759,1046840,"sample.py",3167,733,"r_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n 
options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +760,1046892,"sample.py",3160,740,"\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +761,1046893,"sample.py",3155,745,"), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +762,1046924,"sample.py",3144,756," )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +763,1046925,"sample.py",3106,794," 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n 
args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +764,1046964,"sample.py",3062,838," optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +765,1046990,"sample.py",3042,858," tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +766,1047041,"sample.py",3039,861," tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +767,1047080,"sample.py",3020,880," params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n 
args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +768,1047081,"sample.py",2994,906," apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +769,1047082,"sample.py",2955,945,"dummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +770,1047089,"sample.py",2954,946,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +771,1047132,"sample.py",2880,1020,"rng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, 
ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +772,1047133,"sample.py",2878,1022,")\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +773,1047165,"sample.py",2859,1041," mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +774,1047166,"sample.py",2750,1150,"dummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = 
jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +775,1047166,"sample.py",2677,1223,"image_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +776,1047201,"sample.py",2643,1257,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +777,1047202,"sample.py",2591,1309," use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = 
ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +778,1047235,"sample.py",2569,1331," dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +779,1047236,"sample.py",2512,1388," use_maskgit=False,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +780,1047268,"sample.py",2472,1428," 
dyna_num_heads=args.dyna_num_heads,\n use_maskgit=False,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +781,1047269,"sample.py",2430,1470," dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n use_maskgit=False,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +782,1047311,"sample.py",2394,1506," dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n use_maskgit=False,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n 
params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +783,1047349,"sample.py",2351,1549," # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n use_maskgit=False,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +784,1047350,"sample.py",2432,1468," dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n use_maskgit=False,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n 
ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +785,1047354,"sample.py",2571,1329," dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +786,1047355,"sample.py",2593,1307," use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +787,1047384,"sample.py",2680,1220,"ge_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), 
\n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +788,1047385,"sample.py",2863,1037,"mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +789,1047424,"sample.py",2954,946,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +790,1047424,"sample.py",2999,901,"pply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n 
model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +791,1047460,"sample.py",3025,875,"arams=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +792,1047461,"sample.py",3045,855,"=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +793,1047498,"sample.py",3065,835," optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +794,1047499,"sample.py",3109,791," 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n 
),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +795,1047572,"sample.py",3147,753," )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +796,1047653,"sample.py",3157,743," \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +797,1047654,"sample.py",3156,744,", \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +798,1047681,"sample.py",3160,740,"\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +799,1047782,"sample.py",3165,735,"ler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = 
ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +800,1047813,"sample.py",3160,740,"\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +801,1047849,"sample.py",3154,746," ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +802,1047887,"sample.py",3143,757," )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +803,1047888,"sample.py",3105,795," 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or 
checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +804,1047925,"sample.py",3061,839," optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +805,1047960,"sample.py",3041,859," tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +806,1048004,"sample.py",3022,878," params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +807,1048039,"sample.py",2996,904," apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = 
checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +808,1048040,"sample.py",2957,943,"mmy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +809,1048088,"sample.py",2954,946,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +810,1048118,"sample.py",2917,983,"ams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +811,1048119,"sample.py",2883,1017,", _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = 
ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +812,1048120,"sample.py",2862,1038," mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +813,1048158,"sample.py",2774,1126," videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +814,1048159,"sample.py",2753,1147,"my_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, 
ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +815,1048195,"sample.py",2680,1220,"ge_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +816,1048196,"sample.py",2645,1255,"g, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +817,1048234,"sample.py",2642,1258,"\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n 
apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +818,1048386,"sample.py",2644,1256,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +819,1048782,"sample.py",2643,1257,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +820,1057028,"sample.py",3150,0,"",python,selection_mouse 
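The selection events above sweep repeatedly over the same checkpoint-restore block in sample.py. For reference, that pattern is reassembled below as a self-contained sketch: `genie`, `args`, and the 'model_state' item key come from the recording itself, while the `restore_params` wrapper and everything else are illustrative assumptions, not part of the recorded file.

    # Hypothetical sketch of the restore pattern seen in the recorded sample.py.
    import jax
    import jax.numpy as jnp
    import optax
    import orbax.checkpoint as ocp
    from flax.training.train_state import TrainState

    def restore_params(genie, args, rng):
        # Build a throwaway TrainState whose only purpose is to expose the
        # pytree structure (shapes/dtypes) the checkpoint was saved with.
        image_shape = (args.image_height, args.image_width, args.image_channels)
        rng, _rng = jax.random.split(rng)
        dummy_inputs = dict(
            videos=jnp.zeros(
                (args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32
            ),
            mask_rng=_rng,
        )
        rng, _rng = jax.random.split(rng)
        params = genie.init(_rng, dummy_inputs)
        dummy_state = TrainState.create(
            apply_fn=genie.apply,
            params=params,
            # The optimizer is never stepped; dummy schedule values suffice.
            tx=optax.adamw(optax.warmup_cosine_decay_schedule(0.0, 0.0, 1, 2)),
        )

        # Register a handler for the 'model_state' item, then restore the
        # requested (or latest) step against the abstract shape/dtype tree.
        registry = ocp.handlers.DefaultCheckpointHandlerRegistry()
        registry.add(
            'model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler
        )
        manager = ocp.CheckpointManager(
            args.checkpoint,
            options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),
            handler_registry=registry,
        )
        abstract_state = jax.tree_util.tree_map(
            ocp.utils.to_shape_dtype_struct, dummy_state
        )
        restored = manager.restore(
            args.checkpoint_step or manager.latest_step(),
            args=ocp.args.Composite(
                model_state=ocp.args.StandardRestore(abstract_state)
            ),
        )
        return restored["model_state"].params

The dummy TrainState is never trained; it exists only so that mapping ocp.utils.to_shape_dtype_struct over it yields the abstract pytree StandardRestore needs to reconstruct arrays with the correct shapes and dtypes.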
+821,1057070,"sample.py",3149,0,"",python,selection_command +822,1067057,"sample.py",2644,0,"",python,selection_mouse +823,1067272,"sample.py",2644,34,"ng, _rng = jax.random.split(rng)\ni",python,selection_mouse +824,1067306,"sample.py",2644,108,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndu",python,selection_mouse +825,1067340,"sample.py",2644,219,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n ",python,selection_mouse +826,1067393,"sample.py",2644,360,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_",python,selection_mouse +827,1067394,"sample.py",2644,427,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n opta",python,selection_mouse +828,1067394,"sample.py",2644,472,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0",python,selection_mouse +829,1067455,"sample.py",2644,506,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )",python,selection_mouse +830,1067460,"sample.py",2644,514,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), ",python,selection_mouse +831,1067495,"sample.py",2644,516,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n 
apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)",python,selection_mouse +832,1067496,"sample.py",2644,583,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()",python,selection_mouse +833,1067499,"sample.py",2644,533,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry",python,selection_mouse +834,1067574,"sample.py",2644,685,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)",python,selection_mouse +835,1067575,"sample.py",2644,729,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(",python,selection_mouse +836,1067618,"sample.py",2644,702,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, 
ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manag",python,selection_mouse +837,1067619,"sample.py",2644,750,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,",python,selection_mouse +838,1067653,"sample.py",2644,820,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),",python,selection_mouse +839,1067655,"sample.py",2644,767,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.",python,selection_mouse +840,1067655,"sample.py",2644,858,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry",python,selection_mouse +841,1067696,"sample.py",2644,860,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, 
args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)",python,selection_mouse +842,1067761,"sample.py",2644,907,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(",python,selection_mouse +843,1067762,"sample.py",2644,962,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state",python,selection_mouse +844,1067762,"sample.py",2644,964,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n 
handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)",python,selection_mouse +845,1067763,"sample.py",2644,965,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n",python,selection_mouse +846,1067766,"sample.py",2644,1004,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(",python,selection_mouse +847,1067767,"sample.py",2644,1066,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),",python,selection_mouse +848,1067811,"sample.py",2644,1095,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n 
mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(",python,selection_mouse +849,1067812,"sample.py",2644,1163,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),",python,selection_mouse +850,1067827,"sample.py",2644,1170,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),",python,selection_mouse +851,1067847,"sample.py",2644,1172,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = 
jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)",python,selection_mouse +852,1067880,"sample.py",2644,1189,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_s",python,selection_mouse +853,1067881,"sample.py",2644,1236,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restore",python,selection_mouse +854,1067922,"sample.py",2644,1257,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = 
dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n",python,selection_mouse +855,1067923,"sample.py",2644,1258,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n",python,selection_mouse +856,1067958,"sample.py",2644,1296,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n 
model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):",python,selection_mouse +857,1067959,"sample.py",2644,1410,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)",python,selection_mouse +858,1067999,"sample.py",2644,1501,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)",python,selection_mouse +859,1068000,"sample.py",2644,1502,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, 
dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)\n",python,selection_mouse +860,1068033,"sample.py",2644,1519,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)\n\n# --- Define aut",python,selection_mouse +861,1068034,"sample.py",2644,1601,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n 
options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):",python,selection_mouse +862,1068068,"sample.py",2644,1519,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)\n\n# --- Define aut",python,selection_mouse +863,1068069,"sample.py",2644,1276,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n 
model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wra",python,selection_mouse +864,1068115,"sample.py",2644,1237,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored",python,selection_mouse +865,1068116,"sample.py",2644,1085,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.",python,selection_mouse +866,1068249,"sample.py",2644,984,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, 
dummy_train_state\n)\n\nrestored = checkpo",python,selection_mouse +867,1068250,"sample.py",2644,965,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n",python,selection_mouse +868,1068250,"sample.py",2644,964,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)",python,selection_mouse +869,1068251,"sample.py",2644,928,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_sha",python,selection_mouse +870,1068251,"sample.py",2644,881,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), 
\n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state",python,selection_mouse +871,1068348,"sample.py",2644,928,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_sha",python,selection_mouse +872,1068379,"sample.py",2644,964,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)",python,selection_mouse +873,1068413,"sample.py",2644,985,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoi",python,selection_mouse +874,1068414,"sample.py",2644,1024,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n 
videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint",python,selection_mouse +875,1068572,"sample.py",2644,1115,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state",python,selection_mouse +876,1068573,"sample.py",2644,1170,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),",python,selection_mouse +877,1068574,"sample.py",2644,1172,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = 
genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)",python,selection_mouse +878,1068656,"sample.py",2644,1192,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_stat",python,selection_mouse +879,1068697,"sample.py",2644,1239,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_t",python,selection_mouse +880,1068858,"sample.py",2644,1240,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n 
videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_tr",python,selection_mouse +881,1068910,"sample.py",2644,1241,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_tra",python,selection_mouse +882,1068927,"sample.py",2644,1242,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n 
),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_trai",python,selection_mouse +883,1068953,"sample.py",2644,1243,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train",python,selection_mouse +884,1068963,"sample.py",2644,1244,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_",python,selection_mouse +885,1068987,"sample.py",2644,1245,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = 
jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_s",python,selection_mouse +886,1068997,"sample.py",2644,1246,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_st",python,selection_mouse +887,1069022,"sample.py",2644,1248,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_stat",python,selection_mouse +888,1069030,"sample.py",2644,1249,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = 
ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state",python,selection_mouse +889,1069176,"sample.py",2644,1250,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.",python,selection_mouse +890,1069177,"sample.py",2644,1253,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.par",python,selection_mouse +891,1069178,"sample.py",2644,1254,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n 
mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.para",python,selection_mouse +892,1069178,"sample.py",2644,1255,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.param",python,selection_mouse +893,1069179,"sample.py",2644,1256,"ng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = 
restored_train_state.params",python,selection_mouse +894,1072109,"sample.py",2643,0,"",python,selection_mouse +895,1072819,"sample.py",2643,34,"rng, _rng = jax.random.split(rng)\n",python,selection_mouse +896,1072865,"sample.py",2643,129,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n ",python,selection_mouse +897,1072901,"sample.py",2643,236,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)",python,selection_mouse +898,1072902,"sample.py",2643,311,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n",python,selection_mouse +899,1072939,"sample.py",2643,316,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndumm",python,selection_mouse +900,1072940,"sample.py",2643,355,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n ",python,selection_mouse +901,1073057,"sample.py",2643,382,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n p",python,selection_mouse +902,1073058,"sample.py",2643,401,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n t",python,selection_mouse +903,1073058,"sample.py",2643,422,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n ",python,selection_mouse +904,1073663,"sample.py",2643,423,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = 
genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n ",python,selection_mouse +905,1073696,"sample.py",2643,404,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=o",python,selection_mouse +906,1073737,"sample.py",2643,386,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n param",python,selection_mouse +907,1073772,"sample.py",2643,387,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params",python,selection_mouse +908,1073805,"sample.py",2643,361,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_",python,selection_mouse +909,1073806,"sample.py",2643,362,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_f",python,selection_mouse +910,1073934,"sample.py",2643,363,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn",python,selection_mouse +911,1073963,"sample.py",2643,409,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.",python,selection_mouse +912,1073997,"sample.py",2643,429,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = 
jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax",python,selection_mouse +913,1074033,"sample.py",2643,474,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0,",python,selection_mouse +914,1074087,"sample.py",2643,507,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )",python,selection_mouse +915,1074381,"sample.py",2643,515,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), ",python,selection_mouse +916,1084179,"sample.py",3078,0,"",python,selection_mouse +917,1084777,"sample.py",2954,0,"",python,selection_mouse +918,1084982,"sample.py",2954,17,"\ndummy_train_stat",python,selection_mouse +919,1085010,"sample.py",2954,56,"\ndummy_train_state = TrainState.create(\n apply_fn=gen",python,selection_mouse +920,1085031,"sample.py",2954,101,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.ada",python,selection_mouse +921,1085072,"sample.py",2954,102,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adam",python,selection_mouse +922,1085126,"sample.py",2954,122,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.war",python,selection_mouse +923,1085166,"sample.py",2954,196,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )",python,selection_mouse +924,1085167,"sample.py",2954,204,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), ",python,selection_mouse +925,1085167,"sample.py",2954,206,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)",python,selection_mouse +926,1085204,"sample.py",2954,225,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry =",python,selection_mouse 
+927,1085289,"sample.py",2954,292,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.a",python,selection_mouse +928,1085317,"sample.py",2954,293,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.ad",python,selection_mouse +929,1085351,"sample.py",2954,395,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager ",python,selection_mouse +930,1085384,"sample.py",2954,439,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint",python,selection_mouse +931,1085385,"sample.py",2954,510,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),",python,selection_mouse +932,1085422,"sample.py",2954,548,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry",python,selection_mouse +933,1085422,"sample.py",2954,550,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)",python,selection_mouse +934,1085457,"sample.py",2954,597,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = 
ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(",python,selection_mouse +935,1085458,"sample.py",2954,652,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state",python,selection_mouse +936,1085458,"sample.py",2954,654,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)",python,selection_mouse +937,1085493,"sample.py",2954,655,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n",python,selection_mouse +938,1085494,"sample.py",2954,694,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(",python,selection_mouse +939,1085538,"sample.py",2954,654,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, 
ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)",python,selection_mouse +940,1085538,"sample.py",2954,617,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_sh",python,selection_mouse +941,1085571,"sample.py",2954,550,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)",python,selection_mouse +942,1085571,"sample.py",2954,529,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_regist",python,selection_mouse +943,1085614,"sample.py",2954,459,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.Ch",python,selection_mouse +944,1085647,"sample.py",2954,460,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.Che",python,selection_mouse +945,1085648,"sample.py",2954,439,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n 
args.checkpoint",python,selection_mouse +946,1085688,"sample.py",2954,396,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager =",python,selection_mouse +947,1085769,"sample.py",2954,294,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add",python,selection_mouse +948,1085854,"sample.py",2954,396,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager =",python,selection_mouse +949,1085855,"sample.py",2954,440,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,",python,selection_mouse +950,1085861,"sample.py",2954,462,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.Check",python,selection_mouse +951,1085872,"sample.py",2954,532,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=",python,selection_mouse +952,1085889,"sample.py",2954,550,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)",python,selection_mouse +953,1085908,"sample.py",2954,572,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), 
\n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state ",python,selection_mouse +954,1085930,"sample.py",2954,573,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state =",python,selection_mouse +955,1085945,"sample.py",2954,620,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape",python,selection_mouse +956,1085986,"sample.py",2954,654,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)",python,selection_mouse +957,1086028,"sample.py",2954,655,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n",python,selection_mouse +958,1086297,"sample.py",2954,678,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n 
handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_",python,selection_mouse +959,1086377,"sample.py",2954,756,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),",python,selection_mouse +960,1086378,"sample.py",2954,785,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(",python,selection_mouse +961,1086383,"sample.py",2954,853,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),",python,selection_mouse +962,1086397,"sample.py",2954,860,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),",python,selection_mouse 
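The same selections also cover the Orbax side of the restore: register a handler for the 'model_state' item, open a CheckpointManager over six-digit step directories, reduce the dummy state to shape/dtype metadata, and restore into it. A sketch under the same assumptions, with a hypothetical path standing in for the script's args.checkpoint:

```python
# Sketch of the Orbax restore captured above; `dummy_train_state` is the
# placeholder built in the previous sketch, and the path is hypothetical.
import jax
import orbax.checkpoint as ocp

handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()
handler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)
checkpoint_manager = ocp.CheckpointManager(
    '/tmp/checkpoints',  # hypothetical; the script passes args.checkpoint
    options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),  # step dirs like 040000
    handler_registry=handler_registry,
)

# Strip the dummy state down to jax.ShapeDtypeStruct leaves; Orbax uses this
# abstract pytree as the restore target and fills in the real arrays.
abstract_train_state = jax.tree_util.tree_map(
    ocp.utils.to_shape_dtype_struct, dummy_train_state
)
restored = checkpoint_manager.restore(
    checkpoint_manager.latest_step(),
    args=ocp.args.Composite(
        model_state=ocp.args.StandardRestore(abstract_train_state),
    ),
)
params = restored["model_state"].params
```

Two things in the surrounding records are worth flagging. First, the script as captured selects the step with `args.checkpoint_step or checkpoint_manager.latest_step()`, and the terminal output later in this log shows exactly that line crashing with `AttributeError: 'Args' object has no attribute 'checkpoint_step'`; the sketch above hard-codes `latest_step()`, and one plausible fix in the original script is an optional `checkpoint_step` field (defaulting to None) on its tyro `Args` dataclass. Second, the `_sampling_wrapper` selected further down swaps the commented-out `sample_maskgit` call for `sample_causal`, and `_autoreg_sample` seeds generation with the first `args.start_frame + 1` ground-truth frames of the video batch.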
+963,1086416,"sample.py",2954,862,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)",python,selection_mouse +964,1086428,"sample.py",2954,909,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]",python,selection_mouse +965,1086452,"sample.py",2954,946,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +966,1086460,"sample.py",2954,947,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n 
args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n",python,selection_mouse +967,1086492,"sample.py",2954,948,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n",python,selection_mouse +968,1086521,"sample.py",2954,1100,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)",python,selection_mouse +969,1086529,"sample.py",2954,1009,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sa",python,selection_mouse +970,1086561,"sample.py",2954,1192,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n 
), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)\n",python,selection_mouse +971,1086565,"sample.py",2954,1238,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)\n\n# --- Define autoregressive sampling loop ---",python,selection_mouse +972,1086591,"sample.py",2954,1291,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, 
action_batch):",python,selection_mouse +973,1086596,"sample.py",2954,1261,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rn",python,selection_mouse +974,1086629,"sample.py",2954,1340,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]",python,selection_mouse +975,1086634,"sample.py",2954,1405,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n 
),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n # sampling_fn = jax.jit(nn.apply(_sampling_wrapper, genie)) ",python,selection_mouse +976,1086661,"sample.py",2954,1314,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[",python,selection_mouse +977,1086661,"sample.py",2954,1261,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rn",python,selection_mouse +978,1086693,"sample.py",2954,1215,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, 
ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)\n\n# --- Define autoregre",python,selection_mouse +979,1086729,"sample.py",2954,1192,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)\n",python,selection_mouse +980,1086766,"sample.py",2954,1123,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.samp",python,selection_mouse +981,1086800,"sample.py",2954,1009,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = 
ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sa",python,selection_mouse +982,1086801,"sample.py",2954,971,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(",python,selection_mouse +983,1086831,"sample.py",2954,948,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n",python,selection_mouse +984,1086863,"sample.py",2954,947,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n 
args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n",python,selection_mouse +985,1086930,"sample.py",2954,933,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train",python,selection_mouse +986,1087193,"sample.py",2954,934,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_",python,selection_mouse +987,1087229,"sample.py",2954,935,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_s",python,selection_mouse +988,1087263,"sample.py",2954,936,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n 
options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_st",python,selection_mouse +989,1087264,"sample.py",2954,937,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_sta",python,selection_mouse +990,1087298,"sample.py",2954,939,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state",python,selection_mouse +991,1087299,"sample.py",2954,941,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.p",python,selection_mouse +992,1087336,"sample.py",2954,942,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n 
optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.pa",python,selection_mouse +993,1087376,"sample.py",2954,946,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +994,1102633,"sample.py",277,0,"",python,selection_mouse +995,1103828,"sample.py",303,0,"",python,selection_mouse +996,1103834,"sample.py",302,0,"",python,selection_command +997,1105034,"sample.py",300,0,"",python,selection_mouse +998,1105212,"sample.py",298,5,"optax",python,selection_mouse +999,1105352,"sample.py",291,13,"import optax\n",python,selection_mouse +1000,1119365,"sample.py",323,0,"",python,selection_mouse +1001,1121248,"sample.py",227,0,"",python,selection_mouse +1002,1121372,"sample.py",217,11,"train_state",python,selection_mouse +1003,1121517,"sample.py",198,49,"from flax.training.train_state import TrainState\n",python,selection_mouse +1004,1134086,"sample.py",287,0,"",python,selection_mouse +1005,1134197,"sample.py",287,3,"ocp",python,selection_mouse +1006,1134332,"sample.py",260,31,"import orbax.checkpoint as ocp\n",python,selection_mouse +1007,1152578,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --seq_len=2 
\\r\n --batch_size=10 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output +1008,1153188,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=653563\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0706\r\nSLURM_JOB_START_TIME=1753717895\r\nSLURM_STEP_NODELIST=hkn0706\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753721495\r\nSLURM_PMI2_SRUN_PORT=35695\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3380176\r\nSLURM_PTY_PORT=33197\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=31\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e8.hkn0706\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0706\r\nSLURM_SRUN_COMM_PORT=44649\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3380176\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0706\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=44649\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0706\r\nGpuFreq=control_disabled\r\n",,terminal_output +1009,1157575,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +1010,1159574,"TERMINAL",0,0,"2025-07-28 18:04:32.043853: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1011,1172628,"TERMINAL",0,0,"2025-07-28 18:04:44.996634: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1012,1195465,"TERMINAL",0,0,"2025-07-28 18:05:07.736106: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1013,1198777,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 54000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/054000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 53000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/053000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 55000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/055000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/020000/metrics/metrics not found.\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 126, in \r\n args.checkpoint_step or checkpoint_manager.latest_step(),\r\nAttributeError: 'Args' object has no attribute 'checkpoint_step'\r\n",,terminal_output +1014,1199851,"TERMINAL",0,0,"srun: error: hkn0706: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0706:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ ",,terminal_output +1015,1265423,"train_dynamics.py",0,0,"",python,tab +1016,1329142,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/",,terminal_output +1017,1329865,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --seq_len=2 \\r\n --batch_size=10 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 
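The first run dies with `AttributeError: 'Args' object has no attribute 'checkpoint_step'` at the `checkpoint_manager.restore(...)` call. Since the script parses its arguments with tyro (per the `tyro/_parsers.py` warnings above), the natural fix is to add the missing field with a `None` default; a minimal sketch, where all fields besides `checkpoint` and `checkpoint_step` are illustrative:

```python
from dataclasses import dataclass
from typing import Optional

import tyro


@dataclass
class Args:
    checkpoint: str                       # checkpoint directory, as in the log
    checkpoint_step: Optional[int] = None  # None -> fall back to latest_step()


args = tyro.cli(Args)
```

Note that the recorded expression `args.checkpoint_step or checkpoint_manager.latest_step()` would also fall back when the requested step is 0; `args.checkpoint_step if args.checkpoint_step is not None else checkpoint_manager.latest_step()` is the safer test.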
\\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output +1018,1329962,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=653563\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0706\r\nSLURM_JOB_START_TIME=1753717895\r\nSLURM_STEP_NODELIST=hkn0706\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753721495\r\nSLURM_PMI2_SRUN_PORT=35695\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3380176\r\nSLURM_PTY_PORT=33197\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=31\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e8.hkn0706\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0706\r\nSLURM_SRUN_COMM_PORT=44649\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3380176\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0706\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=44649\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0706\r\n",,terminal_output +1019,1330090,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +1020,1330993,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +1021,1332978,"TERMINAL",0,0,"2025-07-28 18:07:25.453413: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1022,1345583,"TERMINAL",0,0,"2025-07-28 18:07:37.995019: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1023,1368318,"TERMINAL",0,0,"2025-07-28 18:08:00.676857: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1024,1371712,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 53000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/053000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 54000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/054000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 55000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/055000/metrics/metrics not found.\r\n",,terminal_output +1025,1422030,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +1026,1425934,"TERMINAL",0,0,"2025-07-28 18:08:58.373602: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-28 18:08:58.409259: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1027,1426484,"TERMINAL",0,0,"2025-07-28 18:08:58.755010: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1028,1428066,"TERMINAL",0,0,"E0728 18:09:00.532098 660260 cuda_blas.cc:196] failed to create cublas handle: the resource allocation failed\r\nE0728 18:09:00.532182 660260 cuda_blas.cc:199] Failure to initialize cublas may be due to OOM (cublas needs some free memory when you initialize it, and your deep-learning framework may have preallocated more than its fair share), or may be because this binary was not built with support for the GPU in your machine.\r\nE0728 18:09:00.538829 660262 cuda_blas.cc:196] failed to create cublas handle: the resource allocation failed\r\nE0728 18:09:00.538870 660262 cuda_blas.cc:199] Failure to initialize cublas may be due to OOM (cublas needs some free memory when you initialize it, and your deep-learning framework may have preallocated more than its fair share), or may be because this binary was not built with support for the GPU in your machine.\r\n",,terminal_output +1029,1428243,"TERMINAL",0,0,"jax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 102, in \r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 94, in __call__\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 102, in \r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 94, in __call__\r\n tokenizer_outputs = 
self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/models/tokenizer.py"", line 72, in vq_encode\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/models/tokenizer.py"", line 72, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/utils/nn.py"", line 117, in __call__\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/utils/nn.py"", line 117, in __call__\r\n x = nn.Sequential(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/combinators.py"", line 113, in __call__\r\n outputs = layer(outputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"", line 287, in __call__\r\n x = nn.Sequential(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/combinators.py"", line 113, in __call__\r\n outputs = layer(outputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"", line 287, in __call__\r\n y = dot_general(\r\njaxlib._jax.XlaRuntimeError: INTERNAL: Failed to initialize BLAS support\r\n y = dot_general(\r\njaxlib._jax.XlaRuntimeError: INTERNAL: Failed to initialize BLAS support\r\n",,terminal_output +1030,1428328,"TERMINAL",0,0,"2025-07-28 18:09:00.796595: W external/xla/xla/tsl/framework/bfc_allocator.cc:310] Allocator (GPU_0_bfc) ran out of memory trying to allocate 16.05MiB with freed_by_count=0. The caller indicates that this is not a failure, but this may mean that there could be performance gains if more memory were available.\r\n",,terminal_output +1031,1428434,"TERMINAL",0,0,"jax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
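The cuBLAS failure above says it outright: initialisation needs free GPU memory, and the framework "may have preallocated more than its fair share." JAX's documented knobs for this are the `XLA_PYTHON_CLIENT_PREALLOCATE` and `XLA_PYTHON_CLIENT_MEM_FRACTION` environment variables, which must be set before JAX is first imported. A minimal mitigation sketch; whether this suffices for the run recorded here is an assumption:

```python
import os

# Disable JAX's default GPU preallocation so cuBLAS has headroom when it
# initialises. Alternatively, keep preallocation but cap its share:
#   os.environ.setdefault("XLA_PYTHON_CLIENT_MEM_FRACTION", "0.6")
os.environ.setdefault("XLA_PYTHON_CLIENT_PREALLOCATE", "false")

import jax  # noqa: E402  -- imported only after the env vars are set

print(jax.devices())
```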
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 102, in \r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 94, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/models/tokenizer.py"", line 72, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/utils/nn.py"", line 117, in __call__\r\n x = nn.Sequential(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/combinators.py"", line 113, in __call__\r\n outputs = layer(outputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"", line 287, in __call__\r\n y = dot_general(\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 16826368 bytes.\r\n",,terminal_output +1032,1434927,"TERMINAL",0,0,"2025-07-28 18:09:07.253636: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_0_bfc) ran out of memory trying to allocate 3.30MiB (rounded to 3456000)requested by op \r\nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. 
\r\nCurrent allocation summary follows.\r\nCurrent allocation summary follows.\r\n2025-07-28 18:09:07.256811: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] *___________________________________________________________________________________________________\r\nE0728 18:09:07.256831 660256 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 3456000 bytes. [tf-allocator-allocation-error='']\r\n2025-07-28 18:09:07.274269: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_0_bfc) ran out of memory trying to allocate 3.30MiB (rounded to 3456000)requested by op \r\nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \r\nCurrent allocation summary follows.\r\nCurrent allocation summary follows.\r\n2025-07-28 18:09:07.274433: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] *___________________________________________________________________________________________________\r\nE0728 18:09:07.274483 660261 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 3456000 bytes. [tf-allocator-allocation-error='']\r\n2025-07-28 18:09:07.285592: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_0_bfc) ran out of memory trying to allocate 3.30MiB (rounded to 3456000)requested by op \r\nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \r\nCurrent allocation summary follows.\r\nCurrent allocation summary follows.\r\n2025-07-28 18:09:07.285668: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] *___________________________________________________________________________________________________\r\nE0728 18:09:07.285682 660259 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 3456000 bytes. [tf-allocator-allocation-error='']\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 98, in \r\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array_creation.py"", line 83, in zeros\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n return lax.full(shape, 0, dtypes.jax_dtype(dtype), sharding=util.normalize_device_to_sharding(device))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 3356, in full\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 98, in \r\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array_creation.py"", line 83, in zeros\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n return lax.full(shape, 0, dtypes.jax_dtype(dtype), sharding=util.normalize_device_to_sharding(device))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 3356, in full\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n return broadcast(fill_value, shape)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 2691, in broadcast\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n return broadcast_in_dim(operand, tuple(sizes) + np.shape(operand), dims,\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 2725, in broadcast_in_dim\r\n return broadcast(fill_value, shape)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 2691, in broadcast\r\n return broadcast_in_dim_p.bind(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 536, in bind\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return broadcast_in_dim(operand, tuple(sizes) + np.shape(operand), dims,\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 2725, in broadcast_in_dim\r\n return self._true_bind(*args, **params)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 552, in _true_bind\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 562, in bind_with_trace\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n return trace.process_primitive(self, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1066, in process_primitive\r\n return broadcast_in_dim_p.bind(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 536, in bind\r\n return self._true_bind(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 552, in _true_bind\r\n return primitive.impl(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 91, in apply_primitive\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n outs = fun(*args)\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 3456000 bytes.\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 562, in bind_with_trace\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 98, in \r\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array_creation.py"", line 83, in zeros\r\n return trace.process_primitive(self, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1066, in process_primitive\r\n return primitive.impl(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 91, in apply_primitive\r\n return lax.full(shape, 0, dtypes.jax_dtype(dtype), sharding=util.normalize_device_to_sharding(device))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 3356, in full\r\n outs = fun(*args)\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 3456000 bytes.\r\n return broadcast(fill_value, shape)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 2691, in broadcast\r\n return broadcast_in_dim(operand, tuple(sizes) + np.shape(operand), dims,\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 2725, in broadcast_in_dim\r\n return broadcast_in_dim_p.bind(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 536, in bind\r\n return self._true_bind(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 552, in _true_bind\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 562, in bind_with_trace\r\n return trace.process_primitive(self, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1066, in process_primitive\r\n return primitive.impl(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 91, in apply_primitive\r\n outs = fun(*args)\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 3456000 bytes.\r\n",,terminal_output +1033,1436020,"TERMINAL",0,0,"ERROR:absl:Processing Failed. Shutting down.\r\n",,terminal_output +1034,1436080,"TERMINAL",0,0,"2025-07-28 18:09:08.557706: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1035,1436253,"TERMINAL",0,0,"2025-07-28 18:09:08.733122: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1036,1452206,"TERMINAL",0,0,"2025-07-28 18:09:24.671866: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1037,1452512,"TERMINAL",0,0,"2025-07-28 18:09:24.990104: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1038,1457067,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 166, in \r\n video_batch = next(iter(dataloader))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_loader.py"", line 479, in __next__\r\n result_record = next(self._iterator)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_loader.py"", line 380, in _iterator_with_context\r\n yield from it\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/grain_pool.py"", line 784, in __next__\r\n raise element\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/grain_pool.py"", line 616, in _process_elements_in_grain_pool\r\n for element in g_pool:\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/grain_pool.py"", line 505, in __next__\r\n raise RuntimeError(\r\nRuntimeError: Grain worker process 0 was terminated unexpectedly with exit code 1. Search the logs above for the source of the crash.\r\n",,terminal_output +1039,1458428,"TERMINAL",0,0,"srun: error: hkn0706: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0706:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ ",,terminal_output +1040,1475181,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/",,terminal_output +1041,1475414,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --seq_len=2 \\r\n --batch_size=5 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
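The Grain error only reports that worker process 0 died with exit code 1; the real exception is buried in the interleaved output above. Running the loader with `worker_count=0` keeps the pipeline in the main process so the crash surfaces directly, and also sidesteps the spawn re-import problem entirely. A self-contained sketch with a toy in-memory source standing in for the ArrayRecord data in the log:

```python
import grain.python as grain

data_source = list(range(8))  # stand-in for the ArrayRecord source

dataloader = grain.DataLoader(
    data_source=data_source,
    sampler=grain.IndexSampler(
        num_records=len(data_source),
        shard_options=grain.NoSharding(),
        shuffle=False,
        num_epochs=1,
    ),
    worker_count=0,  # 0 = run in-process; any failure now raises directly
)
print(next(iter(dataloader)))
```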
+1042,1475884,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=653563\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0706\r\nSLURM_JOB_START_TIME=1753717895\r\nSLURM_STEP_NODELIST=hkn0706\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753721495\r\nSLURM_PMI2_SRUN_PORT=35695\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3380176\r\nSLURM_PTY_PORT=33197\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=31\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e8.hkn0706\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0706\r\nSLURM_SRUN_COMM_PORT=44649\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3380176\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0706\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=44649\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0706\r\nGpuFreq=control_disabled\r\n",,terminal_output +1043,1476796,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +1044,1478769,"TERMINAL",0,0,"2025-07-28 18:09:51.230497: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1045,1491641,"TERMINAL",0,0,"2025-07-28 18:10:04.090522: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1046,1513523,"TERMINAL",0,0,"2025-07-28 18:10:25.943916: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1047,1516913,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 54000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/054000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 53000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/053000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 55000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/055000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/020000/metrics/metrics not found.\r\n",,terminal_output +1048,1557553,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +1049,1559626,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 742, in backends\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 742, in backends\r\n backend = _init_backend(platform)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 828, in _init_backend\r\n backend = registration.factory()\r\n backend = _init_backend(platform)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 828, in _init_backend\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 518, in factory\r\n return xla_client.make_c_api_client(plugin_name, updated_options, None)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jaxlib/xla_client.py"", line 153, in make_c_api_client\r\n backend = registration.factory()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 518, in factory\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 742, in backends\r\n return xla_client.make_c_api_client(plugin_name, updated_options, None)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jaxlib/xla_client.py"", line 153, in make_c_api_client\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 742, in backends\r\n backend = _init_backend(platform)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 828, in _init_backend\r\n backend = _init_backend(platform)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 828, in _init_backend\r\n backend = registration.factory()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 518, in factory\r\n backend = registration.factory()\r\n return xla_client.make_c_api_client(plugin_name, updated_options, None)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jaxlib/xla_client.py"", line 153, in make_c_api_client\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 518, in factory\r\n return _xla.get_c_api_client(plugin_name, options, distributed_client)\r\nRuntimeError: Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory\r\n\r\nDuring handling of the above exception, another exception 
occurred:\r\n\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n return _xla.get_c_api_client(plugin_name, options, distributed_client)\r\n return _xla.get_c_api_client(plugin_name, options, distributed_client)\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\nRuntimeError: Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\n prepare(preparation_data)\r\nRuntimeError: Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n _fixup_main_from_path(data['init_main_from_path'])\r\n return xla_client.make_c_api_client(plugin_name, updated_options, None)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jaxlib/xla_client.py"", line 153, in make_c_api_client\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n exitcode = _main(fd, parent_sentinel)\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n exitcode = _main(fd, parent_sentinel)\r\n File 
""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n return _xla.get_c_api_client(plugin_name, options, distributed_client)\r\nRuntimeError: Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n _run_code(code, mod_globals, init_globals,\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 64, in \r\n _fixup_main_from_path(data['init_main_from_path'])\r\n rng = jax.random.PRNGKey(args.seed)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 249, in PRNGKey\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n main_content = runpy.run_path(main_path,\r\n return _return_prng_keys(True, _key('PRNGKey', seed, impl))\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 201, in _key\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n return _run_module_code(code, init_globals, run_name,\r\n return prng.random_seed(seed, impl=impl)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/prng.py"", line 551, in random_seed\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in 
_run_module_code\r\n main_content = runpy.run_path(main_path,\r\n seeds_arr = jnp.asarray(np.int64(seeds))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 383, in asarray\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return array(a, dtype=dtype, copy=bool(copy), order=order, device=device)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 268, in array\r\n exec(code, run_globals)\r\n out_array: Array = lax._convert_element_type(\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 64, in \r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 1726, in _convert_element_type\r\n rng = jax.random.PRNGKey(args.seed)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 249, in PRNGKey\r\n _run_code(code, mod_globals, init_globals,\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n return _return_prng_keys(True, _key('PRNGKey', seed, impl))\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 201, in _key\r\n return convert_element_type_p.bind(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 536, in bind\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 64, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n return prng.random_seed(seed, impl=impl)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/prng.py"", line 551, in random_seed\r\n rng = jax.random.PRNGKey(args.seed)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 249, in PRNGKey\r\n return self._true_bind(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 552, in _true_bind\r\n return _return_prng_keys(True, _key('PRNGKey', seed, impl))\r\n main_content = runpy.run_path(main_path,\r\n File 
""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n seeds_arr = jnp.asarray(np.int64(seeds))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 201, in _key\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 4902, in _convert_element_type_bind_with_trace\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 383, in asarray\r\n return prng.random_seed(seed, impl=impl)\r\n return array(a, dtype=dtype, copy=bool(copy), order=order, device=device)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/prng.py"", line 551, in random_seed\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 268, in array\r\n seeds_arr = jnp.asarray(np.int64(seeds))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 383, in asarray\r\n out_array: Array = lax._convert_element_type(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 1726, in _convert_element_type\r\n operand = core.Primitive.bind_with_trace(convert_element_type_p, trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 562, in bind_with_trace\r\n return _run_module_code(code, init_globals, run_name,\r\n return trace.process_primitive(self, args, params)\r\n return array(a, dtype=dtype, copy=bool(copy), order=order, device=device)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 268, in array\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1066, in process_primitive\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n out_array: Array = lax._convert_element_type(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 1726, in _convert_element_type\r\n return primitive.impl(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 91, in apply_primitive\r\n _run_code(code, mod_globals, init_globals,\r\n outs = fun(*args)\r\n return convert_element_type_p.bind(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 536, in bind\r\nRuntimeError: Unable to initialize backend 'cuda': Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory (you may need to uninstall the failing plugin package, or set JAX_PLATFORMS=cpu to skip this backend.)\r\n return convert_element_type_p.bind(\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 536, in bind\r\n return self._true_bind(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 552, in _true_bind\r\n return self._true_bind(*args, **params)\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 4902, in _convert_element_type_bind_with_trace\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 552, in _true_bind\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 4902, in _convert_element_type_bind_with_trace\r\n operand = core.Primitive.bind_with_trace(convert_element_type_p, trace, args, params)\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 64, in \r\n rng = jax.random.PRNGKey(args.seed)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 562, in bind_with_trace\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 249, in PRNGKey\r\n operand = core.Primitive.bind_with_trace(convert_element_type_p, trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 562, in bind_with_trace\r\n return trace.process_primitive(self, args, params)\r\n return _return_prng_keys(True, _key('PRNGKey', seed, impl))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 201, in _key\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1066, in process_primitive\r\n return primitive.impl(*args, **params)\r\n return prng.random_seed(seed, impl=impl)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/prng.py"", line 551, in random_seed\r\n return trace.process_primitive(self, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 91, in apply_primitive\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1066, in process_primitive\r\n outs = fun(*args)\r\nRuntimeError: Unable to initialize backend 'cuda': Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory (you may need to uninstall the failing plugin package, or set JAX_PLATFORMS=cpu to skip this backend.)\r\n seeds_arr = jnp.asarray(np.int64(seeds))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 383, in asarray\r\n 
return primitive.impl(*args, **params)\r\n return array(a, dtype=dtype, copy=bool(copy), order=order, device=device)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 268, in array\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 91, in apply_primitive\r\n out_array: Array = lax._convert_element_type(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 1726, in _convert_element_type\r\n outs = fun(*args)\r\nRuntimeError: Unable to initialize backend 'cuda': Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory (you may need to uninstall the failing plugin package, or set JAX_PLATFORMS=cpu to skip this backend.)\r\n return convert_element_type_p.bind(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 536, in bind\r\n return self._true_bind(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 552, in _true_bind\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 4902, in _convert_element_type_bind_with_trace\r\n operand = core.Primitive.bind_with_trace(convert_element_type_p, trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 562, in bind_with_trace\r\n return trace.process_primitive(self, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1066, in process_primitive\r\n return primitive.impl(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 91, in apply_primitive\r\n outs = fun(*args)\r\nRuntimeError: Unable to initialize backend 'cuda': Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory (you may need to uninstall the failing plugin package, or set JAX_PLATFORMS=cpu to skip this backend.)\r\n",,terminal_output +1050,1561492,"TERMINAL",0,0,"2025-07-28 18:11:13.968730: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1051,1561566,"TERMINAL",0,0,"2025-07-28 18:11:14.022540: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-28 18:11:14.044611: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1052,1563674,"TERMINAL",0,0,"E0728 18:11:16.084571 663200 cuda_blas.cc:196] failed to create cublas handle: the resource allocation failed\r\nE0728 18:11:16.084735 663200 cuda_blas.cc:199] Failure to initialize cublas may be due to OOM (cublas needs some free memory when you initialize it, and your deep-learning framework may have preallocated more than its fair share), or may be because this binary was not built with support for the GPU in your machine.\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 102, in \r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 94, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/models/tokenizer.py"", line 72, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/utils/nn.py"", line 117, in __call__\r\n x = nn.Sequential(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/combinators.py"", line 113, in __call__\r\n outputs = layer(outputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"", line 287, in __call__\r\n y = dot_general(\r\njaxlib._jax.XlaRuntimeError: INTERNAL: Failed to initialize BLAS support\r\n",,terminal_output 
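The interleaved tracebacks above all share one shape: multiprocessing "spawn" workers (launched here by grain's data loader) re-execute sample.py through runpy.run_path and reach jax.random.PRNGKey(args.seed) at module scope (sample.py line 64), so every worker tries to initialize the CUDA backend on the same card and fails with CUDA_ERROR_OUT_OF_MEMORY, followed by the cuBLAS init failures. A minimal sketch of the usual guard, with hypothetical names rather than the recorded sample.py:

# Minimal sketch (hypothetical, not the recorded sample.py): under the "spawn"
# start method every worker re-imports the main script, so module-level JAX
# calls initialize CUDA once per worker and can exhaust GPU memory, exactly as
# in the tracebacks above.
import jax

def main() -> None:
    seed = 0  # stand-in for args.seed
    rng = jax.random.PRNGKey(seed)  # first JAX call triggers backend init
    print(rng)

if __name__ == "__main__":
    # Only the parent process runs this; spawned data-loader workers merely
    # import the module and never touch the GPU.
    main()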
+1053,1563835,"TERMINAL",0,0,"E0728 18:11:16.263619 663196 cuda_blas.cc:196] failed to create cublas handle: the resource allocation failed\r\nE0728 18:11:16.263664 663196 cuda_blas.cc:199] Failure to initialize cublas may be due to OOM (cublas needs some free memory when you initialize it, and your deep-learning framework may have preallocated more than its fair share), or may be because this binary was not built with support for the GPU in your machine.\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 102, in \r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 94, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/models/tokenizer.py"", line 72, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/utils/nn.py"", line 117, in __call__\r\n x = nn.Sequential(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/combinators.py"", line 113, in __call__\r\n outputs = layer(outputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"", line 287, in __call__\r\n y = dot_general(\r\njaxlib._jax.XlaRuntimeError: INTERNAL: Failed to initialize BLAS support\r\nE0728 18:11:16.312180 663201 cuda_blas.cc:196] failed to create cublas handle: the resource allocation failed\r\nE0728 18:11:16.312243 663201 cuda_blas.cc:199] Failure to initialize cublas 
may be due to OOM (cublas needs some free memory when you initialize it, and your deep-learning framework may have preallocated more than its fair share), or may be because this binary was not built with support for the GPU in your machine.\r\n",,terminal_output +1054,1563927,"TERMINAL",0,0,"jax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 102, in \r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 94, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/models/tokenizer.py"", line 72, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/utils/nn.py"", line 117, in __call__\r\n x = nn.Sequential(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/combinators.py"", line 113, in __call__\r\n outputs = layer(outputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"", line 287, in __call__\r\n y = dot_general(\r\njaxlib._jax.XlaRuntimeError: INTERNAL: Failed to initialize BLAS support\r\n",,terminal_output +1055,1564731,"TERMINAL",0,0,"ERROR:absl:Processing Failed. Shutting down.\r\n",,terminal_output +1056,1571481,"TERMINAL",0,0,"2025-07-28 18:11:23.891241: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1057,1575809,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 166, in \r\n video_batch = next(iter(dataloader))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_loader.py"", line 479, in __next__\r\n result_record = next(self._iterator)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_loader.py"", line 380, in _iterator_with_context\r\n yield from it\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/grain_pool.py"", line 784, in __next__\r\n raise element\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/grain_pool.py"", line 616, in _process_elements_in_grain_pool\r\n for element in g_pool:\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/grain_pool.py"", line 505, in __next__\r\n raise RuntimeError(\r\nRuntimeError: Grain worker process 0 was terminated unexpectedly with exit code 1. Search the logs above for the source of the crash.\r\n",,terminal_output +1058,1577115,"TERMINAL",0,0,"srun: error: hkn0706: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0706:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ ",,terminal_output +1059,1618158,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0706:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ ",,terminal_output +1060,1619703,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/",,terminal_output +1061,1627225,"TERMINAL",0,0,"\r\n\r",,terminal_output +1062,1627827,"TERMINAL",0,0,"",,terminal_output +1063,1628078,"TERMINAL",0,0,"",,terminal_output +1064,1628312,"TERMINAL",0,0,"",,terminal_output +1065,1628477,"TERMINAL",0,0,"",,terminal_output +1066,1628645,"TERMINAL",0,0,"",,terminal_output +1067,1628871,"TERMINAL",0,0,"",,terminal_output +1068,1629016,"TERMINAL",0,0,"",,terminal_output +1069,1629179,"TERMINAL",0,0,"",,terminal_output +1070,1629433,"TERMINAL",0,0,"",,terminal_output +1071,1629745,"TERMINAL",0,0,"",,terminal_output +1072,1629980,"TERMINAL",0,0,"",,terminal_output +1073,1630730,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +1074,1630817,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1075,1631069,"TERMINAL",0,0,"[1@c",,terminal_output +1076,1631240,"TERMINAL",0,0,"[?25ls [1@a[?25h",,terminal_output +1077,1631305,"TERMINAL",0,0,"[1@t",,terminal_output +1078,1631882,"TERMINAL",0,0,"[?25l[?2004l\r[?25h\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p 
$CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --seq_len=2 \\r\n --batch_size=1 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n]0;tum_cte0515@hkn0706:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ ",,terminal_output +1079,1633154,"TERMINAL",0,0,"cat slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh ",,terminal_output +1080,1634177,"TERMINAL",0,0,"\rsh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/",,terminal_output +1081,1639429,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --seq_len=2 \\r\n --batch_size=1 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
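When the script is re-run below, cuDNN and cuBLAS handle creation fail with roughly 1.5 MB free out of ~42 GB, which usually means another process (or a previous crashed step) already claimed the card: by default JAX preallocates about 75% of GPU memory on first use. A hedged sketch of the documented environment knobs, which must be set before JAX initializes its backend; whether they help here depends on what else is holding the GPU:

# Hedged sketch using documented JAX/XLA environment variables, nothing
# project-specific; they must be set before the process's first JAX call.
import os

os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"  # allocate on demand
# os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = ".50"  # or: cap preallocation
# os.environ["JAX_PLATFORMS"] = "cpu"  # or: skip CUDA entirely, as the error text suggests

import jax  # backend initialization now respects the settings above

print(jax.devices())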
+1082,1639526,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=653563\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0706\r\nSLURM_JOB_START_TIME=1753717895\r\nSLURM_STEP_NODELIST=hkn0706\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753721495\r\nSLURM_PMI2_SRUN_PORT=35695\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3380176\r\nSLURM_PTY_PORT=33197\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=31\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e8.hkn0706\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0706\r\nSLURM_SRUN_COMM_PORT=44649\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3380176\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0706\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=44649\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0706\r\n",,terminal_output +1083,1639633,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +1084,1640601,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +1085,1642538,"TERMINAL",0,0,"2025-07-28 18:12:35.010176: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1086,1654019,"TERMINAL",0,0,"2025-07-28 18:12:46.434878: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1087,1661152,"TERMINAL",0,0,"2025-07-28 18:12:53.630411: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1088,1676172,"TERMINAL",0,0,"2025-07-28 18:13:08.646715: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1089,1679109,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 53000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/053000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 54000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/054000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 55000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/055000/metrics/metrics not found.\r\n",,terminal_output +1090,1718731,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +1091,1720689,"TERMINAL",0,0,"E0728 18:13:53.096039 665498 cuda_dnn.cc:535] Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR\r\nE0728 18:13:53.104146 665498 cuda_dnn.cc:539] Memory usage: 1572864 bytes free, 42406903808 bytes total.\r\nE0728 18:13:53.109601 665498 cuda_dnn.cc:535] Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR\r\nE0728 18:13:53.109717 665498 cuda_dnn.cc:539] Memory usage: 1572864 bytes free, 42406903808 bytes total.\r\nE0728 18:13:53.167481 665498 cuda_dnn.cc:535] Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR\r\nE0728 18:13:53.167661 665498 cuda_dnn.cc:539] Memory usage: 1572864 bytes free, 42406903808 bytes total.\r\nE0728 18:13:53.171161 665498 cuda_dnn.cc:535] Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR\r\nE0728 18:13:53.171269 665498 cuda_dnn.cc:539] Memory usage: 1572864 bytes free, 42406903808 bytes total.\r\n",,terminal_output +1092,1720792,"TERMINAL",0,0,"jax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 64, in \r\n rng = jax.random.PRNGKey(args.seed)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 249, in PRNGKey\r\n return _return_prng_keys(True, _key('PRNGKey', seed, impl))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 201, in _key\r\n return prng.random_seed(seed, impl=impl)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/prng.py"", line 551, in random_seed\r\n seeds_arr = jnp.asarray(np.int64(seeds))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 383, in asarray\r\n return array(a, dtype=dtype, copy=bool(copy), order=order, device=device)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 268, in array\r\n out_array: Array = lax._convert_element_type(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 1726, in _convert_element_type\r\n return convert_element_type_p.bind(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 536, in bind\r\n return self._true_bind(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 552, in _true_bind\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 4902, in _convert_element_type_bind_with_trace\r\n operand = core.Primitive.bind_with_trace(convert_element_type_p, trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 562, in bind_with_trace\r\n return trace.process_primitive(self, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1066, in process_primitive\r\n return primitive.impl(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 91, in apply_primitive\r\n outs = fun(*args)\r\njaxlib._jax.XlaRuntimeError: FAILED_PRECONDITION: DNN library initialization failed. Look at the errors above for more details.\r\n",,terminal_output +1093,1723127,"TERMINAL",0,0,"2025-07-28 18:13:55.567363: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1094,1723210,"TERMINAL",0,0,"2025-07-28 18:13:55.621729: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-28 18:13:55.665499: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1095,1723245,"TERMINAL",0,0,"2025-07-28 18:13:55.718037: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1096,1723425,"TERMINAL",0,0,"2025-07-28 18:13:55.901365: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1097,1726870,"TERMINAL",0,0,"2025-07-28 18:13:59.215639: W external/xla/xla/tsl/framework/bfc_allocator.cc:310] Allocator (GPU_0_bfc) ran out of memory trying to allocate 16.17MiB with freed_by_count=0. 
The caller indicates that this is not a failure, but this may mean that there could be performance gains if more memory were available.\r\nE0728 18:13:59.248730 665492 cuda_blas.cc:196] failed to create cublas handle: the resource allocation failed\r\nE0728 18:13:59.248781 665492 cuda_blas.cc:199] Failure to initialize cublas may be due to OOM (cublas needs some free memory when you initialize it, and your deep-learning framework may have preallocated more than its fair share), or may be because this binary was not built with support for the GPU in your machine.\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 102, in \r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 94, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/models/tokenizer.py"", line 72, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/utils/nn.py"", line 117, in __call__\r\n x = nn.Sequential(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/combinators.py"", line 113, in __call__\r\n outputs = layer(outputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"", line 287, in __call__\r\n y = dot_general(\r\njaxlib._jax.XlaRuntimeError: INTERNAL: Failed to initialize BLAS support\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal 
frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 102, in \r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 94, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/models/tokenizer.py"", line 72, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/utils/nn.py"", line 117, in __call__\r\n x = nn.Sequential(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/combinators.py"", line 113, in __call__\r\n outputs = layer(outputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"", line 287, in __call__\r\n y = dot_general(\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 16953856 bytes.\r\nE0728 18:13:59.337411 665493 cuda_blas.cc:196] failed to create cublas handle: the resource allocation failed\r\nE0728 18:13:59.337578 665493 cuda_blas.cc:199] Failure to initialize cublas may be due to OOM (cublas needs some free memory when you initialize it, and your deep-learning framework may have preallocated more than its fair share), or may be because this binary was not built with support for the GPU in your machine.\r\n",,terminal_output +1098,1726934,"TERMINAL",0,0,"jax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 102, in \r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 94, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/models/tokenizer.py"", line 72, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/utils/nn.py"", line 117, in __call__\r\n x = nn.Sequential(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/combinators.py"", line 113, in __call__\r\n outputs = layer(outputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"", line 287, in __call__\r\n y = dot_general(\r\njaxlib._jax.XlaRuntimeError: INTERNAL: Failed to initialize BLAS support\r\nE0728 18:13:59.400252 665497 cuda_blas.cc:196] failed to create cublas handle: the resource allocation failed\r\nE0728 18:13:59.400405 665497 cuda_blas.cc:199] Failure to initialize cublas may be due to OOM (cublas needs some free memory when you initialize it, and your deep-learning framework may have preallocated more than its fair share), or may be because this binary was not built with support for the GPU in your machine.\r\n",,terminal_output +1099,1727032,"TERMINAL",0,0,"jax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 102, in \r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 94, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/models/tokenizer.py"", line 72, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/utils/nn.py"", line 117, in __call__\r\n x = nn.Sequential(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/combinators.py"", line 113, in __call__\r\n outputs = layer(outputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"", line 287, in __call__\r\n y = dot_general(\r\njaxlib._jax.XlaRuntimeError: INTERNAL: Failed to initialize BLAS support\r\n",,terminal_output +1100,1733285,"TERMINAL",0,0,"2025-07-28 18:14:05.714855: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-28 18:14:05.714856: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1101,1734826,"TERMINAL",0,0,"E0728 18:14:07.241578 665496 cuda_blas.cc:196] failed to create cublas handle: the resource allocation failed\r\nE0728 18:14:07.241750 665496 cuda_blas.cc:199] Failure to initialize cublas may be due to OOM (cublas needs some free memory when you initialize it, and your deep-learning framework may have preallocated more than its fair share), or may be because this binary was not built with support for the GPU in your machine.\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 102, in \r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 94, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/models/tokenizer.py"", line 72, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/utils/nn.py"", line 117, in __call__\r\n x = nn.Sequential(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/combinators.py"", line 113, in __call__\r\n outputs = layer(outputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"", line 287, in __call__\r\n y = dot_general(\r\njaxlib._jax.XlaRuntimeError: INTERNAL: Failed to initialize BLAS support\r\n",,terminal_output 
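The repeated `failed to create cublas handle` / `RESOURCE_EXHAUSTED` errors above are the classic signature of several JAX processes preallocating the same GPU: each spawned worker grabs most of device memory on import, so later workers cannot even initialize cuBLAS. A minimal sketch of the usual mitigation, assuming the environment variables are set before `import jax` in every worker (the 0.25 fraction is an illustrative value, not from this log):

```python
# Hedged sketch: tame JAX GPU preallocation so spawned workers can coexist.
# These environment variables must be set BEFORE jax is imported.
import os

# Either allocate GPU memory on demand...
os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"
# ...or keep preallocation but cap each process's share (illustrative value):
# os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.25"

import jax  # import only after configuring the allocator

print(jax.devices())  # should now initialize without exhausting the device
```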
+1102,1749057,"TERMINAL",0,0,"2025-07-28 18:14:21.512617: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1103,1752219,"TERMINAL",0,0,"2025-07-28 18:14:24.685947: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1104,1758792,"TERMINAL",0,0,"2025-07-28 18:14:31.268667: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1105,1761949,"TERMINAL",0,0,"2025-07-28 18:14:34.423875: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1106,1765546,"train_dynamics.py",0,0,"",python,tab +1107,1767065,"models/dynamics.py",0,0,"",python,tab +1108,1768352,"sample.py",0,0,"",python,tab +1109,1777747,"TERMINAL",0,0,"2025-07-28 18:14:50.212981: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1110,1780804,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 54000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/054000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 53000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/053000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 55000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/055000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/020000/metrics/metrics not found.\r\n",,terminal_output +1111,1788709,"TERMINAL",0,0,"2025-07-28 18:15:01.088077: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_0_bfc) ran out of memory trying to allocate 19.53MiB (rounded to 20480000)requested by op \r\nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. 
\r\nCurrent allocation summary follows.\r\nCurrent allocation summary follows.\r\n2025-07-28 18:15:01.088577: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] *_***********************************************************************************_**************\r\nE0728 18:15:01.088596 665495 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20480000 bytes. [tf-allocator-allocation-error='']\r\n",,terminal_output +1112,1788865,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 102, in \r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 106, in __call__\r\n dyna_outputs = self.dynamics(outputs, training)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/models/dynamics.py"", line 71, in __call__\r\n logits = self.dynamics(vid_embed)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/utils/nn.py"", line 135, in __call__\r\n x = STBlock(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/utils/nn.py"", line 44, in __call__\r\n z = PositionalEncoding(self.dim)(x)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/utils/nn.py"", line 23, in setup\r\n self.pe = self.pe.at[:, 0::2].set(jnp.sin(position * div_term))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array_methods.py"", line 814, in set\r\n return scatter._scatter_update(self.array, self.index, values, lax.scatter,\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/ops/scatter.py"", line 92, in _scatter_update\r\n return internal_scatter(x, y, dynamic_idx)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/ops/scatter.py"", line 145, in _scatter_impl\r\n out = scatter_op(\r\njax._src.source_info_util.JaxStackTraceBeforeTransformation: ValueError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20480000 bytes.\r\n\r\nThe preceding stack trace is the source of the JAX operation that, once transformed by JAX, triggered the following exception.\r\n\r\n--------------------\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 102, in \r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 106, in __call__\r\n dyna_outputs = self.dynamics(outputs, training)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/models/dynamics.py"", line 71, in __call__\r\n logits = self.dynamics(vid_embed)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/utils/nn.py"", line 135, in __call__\r\n x = STBlock(\r\nValueError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20480000 bytes.\r\n",,terminal_output +1113,1817212,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", 
line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 166, in \r\n video_batch = next(iter(dataloader))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_loader.py"", line 479, in __next__\r\n result_record = next(self._iterator)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_loader.py"", line 380, in _iterator_with_context\r\n yield from it\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/grain_pool.py"", line 784, in __next__\r\n raise element\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/grain_pool.py"", line 607, in _process_elements_in_grain_pool\r\n with GrainPool(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/grain_pool.py"", line 422, in __init__\r\n parallel.run_in_parallel(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/core/parallel.py"", line 70, in run_in_parallel\r\n raise completed.exception()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/concurrent/futures/thread.py"", line 58, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/grain_pool.py"", line 423, in \r\n function=lambda child_process: child_process.start(),\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/process.py"", line 121, in start\r\n",,terminal_output +1114,1817274,"TERMINAL",0,0," self._popen = self._Popen(self)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/context.py"", line 288, in _Popen\r\n return Popen(process_obj)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/popen_spawn_posix.py"", line 32, in __init__\r\n 
super().__init__(process_obj)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/popen_fork.py"", line 19, in __init__\r\n self._launch(process_obj)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/popen_spawn_posix.py"", line 42, in _launch\r\n prep_data = spawn.get_preparation_data(process_obj._name)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 154, in get_preparation_data\r\n _check_not_importing_main()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 134, in _check_not_importing_main\r\n raise RuntimeError('''\r\nRuntimeError: \r\n An attempt has been made to start a new process before the\r\n current process has finished its bootstrapping phase.\r\n\r\n This probably means that you are not using fork to start your\r\n child processes and you have forgotten to use the proper idiom\r\n in the main module:\r\n\r\n if __name__ == '__main__':\r\n freeze_support()\r\n ...\r\n\r\n The ""freeze_support()"" line can be omitted if the program\r\n is not going to be frozen to produce an executable.\r\n",,terminal_output +1115,1839501,"sample.py",2879,0,"",python,selection_mouse +1116,1839502,"sample.py",2878,0,"",python,selection_command +1117,1875093,"TERMINAL",0,0,"bash",,terminal_focus +1118,1882508,"TERMINAL",0,0,"srun",,terminal_focus +1119,1885365,"TERMINAL",0,0,"^Csrun: interrupt (one more within 1 sec to abort)\r\nsrun: StepId=3380176.5 task 0: running\r\n",,terminal_output +1120,1885543,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3380176.5\r\nsrun: forcing job termination\r\nsrun: Job step aborted: Waiting up to 32 seconds for job step to finish.\r\nslurmstepd: error: *** STEP 3380176.5 ON hkn0706 CANCELLED AT 2025-07-28T18:16:37 ***\r\n",,terminal_output +1121,1885750,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3380176.5\r\nsrun: job abort in progress\r\n",,terminal_output +1122,1885999,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3380176.5\r\n",,terminal_output +1123,1886104,"TERMINAL",0,0,"]0;tum_cte0515@hkn0706:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ ",,terminal_output +1124,1886783,"TERMINAL",0,0,"g",,terminal_output +1125,1887031,"TERMINAL",0,0,"[?25li[?25h[?25lt[?25h",,terminal_output +1126,1887097,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1127,1887317,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1128,1887401,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1129,1887590,"TERMINAL",0,0,"[?25la[?25h[?25lt[?25h",,terminal_output +1130,1887796,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1131,1887940,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1132,1888206,"TERMINAL",0,0,"\r\n[?2004l\rfatal: not a git repository (or any parent up to mount point /hkfs)\r\nStopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).\r\n]0;tum_cte0515@hkn0706:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ ",,terminal_output +1133,1890038,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1134,1890124,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +1135,1890306,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1136,1890413,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +1137,1890542,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output 
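The `RuntimeError` above is Python's guard against the `spawn` start method re-executing the main module: grain starts its worker processes while `sample.py` is still being imported, so each child tries to spawn workers of its own. A minimal sketch of the idiom the error message asks for (the `main()` wrapper is hypothetical, not from the log):

```python
# Sketch of the __main__ guard required with the "spawn" start method: code
# that creates worker processes (here, iterating the grain dataloader) must
# run only in the parent, not on re-import inside each child.
import multiprocessing as mp


def main():
    # Build the dataloader and pull the first batch here, e.g.:
    # video_batch = next(iter(dataloader))
    pass


if __name__ == "__main__":
    mp.freeze_support()  # no-op unless frozen into an executable
    main()
```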
+1138,1890737,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0706:~/Projects[?2004h(jafar) [tum_cte0515@hkn0706 Projects]$ ",,terminal_output +1139,1891000,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1140,1891073,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +1141,1891234,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1142,1891499,"TERMINAL",0,0,"[?25lj[?25h",,terminal_output +1143,1891674,"TERMINAL",0,0,"afar",,terminal_output +1144,1892189,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0706:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0706 jafar]$ ",,terminal_output +1145,1892519,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +1146,1892612,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +1147,1892698,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1148,1892766,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1149,1892932,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1150,1893182,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +1151,1893787,"TERMINAL",0,0,"[?25lt[?25h[?25la[?25h",,terminal_output +1152,1893940,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1153,1894016,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1154,1894201,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1155,1894280,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +1156,1894638,"TERMINAL",0,0,"On branch new-arch-sampling\r\n",,terminal_output +1157,1894777,"TERMINAL",0,0,"Your branch is ahead of 'origin/new-arch-sampling' by 1 commit.\r\n (use ""git push"" to publish your local commits)\r\n\r\nChanges not staged for commit:\r\n (use ""git add ..."" to update what will be committed)\r\n (use ""git restore ..."" to discard changes in working directory)\r\n\tmodified: genie.py\r\n\tmodified: sample.py\r\n\tmodified: train_dynamics.py\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tdiff.diff\r\n\tdiff.log\r\n\tlogs/\r\n\toverfit_dir.zip\r\n\tread_tf_record.py\r\n\trequirements-franz.txt\r\n\tscripts_cremers/\r\n\tscripts_horeka/\r\n\tslurm-3373409.out\r\n\tslurm-3373410.out\r\n\tslurm-3379613.out\r\n\tslurm-3379615.out\r\n\tslurm-3379616.out\r\n\tslurm/\r\n\tutils/logger_bak.py\r\n\tutils/visualizer.py\r\n\tweekend-job-requeuer.sh\r\n\tweekend-job-starter.sh\r\n\r\nno changes added to commit (use ""git add"" and/or ""git commit -a"")\r\n]0;tum_cte0515@hkn0706:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0706 jafar]$ ",,terminal_output +1158,1897941,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +1159,1898088,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +1160,1898162,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1161,1898228,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1162,1898384,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +1163,1898501,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +1164,1898596,"TERMINAL",0,0,"[?25lf[?25h",,terminal_output +1165,1898680,"TERMINAL",0,0,"[?25lf[?25h",,terminal_output +1166,1898821,"TERMINAL",0,0,"\r\n[?2004l\r[?1h=\rdiff --git a/genie.py b/genie.py\r\nindex ed933ff..0cb56ba 100644\r\n--- a/genie.py\r\n+++ b/genie.py\r\n@@ -191,9 +191,12 @@ class Genie(nn.Module):\r\n next_token = jax.random.categorical(\r\n step_rng, next_token_logits / temperature, axis=-1\r\n ) # (B, 1)\r\n-\r\n+ \r\n # Insert the generated tokens into the sequence\r\n token_idxs_full = token_idxs_full.at[:, t, n].set(next_token)\r\n+ # FIXME @mihir: HACK\r\n+ # token_idxs_full = jnp.argmax(dyna_outputs[""token_logits""].astype(jnp.float32) , axis=-1)\r\n+ # break\r\n \r\n # --- Decode all tokens at once at the end ---\r\n final_frames = self.tokenizer.decode(\r\ndiff 
--git a/sample.py b/sample.py\r\nindex 2ac0eef..e453531 100644\r\n--- a/sample.py\r\n+++ b/sample.py\r\n@@ -172,13 +172,17 @@ def _get_dataloader_iterator():\r\n return grain_iterator\r\n \r\n # --- Get video + latent actions ---\r\n-# grain_iterator = _get_dataloader_iterator()\r\n-# video_batch = next(grain_iterator)\r\n+grain_iterator = _get_dataloader_iterator()\r\n+video_batch = next(grain_iterator)\r\n:",,terminal_output +1167,1899747,"TERMINAL",0,0,"\r # video_batch = np.load(""overfit_dir/single_sample_corner.npy"")\r\n:",,terminal_output +1168,1899995,"TERMINAL",0,0,"\r-video_batch = np.load(""overfit_dir/oai_sample_seed69_1.npy"") # *255.\r\n:",,terminal_output +1169,1900353,"TERMINAL",0,0,"\r-\r\n:",,terminal_output +1170,1900498,"TERMINAL",0,0,"\r-\r\n:",,terminal_output +1171,1900561,"TERMINAL",0,0,"\r-video_batch = video_batch.astype(args.dtype) / 255.0\r\n:",,terminal_output +1172,1900718,"TERMINAL",0,0,"\r+# video_batch = np.load(""overfit_dir/oai_sample_seed69_1.npy"") # *255.\r\n:",,terminal_output +1173,1901623,"TERMINAL",0,0,"\r+\r\n:",,terminal_output +1174,1901893,"TERMINAL",0,0,"\r+video_batch = jnp.array(video_batch)\r\n:",,terminal_output +1175,1902065,"TERMINAL",0,0,"\r+print(video_batch.dtype)\r\n:",,terminal_output +1176,1902257,"TERMINAL",0,0,"\r+video_batch = video_batch.astype(args.dtype) # / 255.0\r\n:",,terminal_output +1177,1902422,"TERMINAL",0,0,"\r+print(video_batch.dtype)\r\n:",,terminal_output +1178,1902589,"TERMINAL",0,0,"\r+video_batch = video_batch / 255.0\r\n:",,terminal_output +1179,1902818,"TERMINAL",0,0,"\r+print(video_batch.dtype)\r\n:",,terminal_output +1180,1902882,"TERMINAL",0,0,"\r # Get latent actions for all videos in the batch\r\n:",,terminal_output +1181,1903050,"TERMINAL",0,0,"\r batch = dict(videos=video_batch[:,:args.seq_len])\r\n:",,terminal_output +1182,1903210,"TERMINAL",0,0,"\r action_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\r\n:",,terminal_output +1183,1903342,"TERMINAL",0,0,"\rdiff --git a/train_dynamics.py b/train_dynamics.py\r\n:",,terminal_output +1184,1903510,"TERMINAL",0,0,"\rindex 4f4bff2..c1565f4 100644\r\n:",,terminal_output +1185,1903744,"TERMINAL",0,0,"\r--- a/train_dynamics.py\r\n:",,terminal_output +1186,1903830,"TERMINAL",0,0,"\r+++ b/train_dynamics.py\r\n:",,terminal_output +1187,1904064,"TERMINAL",0,0,"\r@@ -345,15 +345,15 @@ if __name__ == ""__main__"":\r\n:",,terminal_output +1188,1904198,"TERMINAL",0,0,"\r # --- TRAIN LOOP ---\r\n:",,terminal_output +1189,1904362,"TERMINAL",0,0,"\r dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\r\n:",,terminal_output +1190,1904533,"TERMINAL",0,0,"\r while step < args.num_steps:\r\n:",,terminal_output +1191,1904701,"TERMINAL",0,0,"\r- # for videos in dataloader:\r\n:",,terminal_output +1192,1904871,"TERMINAL",0,0,"\r+ for videos in dataloader:\r\n:",,terminal_output +1193,1905116,"TERMINAL",0,0,"\r # for i in range(videos.shape[0]):\r\n:",,terminal_output +1194,1905227,"TERMINAL",0,0,"\r # video_i = videos[i:i+1] # shape (1, T, H, W, C)\r\n:",,terminal_output +1195,1905810,"TERMINAL",0,0,"\r # np.save(f""overfit_dir/oai_sample_seed69_{i}.npy"", video_i)\r\n:\r # jax.debug.breakpoint()\r\n:\r- videos = np.load(""overfit_dir/oai_sample_seed69_1.npy"") # *255.\r\n:",,terminal_output +1196,1905978,"TERMINAL",0,0,"\r+ # videos = np.load(""overfit_dir/oai_sample_seed69_1.npy"") # *255.\r\n:\r # videos = videos.astype(np.uint8)\r\n:\r- videos = 
jax.make_array_from_process_local_data(videos_sharding, videos)\r\n:\r- while True:\r\n:\r+ # videos = jax.make_array_from_process_local_data(videos_sharding, videos)\r\n:\r+ # while True:\r\n:",,terminal_output +1197,1906040,"TERMINAL",0,0,"\r # --- Train step ---\r\n:\r rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\r\n:",,terminal_output +1198,1906244,"TERMINAL",0,0,"\r \r\n:",,terminal_output +1199,1906557,"TERMINAL",0,0,"\r\r(END)",,terminal_output +1200,1906764,"TERMINAL",0,0,"[?25l\r\r(END)[?25h",,terminal_output +1201,1909515,"TERMINAL",0,0,"[?25l\r:11[?25h",,terminal_output +1202,1910023,"TERMINAL",0,0,"[?25l\rM+video_batch = next(grain_iterator)\r\nM+grain_iterator = _get_dataloader_iterator()\r\nM-# video_batch = next(grain_iterator)\r\nM-# grain_iterator = _get_dataloader_iterator()\r\nM # --- Get video + latent actions ---\r\nM \r\nM return grain_iterator\r\nM@@ -172,13 +172,17 @@ def _get_dataloader_iterator():\r\nM+++ b/sample.py\r\nM--- a/sample.py\r\nMindex 2ac0eef..e453531 100644\r\nMdiff --git a/sample.py b/sample.py\r\nM final_frames = self.tokenizer.decode(\r\nM # --- Decode all tokens at once at the end ---\r\nM \r\nM+ # break\r\nM+ # token_idxs_full = jnp.argmax(dyna_outputs[""token_logits""].astype(jnp.float32) , axis=-1)\r\nM+ # FIXME @mihir: HACK\r\nM token_idxs_full = token_idxs_full.at[:, t, n].set(next_token)\r\nM # Insert the generated tokens into the sequence\r\nM+ \r\nM-\r\nM ) # (B, 1)\r\nM step_rng, next_token_logits / temperature, axis=-1\r\nM next_token = jax.random.categorical(\r\nM@@ -191,9 +191,12 @@ class Genie(nn.Module):\r\nM+++ b/genie.py\r\nM--- a/genie.py\r\nMindex ed933ff..0cb56ba 100644\r\nMdiff --git a/genie.py b/genie.py\r\n\r:[?25h",,terminal_output +1203,1911519,"TERMINAL",0,0,"\r[?1l>]0;tum_cte0515@hkn0706:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0706 jafar]$ ",,terminal_output +1204,1912086,"TERMINAL",0,0,"[?25lg[?25h[?25lit[?25h",,terminal_output +1205,1912225,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1206,1912358,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1207,1912638,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +1208,1913174,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1209,1913471,"TERMINAL",0,0,"[?25la[?25h[?25ls[?25h",,terminal_output +1210,1915855,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1211,1916106,"TERMINAL",0,0,"[?25lo[?25h[?25lm[?25h",,terminal_output +1212,1916217,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +1213,1916432,"TERMINAL",0,0,"[?25li[?25h[?25lt[?25h",,terminal_output +1214,1916630,"TERMINAL",0,0,"[?25l -[?25h",,terminal_output +1215,1916831,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +1216,1916930,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1217,1917421,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +1218,1917568,"TERMINAL",0,0,"[?25lm [?25h",,terminal_output +1219,1917715,"TERMINAL",0,0,"[?25l""[?25h",,terminal_output +1220,1918996,"TERMINAL",0,0,"[?25lf[?25h",,terminal_output +1221,1919179,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +1222,1919340,"TERMINAL",0,0,"[?25lx[?25h",,terminal_output +1223,1919633,"TERMINAL",0,0,"[?25le[?25h[?25ld[?25h[?25l [?25h",,terminal_output +1224,1921953,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +1225,1922256,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1226,1922519,"TERMINAL",0,0,"[?25ly[?25h[?25lp[?25h",,terminal_output +1227,1922699,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1228,1922973,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1229,1923241,"TERMINAL",0,0,"[?25lf[?25h",,terminal_output 
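The diff being committed above splits the `astype(args.dtype)` cast and the `/ 255.0` scaling into separate statements and instruments each step with dtype prints. A condensed sketch of what those prints verify, namely that the scaling stays in the compute dtype rather than promoting to float32 (the dummy array and shapes are assumptions; names follow the sample.py in this log):

```python
# Sketch of the dtype ordering from the committed diff: cast first, then scale,
# so the division stays in bfloat16 instead of promoting the batch to float32.
import numpy as np
import jax.numpy as jnp

video_batch = np.zeros((1, 16, 90, 160, 3), dtype=np.uint8)  # dummy uint8 frames
video_batch = jnp.array(video_batch)
print(video_batch.dtype)  # uint8
video_batch = video_batch.astype(jnp.bfloat16)
print(video_batch.dtype)  # bfloat16
video_batch = video_batch / 255.0
print(video_batch.dtype)  # still bfloat16 (Python scalars do not promote)
```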
+1230,1923700,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1231,1923871,"TERMINAL",0,0,"[?25lf[?25h",,terminal_output +1232,1924075,"TERMINAL",0,0,"[?25lo[?25h[?25lr[?25h",,terminal_output +1233,1924096,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1234,1926181,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1235,1926315,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +1236,1926401,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +1237,1926688,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +1238,1927025,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +1239,1927220,"TERMINAL",0,0,"[?25lin[?25h[?25lg[?25h",,terminal_output +1240,1927412,"TERMINAL",0,0,"[?25l2[?25h",,terminal_output +1241,1927819,"TERMINAL",0,0,"\r\n[?2004l\r[?2004h> ",,terminal_output +1242,1928482,"TERMINAL",0,0,"[?25l""[?25h",,terminal_output +1243,1928686,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +1244,1929081,"TERMINAL",0,0,"[new-arch-sampling 8fa5092] fixed dtypes for sampling2\r\n 3 files changed, 18 insertions(+), 11 deletions(-)\r\n]0;tum_cte0515@hkn0706:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0706 jafar]$ ",,terminal_output +1245,1929524,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +1246,1929624,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +1247,1929685,"TERMINAL",0,0,"[?25l t[?25h",,terminal_output +1248,1930224,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +1249,1931147,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1250,1931260,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1251,1931456,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1252,1931549,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +1253,1931683,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1254,1931876,"TERMINAL",0,0,"[?25lk[?25h",,terminal_output +1255,1932346,"TERMINAL",0,0,"[?25lc[?25h[?25lk[?25h",,terminal_output +1256,1932537,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +1257,1932646,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1258,1932891,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1259,1933110,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1260,1933111,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +1261,1933239,"TERMINAL",0,0,"[?25la[?25h[?25lin[?25h",,terminal_output +1262,1933395,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +1263,1933784,"TERMINAL",0,0,"Switched to branch 'main'\r\nYour branch is up to date with 'origin/main'.\r\n]0;tum_cte0515@hkn0706:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0706 jafar]$ ",,terminal_output +1264,1935426,"",0,0,"Switched from branch 'new-arch-sampling' to 'main'",,git_branch_checkout +1265,1936840,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1266,1937020,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +1267,1937158,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1268,1937224,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1269,1937358,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1270,1938000,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +1271,1938060,"TERMINAL",0,0,"mple.py ",,terminal_output +1272,1938443,"TERMINAL",0,0,"\r\n[?2004l\rfrom dataclasses import dataclass\r\nimport time\r\nimport os\r\n\r\nimport dm_pix as pix\r\nimport einops\r\nimport jax\r\nimport jax.numpy as jnp\r\nimport flax.linen as nn\r\nimport numpy as np\r\nfrom orbax.checkpoint import PyTreeCheckpointer\r\nfrom PIL import Image, ImageDraw\r\nimport tyro\r\n\r\nfrom genie import Genie\r\nfrom utils.dataloader import get_dataloader\r\n\r\n\r\n@dataclass\r\nclass Args:\r\n # Experiment\r\n seed: int = 0\r\n seq_len: int = 16\r\n image_channels: int = 3\r\n image_height: int = 90\r\n image_width: int = 160\r\n 
data_dir: str = ""data/coinrun_episodes""\r\n checkpoint: str = """"\r\n # Sampling\r\n batch_size: int = 1\r\n maskgit_steps: int = 25\r\n temperature: float = 1.0\r\n sample_argmax: bool = True\r\n start_frame: int = 0\r\n # Tokenizer checkpoint\r\n tokenizer_dim: int = 512\r\n tokenizer_ffn_dim: int = 2048\r\n latent_patch_dim: int = 32\r\n num_patch_latents: int = 1024\r\n patch_size: int = 4\r\n tokenizer_num_blocks: int = 4\r\n tokenizer_num_heads: int = 8\r\n # LAM checkpoint\r\n lam_dim: int = 512\r\n lam_ffn_dim: int = 2048\r\n latent_action_dim: int = 32\r\n num_latent_actions: int = 6\r\n lam_patch_size: int = 16\r\n lam_num_blocks: int = 4\r\n lam_num_heads: int = 8\r\n # Dynamics checkpoint\r\n dyna_dim: int = 512\r\n dyna_ffn_dim: int = 2048\r\n dyna_num_blocks: int = 6\r\n dyna_num_heads: int = 8\r\n param_dtype = jnp.float32\r\n dtype = jnp.bfloat16\r\n use_flash_attention: bool = True\r\n\r\n\r\nargs = tyro.cli(Args)\r\nrng = jax.random.PRNGKey(args.seed)\r\n\r\n# --- Load Genie checkpoint ---\r\ngenie = Genie(\r\n # Tokenizer\r\n in_dim=args.image_channels,\r\n tokenizer_dim=args.tokenizer_dim,\r\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\r\n latent_patch_dim=args.latent_patch_dim,\r\n num_patch_latents=args.num_patch_latents,\r\n patch_size=args.patch_size,\r\n tokenizer_num_blocks=args.tokenizer_num_blocks,\r\n tokenizer_num_heads=args.tokenizer_num_heads,\r\n # LAM\r\n lam_dim=args.lam_dim,\r\n lam_ffn_dim=args.lam_ffn_dim,\r\n latent_action_dim=args.latent_action_dim,\r\n num_latent_actions=args.num_latent_actions,\r\n lam_patch_size=args.lam_patch_size,\r\n lam_num_blocks=args.lam_num_blocks,\r\n lam_num_heads=args.lam_num_heads,\r\n lam_co_train=False,\r\n # Dynamics\r\n dyna_dim=args.dyna_dim,\r\n dyna_ffn_dim=args.dyna_ffn_dim,\r\n dyna_num_blocks=args.dyna_num_blocks,\r\n dyna_num_heads=args.dyna_num_heads,\r\n param_dtype=args.param_dtype,\r\n dtype=args.dtype,\r\n use_flash_attention=args.use_flash_attention,\r\n)\r\nrng, _rng = jax.random.split(rng)\r\nimage_shape = (args.image_height, args.image_width, args.image_channels)\r\ndummy_inputs = dict(\r\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\r\n mask_rng=_rng,\r\n)\r\nrng, _rng = jax.random.split(rng)\r\nparams = genie.init(_rng, dummy_inputs)\r\nckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\r\nparams[""params""].update(ckpt)\r\n\r\n\r\ndef _sampling_wrapper(module, batch):\r\n return module.sample(\r\n batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax\r\n )\r\n\r\n\r\n# --- Define autoregressive sampling loop ---\r\ndef _autoreg_sample(rng, video_batch, action_batch):\r\n vid = video_batch[:, : args.start_frame + 1]\r\n sampling_fn = jax.jit(nn.apply(_sampling_wrapper, genie))\r\n rng, _rng = jax.random.split(rng)\r\n batch = dict(videos=vid, latent_actions=action_batch, rng=_rng)\r\n generated_vid = sampling_fn(params, batch)\r\n return generated_vid\r\n\r\n\r\n# --- Get video + latent actions ---\r\narray_record_files = [\r\n os.path.join(args.data_dir, x)\r\n for x in os.listdir(args.data_dir)\r\n if x.endswith("".array_record"")\r\n]\r\ndataloader = get_dataloader(\r\n array_record_files,\r\n args.seq_len,\r\n args.batch_size,\r\n args.image_height,\r\n args.image_width,\r\n args.image_channels,\r\n num_workers=8,\r\n prefetch_buffer_size=1,\r\n seed=args.seed,\r\n)\r\nvideo_batch = next(iter(dataloader))\r\n# Get latent actions for all videos in the batch\r\nbatch = 
dict(videos=video_batch)\r\naction_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\r\naction_batch = action_batch.reshape(video_batch.shape[0], args.seq_len - 1, 1)\r\n\r\n# --- Sample + evaluate video ---\r\nvid = _autoreg_sample(rng, video_batch, action_batch)\r\ngt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\r\nrecon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\r\nssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\r\nprint(f""SSIM: {ssim}"")\r\n\r\n# --- Construct video ---\r\ntrue_videos = (video_batch * 255).astype(np.uint8)\r\npred_videos = (vid * 255).astype(np.uint8)\r\nvideo_comparison = np.zeros((2, *vid.shape), dtype=np.uint8)\r\nvideo_comparison[0] = true_videos[:, : args.seq_len]\r\nvideo_comparison[1] = pred_videos\r\nframes = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\r\n\r\n# --- Save video ---\r\nimgs = [Image.fromarray(img) for img in frames]\r\n# Write actions on each frame, on each row (i.e., for each video in the batch, on the GT row)\r\nfor t, img in enumerate(imgs[1:]):\r\n d = ImageDraw.Draw(img)\r\n for row in range(action_batch.shape[0]):\r\n action = action_batch[row, t, 0]\r\n y_offset = row * video_batch.shape[2] + 2\r\n d.text((2, y_offset), f""{action}"", fill=255)\r\nimgs[0].save(\r\n f""generation_{time.time()}.gif"",\r\n save_all=True,\r\n append_images=imgs[1:],\r\n duration=250,\r\n loop=0,\r\n)\r\n]0;tum_cte0515@hkn0706:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0706 jafar]$ ",,terminal_output +1273,1948502,"TERMINAL",0,0,"[?25lg[?25h[?25li[?25h",,terminal_output +1274,1948597,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1275,1948647,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1276,1948767,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +1277,1949022,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1278,1949287,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +1279,1949583,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +1280,1949663,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +1281,1952355,"TERMINAL",0,0,"remote: Enumerating objects: 173, done.\r\nremote: Counting objects: 0% (1/173)\rremote: Counting objects: 1% (2/173)\rremote: Counting objects: 2% (4/173)\rremote: Counting objects: 3% (6/173)\rremote: Counting objects: 4% (7/173)\rremote: Counting objects: 5% (9/173)\rremote: Counting objects: 6% (11/173)\rremote: Counting objects: 7% (13/173)\rremote: Counting objects: 8% (14/173)\rremote: Counting objects: 9% (16/173)\rremote: Counting objects: 10% (18/173)\rremote: Counting objects: 11% (20/173)\rremote: Counting objects: 12% (21/173)\rremote: Counting objects: 13% (23/173)\rremote: Counting objects: 14% (25/173)\rremote: Counting objects: 15% (26/173)\rremote: Counting objects: 16% (28/173)\rremote: Counting objects: 17% (30/173)\rremote: Counting objects: 18% (32/173)\rremote: Counting objects: 19% (33/173)\rremote: Counting objects: 20% (35/173)\rremote: Counting objects: 21% (37/173)\rremote: Counting objects: 22% (39/173)\rremote: Counting objects: 23% (40/173)\rremote: Counting objects: 24% (42/173)\rremote: Counting objects: 25% (44/173)\rremote: Counting objects: 26% (45/173)\rremote: Counting objects: 27% (47/173)\rremote: Counting objects: 28% (49/173)\rremote: Counting objects: 29% (51/173)\rremote: Counting objects: 30% (52/173)\rremote: Counting objects: 31% (54/173)\rremote: Counting objects: 32% (56/173)\rremote: Counting objects: 33% (58/173)\rremote: Counting objects: 34% (59/173)\rremote: Counting objects: 
100% (173/173), done.\r\nremote: Compressing objects: 0% (1/104)\r",,terminal_output +1282,1952444,"TERMINAL",0,0,"remote: Compressing objects: 100% (104/104), done.\r\nReceiving objects: 0% (1/173)\r",,terminal_output +1283,1952722,"TERMINAL",0,0,"remote: Total 173 (delta 115), reused 121 (delta 69), pack-reused 0 (from 0)\r\nReceiving objects: 100% (173/173)\rReceiving objects: 
100% (173/173), 63.36 KiB | 697.00 KiB/s, done.\r\nResolving deltas: 0% (0/115)\rResolving deltas: 1% (2/115)\rResolving deltas: 2% (3/115)\rResolving deltas: 3% (4/115)\rResolving deltas: 4% (5/115)\rResolving deltas: 5% (6/115)\rResolving deltas: 6% (7/115)\rResolving deltas: 7% (9/115)\rResolving deltas: 8% (10/115)\rResolving deltas: 9% (11/115)\rResolving deltas: 10% (12/115)\rResolving deltas: 11% (13/115)\rResolving deltas: 12% (14/115)\rResolving deltas: 13% (15/115)\rResolving deltas: 14% (17/115)\rResolving deltas: 15% (18/115)\rResolving deltas: 16% (19/115)\rResolving deltas: 17% (20/115)\rResolving deltas: 18% (21/115)\rResolving deltas: 19% (22/115)\rResolving deltas: 20% (23/115)\rResolving deltas: 21% (25/115)\rResolving deltas: 22% (26/115)\rResolving deltas: 23% (27/115)\rResolving deltas: 24% (28/115)\rResolving deltas: 25% (29/115)\rResolving deltas: 26% (30/115)\rResolving deltas: 27% (32/115)\rResolving deltas: 28% (33/115)\rResolving deltas: 29% (34/115)\rResolving deltas: 30% (35/115)\rResolving deltas: 31% (36/115)\rResolving deltas: 32% (37/115)\rResolving deltas: 33% (38/115)\rResolving deltas: 34% (40/115)\rResolving deltas: 35% (41/115)\rResolving deltas: 36% (42/115)\rResolving deltas: 37% (43/115)\rResolving deltas: 38% (44/115)\rResolving deltas: 39% (45/115)\rResolving deltas: 40% (46/115)\rResolving deltas: 41% (48/115)\rResolving deltas: 42% (49/115)\rResolving deltas: 43% (50/115)\rResolving deltas: 44% (51/115)\rResolving deltas: 45% (52/115)\rResolving deltas: 46% (53/115)\rResolving deltas: 47% (55/115)\rResolving deltas: 48% (56/115)\rResolving deltas: 49% (57/115)\rResolving deltas: 50% (58/115)\rResolving deltas: 51% (59/115)\rResolving deltas: 52% (60/115)\rResolving deltas: 53% (61/115)\rResolving deltas: 54% (63/115)\rResolving deltas: 55% (64/115)\rResolving deltas: 56% (65/115)\rResolving deltas: 57% (66/115)\rResolving deltas: 58% (67/115)\rResolving deltas: 59% (68/115)\rResolving deltas: 60% (69/115)\rResolving deltas: 61% (71/115)\rResolving deltas: 62% (72/115)\rResolving deltas: 63% (73/115)\rResolving deltas: 64% (74/115)\rResolving deltas: 65% (75/115)\rResolving deltas: 66% (76/115)\rResolving deltas: 67% (78/115)\rResolving deltas: 68% (79/115)\rResolving deltas: 69% (80/115)\rResolving deltas: 70% (81/115)\rResolving deltas: 71% (82/115)\rResolving deltas: 72% (83/115)\rResolving deltas: 73% (84/115)\rResolving deltas: 74% (86/115)\rResolving deltas: 75% (87/115)\rResolving deltas: 76% (88/115)\rResolving deltas: 77% (89/115)\rResolving deltas: 78% (90/115)\rResolving deltas: 79% (91/115)\rResolving deltas: 80% (92/115)\rResolving deltas: 81% (94/115)\rResolving deltas: 82% (95/115)\rResolving deltas: 83% (96/115)\rResolving deltas: 84% (97/115)\rResolving deltas: 85% (98/115)\rResolving deltas: 86% (99/115)\rResolving deltas: 87% (101/115)\rResolving deltas: 88% (102/115)\rResolving deltas: 89% (103/115)\rResolving deltas: 90% (104/115)\rResolving deltas: 91% (105/115)\rResolving deltas: 92% (106/115)\rResolving deltas: 93% (107/115)\rResolving deltas: 94% (109/115)\rResolving deltas: 95% (110/115)\rResolving deltas: 96% (111/115)\rResolving deltas: 97% (112/115)\rResolving deltas: 98% (113/115)\rResolving deltas: 99% (114/115)\rResolving deltas: 100% (115/115)\rResolving deltas: 100% (115/115), completed with 12 local objects.\r\n",,terminal_output +1284,1952981,"TERMINAL",0,0,"From github.com:p-doom/jafar\r\n e296267..42a7655 main -> origin/main\r\n * [new branch] causal-transformer-nnx -> origin/causal-transformer-nnx\r\n 
c179a45..9470c69 feat/actions_in_dummy_data -> origin/feat/actions_in_dummy_data\r\n * [new branch] features-in-readme -> origin/features-in-readme\r\n * [new branch] flax-nnx-migration -> origin/flax-nnx-migration\r\n * [new branch] gt_actions_ablation_baseline_3373155 -> origin/gt_actions_ablation_baseline_3373155\r\n * [new branch] proper-readme -> origin/proper-readme\r\n * [new branch] readme-typo -> origin/readme-typo\r\n * [new branch] readme-typo-2 -> origin/readme-typo-2\r\n * [new branch] readme-typo-bibtex -> origin/readme-typo-bibtex\r\nUpdating e296267..42a7655\r\n",,terminal_output +1285,1953029,"TERMINAL",0,0,"Fast-forward\r\n",,terminal_output +1286,1953127,"TERMINAL",0,0," README.md | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++--------------\r\n tests/data/generate_dummy_data.py | 10 ++++++++++\r\n 2 files changed, 62 insertions(+), 14 deletions(-)\r\n]0;tum_cte0515@hkn0706:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0706 jafar]$ ",,terminal_output +1287,1992299,"TERMINAL",0,0,"[?25lq[?25h",,terminal_output +1288,1992498,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1289,1992546,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1290,1993319,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1291,1993560,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1292,1993739,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1293,1993740,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1294,1993905,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0706.localdomain: Mon Jul 28 18:18:26 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3379590 accelerat train_dy tum_cte0 PD\t0:00\t 3 (Priority)3371238 accelerat train_dy tum_cte0 R 1:29:02\t 2 hkn[0509,0532]3377693 accelerat train_dy tum_cte0 R 23:45:53\t 8 hkn[0426,0436,0510,0524,0603,0627,0805,0812]3373408 accelerat train_dy tum_cte0 R 1-05:28:21\t 8 hkn[0417-0419,0422,0527,0621,0625,0628]3380176 accelerat interact tum_cte0 R26:51\t 1 hkn0706",,terminal_output +1295,1994818,"TERMINAL",0,0,"73422",,terminal_output +1296,1995854,"TERMINAL",0,0,"84533",,terminal_output +1297,1996872,"TERMINAL",0,0,"95644",,terminal_output +1298,1997450,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0706:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0706 jafar]$ ",,terminal_output +1299,2083045,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n 
tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n use_maskgit: bool = False\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n outputs[""recon""] = outputs[""recon""].astype(jnp.float32)\n logits = outputs[""token_logits""]\n targets = outputs[""video_tokens""]\n\n # if not args.use_maskgit:\n # logits = outputs[""token_logits""][:, :, :-1]\n # targets = outputs[""video_tokens""][:, :, 1:]\n # mask = outputs[""mask""][:, :, 1:]\n\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(logits, targets)\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = logits.argmax(-1) == targets\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(logits)\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=logits.max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if 
args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n use_maskgit=args.use_maskgit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=args.dtype\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n 
handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n # Restore full dynamics model\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n train_state = restore_genie_components(\n train_state, replicated_sharding, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n while step < args.num_steps:\n for videos in dataloader:\n # for i in range(videos.shape[0]):\n # video_i = videos[i:i+1] # shape (1, T, H, W, C)\n # np.save(f""overfit_dir/oai_sample_seed69_{i}.npy"", video_i)\n # jax.debug.breakpoint()\n # videos = np.load(""overfit_dir/oai_sample_seed69_1.npy"") # *255.\n # videos = videos.astype(np.uint8)\n # videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n # while True:\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = 
jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +1300,2083259,"train_dynamics.py",595,11655,"@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = 
pix.ssim(gt, recon).mean() # type: ignore\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=args.dtype\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n 
wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n # Restore full dynamics model\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n train_state = restore_genie_components(\n train_state, replicated_sharding, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n while step < 
args.num_steps:\n for videos in dataloader:\n",python,content +1301,2097777,"train_dynamics.py",10391,0,"",python,selection_mouse +1302,2097932,"train_dynamics.py",10379,20,"abstract_train_state",python,selection_mouse +1303,2137594,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",0,0,"from dataclasses import dataclass\nimport time\nimport os\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nimport optax\nimport orbax.checkpoint as ocp\nfrom flax.training.train_state import TrainState\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\nrng = jax.random.PRNGKey(args.seed)\n\n# --- Load Genie checkpoint ---\ngenie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n 
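A minimal sketch of the mask-weighted cross-entropy that the recorded dynamics_loss_fn computes; the shapes are assumptions inferred from the surrounding code:

import jax.numpy as jnp
import optax

def masked_ce(logits, targets, mask):
    # logits: (B, T, N, V) floats; targets: (B, T, N) ints; mask: (B, T, N) in {0, 1}.
    # Dividing by mask.sum() averages the loss over masked positions only,
    # mirroring (mask * ce_loss).sum() / mask.sum() in the recorded file.
    ce = optax.softmax_cross_entropy_with_integer_labels(logits, targets)
    return (mask * ce).sum() / mask.sum()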
handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\ndef _sampling_wrapper(module, batch):\n return module.sample(\n batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax\n )\n\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n sampling_fn = jax.jit(nn.apply(_sampling_wrapper, genie))\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=vid, latent_actions=action_batch, rng=_rng)\n generated_vid = sampling_fn(params, batch)\n return generated_vid\n\n\n# --- Get video + latent actions ---\narray_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n]\ndataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n)\nvideo_batch = next(iter(dataloader))\nvideo_batch = jnp.array(video_batch)\nprint(video_batch.dtype)\nvideo_batch = video_batch.astype(args.dtype)\nprint(video_batch.dtype)\nvideo_batch = video_batch / 255.0\nprint(video_batch.dtype) \n# Get latent actions for all videos in the batch\nbatch = dict(videos=video_batch)\naction_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\naction_batch = action_batch.reshape(video_batch.shape[0], args.seq_len - 1, 1)\n\n# --- Sample + evaluate video ---\nvid = _autoreg_sample(rng, video_batch, action_batch)\ngt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\nrecon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\nssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\nprint(f""SSIM: {ssim}"")\n\n# --- Construct video ---\ntrue_videos = (video_batch * 255).astype(np.uint8)\npred_videos = (vid * 255).astype(np.uint8)\nvideo_comparison = np.zeros((2, *vid.shape), dtype=np.uint8)\nvideo_comparison[0] = true_videos[:, : args.seq_len]\nvideo_comparison[1] = pred_videos\nframes = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n# --- Save video ---\nimgs = [Image.fromarray(img) for img in frames]\n# Write actions on each frame, on each row (i.e., for each video in the batch, on the GT row)\nfor t, img in enumerate(imgs[1:]):\n d = ImageDraw.Draw(img)\n for row in range(action_batch.shape[0]):\n action = action_batch[row, t, 0]\n y_offset = row * video_batch.shape[2] + 2\n d.text((2, y_offset), f""{action}"", fill=255)\nimgs[0].save(\n f""generation_{time.time()}.gif"",\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n)\n",python,tab +1304,2139997,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2789,0,"",python,selection_mouse +1305,2140372,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2788,6,"params",python,selection_mouse +1306,2141551,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,0,"",python,selection_mouse 
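For reference, a minimal sketch of the Orbax restore pattern used in the sample.py shown above; the checkpoint directory is hypothetical, and dummy_train_state stands in for the freshly initialized TrainState built in the recorded script:

import jax
import orbax.checkpoint as ocp

registry = ocp.handlers.DefaultCheckpointHandlerRegistry()
registry.add("model_state", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)
manager = ocp.CheckpointManager(
    "/path/to/checkpoints",  # hypothetical directory
    options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),
    handler_registry=registry,
)
# Build a shape/dtype skeleton so restore targets the right structure
# without materializing a second full copy of the weights.
abstract_state = jax.tree_util.tree_map(ocp.utils.to_shape_dtype_struct, dummy_train_state)
restored = manager.restore(
    manager.latest_step(),
    args=ocp.args.Composite(model_state=ocp.args.StandardRestore(abstract_state)),
)
params = restored["model_state"].params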
+1307,2142351,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2818,0,"",python,selection_mouse +1308,2142539,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2814,12,"dummy_inputs",python,selection_mouse +1309,2143383,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2792,0,"",python,selection_mouse +1310,2143562,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2788,6,"params",python,selection_mouse +1311,2227215,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",3035,0,"",python,selection_mouse +1312,2227221,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",3034,0,"",python,selection_command +1313,2239989,"sample.py",0,0,"from dataclasses import dataclass\nfrom typing import Optional\nimport time\nimport os\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nfrom flax.training.train_state import TrainState\nimport grain\nimport orbax.checkpoint as ocp\nimport optax\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n checkpoint_step: Optional[int] = None\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_co_train: bool = True\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\nrng = jax.random.PRNGKey(args.seed)\n\n# --- Load Genie checkpoint ---\ngenie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=args.lam_co_train,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n use_maskgit=False,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, 
args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n # sampling_fn = jax.jit(nn.apply(_sampling_wrapper, genie)) \n sampling_fn = nn.apply(_sampling_wrapper, genie)\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=vid, latent_actions=action_batch, rng=_rng)\n generated_vid = sampling_fn(params, batch)\n return generated_vid\n\ndef _get_dataloader_iterator():\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=0,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n return grain_iterator\n\n# --- Get video + latent actions ---\ngrain_iterator = _get_dataloader_iterator()\nvideo_batch = next(grain_iterator)\n# video_batch = np.load(""overfit_dir/single_sample_corner.npy"")\n# video_batch = np.load(""overfit_dir/oai_sample_seed69_1.npy"") # *255.\n\nvideo_batch = jnp.array(video_batch)\nprint(video_batch.dtype)\nvideo_batch = video_batch.astype(args.dtype) # / 255.0\nprint(video_batch.dtype)\nvideo_batch = video_batch / 255.0\nprint(video_batch.dtype)\n# Get latent actions for all videos in the batch\nbatch = dict(videos=video_batch[:,:args.seq_len])\naction_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\naction_batch = action_batch.reshape(video_batch.shape[0], args.seq_len - 1, 1)\n\n# --- Sample + evaluate video ---\nprint(""autoreg sampling..."")\nvid = _autoreg_sample(rng, video_batch, action_batch)\nprint(""autoreg sampling done. 
calculating ssim and saving video"")\ngt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\nrecon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\nssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\nprint(f""SSIM: {ssim}"")\n\n# --- Construct video ---\ntrue_videos = (video_batch * 255).astype(np.uint8)\npred_videos = (vid * 255).astype(np.uint8)\nvideo_comparison = np.zeros((2, *vid.shape), dtype=np.uint8)\nvideo_comparison[0] = true_videos[:, : args.seq_len]\nvideo_comparison[1] = pred_videos\nframes = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n# --- Save video ---\nimgs = [Image.fromarray(img) for img in frames]\n# Write actions on each frame, on each row (i.e., for each video in the batch, on the GT row)\nfor t, img in enumerate(imgs[1:]):\n d = ImageDraw.Draw(img)\n for row in range(action_batch.shape[0]):\n action = action_batch[row, t, 0]\n y_offset = row * video_batch.shape[2] + 2\n d.text((2, y_offset), f""{action}"", fill=255)\nimgs[0].save(\n f""generation_{time.time()}.gif"",\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n)\n",python,tab +1314,2240209,"sample.py",34,6144,"import time\nimport os\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\nrng = jax.random.PRNGKey(args.seed)\n\n# --- Load Genie checkpoint ---\ngenie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = 
jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\nckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n\n\ndef _sampling_wrapper(module, batch):\n return module.sample(\n batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax\n )\n\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n sampling_fn = jax.jit(nn.apply(_sampling_wrapper, genie))\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=vid, latent_actions=action_batch, rng=_rng)\n generated_vid = sampling_fn(params, batch)\n return generated_vid\n\n\n# --- Get video + latent actions ---\narray_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n]\ndataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n)\nvideo_batch = next(iter(dataloader))\n# Get latent actions for all videos in the batch\nbatch = dict(videos=video_batch)\naction_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\naction_batch = action_batch.reshape(video_batch.shape[0], args.seq_len - 1, 1)\n\n# --- Sample + evaluate video ---\nvid = _autoreg_sample(rng, video_batch, action_batch)\n",python,content +1315,2241507,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",0,0,"",python,tab +1316,2243680,"sample.py",0,0,"",python,tab +1317,2300374,"TERMINAL",0,0,"queue",,terminal_output +1318,2301042,"TERMINAL",0,0,"git pull",,terminal_output +1319,2301345,"TERMINAL",0,0,"cat sample.py ",,terminal_output +1320,2301967,"TERMINAL",0,0,"git pull",,terminal_output +1321,2302106,"TERMINAL",0,0,"queue",,terminal_output +1322,2302340,"TERMINAL",0,0,"",,terminal_output +1323,2303402,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1324,2303464,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +1325,2303634,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1326,2303724,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +1327,2303920,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +1328,2305427,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +1329,2305593,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1330,2305750,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +1331,2305922,"TERMINAL",0,0,"[?25lne[?25h[?25lr[?25h",,terminal_output +1332,2306120,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0706:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ ",,terminal_output +1333,2306724,"TERMINAL",0,0,"runner",,terminal_output +1334,2306985,"TERMINAL",0,0,"queue",,terminal_output +1335,2307522,"TERMINAL",0,0,"git pull",,terminal_output +1336,2307890,"TERMINAL",0,0,"cat sample.py ",,terminal_output +1337,2308642,"TERMINAL",0,0,"git checkout main",,terminal_output +1338,2308864,"TERMINAL",0,0,"ommit -am ""fixed dtypes for sampling2\r\n\r""",,terminal_output +1339,2309409,"TERMINAL",0,0,"diff\r\n\r",,terminal_output +1340,2309852,"TERMINAL",0,0,"status",,terminal_output +1341,2310231,"TERMINAL",0,0,"cd jafar",,terminal_output +1342,2310548,"TERMINAL",0,0,"..",,terminal_output 
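A compact sketch of the evaluation path the recorded sample.py follows, assuming (B, T, H, W, C) uint8 input frames: normalize to [0, 1] in the compute dtype, then score the generated frames with dm_pix SSIM:

import jax.numpy as jnp
import dm_pix as pix

def evaluate_ssim(video_batch_uint8, generated, dtype=jnp.bfloat16):
    # uint8 -> float in [0, 1], as the recorded script does before vq_encode.
    gt = (video_batch_uint8.astype(dtype) / 255.0).clip(0, 1)
    # Flatten batch and time so SSIM is averaged over every frame.
    gt = gt[:, : generated.shape[1]].reshape(-1, *gt.shape[2:])
    recon = generated.clip(0, 1).reshape(-1, *generated.shape[2:])
    return pix.ssim(gt, recon).mean()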
+1343,2310927,"TERMINAL",0,0,"git status",,terminal_output +1344,2311224,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/",,terminal_output +1345,2311570,"TERMINAL",0,0,"cat slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh \r\n\r",,terminal_output +1346,2312108,"TERMINAL",0,0,"\rsh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/",,terminal_output +1347,2312501,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --seq_len=2 \\r\n --batch_size=1 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
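The tyro UserWarnings in the records below appear to stem from the dtype fields being declared without type annotations (param_dtype = jnp.float32); the earlier recorded version of train_dynamics.py shows the annotated form, sketched here:

from dataclasses import dataclass

import jax.numpy as jnp

@dataclass
class Args:
    # Annotated dtype fields, matching the earlier recorded Args; this is
    # the form that avoids tyro's `param-dtype`/`dtype` UserWarnings.
    param_dtype: jnp.dtype = jnp.float32
    dtype: jnp.dtype = jnp.bfloat16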
+1348,2312684,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=653563\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0706\r\nSLURM_JOB_START_TIME=1753717895\r\nSLURM_STEP_NODELIST=hkn0706\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753721495\r\nSLURM_PMI2_SRUN_PORT=35695\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3380176\r\nSLURM_PTY_PORT=33197\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=31\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e8.hkn0706\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0706\r\nSLURM_SRUN_COMM_PORT=44649\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3380176\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0706\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=44649\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0706\r\n",,terminal_output +1349,2312781,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +1350,2318276,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +1351,2320350,"TERMINAL",0,0,"2025-07-28 18:23:52.748584: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1352,2331718,"TERMINAL",0,0,"2025-07-28 18:24:04.193020: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1353,2339283,"TERMINAL",0,0,"2025-07-28 18:24:11.694425: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1354,2354563,"TERMINAL",0,0,"2025-07-28 18:24:27.038541: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1355,2357495,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 53000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/053000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 54000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/054000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 55000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/055000/metrics/metrics not found.\r\n",,terminal_output +1356,2398493,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +1357,2400384,"TERMINAL",0,0,"E0728 18:25:12.756476 671529 cuda_dnn.cc:535] Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR\r\nE0728 18:25:12.756513 671529 cuda_dnn.cc:539] Memory usage: 1572864 bytes free, 42406903808 bytes total.\r\nE0728 18:25:12.758637 671529 cuda_dnn.cc:535] Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR\r\nE0728 18:25:12.758657 671529 cuda_dnn.cc:539] Memory usage: 1572864 bytes free, 42406903808 bytes total.\r\nE0728 18:25:12.801768 671529 cuda_dnn.cc:535] Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR\r\nE0728 18:25:12.801819 671529 cuda_dnn.cc:539] Memory usage: 1572864 bytes free, 42406903808 bytes total.\r\nE0728 18:25:12.805002 671529 cuda_dnn.cc:535] Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR\r\nE0728 18:25:12.805047 671529 cuda_dnn.cc:539] Memory usage: 1572864 bytes free, 42406903808 bytes total.\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 742, in backends\r\nTraceback (most recent call last):\r\n backend = _init_backend(platform)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 828, in _init_backend\r\n backend = registration.factory()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 518, in factory\r\n return xla_client.make_c_api_client(plugin_name, updated_options, None)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jaxlib/xla_client.py"", line 153, in make_c_api_client\r\n return _xla.get_c_api_client(plugin_name, options, distributed_client)\r\nRuntimeError: Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 742, in backends\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n backend = _init_backend(platform)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 828, in _init_backend\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 742, in backends\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n backend = registration.factory()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 518, in factory\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 64, in \r\n rng = jax.random.PRNGKey(args.seed)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 249, in PRNGKey\r\n backend = _init_backend(platform)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 828, in _init_backend\r\n return _return_prng_keys(True, _key('PRNGKey', seed, impl))\r\n return xla_client.make_c_api_client(plugin_name, updated_options, None)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 201, in _key\r\n return prng.random_seed(seed, impl=impl)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/prng.py"", line 551, in random_seed\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jaxlib/xla_client.py"", 
line 153, in make_c_api_client\r\n backend = registration.factory()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 518, in factory\r\n seeds_arr = jnp.asarray(np.int64(seeds))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 383, in asarray\r\n return _xla.get_c_api_client(plugin_name, options, distributed_client)\r\nRuntimeError: Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n return array(a, dtype=dtype, copy=bool(copy), order=order, device=device)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 268, in array\r\n return xla_client.make_c_api_client(plugin_name, updated_options, None)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jaxlib/xla_client.py"", line 153, in make_c_api_client\r\n exitcode = _main(fd, parent_sentinel)\r\n out_array: Array = lax._convert_element_type(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 1726, in _convert_element_type\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n return _xla.get_c_api_client(plugin_name, options, distributed_client)\r\n prepare(preparation_data)\r\nRuntimeError: Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n return convert_element_type_p.bind(\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 536, in bind\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n return self._true_bind(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 552, in _true_bind\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n exitcode = _main(fd, parent_sentinel)\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 4902, in _convert_element_type_bind_with_trace\r\n operand = core.Primitive.bind_with_trace(convert_element_type_p, trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 562, in bind_with_trace\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return trace.process_primitive(self, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1066, in process_primitive\r\n return _run_module_code(code, init_globals, run_name,\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n return primitive.impl(*args, **params)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 91, in apply_primitive\r\n outs = fun(*args)\r\nRuntimeError: Unable to initialize backend 'cuda': Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory (you may need to uninstall the failing plugin package, or set JAX_PLATFORMS=cpu to skip this backend.)\r\n _run_code(code, mod_globals, init_globals,\r\n File 
""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 64, in \r\n rng = jax.random.PRNGKey(args.seed)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 249, in PRNGKey\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n return _return_prng_keys(True, _key('PRNGKey', seed, impl))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 201, in _key\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n return prng.random_seed(seed, impl=impl)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/prng.py"", line 551, in random_seed\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 64, in \r\n rng = jax.random.PRNGKey(args.seed)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 249, in PRNGKey\r\n seeds_arr = jnp.asarray(np.int64(seeds))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 383, in asarray\r\n return _return_prng_keys(True, _key('PRNGKey', seed, impl))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 201, in _key\r\n return prng.random_seed(seed, impl=impl)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/prng.py"", line 551, in random_seed\r\n return array(a, dtype=dtype, copy=bool(copy), order=order, device=device)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 268, in array\r\n seeds_arr = jnp.asarray(np.int64(seeds))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 383, in asarray\r\n out_array: Array = lax._convert_element_type(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 1726, in _convert_element_type\r\n return array(a, dtype=dtype, copy=bool(copy), order=order, device=device)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 268, in array\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n return convert_element_type_p.bind(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 536, in bind\r\nTraceback (most recent call last):\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n out_array: Array = lax._convert_element_type(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 1726, in _convert_element_type\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 742, in backends\r\n return self._true_bind(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 552, in _true_bind\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 4902, in _convert_element_type_bind_with_trace\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n return convert_element_type_p.bind(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 536, in bind\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n backend = _init_backend(platform)\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return self._true_bind(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 552, in _true_bind\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 828, in _init_backend\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 4902, in _convert_element_type_bind_with_trace\r\n exec(code, 
run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 64, in \r\n rng = jax.random.PRNGKey(args.seed)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 249, in PRNGKey\r\n operand = core.Primitive.bind_with_trace(convert_element_type_p, trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 562, in bind_with_trace\r\n return _return_prng_keys(True, _key('PRNGKey', seed, impl))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 201, in _key\r\n return prng.random_seed(seed, impl=impl)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/prng.py"", line 551, in random_seed\r\n return trace.process_primitive(self, args, params)\r\n seeds_arr = jnp.asarray(np.int64(seeds))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 383, in asarray\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1066, in process_primitive\r\n return array(a, dtype=dtype, copy=bool(copy), order=order, device=device)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 268, in array\r\n backend = registration.factory()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 518, in factory\r\n out_array: Array = lax._convert_element_type(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 1726, in _convert_element_type\r\n return convert_element_type_p.bind(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 536, in bind\r\n return xla_client.make_c_api_client(plugin_name, updated_options, None)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jaxlib/xla_client.py"", line 153, in make_c_api_client\r\n return primitive.impl(*args, **params)\r\n return self._true_bind(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 552, in _true_bind\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 4902, in _convert_element_type_bind_with_trace\r\n return _xla.get_c_api_client(plugin_name, options, distributed_client)\r\nRuntimeError: Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n operand = core.Primitive.bind_with_trace(convert_element_type_p, trace, args, params)\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 562, in bind_with_trace\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 91, in apply_primitive\r\n return trace.process_primitive(self, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1066, in process_primitive\r\n operand = core.Primitive.bind_with_trace(convert_element_type_p, trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 562, in bind_with_trace\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n return trace.process_primitive(self, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1066, in process_primitive\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n return primitive.impl(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 91, in apply_primitive\r\n main_content = runpy.run_path(main_path,\r\n return primitive.impl(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 91, in apply_primitive\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n outs = fun(*args)\r\n outs = fun(*args)\r\nRuntimeError: Unable to initialize backend 'cuda': Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory (you may need to uninstall the failing plugin package, or set JAX_PLATFORMS=cpu to skip this backend.)\r\njaxlib._jax.XlaRuntimeError: FAILED_PRECONDITION: DNN library initialization failed. 
Look at the errors above for more details.\r\n outs = fun(*args)\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\nRuntimeError: Unable to initialize backend 'cuda': Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory (you may need to uninstall the failing plugin package, or set JAX_PLATFORMS=cpu to skip this backend.)\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 64, in \r\n rng = jax.random.PRNGKey(args.seed)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 249, in PRNGKey\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 742, in backends\r\n return _return_prng_keys(True, _key('PRNGKey', seed, impl))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 201, in _key\r\n return prng.random_seed(seed, impl=impl)\r\n backend = _init_backend(platform)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/prng.py"", line 551, in random_seed\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 828, in _init_backend\r\n seeds_arr = jnp.asarray(np.int64(seeds))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 383, in asarray\r\n backend = registration.factory()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 518, in factory\r\n return array(a, dtype=dtype, copy=bool(copy), order=order, device=device)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 268, in array\r\n return xla_client.make_c_api_client(plugin_name, updated_options, None)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jaxlib/xla_client.py"", line 153, in make_c_api_client\r\n out_array: Array = lax._convert_element_type(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 1726, in _convert_element_type\r\n return _xla.get_c_api_client(plugin_name, options, distributed_client)\r\nRuntimeError: Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n return convert_element_type_p.bind(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 536, in bind\r\n return self._true_bind(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 552, in _true_bind\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 4902, in _convert_element_type_bind_with_trace\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 64, in \r\n rng = jax.random.PRNGKey(args.seed)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 249, in PRNGKey\r\n operand = core.Primitive.bind_with_trace(convert_element_type_p, trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 562, in bind_with_trace\r\n return _return_prng_keys(True, _key('PRNGKey', seed, impl))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 201, in _key\r\n return prng.random_seed(seed, impl=impl)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/prng.py"", line 551, in random_seed\r\n return trace.process_primitive(self, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1066, in process_primitive\r\n seeds_arr = jnp.asarray(np.int64(seeds))\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 383, in asarray\r\n return primitive.impl(*args, **params)\r\n return array(a, dtype=dtype, copy=bool(copy), order=order, device=device)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array.py"", line 268, in array\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 91, in apply_primitive\r\n out_array: Array = lax._convert_element_type(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 1726, in _convert_element_type\r\n outs = fun(*args)\r\nRuntimeError: Unable to initialize backend 'cuda': Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory (you may need to uninstall the failing plugin package, or set JAX_PLATFORMS=cpu to skip this backend.)\r\n return convert_element_type_p.bind(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 536, in bind\r\n return self._true_bind(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 552, in _true_bind\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 4902, in _convert_element_type_bind_with_trace\r\n operand = core.Primitive.bind_with_trace(convert_element_type_p, trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 562, in bind_with_trace\r\n return trace.process_primitive(self, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1066, in process_primitive\r\n return primitive.impl(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 91, in apply_primitive\r\n outs = fun(*args)\r\nRuntimeError: Unable to initialize backend 'cuda': Bad StatusOr access: RESOURCE_EXHAUSTED: : CUDA_ERROR_OUT_OF_MEMORY: out of memory (you may need to uninstall the failing plugin package, or set JAX_PLATFORMS=cpu to skip this backend.)\r\n",,terminal_output +1358,2402104,"TERMINAL",0,0,"ERROR:absl:Processing Failed. Shutting down.\r\n",,terminal_output +1359,2402305,"TERMINAL",0,0,"2025-07-28 18:25:14.781497: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-28 18:25:14.781495: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1360,2416612,"TERMINAL",0,0,"2025-07-28 18:25:29.086948: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1361,2417390,"TERMINAL",0,0,"2025-07-28 18:25:29.829457: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1362,2423153,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py"", line 166, in <module>\r\n video_batch = next(iter(dataloader))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_loader.py"", line 479, in __next__\r\n result_record = next(self._iterator)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_loader.py"", line 380, in _iterator_with_context\r\n yield from it\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/grain_pool.py"", line 784, in __next__\r\n raise element\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/grain_pool.py"", line 616, in _process_elements_in_grain_pool\r\n for element in g_pool:\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/grain_pool.py"", line 505, in __next__\r\n raise RuntimeError(\r\nRuntimeError: Grain worker process 0 was terminated unexpectedly with exit code 1.
Search the logs above for the source of the crash.\r\n",,terminal_output +1363,2424644,"TERMINAL",0,0,"srun: error: hkn0706: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0706:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0706 jafar_jobs]$ ",,terminal_output +1364,2467703,"TERMINAL",0,0,"c",,terminal_output +1365,2467869,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +1366,2468035,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1367,2468677,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1368,2468799,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +1369,2468884,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +1370,2469800,"TERMINAL",0,0,"[?25l/[?25h",,terminal_output +1371,2470235,"TERMINAL",0,0,"[?25lj[?25h",,terminal_output +1372,2470879,"TERMINAL",0,0,"[?25la[?25hfar",,terminal_output +1373,2471409,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0706:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0706 jafar]$ ",,terminal_output +1374,2471703,"TERMINAL",0,0,"cd ../jafar",,terminal_output +1375,2471928,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/",,terminal_output +1376,2472965,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --seq_len=2 \\r\n --seed=69 \\r\n --batch_size=10 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
+1377,2473057,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=653563\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0706\r\nSLURM_JOB_START_TIME=1753717895\r\nSLURM_STEP_NODELIST=hkn0706\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753721495\r\nSLURM_PMI2_SRUN_PORT=35695\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3380176\r\nSLURM_PTY_PORT=33197\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=31\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e8.hkn0706\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0706\r\nSLURM_SRUN_COMM_PORT=44649\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3380176\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0706\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=44649\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0706\r\n",,terminal_output +1378,2473200,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +1379,2476407,"TERMINAL",0,0,"2025-07-28 18:26:28.809561: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1380,2476505,"sample.py",0,0,"",python,tab +1381,2478942,"TERMINAL",0,0,"bash",,terminal_focus +1382,2489250,"TERMINAL",0,0,"2025-07-28 18:26:41.717864: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1383,2489784,"TERMINAL",0,0,"ls ../jafar_jobs/sample.py",,terminal_command +1384,2491092,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",0,0,"from dataclasses import dataclass\nimport time\nimport os\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nimport optax\nimport orbax.checkpoint as ocp\nfrom flax.training.train_state import TrainState\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\nrng = jax.random.PRNGKey(args.seed)\n\n# --- Load Genie checkpoint ---\ngenie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n 
handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\ndef _sampling_wrapper(module, batch):\n return module.sample(\n batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax\n )\n\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n sampling_fn = jax.jit(nn.apply(_sampling_wrapper, genie))\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=vid, latent_actions=action_batch, rng=_rng)\n generated_vid = sampling_fn(params, batch)\n return generated_vid\n\n\n# --- Get video + latent actions ---\narray_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n]\ndataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n)\nvideo_batch = next(iter(dataloader))\nvideo_batch = jnp.array(video_batch)\nprint(video_batch.dtype)\nvideo_batch = video_batch.astype(args.dtype)\nprint(video_batch.dtype)\nvideo_batch = video_batch / 255.0\nprint(video_batch.dtype) \n# Get latent actions for all videos in the batch\nbatch = dict(videos=video_batch)\naction_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\naction_batch = action_batch.reshape(video_batch.shape[0], args.seq_len - 1, 1)\n\n# --- Sample + evaluate video ---\nvid = _autoreg_sample(rng, video_batch, action_batch)\ngt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\nrecon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\nssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\nprint(f""SSIM: {ssim}"")\n\n# --- Construct video ---\ntrue_videos = (video_batch * 255).astype(np.uint8)\npred_videos = (vid * 255).astype(np.uint8)\nvideo_comparison = np.zeros((2, *vid.shape), dtype=np.uint8)\nvideo_comparison[0] = true_videos[:, : args.seq_len]\nvideo_comparison[1] = pred_videos\nframes = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n# --- Save video ---\nimgs = [Image.fromarray(img) for img in frames]\n# Write actions on each frame, on each row (i.e., for each video in the batch, on the GT row)\nfor t, img in enumerate(imgs[1:]):\n d = ImageDraw.Draw(img)\n for row in range(action_batch.shape[0]):\n action = action_batch[row, t, 0]\n y_offset = row * video_batch.shape[2] + 2\n d.text((2, y_offset), f""{action}"", fill=255)\nimgs[0].save(\n f""generation_{time.time()}.gif"",\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n)\n",python,tab +1385,2493292,"sample.py",0,0,"",python,tab +1386,2494004,"sample.py",0,0,"",python,tab +1387,2509079,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",0,0,"",python,tab +1388,2509081,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2752,0,"",python,selection_mouse +1389,2509128,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2751,0,"",python,selection_command 
+1390,2510482,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2809,0,"",python,selection_mouse +1391,2510998,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2827,0,"",python,selection_mouse +1392,2511953,"TERMINAL",0,0,"2025-07-28 18:27:04.430432: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1393,2511999,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,0,"",python,selection_mouse +1394,2513093,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,39,"\ndummy_train_state = TrainState.create(",python,selection_command +1395,2513582,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,65,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,",python,selection_command +1396,2513607,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,84,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,",python,selection_command +1397,2513642,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,104,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(",python,selection_command +1398,2513674,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,148,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(",python,selection_command +1399,2513717,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,186,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values",python,selection_command +1400,2513797,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,196,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )",python,selection_command +1401,2513798,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,204,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), ",python,selection_command +1402,2513802,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,206,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)",python,selection_command +1403,2513823,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,273,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()",python,selection_command +1404,2514333,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,375,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), 
\n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)",python,selection_command +1405,2514413,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 101, in <module>\r\n ckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\r\n restored = self._restore(directory, args=ckpt_args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\r\n return self._handler.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 803, in restore\r\n structure, use_zarr3_metadata = self._get_internal_metadata(directory)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 959, in _get_internal_metadata\r\n raise FileNotFoundError(\r\nFileNotFoundError: No structure could be identified for the checkpoint at /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237.\r\n",,terminal_output +1406,2514533,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,419,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(",python,selection_command +1407,2514663,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,440,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,",python,selection_command +1408,2514808,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,510,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),",python,selection_command +1409,2515002,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,548,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n 
optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry",python,selection_command +1410,2515162,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,550,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)",python,selection_command +1411,2515275,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,597,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(",python,selection_command +1412,2515420,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,652,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state",python,selection_command +1413,2515480,"TERMINAL",0,0,"srun: error: hkn0706: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0706:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0706 jafar]$ ",,terminal_output +1414,2515566,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,654,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)",python,selection_command 
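Records +1394 through +1414 walk a selection over the dummy TrainState block while the old restore path fails in the terminal with FileNotFoundError. The dummy state exists only as a restore template, so its optimizer is deliberately throwaway; a self-contained sketch of that trick (make_restore_template is an illustrative name):

import optax
from flax.training.train_state import TrainState

def make_restore_template(model, init_params):
    # The schedule values are dummies, as the recorded comment says: the
    # optimizer state is never stepped, it only gives the pytree its shape.
    tx = optax.adamw(optax.warmup_cosine_decay_schedule(0, 0, 1, 2))
    return TrainState.create(apply_fn=model.apply, params=init_params, tx=tx)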
+1415,2515732,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,655,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n",python,selection_command +1416,2515882,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,694,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(",python,selection_command +1417,2516070,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,732,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),",python,selection_command +1418,2516196,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,761,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(",python,selection_command +1419,2516328,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,829,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = 
ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),",python,selection_command +1420,2516500,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,836,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),",python,selection_command +1421,2516651,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,838,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)",python,selection_command +1422,2517100,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,885,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]",python,selection_command +1423,2517275,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,922,"\ndummy_train_state = 
TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_command +1424,2521129,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",2828,0,"",python,selection_command +1425,2523548,"sample.py",0,0,"",python,tab +1426,2523549,"sample.py",2873,0,"",python,selection_mouse +1427,2525120,"sample.py",2843,0,"",python,selection_command +1428,2525271,"sample.py",2761,0,"",python,selection_command +1429,2525426,"sample.py",2721,0,"",python,selection_command +1430,2525834,"sample.py",2760,0,"\n",python,content +1431,2526664,"sample.py",2761,0,"\n",python,content +1432,2527168,"sample.py",2762,0,"\n",python,content +1433,2527817,"sample.py",2762,0,"",python,selection_command +1434,2529133,"sample.py",2762,0,"\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,content +1435,2529169,"sample.py",2763,0,"",python,selection_command +1436,2531053,"sample.py",3696,0,"",python,selection_mouse +1437,2531652,"sample.py",3687,112,"",python,content +1438,2533425,"sample.py",2762,0,"",python,selection_mouse +1439,2533847,"sample.py",2762,1,"",python,content +1440,2534545,"sample.py",2762,1,"",python,content +1441,2537827,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",0,0,"",python,tab +1442,2537829,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",254,0,"",python,selection_mouse +1443,2539399,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",212,0,"",python,selection_command +1444,2541548,"sample.py",0,0,"",python,tab +1445,2541549,"sample.py",199,0,"",python,selection_mouse +1446,2541822,"sample.py",217,0,"\nimport orbax.checkpoint as ocp\nfrom flax.training.train_state import TrainState",python,content +1447,2541855,"sample.py",218,0,"",python,selection_command +1448,2542208,"sample.py",170,0,"",python,selection_command +1449,2542572,"sample.py",170,48,"",python,content 
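Records +1446 through +1455 adjust sample.py's imports to match the jafar_jobs variant. The deleted 48-character span is not shown in full, but its length and the old traceback's PyTreeCheckpointer call suggest it was the previous orbax import; the imports these edits add are:

import optax
import orbax.checkpoint as ocp
from flax.training.train_state import TrainState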
+1450,2544347,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",0,0,"",python,tab +1451,2544349,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",182,0,"",python,selection_mouse +1452,2544349,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",181,0,"",python,selection_command +1453,2546360,"sample.py",0,0,"",python,tab +1454,2546361,"sample.py",163,0,"",python,selection_mouse +1455,2546640,"sample.py",169,0,"\nimport optax",python,content +1456,2546647,"sample.py",170,0,"",python,selection_command +1457,2558136,"TERMINAL",0,0,"srun",,terminal_focus +1458,2558368,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0706 jafar]$ ",,terminal_output +1459,2561281,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/",,terminal_output +1460,2563777,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --seq_len=2 \\r\n --seed=69 \\r\n --batch_size=10 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
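The sampling_dev.sh script echoed above drives sample.py entirely through command-line flags, which tyro maps onto the Args dataclass from the earlier snapshot. A reduced, self-contained illustration (field names and defaults copied from the recording; the dataclass is trimmed for brevity):

from dataclasses import dataclass
import tyro

@dataclass
class Args:
    checkpoint: str = ""
    data_dir: str = "data/coinrun_episodes"
    dyna_dim: int = 512
    dyna_ffn_dim: int = 2048
    dyna_num_blocks: int = 6
    dyna_num_heads: int = 8
    seq_len: int = 16
    seed: int = 0
    batch_size: int = 1
    start_frame: int = 0

# Flags such as `--dyna_dim=1024 --dyna_num_blocks=16 --dyna_num_heads=16
# --dyna_ffn_dim=4096 --seq_len=2 --seed=69 --batch_size=10 --start_frame=0`
# override these defaults, exactly as in the recorded srun invocation.
args = tyro.cli(Args)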
+1461,2563909,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=653563\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0706\r\nSLURM_JOB_START_TIME=1753717895\r\nSLURM_STEP_NODELIST=hkn0706\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753721495\r\nSLURM_PMI2_SRUN_PORT=35695\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3380176\r\nSLURM_PTY_PORT=33197\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=31\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e8.hkn0706\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0706\r\nSLURM_SRUN_COMM_PORT=44649\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3380176\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0706\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=44649\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0706\r\n",,terminal_output +1462,2564042,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +1463,2567129,"TERMINAL",0,0,"2025-07-28 18:27:59.605644: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1464,2579731,"TERMINAL",0,0,"2025-07-28 18:28:12.205173: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1465,2603268,"TERMINAL",0,0,"2025-07-28 18:28:35.701850: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1466,2606775,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 53000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/053000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 55000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/055000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 54000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/054000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/020000/metrics/metrics not found.\r\n",,terminal_output +1467,2656782,"TERMINAL",0,0,"2025-07-28 18:29:29.257388: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1468,2656911,"TERMINAL",0,0,"2025-07-28 18:29:29.369647: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1469,2657066,"TERMINAL",0,0,"2025-07-28 18:29:29.535284: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1470,2658984,"TERMINAL",0,0,"E0728 18:29:31.436648 675454 cuda_blas.cc:196] failed to create cublas handle: the resource allocation failed\r\nE0728 18:29:31.436860 675454 cuda_blas.cc:199] Failure to initialize cublas may be due to OOM (cublas needs some free memory when you initialize it, and your deep-learning framework may have preallocated more than its fair share), or may be because this binary was not built with support for the GPU in your machine.\r\n",,terminal_output +1471,2659088,"TERMINAL",0,0,"E0728 18:29:31.513488 675460 cuda_blas.cc:196] failed to create cublas handle: the resource allocation failed\r\nE0728 18:29:31.513522 675460 cuda_blas.cc:199] Failure to initialize cublas may be due to OOM (cublas needs some free memory when you initialize it, and your deep-learning framework may have preallocated more than its fair share), or may be because this binary was not built with support for the GPU in your machine.\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""<string>"", line 1, in <module>\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 102, in <module>\r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 94, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/tokenizer.py"", line 72, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 117, in __call__\r\n x = nn.Sequential(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/combinators.py"", line 113, in __call__\r\n outputs = layer(outputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"", line 287, in __call__\r\n y = dot_general(\r\njaxlib._jax.XlaRuntimeError: INTERNAL: Failed to initialize BLAS support\r\n2025-07-28 18:29:31.538169: W external/xla/xla/tsl/framework/bfc_allocator.cc:310] Allocator (GPU_0_bfc) ran out of memory trying to allocate 16.05MiB with freed_by_count=0. The caller indicates that this is not a failure, but this may mean that there could be performance gains if more memory were available.\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""<string>"", line 1, in <module>\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 102, in <module>\r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 94, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/tokenizer.py"", line 72, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 117, in __call__\r\n x = nn.Sequential(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/combinators.py"", line 113, in __call__\r\n outputs = layer(outputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"", line 287, in __call__\r\n y = dot_general(\r\njaxlib._jax.XlaRuntimeError: INTERNAL: Failed to initialize BLAS support\r\n",,terminal_output +1472,2659162,"TERMINAL",0,0,"jax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""<string>"", line 1, in <module>\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n main_content = runpy.run_path(main_path,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 102, in <module>\r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 94, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/tokenizer.py"", line 72, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 117, in __call__\r\n x = nn.Sequential(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/combinators.py"", line 113, in __call__\r\n outputs = layer(outputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"", line 287, in __call__\r\n y = dot_general(\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 16826368 bytes.\r\n",,terminal_output +1473,2659956,"TERMINAL",0,0,"ERROR:absl:Processing Failed. Shutting down.\r\n",,terminal_output +1474,2665777,"TERMINAL",0,0,"2025-07-28 18:29:38.127416: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_0_bfc) ran out of memory trying to allocate 3.30MiB (rounded to 3456000)requested by op \r\nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. 
\r\nCurrent allocation summary follows.\r\nCurrent allocation summary follows.\r\n2025-07-28 18:29:38.127681: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] *___________________________________________________________________________________________________\r\nE0728 18:29:38.127742 675461 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 3456000 bytes. [tf-allocator-allocation-error='']\r\n2025-07-28 18:29:38.138113: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_0_bfc) ran out of memory trying to allocate 3.30MiB (rounded to 3456000)requested by op \r\nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \r\nCurrent allocation summary follows.\r\nCurrent allocation summary follows.\r\n2025-07-28 18:29:38.138184: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] *___________________________________________________________________________________________________\r\nE0728 18:29:38.138197 675458 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 3456000 bytes. [tf-allocator-allocation-error='']\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""<string>"", line 1, in <module>\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""<string>"", line 1, in <module>\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 116, in spawn_main\r\n exitcode = _main(fd, parent_sentinel)\r\n exitcode = _main(fd, parent_sentinel)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 125, in _main\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n prepare(preparation_data)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 236, in prepare\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n _fixup_main_from_path(data['init_main_from_path'])\r\n main_content = runpy.run_path(main_path,\r\n File 
""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/spawn.py"", line 287, in _fixup_main_from_path\r\n return _run_module_code(code, init_globals, run_name,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n main_content = runpy.run_path(main_path,\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 289, in run_path\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 98, in <module>\r\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array_creation.py"", line 83, in zeros\r\n return _run_module_code(code, init_globals, run_name,\r\n return lax.full(shape, 0, dtypes.jax_dtype(dtype), sharding=util.normalize_device_to_sharding(device))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 3356, in full\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 96, in _run_module_code\r\n return broadcast(fill_value, shape)\r\n _run_code(code, mod_globals, init_globals,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/runpy.py"", line 86, in _run_code\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 2691, in broadcast\r\n return broadcast_in_dim(operand, tuple(sizes) + np.shape(operand), dims,\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 2725, in broadcast_in_dim\r\n exec(code, run_globals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 98, in <module>\r\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array_creation.py"", line 83, in zeros\r\n return broadcast_in_dim_p.bind(\r\n return lax.full(shape, 0, dtypes.jax_dtype(dtype), sharding=util.normalize_device_to_sharding(device))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 3356, in full\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 536, in bind\r\n return broadcast(fill_value, shape)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 2691, in broadcast\r\n return self._true_bind(*args, **params)\r\n return 
broadcast_in_dim(operand, tuple(sizes) + np.shape(operand), dims,\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 552, in _true_bind\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 2725, in broadcast_in_dim\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 562, in bind_with_trace\r\n return broadcast_in_dim_p.bind(\r\n return trace.process_primitive(self, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1066, in process_primitive\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 536, in bind\r\n return primitive.impl(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 91, in apply_primitive\r\n return self._true_bind(*args, **params)\r\n outs = fun(*args)\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 3456000 bytes.\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 552, in _true_bind\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 562, in bind_with_trace\r\n return trace.process_primitive(self, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1066, in process_primitive\r\n return primitive.impl(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 91, in apply_primitive\r\n outs = fun(*args)\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 3456000 bytes.\r\n",,terminal_output +1475,2667035,"TERMINAL",0,0,"2025-07-28 18:29:39.506210: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1476,2667163,"TERMINAL",0,0,"2025-07-28 18:29:39.569068: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-28 18:29:39.602044: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1477,2687789,"TERMINAL",0,0,"2025-07-28 18:30:00.262393: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1478,2688919,"TERMINAL",0,0,"2025-07-28 18:30:01.397348: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1479,2689115,"TERMINAL",0,0,"2025-07-28 18:30:01.558102: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1480,2689203,"TERMINAL",0,0,"^Csrun: interrupt (one more within 1 sec to abort)\r\nsrun: StepId=3380176.8 task 0: running\r\n",,terminal_output +1481,2691028,"TERMINAL",0,0,"^Csrun: interrupt (one more within 1 sec to abort)\r\nsrun: StepId=3380176.8 task 0: running\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 168, in <module>\r\n video_batch = next(iter(dataloader))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_loader.py"", line 479, in __next__\r\n result_record = next(self._iterator)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_loader.py"", line 380, in _iterator_with_context\r\n yield from it\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/grain_pool.py"", line 784, in __next__\r\n raise element\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/grain_pool.py"", line 616, in _process_elements_in_grain_pool\r\n for element in g_pool:\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/grain_pool.py"", line 505, in __next__\r\n raise RuntimeError(\r\nRuntimeError: Grain worker process 0 was terminated unexpectedly with exit code 1. 
Search the logs above for the source of the crash.\r\n",,terminal_output +1482,2691171,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3380176.8\r\nException ignored in: .remove at 0x149c092bcd30>\r\nTraceback (most recent call last):\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/weakref.py"", line 370, in remove\r\nsrun: forcing job termination\r\nsrun: Job step aborted: Waiting up to 32 seconds for job step to finish.\r\nslurmstepd: error: *** STEP 3380176.8 ON hkn0706 CANCELLED AT 2025-07-28T18:30:03 ***\r\n",,terminal_output +1483,2691512,"TERMINAL",0,0,"]0;tum_cte0515@hkn0706:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0706 jafar]$ ",,terminal_output +1484,2707904,"genie.py",0,0,"from typing import Dict, Any\n\nimport optax\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nfrom flax.training.train_state import TrainState\nimport orbax.checkpoint as ocp\n\nfrom models.dynamics import DynamicsMaskGIT, DynamicsAutoregressive\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\nimport grain\n\n\nclass Genie(nn.Module):\n """"""Genie model""""""\n\n # --- Tokenizer ---\n in_dim: int\n tokenizer_dim: int\n tokenizer_ffn_dim: int\n latent_patch_dim: int\n num_patch_latents: int\n patch_size: int\n tokenizer_num_blocks: int\n tokenizer_num_heads: int\n # --- LAM ---\n lam_dim: int\n lam_ffn_dim: int\n latent_action_dim: int\n num_latent_actions: int\n lam_patch_size: int\n lam_num_blocks: int\n lam_num_heads: int\n lam_co_train: bool\n # --- Dynamics ---\n dyna_dim: int\n dyna_ffn_dim: int\n dyna_num_blocks: int\n dyna_num_heads: int\n use_maskgit: bool\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n dropout: float = 0.0\n mask_limit: float = 0.0\n\n def setup(self):\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n ffn_dim=self.tokenizer_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n ffn_dim=self.lam_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n\n if self.use_maskgit:\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n else:\n self.dynamics = DynamicsAutoregressive(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n tokenizer_outputs = 
self.tokenizer.vq_encode(batch[""videos""], training=False)\n lam_outputs = self.lam.vq_encode(batch[""videos""], training=False)\n latent_actions = jax.lax.cond(\n self.lam_co_train,\n lambda: lam_outputs[""z_q""],\n lambda: jax.lax.stop_gradient(lam_outputs[""z_q""]),\n )\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(tokenizer_outputs[""indices""]),\n latent_actions=latent_actions,\n )\n outputs[""mask_rng""] = batch[""mask_rng""]\n dyna_outputs = self.dynamics(outputs, training)\n outputs.update(dyna_outputs)\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )\n outputs[""lam_indices""] = lam_outputs[""indices""]\n return outputs\n\n def sample_causal(\n self,\n batch: Dict[str, Any],\n seq_len: int,\n temperature: float = 1,\n sample_argmax: bool = False,\n ):\n """"""\n Autoregressively samples up to `seq_len` future frames using the causal transformer backend.\n\n - Input frames are tokenized once.\n - Future frames are generated one at a time, each conditioned on all previous frames.\n - All frames are detokenized in a single pass at the end.\n\n Args:\n batch: Dict with at least ""videos"" (B, T, H, W, C)\n seq_len: total number of frames to generate (including context)\n temperature: sampling temperature\n sample_argmax: if True, use argmax instead of sampling\n\n Returns:\n Generated video frames (B, seq_len, H, W, C)\n """"""\n # --- Encode context frames ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""] # (B, T, N)\n B, T, N = token_idxs.shape\n\n # jax.debug.print(""token_idxs shape: {}"", token_idxs.shape)\n # --- Prepare initial token sequence ---\n # Pad with zeros for future frames\n pad_shape = (B, seq_len - T, N)\n token_idxs_full = jnp.concatenate(\n [token_idxs, jnp.zeros(pad_shape, dtype=token_idxs.dtype)], axis=1\n ) # (B, seq_len, N)\n\n # --- Prepare latent actions ---\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""]) # (B, S-1, )\n # --- Autoregressive generation loop ---\n rng = batch[""rng""]\n for t in range(T, seq_len):\n for n in range(N):\n jax.debug.print(""Sampling token {} from frame {}"", n, t)\n dyna_inputs = {\n ""video_tokens"": token_idxs_full,\n ""latent_actions"": action_tokens,\n }\n # jax.debug.print(""token_idxs_full 0: {}"", token_idxs_full[0,:,0])\n dyna_outputs = self.dynamics(dyna_inputs, training=False)\n # # dyna_outputs[""token_logits""]: (B, t, N, vocab_size)\n # # We want the logits for the last time step (frame t-1 predicting t)\n # jax.debug.breakpoint()\n next_token_logits = dyna_outputs[""token_logits""][:, t, n, :].astype(\n jnp.float32\n ) # (B, 1, vocab_size)\n\n # Sample or argmax for each patch\n if sample_argmax:\n next_token = jnp.argmax(next_token_logits, axis=-1) # (B, 1)\n else:\n rng, step_rng = jax.random.split(rng)\n next_token = jax.random.categorical(\n step_rng, next_token_logits / temperature, axis=-1\n ) # (B, 1)\n \n # Insert the generated tokens into the sequence\n token_idxs_full = token_idxs_full.at[:, t, n].set(next_token)\n # FIXME @mihir: HACK\n # token_idxs_full = jnp.argmax(dyna_outputs[""token_logits""].astype(jnp.float32) , axis=-1)\n # break\n\n # --- Decode all tokens at once at the end ---\n final_frames = self.tokenizer.decode(\n token_idxs_full, video_hw=batch[""videos""].shape[2:4]\n )\n return final_frames\n\n @nn.compact\n def sample_maskgit(\n self,\n batch: Dict[str, Any],\n seq_len: int,\n 
steps: int = 25,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> Any:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by\n a) reapplying the mask before each decoding step.\n b) applying a temporal causal mask within each ST-transformer block.\n\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: patches per frame\n S: sequence length\n A: action space\n D: model latent dimension\n """"""\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""] # (B, T, N)\n B, T, N = token_idxs.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs.dtype)\n token_idxs = jnp.concatenate([token_idxs, pad], axis=1) # (B, S, N)\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n\n def generation_step_fn(carry, step_t):\n rng, current_token_idxs = carry\n rng, step_rng = jax.random.split(rng)\n\n # Mask current and future frames (i.e., t >= step_t)\n mask = jnp.arange(seq_len) >= step_t # (S,)\n mask = jnp.broadcast_to(mask[None, :, None], (B, seq_len, N)) # (B, S, N)\n mask = mask.astype(bool)\n masked_token_idxs = current_token_idxs * ~mask\n\n # --- Initialize and run MaskGIT loop ---\n init_carry_maskgit = (\n step_rng,\n masked_token_idxs,\n mask,\n action_tokens,\n )\n final_carry_maskgit, _ = loop_fn(init_carry_maskgit, jnp.arange(steps))\n updated_token_idxs = final_carry_maskgit[1]\n new_carry = (rng, updated_token_idxs)\n return new_carry, None\n\n # --- Run the autoregressive generation using scan ---\n initial_carry = (batch[""rng""], token_idxs)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry, _ = jax.lax.scan(\n generation_step_fn, initial_carry, timesteps_to_scan\n )\n final_token_idxs = final_carry[1]\n\n # --- Decode all tokens at once at the end ---\n final_frames = self.tokenizer.decode(\n final_token_idxs,\n video_hw=batch[""videos""].shape[2:4],\n )\n return final_frames\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, token_idxs, mask, action_tokens = carry\n step = x\n N = token_idxs.shape[2]\n\n # --- Construct + encode video ---\n vid_embed = self.dynamics.patch_embed(token_idxs) # (B, S, N, D)\n mask_token = self.dynamics.mask_token # (1, 1, 1, D,)\n mask_expanded = mask[..., None] # (B, S, N, 1)\n vid_embed = jnp.where(mask_expanded, mask_token, vid_embed)\n\n # --- Predict transition 
---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed) / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jax.random.categorical(_rng, final_logits)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n token_idxs = jnp.where(mask, sampled_token_idxs, token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, token_idxs, new_mask, action_tokens)\n return new_carry, None\n\n\ndef restore_genie_components(\n train_state: TrainState,\n sharding: jax.sharding.NamedSharding,\n inputs: Dict[str, jax.Array],\n rng: jax.Array,\n args,\n):\n """"""Restore pre-trained Genie components""""""\n rng, _rng = jax.random.split(rng)\n\n # dummy values since we only use tx to initialize the dummy train states\n dummy_tx = optax.adamw(\n learning_rate=optax.constant_schedule(args.max_lr),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n )\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n tokenizer_checkpoint_manager = ocp.CheckpointManager(\n directory=args.tokenizer_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n ffn_dim=args.tokenizer_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n num_blocks=args.tokenizer_num_blocks,\n num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n tokenizer_init_params = dummy_tokenizer.init(_rng, inputs)\n dummy_tokenizer_train_state = TrainState.create(\n apply_fn=dummy_tokenizer.apply, params=tokenizer_init_params, tx=dummy_tx\n )\n abstract_sharded_tokenizer_state = _create_abstract_sharded_pytree(\n dummy_tokenizer_train_state, sharding\n )\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n step=tokenizer_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_sharded_tokenizer_state),\n ),\n )[""model_state""]\n restored_tokenizer_params = restored_tokenizer.params[""params""]\n train_state.params[""params""][""tokenizer""].update(restored_tokenizer_params)\n tokenizer_checkpoint_manager.close()\n\n if args.lam_checkpoint:\n lam_checkpoint_manager = ocp.CheckpointManager(\n directory=args.lam_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n 
dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n ffn_dim=args.lam_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n lam_init_params = dummy_lam.init(_rng, inputs)\n dummy_lam_train_state = TrainState.create(\n apply_fn=dummy_lam.apply, params=lam_init_params, tx=dummy_tx\n )\n abstract_sharded_lam_state = _create_abstract_sharded_pytree(\n dummy_lam_train_state, sharding\n )\n restored_lam = lam_checkpoint_manager.restore(\n step=lam_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_sharded_lam_state),\n ),\n )[""model_state""]\n restored_lam_params = restored_lam.params[""params""]\n # Genie does not initialize all LAM modules, thus we omit those extra modules during restoration\n # (f.srambical) FIXME: Currently, this is a small HBM memory crunch since the LAM's decoder is loaded into HBM and immediately discarded.\n # A workaround would be to restore to host memory first, and only move the weights to HBM after pruning the decoder\n restored_lam_params = {\n k: v\n for k, v in restored_lam_params.items()\n if k in train_state.params[""params""][""lam""]\n }\n train_state.params[""params""][""lam""].update(restored_lam_params)\n lam_checkpoint_manager.close()\n\n return train_state\n\n\ndef _create_abstract_sharded_pytree(pytree_template, sharding_spec):\n """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, ""shape"") and hasattr(leaf_template, ""dtype""):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)\n",python,tab +1485,2708110,"genie.py",183,7514,"from models.dynamics import DynamicsMaskGIT\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\nimport grain\n\n\nclass Genie(nn.Module):\n """"""Genie model""""""\n\n # --- Tokenizer ---\n in_dim: int\n tokenizer_dim: int\n tokenizer_ffn_dim: int\n latent_patch_dim: int\n num_patch_latents: int\n patch_size: int\n tokenizer_num_blocks: int\n tokenizer_num_heads: int\n # --- LAM ---\n lam_dim: int\n lam_ffn_dim: int\n latent_action_dim: int\n num_latent_actions: int\n lam_patch_size: int\n lam_num_blocks: int\n lam_num_heads: int\n lam_co_train: bool\n # --- Dynamics ---\n dyna_dim: int\n dyna_ffn_dim: int\n dyna_num_blocks: int\n dyna_num_heads: int\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n dropout: float = 0.0\n mask_limit: float = 0.0\n\n def setup(self):\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n ffn_dim=self.tokenizer_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n ffn_dim=self.lam_ffn_dim,\n latent_dim=self.latent_patch_dim,\n 
num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\n lam_outputs = self.lam.vq_encode(batch[""videos""], training=False)\n latent_actions = jax.lax.cond(\n self.lam_co_train,\n lambda: lam_outputs[""z_q""],\n lambda: jax.lax.stop_gradient(lam_outputs[""z_q""]),\n )\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(tokenizer_outputs[""indices""]),\n latent_actions=latent_actions,\n )\n outputs[""mask_rng""] = batch[""mask_rng""]\n dyna_outputs = self.dynamics(outputs, training)\n outputs.update(dyna_outputs)\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )\n outputs[""lam_indices""] = lam_outputs[""indices""]\n return outputs\n\n @nn.compact\n def sample(\n",python,content +1486,2711055,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/sample.py",0,0,"",python,tab +1487,2716201,"sample.py",0,0,"",python,tab +1488,2716202,"sample.py",6063,0,"",python,selection_mouse +1489,2716207,"sample.py",6062,0,"",python,selection_command +1490,2720314,"sample.py",4629,0,"",python,selection_mouse +1491,2721592,"sample.py",4629,1,"0",python,content +1492,2723864,"TERMINAL",0,0,"bash",,terminal_focus +1493,2724980,"TERMINAL",0,0,"srun",,terminal_focus +1494,2726543,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/",,terminal_output +1495,2727312,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --seq_len=2 \\r\n --seed=69 \\r\n --batch_size=10 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
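# --- Editor's sketch (not part of the recording): the cosine re-masking
# schedule that the MaskGITStep payload above implements. `probs` and `steps`
# are toy stand-ins for the per-token confidences and the refinement-step
# count; this is a minimal illustration under those assumptions, not the
# project's code.
import jax.numpy as jnp

def remask(probs, step, steps):
    # Fraction of tokens that should stay masked after this refinement step.
    unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (steps * 2))
    num_keep = jnp.round(probs.shape[-1] * (1.0 - unmasked_ratio)).astype(int)
    order = jnp.argsort(-probs)  # most confident positions first
    ranks = jnp.argsort(order)   # confidence rank of each position
    return ranks > num_keep      # True = re-masked for the next step

print(remask(jnp.array([0.9, 0.1, 0.6, 0.3]), step=0, steps=25))
# Early steps keep only the most confident tokens; later steps keep nearly all.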
+1496,2727441,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=653563\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0706\r\nSLURM_JOB_START_TIME=1753717895\r\nSLURM_STEP_NODELIST=hkn0706\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753721495\r\nSLURM_PMI2_SRUN_PORT=35695\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3380176\r\nSLURM_PTY_PORT=33197\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=31\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e8.hkn0706\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0706\r\nSLURM_SRUN_COMM_PORT=44649\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3380176\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0706\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=44649\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0706\r\n",,terminal_output +1497,2727568,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +1498,2730510,"TERMINAL",0,0,"2025-07-28 18:30:42.988442: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1499,2743765,"TERMINAL",0,0,"2025-07-28 18:30:56.242779: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1500,2766722,"TERMINAL",0,0,"2025-07-28 18:31:19.198600: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1501,2770201,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 54000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/054000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 53000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/053000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 55000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/055000/metrics/metrics not found.\r\n",,terminal_output +1502,2782538,"sample.py",0,0,"",python,tab +1503,2782539,"sample.py",4659,0,"",python,selection_mouse +1504,2782539,"sample.py",4658,0,"",python,selection_command +1505,2783217,"sample.py",4631,0,"",python,selection_mouse +1506,2783222,"sample.py",4630,0,"",python,selection_command +1507,2784079,"sample.py",4679,0,"",python,selection_mouse +1508,2784085,"sample.py",4678,0,"",python,selection_command +1509,2784940,"sample.py",4659,0,"",python,selection_mouse +1510,2784945,"sample.py",4658,0,"",python,selection_command +1511,2785354,"sample.py",4659,0,"",python,selection_mouse +1512,2785359,"sample.py",4658,0,"",python,selection_command +1513,2785560,"sample.py",4659,0,"",python,selection_mouse +1514,2785560,"sample.py",4658,0,"",python,selection_command +1515,2786125,"sample.py",4631,0,"",python,selection_mouse +1516,2786130,"sample.py",4630,0,"",python,selection_command +1517,2788720,"sample.py",4679,0,"",python,selection_mouse +1518,2788727,"sample.py",4678,0,"",python,selection_command +1519,2789378,"sample.py",4681,0,"",python,selection_mouse +1520,2789409,"sample.py",4680,0,"",python,selection_command +1521,2816885,"TERMINAL",0,0,"2025-07-28 18:32:09.329676: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-28 18:32:09.330133: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-28 18:32:09.330155: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1522,2839312,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 178, in \r\n ssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/dm_pix/_src/metrics.py"", line 221, in ssim\r\n chex.assert_type([a, b], float)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/chex/_src/asserts_internal.py"", line 279, in _chex_assert_fn\r\n host_assertion_fn(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/chex/_src/asserts_internal.py"", line 197, in _assert_on_host\r\n raise exception_type(error_msg)\r\nAssertionError: [Chex] Assertion assert_type failed: Error in type compatibility check: input 0 has type uint8 but expected .\r\n",,terminal_output +1523,2840845,"TERMINAL",0,0,"srun: error: hkn0706: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0706:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0706 jafar]$ ",,terminal_output +1524,2891616,"sample.py",0,0,"",python,tab +1525,2892215,"sample.py",4800,0,"",python,selection_mouse +1526,2892216,"sample.py",4799,0,"",python,selection_command +1527,2892878,"sample.py",4718,0,"",python,selection_mouse +1528,2892878,"sample.py",4717,0,"",python,selection_command +1529,2894039,"sample.py",4987,0,"",python,selection_mouse +1530,2894046,"sample.py",4986,0,"",python,selection_command +1531,2894627,"sample.py",4923,0,"",python,selection_mouse +1532,2895142,"sample.py",4764,0,"",python,selection_mouse +1533,2895673,"sample.py",4846,0,"",python,selection_mouse +1534,2896332,"sample.py",4767,0,"",python,selection_mouse +1535,2896333,"sample.py",4766,0,"",python,selection_command +1536,2901918,"sample.py",0,0,"",python,tab +1537,2901918,"sample.py",5189,0,"",python,selection_mouse +1538,2902100,"sample.py",5187,4,"ssim",python,selection_mouse +1539,2905487,"sample.py",5364,0,"",python,selection_mouse +1540,2905534,"sample.py",5363,0,"",python,selection_command +1541,2908365,"sample.py",5158,0,"",python,selection_mouse +1542,2969999,"sample.py",4953,0,"",python,selection_mouse +1543,2970706,"sample.py",4696,0,"",python,selection_mouse +1544,2972916,"sample.py",4695,0,"",python,selection_command +1545,2973026,"sample.py",4718,0,"\n",python,content +1546,2974063,"sample.py",4719,0,"v",python,content +1547,2974064,"sample.py",4720,0,"",python,selection_keyboard +1548,2974163,"sample.py",4720,0,"i",python,content +1549,2974166,"sample.py",4721,0,"",python,selection_keyboard +1550,2975488,"sample.py",4719,2,"video_batch",python,content +1551,2975764,"sample.py",4730,0," ",python,content +1552,2975767,"sample.py",4731,0,"",python,selection_keyboard +1553,2975934,"sample.py",4731,0,"=",python,content +1554,2975935,"sample.py",4732,0,"",python,selection_keyboard +1555,2976017,"sample.py",4732,0," ",python,content +1556,2976018,"sample.py",4733,0,"",python,selection_keyboard +1557,2976253,"sample.py",4733,0,"j",python,content +1558,2976254,"sample.py",4734,0,"",python,selection_keyboard +1559,2976333,"sample.py",4734,0,"a",python,content +1560,2976335,"sample.py",4735,0,"",python,selection_keyboard +1561,2976526,"sample.py",4735,0,"x",python,content 
+1562,2976528,"sample.py",4736,0,"",python,selection_keyboard +1563,2976671,"sample.py",4736,0,".",python,content +1564,2976672,"sample.py",4737,0,"",python,selection_keyboard +1565,2977405,"sample.py",4737,0,"a",python,content +1566,2977407,"sample.py",4738,0,"",python,selection_keyboard +1567,2977607,"sample.py",4738,0,"r",python,content +1568,2977609,"sample.py",4739,0,"",python,selection_keyboard +1569,2977763,"sample.py",4739,0,"r",python,content +1570,2977765,"sample.py",4740,0,"",python,selection_keyboard +1571,2977946,"sample.py",4740,0,"a",python,content +1572,2977948,"sample.py",4741,0,"",python,selection_keyboard +1573,2978169,"sample.py",4741,0,"y",python,content +1574,2978171,"sample.py",4742,0,"",python,selection_keyboard +1575,2978622,"sample.py",4737,5,"",python,content +1576,2979164,"sample.py",4736,1,"",python,content +1577,2979314,"sample.py",4735,1,"",python,content +1578,2979445,"sample.py",4734,1,"",python,content +1579,2979844,"sample.py",4734,0,"p",python,content +1580,2979845,"sample.py",4735,0,"",python,selection_keyboard +1581,2980211,"sample.py",4734,1,"",python,content +1582,2980413,"sample.py",4734,0,"n",python,content +1583,2980415,"sample.py",4735,0,"",python,selection_keyboard +1584,2980570,"sample.py",4735,0,"p",python,content +1585,2980572,"sample.py",4736,0,"",python,selection_keyboard +1586,2980794,"sample.py",4736,0,".",python,content +1587,2980795,"sample.py",4737,0,"",python,selection_keyboard +1588,2981102,"sample.py",4737,0,"a",python,content +1589,2981103,"sample.py",4738,0,"",python,selection_keyboard +1590,2981424,"sample.py",4738,0,"r",python,content +1591,2981425,"sample.py",4739,0,"",python,selection_keyboard +1592,2981701,"sample.py",4739,0,"a",python,content +1593,2981703,"sample.py",4740,0,"",python,selection_keyboard +1594,2981904,"sample.py",4740,0,"y",python,content +1595,2981905,"sample.py",4741,0,"",python,selection_keyboard +1596,2982269,"sample.py",4740,1,"",python,content +1597,2982381,"sample.py",4739,1,"",python,content +1598,2982518,"sample.py",4739,0,"r",python,content +1599,2982520,"sample.py",4740,0,"",python,selection_keyboard +1600,2982690,"sample.py",4740,0,"a",python,content +1601,2982691,"sample.py",4741,0,"",python,selection_keyboard +1602,2982881,"sample.py",4741,0,"y",python,content +1603,2982882,"sample.py",4742,0,"",python,selection_keyboard +1604,2983753,"sample.py",4742,0,"()",python,content +1605,2983754,"sample.py",4743,0,"",python,selection_keyboard +1606,2983941,"sample.py",4743,0,"v",python,content +1607,2983943,"sample.py",4744,0,"",python,selection_keyboard +1608,2984049,"sample.py",4744,0,"i",python,content +1609,2984051,"sample.py",4745,0,"",python,selection_keyboard +1610,2984161,"sample.py",4745,0,"d",python,content +1611,2984163,"sample.py",4746,0,"",python,selection_keyboard +1612,2984259,"sample.py",4746,0,"e",python,content +1613,2984260,"sample.py",4747,0,"",python,selection_keyboard +1614,2984951,"sample.py",4743,4,"video_batch",python,content +1615,2985343,"sample.py",4753,0,"",python,selection_command +1616,2985956,"sample.py",4755,0,"\n",python,content +1617,2986881,"sample.py",4756,0,"\n",python,content +1618,2987160,"sample.py",4757,0,"v",python,content +1619,2987161,"sample.py",4758,0,"",python,selection_keyboard +1620,2987321,"sample.py",4758,0,"i",python,content +1621,2987322,"sample.py",4759,0,"",python,selection_keyboard +1622,2988055,"sample.py",4757,2,"video_batch",python,content +1623,2989197,"sample.py",4768,0," ",python,content 
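# --- Editor's sketch (not part of the recording): the dtype fix being typed
# into sample.py in the keystrokes above. dm_pix.ssim runs
# chex.assert_type([a, b], float), so the uint8 batches behind the earlier
# AssertionError must be cast and normalized first. Shapes and names here are
# illustrative stand-ins for the real video batches.
import jax.numpy as jnp
import dm_pix as pix

gt = jnp.zeros((2, 4, 64, 64, 3), dtype=jnp.uint8)     # stand-in ground truth
recon = jnp.zeros((2, 4, 64, 64, 3), dtype=jnp.uint8)  # stand-in reconstruction

gt = gt.astype(jnp.float32) / 255
recon = recon.astype(jnp.float32) / 255
print(pix.ssim(gt, recon).mean())  # float inputs now satisfy the assertion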
+1624,2989198,"sample.py",4769,0,"",python,selection_keyboard +1625,2989380,"sample.py",4769,0,"=",python,content +1626,2989381,"sample.py",4770,0,"",python,selection_keyboard +1627,2989654,"sample.py",4770,0," ",python,content +1628,2989655,"sample.py",4771,0,"",python,selection_keyboard +1629,2990740,"sample.py",4771,0,"v",python,content +1630,2990741,"sample.py",4772,0,"",python,selection_keyboard +1631,2990806,"sample.py",4772,0,"i",python,content +1632,2990808,"sample.py",4773,0,"",python,selection_keyboard +1633,2991499,"sample.py",4771,2,"video_batch",python,content +1634,2991897,"sample.py",4782,0,".",python,content +1635,2991898,"sample.py",4783,0,"",python,selection_keyboard +1636,2992252,"sample.py",4783,0,"a",python,content +1637,2992252,"sample.py",4784,0,"",python,selection_keyboard +1638,2992388,"sample.py",4784,0,"s",python,content +1639,2992390,"sample.py",4785,0,"",python,selection_keyboard +1640,2993284,"sample.py",4783,2,"astype",python,content +1641,2994062,"sample.py",4789,0,"()",python,content +1642,2994062,"sample.py",4790,0,"",python,selection_keyboard +1643,2994589,"sample.py",4790,0,"a",python,content +1644,2994590,"sample.py",4791,0,"",python,selection_keyboard +1645,2994767,"sample.py",4791,0,"r",python,content +1646,2994767,"sample.py",4792,0,"",python,selection_keyboard +1647,2994950,"sample.py",4792,0,"g",python,content +1648,2994951,"sample.py",4793,0,"",python,selection_keyboard +1649,2995062,"sample.py",4793,0,"s",python,content +1650,2995064,"sample.py",4794,0,"",python,selection_keyboard +1651,2995179,"sample.py",4794,0,".",python,content +1652,2995180,"sample.py",4795,0,"",python,selection_keyboard +1653,2996161,"sample.py",4795,0,"d",python,content +1654,2996162,"sample.py",4796,0,"",python,selection_keyboard +1655,2996933,"sample.py",4796,0,"t",python,content +1656,2996934,"sample.py",4797,0,"",python,selection_keyboard +1657,2997583,"sample.py",4795,2,"dtype",python,content +1658,2998289,"sample.py",4801,0,"",python,selection_command +1659,2998568,"sample.py",4801,0," ",python,content +1660,2998570,"sample.py",4802,0,"",python,selection_keyboard +1661,2999221,"sample.py",4802,0,"/",python,content +1662,2999222,"sample.py",4803,0,"",python,selection_keyboard +1663,3000281,"sample.py",4803,0,"2",python,content +1664,3000282,"sample.py",4804,0,"",python,selection_keyboard +1665,3000623,"sample.py",4804,0,"5",python,content +1666,3000625,"sample.py",4805,0,"",python,selection_keyboard +1667,3000797,"sample.py",4805,0,"5",python,content +1668,3000798,"sample.py",4806,0,"",python,selection_keyboard +1669,3001775,"sample.py",4806,0,".",python,content +1670,3001775,"sample.py",4807,0,"",python,selection_keyboard +1671,3002251,"sample.py",4806,0,"",python,selection_command +1672,3002444,"sample.py",4805,0,"",python,selection_command +1673,3002597,"sample.py",4804,0,"",python,selection_command +1674,3002754,"sample.py",4803,0,"",python,selection_command +1675,3003134,"sample.py",4803,0," ",python,content +1676,3003135,"sample.py",4804,0,"",python,selection_keyboard +1677,3003510,"sample.py",4803,0,"",python,selection_command +1678,3005178,"sample.py",4865,0,"",python,selection_mouse +1679,3005327,"sample.py",4865,1," ",python,selection_mouse +1680,3005911,"sample.py",4860,0,"",python,selection_mouse +1681,3006050,"sample.py",4858,5,"batch",python,selection_mouse +1682,3006900,"sample.py",4880,0,"",python,selection_mouse +1683,3007041,"sample.py",4878,11,"video_batch",python,selection_mouse +1684,3029761,"TERMINAL",0,0,"sh 
slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/",,terminal_output +1685,3041964,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --seq_len=2 \\r\n --seed=69 \\r\n --batch_size=10 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output +1686,3042105,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=653563\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0706\r\nSLURM_JOB_START_TIME=1753717895\r\nSLURM_STEP_NODELIST=hkn0706\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753721495\r\nSLURM_PMI2_SRUN_PORT=35695\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3380176\r\nSLURM_PTY_PORT=33197\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=31\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e8.hkn0706\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0706\r\nSLURM_SRUN_COMM_PORT=44649\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3380176\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0706\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=44649\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0706\r\n",,terminal_output +1687,3042237,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +1688,3048922,"TERMINAL",0,0,"2025-07-28 18:36:01.311664: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1689,3062055,"TERMINAL",0,0,"2025-07-28 18:36:14.365301: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1690,3084501,"TERMINAL",0,0,"2025-07-28 18:36:36.981482: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1691,3088279,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 53000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/053000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 54000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/054000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 55000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/055000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/020000/metrics/metrics not found.\r\n",,terminal_output +1692,3135928,"TERMINAL",0,0,"2025-07-28 18:37:28.399546: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-28 18:37:28.400030: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-28 18:37:28.400061: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1693,3161664,"TERMINAL",0,0,"SSIM: 0.3301030397415161\r\n",,terminal_output +1694,3163524,"TERMINAL",0,0,"]0;tum_cte0515@hkn0706:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0706 jafar]$ ",,terminal_output +1695,3264655,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n# source .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\ndynamics_ckpt_dir=$1\necho $dynamics_ckpt_dir\n\nenv | grep SLURM\n\nsrun python sample.py \\n --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --seed=69 \\n --batch_size=10 \\n --start_frame=0 \\n --data_dir $array_records_dir\n\n# srun python sample.py \\n # --checkpoint $dynamics_ckpt_dir \\n # --start_frame=0 \\n # --batch_size=12 \\n # --seq_len=2 \\n # --data_dir $array_records_dir\n",shellscript,tab +1696,3267213,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",788,0,"",shellscript,selection_mouse +1697,3268223,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",787,1,"",shellscript,content +1698,3268355,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",786,1,"",shellscript,content +1699,3268737,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",785,0,"",shellscript,selection_command +1700,3269677,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",775,14,"",shellscript,content +1701,3269706,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",779,0,"",shellscript,selection_command +1702,3306521,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab +1703,3309029,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=01:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/yoloruns/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/yoloruns/%x_%j.log\n#SBATCH --job-name=train_dynamics_overfit_sample_causal\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n# source .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/causal/overfit-seed69-1-no-noise/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n --seed=69 \\n --num_steps=5000 \\n --warmup_steps=0 \\n --wsd_decay_steps=0 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --init_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=100 
\\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-overfit-no-noise$slurm_job_id \\n --tags dynamics causal overfit \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir \\n --dyna_dim=128 \\n --dyna_num_blocks=2 \\n --dyna_num_heads=4\n ",shellscript,tab +1704,3311074,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab +1705,3311936,"models/dynamics.py",0,0,"from typing import Dict, Any\n\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport einops\n\nfrom utils.nn import STTransformer\n\n\nclass DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n ffn_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n spatial_bert=True,\n use_flash_attention=self.use_flash_attention,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n batch_size = vid_embed.shape[0]\n _rng_prob, *_rngs_mask = jax.random.split(batch[""mask_rng""], batch_size + 1)\n mask_prob = jax.random.uniform(\n _rng_prob, shape=(batch_size,), minval=self.mask_limit\n )\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(jnp.asarray(_rngs_mask), mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:\n mask = None\n\n # --- Predict transition ---\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n logits = self.dynamics(vid_embed)\n return dict(token_logits=logits, mask=mask)\n\n\nclass DynamicsAutoregressive(nn.Module):\n """"""Autoregressive (causal) dynamics model""""""\n\n model_dim: int\n ffn_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n self.use_flash_attention,\n spatial_bert=False,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.action_up = nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n vid_embed = self.patch_embed(batch[""video_tokens""])\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n # vid_embed_padded = jnp.pad(vid_embed, ((0, 0), (1, 0), (1, 0), (0, 0)))\n # logits = self.dynamics(vid_embed_padded)[:, :-1, :-1]\n vid_embed_padded = jnp.pad(vid_embed, ((0, 0), (0, 
0), (1, 0), (0, 0)))\n\n # FIXME mihir: HACK\n # rng1, _rng = jax.random.split(batch[""mask_rng""])\n # noise = jax.random.normal(_rng, vid_embed_padded.shape)\n # logits = self.dynamics(noise)[:, :, :-1]\n\n # rng1, _rng = jax.random.split(batch[""mask_rng""])\n # noise = 0.25 * jax.random.normal(_rng, vid_embed_padded.shape)\n # logits = self.dynamics(vid_embed_padded + noise)[:, :, :-1]\n\n logits = self.dynamics(vid_embed_padded)[:, :, :-1]\n\n mask = jnp.ones(vid_embed.shape[:-1])\n return dict(token_logits=logits, mask=mask)\n",python,tab +1706,3312136,"models/dynamics.py",89,4125,"\nfrom utils.nn import STTransformer\n\n\nclass DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n ffn_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n batch_size = vid_embed.shape[0]\n _rng_prob, *_rngs_mask = jax.random.split(batch[""mask_rng""], batch_size + 1)\n mask_prob = jax.random.uniform(\n _rng_prob, shape=(batch_size,), minval=self.mask_limit\n )\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(jnp.asarray(_rngs_mask), mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:\n mask = None\n\n # --- Predict transition ---\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n logits = self.dynamics(vid_embed)\n",python,content +1707,3376474,"TERMINAL",0,0,"bash",,terminal_focus +1708,3377856,"TERMINAL",0,0,"srun",,terminal_focus +1709,3380888,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab +1710,3385049,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/",,terminal_output +1711,3385189,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir 
\\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --seq_len=2 \\r\n --batch_size=10 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output +1712,3385406,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=653563\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0706\r\nSLURM_JOB_START_TIME=1753717895\r\nSLURM_STEP_NODELIST=hkn0706\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753721495\r\nSLURM_PMI2_SRUN_PORT=35695\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3380176\r\nSLURM_PTY_PORT=33197\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=31\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e8.hkn0706\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0706\r\nSLURM_SRUN_COMM_PORT=44649\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3380176\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0706\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=44649\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0706\r\n",,terminal_output +1713,3385448,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +1714,3390442,"TERMINAL",0,0,"bash",,terminal_focus +1715,3391046,"TERMINAL",0,0,"2025-07-28 18:41:43.513773: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1716,3404079,"TERMINAL",0,0,"2025-07-28 18:41:56.367944: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1717,3411897,"TERMINAL",0,0,"srun",,terminal_focus +1718,3426571,"TERMINAL",0,0,"2025-07-28 18:42:18.994085: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1719,3430071,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 53000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/053000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 54000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/054000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 55000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/055000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237/020000/metrics/metrics not found.\r\n",,terminal_output +1720,3444404,"sample.py",0,0,"",python,tab +1721,3444405,"sample.py",4870,0,"",python,selection_mouse +1722,3447574,"sample.py",5081,0,"",python,selection_mouse +1723,3448526,"sample.py",5057,0,"",python,selection_mouse +1724,3449146,"sample.py",5091,0,"",python,selection_mouse +1725,3456808,"genie.py",0,0,"",python,tab +1726,3464617,"genie.py",5776,0,"",python,selection_mouse +1727,3464762,"genie.py",5763,18,"generation_step_fn",python,selection_mouse +1728,3471258,"genie.py",6943,0,"",python,selection_mouse +1729,3471405,"genie.py",6929,16,"final_token_idxs",python,selection_mouse +1730,3472135,"genie.py",6938,0,"",python,selection_mouse +1731,3472135,"genie.py",6929,16,"final_token_idxs",python,selection_mouse +1732,3480201,"TERMINAL",0,0,"2025-07-28 18:43:12.669531: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-28 18:43:12.669968: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-28 18:43:12.669989: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1733,3483529,"genie.py",7089,0,"",python,selection_mouse +1734,3483685,"genie.py",7077,16,"final_token_idxs",python,selection_mouse +1735,3494174,"genie.py",6939,0,"",python,selection_mouse +1736,3494327,"genie.py",6929,16,"final_token_idxs",python,selection_mouse +1737,3495340,"genie.py",6953,0,"",python,selection_mouse +1738,3495481,"genie.py",6948,11,"final_carry",python,selection_mouse +1739,3496223,"genie.py",6940,0,"",python,selection_mouse +1740,3496381,"genie.py",6929,16,"final_token_idxs",python,selection_mouse +1741,3497195,"genie.py",6963,0,"",python,selection_mouse +1742,3498001,"genie.py",6953,0,"",python,selection_mouse +1743,3498153,"genie.py",6948,11,"final_carry",python,selection_mouse +1744,3498738,"genie.py",6944,0,"",python,selection_mouse +1745,3498899,"genie.py",6929,16,"final_token_idxs",python,selection_mouse +1746,3501457,"genie.py",6963,0,"",python,selection_mouse +1747,3502051,"genie.py",6951,0,"",python,selection_mouse +1748,3502214,"genie.py",6948,11,"final_carry",python,selection_mouse +1749,3505180,"TERMINAL",0,0,"SSIM: 0.4515930116176605\r\n",,terminal_output +1750,3505874,"genie.py",6444,0,"",python,selection_mouse +1751,3506030,"genie.py",6428,19,"final_carry_maskgit",python,selection_mouse +1752,3506859,"TERMINAL",0,0,"]0;tum_cte0515@hkn0706:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0706 jafar]$ ",,terminal_output +1753,3506946,"genie.py",6459,0,"",python,selection_mouse +1754,3507135,"genie.py",6453,7,"loop_fn",python,selection_mouse +1755,3507876,"genie.py",6468,0,"",python,selection_mouse +1756,3508052,"genie.py",6461,18,"init_carry_maskgit",python,selection_mouse +1757,3508697,"genie.py",6455,0,"",python,selection_mouse +1758,3510231,"genie.py",6456,0,"",python,selection_mouse +1759,3511492,"genie.py",6278,0,"",python,selection_mouse +1760,3511640,"genie.py",6265,18,"init_carry_maskgit",python,selection_mouse +1761,3513425,"genie.py",6343,0,"",python,selection_mouse +1762,3513550,"genie.py",6330,17,"masked_token_idxs",python,selection_mouse +1763,3526345,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0706 jafar]$ ",,terminal_output +1764,3712040,"TERMINAL",0,0,"bash",,terminal_focus +1765,3713515,"TERMINAL",0,0,"srun",,terminal_focus +1766,3728299,"TERMINAL",0,0,"s",,terminal_output +1767,3728927,"TERMINAL",0,0,"",,terminal_output +1768,3729225,"TERMINAL",0,0,"[?25lq[?25h",,terminal_output +1769,3729345,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1770,3729429,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1771,3729513,"TERMINAL",0,0,"[?25lu[?25h[?25le[?25h",,terminal_output +1772,3729690,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0706.localdomain: Mon Jul 28 18:47:22 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3379590 accelerat train_dy tum_cte0 PD\t0:00\t 3 (Priority)3371238 accelerat train_dy tum_cte0 R 1:57:58\t 2 hkn[0509,0532]3377693 accelerat train_dy tum_cte0 R 1-00:14:49\t 8 hkn[0426,0436,0510,0524,0603,0627,0805,0812]3373408 accelerat train_dy tum_cte0 R 1-05:57:17\t 8 hkn[0417-0419,0422,0527,0621,0625,0628]3380176 accelerat interact tum_cte0 R55:47\t 1 hkn0706",,terminal_output +1773,3730692,"TERMINAL",0,0,"395088",,terminal_output +1774,3731675,"TERMINAL",0,0,"48:00199",,terminal_output +1775,3732695,"TERMINAL",0,0,"5122050",,terminal_output +1776,3733733,"TERMINAL",0,0,"62311",,terminal_output 
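# --- Editor's sketch (not part of the recording): the jax.lax.scan carry
# pattern of generation_step_fn being inspected in genie.py above. Shapes are
# toy values, and the random frame merely stands in for one per-frame MaskGIT
# refinement pass.
import jax
import jax.numpy as jnp

def step_fn(carry, t):
    rng, tokens = carry  # tokens: (B, S, N)
    rng, step_rng = jax.random.split(rng)
    frame = jax.random.randint(step_rng, (tokens.shape[0], tokens.shape[2]), 0, 16)
    return (rng, tokens.at[:, t].set(frame)), None  # fill frame t, keep the rest

tokens = jnp.zeros((2, 8, 5), dtype=jnp.int32)  # (B, S, N); frames 0-2 given
carry = (jax.random.PRNGKey(0), tokens)
(_, final_tokens), _ = jax.lax.scan(step_fn, carry, jnp.arange(3, 8))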
+1777,3734738,"TERMINAL",0,0,"73422",,terminal_output +1778,3735750,"TERMINAL",0,0,"84533",,terminal_output +1779,3736552,"TERMINAL",0,0,"bash",,terminal_focus +1780,3736794,"TERMINAL",0,0,"95644",,terminal_output +1781,3737822,"TERMINAL",0,0,"306755",,terminal_output +1782,3738807,"TERMINAL",0,0,"17866",,terminal_output +1783,3739438,"TERMINAL",0,0,"scancel 3373408",,terminal_command +1784,3739495,"TERMINAL",0,0,"]633;E;2025-07-28 18:47:31 scancel 3373408;54c1b5ea-730a-4f9f-ba91-1c5dfbf41461]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237]633;D;0",,terminal_output +1785,3739822,"TERMINAL",0,0,"M23373408 accelerat train_dy tum_cte0 CG 1-05:57:26\t 8 hkn[0417-0419,0422,0527,0621,0625,0628]897",,terminal_output +1786,3740450,"TERMINAL",0,0,"srun",,terminal_focus +1787,3740868,"TERMINAL",0,0,"395:008",,terminal_output +1788,3741679,"TERMINAL",0,0,"bash",,terminal_focus +1789,3741885,"TERMINAL",0,0,"41019",,terminal_output +1790,3742875,"TERMINAL",0,0,"5126:00",,terminal_output +1791,3743900,"TERMINAL",0,0,"6231",,terminal_output +1792,3744959,"TERMINAL",0,0,"7342",,terminal_output +1793,3745614,"TERMINAL",0,0,"scancel 3371238",,terminal_command +1794,3745931,"TERMINAL",0,0,"8123 1:58:14\t 2\t0509,0532]3408CG 1-05:57:26\t 8 hkn[0417-0419,0422,0527,0621,0625,0628]9590PD 0:00\t 3 (Priority)53",,terminal_output +1795,3747008,"TERMINAL",0,0,"964",,terminal_output +1796,3747877,"TERMINAL",0,0,"srun",,terminal_focus +1797,3747964,"TERMINAL",0,0,"4075",,terminal_output +1798,3749054,"TERMINAL",0,0,"186",,terminal_output +1799,3749381,"TERMINAL",0,0,"bash",,terminal_focus +1800,3750028,"TERMINAL",0,0,"297",,terminal_output +1801,3751017,"TERMINAL",0,0,"3108",,terminal_output +1802,3751221,"TERMINAL",0,0,"scancel 3379590",,terminal_command +1803,3751228,"TERMINAL",0,0,"]633;E;2025-07-28 18:47:43 scancel 3379590;54c1b5ea-730a-4f9f-ba91-1c5dfbf41461]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237]633;D;0",,terminal_output +1804,3752037,"TERMINAL",0,0,"\r47693 R 1-00:15:11\t 8 hkn[0426,0436,0510,0524,0603,0627,0805,0812]80176interact 56:09\t 1 hkn0706",,terminal_output +1805,3752729,"TERMINAL",0,0,"srun",,terminal_focus +1806,3753412,"TERMINAL",0,0,"5210",,terminal_output +1807,3754257,"TERMINAL",0,0,"bash",,terminal_focus +1808,3754431,"TERMINAL",0,0,"631",,terminal_output +1809,3755434,"TERMINAL",0,0,"742",,terminal_output +1810,3755920,"TERMINAL",0,0,"scancel 3377693",,terminal_command +1811,3755926,"TERMINAL",0,0,"]633;E;2025-07-28 18:47:48 scancel 3377693;54c1b5ea-730a-4f9f-ba91-1c5dfbf41461]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237]633;D;0",,terminal_output +1812,3756483,"TERMINAL",0,0,"876930:15:1526,0436,0510,0524,0603,0627,0805,0812]3408CG 1-05:57:2617-0419,0422,0527,0621,0625,0628]3",,terminal_output +1813,3757490,"TERMINAL",0,0,"94",,terminal_output +1814,3758511,"TERMINAL",0,0,"505",,terminal_output +1815,3759551,"TERMINAL",0,0,"16",,terminal_output +1816,3759652,"TERMINAL",0,0,"srun",,terminal_focus +1817,3760543,"TERMINAL",0,0,"27",,terminal_output +1818,3761538,"TERMINAL",0,0,"39",,terminal_output +1819,3762350,"TERMINAL",0,0,"bash",,terminal_focus +1820,3762568,"TERMINAL",0,0,"520",,terminal_output +1821,3763654,"TERMINAL",0,0,"61",,terminal_output 
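The `SSIM: 0.4515…` line above comes from the evaluation pass; elsewhere in this repository SSIM and PSNR are computed with dm_pix on frames clipped to [0, 1]. A minimal sketch of that metric computation (batch and frame shapes are illustrative, chosen to match the 90x160 images configured later in this file):

    import dm_pix as pix
    import jax.numpy as jnp

    gt = jnp.zeros((8, 90, 160, 3))          # ground-truth frames in [0, 1]
    recon = jnp.full((8, 90, 160, 3), 0.5)   # reconstructed frames in [0, 1]

    psnr = pix.psnr(gt, recon).mean()  # averaged over the batch
    ssim = pix.ssim(gt, recon).mean()
    print(f"SSIM: {ssim}")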
+1822,3764340,"TERMINAL",0,0,"idling",,terminal_command +1823,3764405,"TERMINAL",0,0,"]633;E;2025-07-28 18:47:56 idling;54c1b5ea-730a-4f9f-ba91-1c5dfbf41461]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1993.localdomain: Mon Jul 28 18:47:56 2025Partition dev_cpuonly: 12 nodes idle\rPartition cpuonly: 180 nodes idle\rPartition dev_accelerated:\t 0 nodes idle\rPartition accelerated: 18 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 1 nodes idle\rPartition large:\t 8 nodes idle",,terminal_output +1824,3764607,"TERMINAL",0,0,"72",,terminal_output +1825,3765541,"TERMINAL",0,0,"7\t",,terminal_output +1826,3765627,"TERMINAL",0,0,"83",,terminal_output +1827,3766699,"TERMINAL",0,0,"8\t",,terminal_output +1828,3766718,"TERMINAL",0,0,"94",,terminal_output +1829,3767536,"TERMINAL",0,0,"9\t",,terminal_output +1830,3767664,"TERMINAL",0,0,"8:005",,terminal_output +1831,3768717,"TERMINAL",0,0,"8:01\t",,terminal_output +1832,3768727,"TERMINAL",0,0,"16",,terminal_output +1833,3769626,"TERMINAL",0,0,"2\t",,terminal_output +1834,3769698,"TERMINAL",0,0,"27",,terminal_output +1835,3770694,"TERMINAL",0,0,"3\t",,terminal_output +1836,3770719,"TERMINAL",0,0,"38",,terminal_output +1837,3771709,"TERMINAL",0,0,"4\t",,terminal_output +1838,3771740,"TERMINAL",0,0,"49",,terminal_output +1839,3772765,"TERMINAL",0,0,"5\t",,terminal_output +1840,3772792,"TERMINAL",0,0,"530",,terminal_output +1841,3773808,"TERMINAL",0,0,"61",,terminal_output +1842,3773840,"TERMINAL",0,0,"6\t",,terminal_output +1843,3774794,"TERMINAL",0,0,"72",,terminal_output +1844,3774845,"TERMINAL",0,0,"7\t",,terminal_output +1845,3775807,"TERMINAL",0,0,"83",,terminal_output +1846,3775886,"TERMINAL",0,0,"8\t",,terminal_output +1847,3776826,"TERMINAL",0,0,"94",,terminal_output +1848,3776954,"TERMINAL",0,0,"9\t",,terminal_output +1849,3777846,"TERMINAL",0,0,"105",,terminal_output +1850,3777982,"TERMINAL",0,0,"10\t",,terminal_output +1851,3778893,"TERMINAL",0,0,"16",,terminal_output +1852,3779027,"TERMINAL",0,0,"1\t",,terminal_output +1853,3779099,"TERMINAL",0,0,"bash",,terminal_focus +1854,3779981,"TERMINAL",0,0,"27",,terminal_output +1855,3780110,"TERMINAL",0,0,"2\t",,terminal_output +1856,3780921,"TERMINAL",0,0,"38",,terminal_output +1857,3781123,"TERMINAL",0,0,"3\t",,terminal_output +1858,3782131,"TERMINAL",0,0,"49",,terminal_output +1859,3782183,"TERMINAL",0,0,"4\t",,terminal_output +1860,3782967,"TERMINAL",0,0,"540",,terminal_output +1861,3783254,"TERMINAL",0,0,"5\t",,terminal_output +1862,3783978,"TERMINAL",0,0,"61",,terminal_output +1863,3784337,"TERMINAL",0,0,"6\t",,terminal_output +1864,3785094,"TERMINAL",0,0,"72",,terminal_output +1865,3785327,"TERMINAL",0,0,"7\t",,terminal_output +1866,3786024,"TERMINAL",0,0,"\r880176interact R 56:43\t 1 hkn0706",,terminal_output +1867,3786357,"TERMINAL",0,0,"8\t",,terminal_output +1868,3787091,"TERMINAL",0,0,"94",,terminal_output +1869,3787419,"TERMINAL",0,0,"9\t",,terminal_output +1870,3788069,"TERMINAL",0,0,"205",,terminal_output +1871,3788523,"TERMINAL",0,0,"20\t",,terminal_output +1872,3789093,"TERMINAL",0,0,"16",,terminal_output +1873,3789502,"TERMINAL",0,0,"1\t",,terminal_output +1874,3790120,"TERMINAL",0,0,"27",,terminal_output +1875,3790535,"TERMINAL",0,0,"2\t",,terminal_output +1876,3791145,"TERMINAL",0,0,"38",,terminal_output +1877,3791575,"TERMINAL",0,0,"4\t",,terminal_output +1878,3792164,"TERMINAL",0,0,"\r49",,terminal_output +1879,3792607,"TERMINAL",0,0,"5\t",,terminal_output +1880,3793143,"TERMINAL",0,0,"550",,terminal_output 
+1881,3793665,"TERMINAL",0,0,"6\t",,terminal_output +1882,3794147,"TERMINAL",0,0,"61",,terminal_output +1883,3794688,"TERMINAL",0,0,"7\t",,terminal_output +1884,3795184,"TERMINAL",0,0,"72",,terminal_output +1885,3795733,"TERMINAL",0,0,"8\t",,terminal_output +1886,3796256,"TERMINAL",0,0,"83",,terminal_output +1887,3796774,"TERMINAL",0,0,"9\t",,terminal_output +1888,3797285,"TERMINAL",0,0,"94",,terminal_output +1889,3797861,"TERMINAL",0,0,"30\t",,terminal_output +1890,3798231,"TERMINAL",0,0,"305",,terminal_output +1891,3798877,"TERMINAL",0,0,"1\t",,terminal_output +1892,3799262,"TERMINAL",0,0,"16",,terminal_output +1893,3800051,"TERMINAL",0,0,"2\t",,terminal_output +1894,3800265,"TERMINAL",0,0,"27",,terminal_output +1895,3800969,"TERMINAL",0,0,"3\t",,terminal_output +1896,3801282,"TERMINAL",0,0,"38",,terminal_output +1897,3802012,"TERMINAL",0,0,"4\t",,terminal_output +1898,3802334,"TERMINAL",0,0,"\r480176interact R 56:59\t 1 hkn0706",,terminal_output +1899,3803056,"TERMINAL",0,0,"5\t",,terminal_output +1900,3803333,"TERMINAL",0,0,"57:00",,terminal_output +1901,3804144,"TERMINAL",0,0,"6\t",,terminal_output +1902,3804332,"TERMINAL",0,0,"61",,terminal_output +1903,3805166,"TERMINAL",0,0,"7\t",,terminal_output +1904,3805350,"TERMINAL",0,0,"72",,terminal_output +1905,3806397,"TERMINAL",0,0,"8\t",,terminal_output +1906,3806430,"TERMINAL",0,0,"83",,terminal_output +1907,3807334,"TERMINAL",0,0,"9\t",,terminal_output +1908,3807390,"TERMINAL",0,0,"94",,terminal_output +1909,3808342,"TERMINAL",0,0,"40\t",,terminal_output +1910,3808412,"TERMINAL",0,0,"405",,terminal_output +1911,3809328,"TERMINAL",0,0,"11",,terminal_output +1912,3809465,"TERMINAL",0,0,"16",,terminal_output +1913,3810528,"TERMINAL",0,0,"2\t",,terminal_output +1914,3810529,"TERMINAL",0,0,"27",,terminal_output +1915,3811621,"TERMINAL",0,0,"3\t",,terminal_output +1916,3811622,"TERMINAL",0,0,"38",,terminal_output +1917,3812467,"TERMINAL",0,0,"4\t",,terminal_output +1918,3812505,"TERMINAL",0,0,"49",,terminal_output +1919,3813665,"TERMINAL",0,0,"5\t",,terminal_output +1920,3813666,"TERMINAL",0,0,"510",,terminal_output +1921,3814592,"TERMINAL",0,0,"61",,terminal_output +1922,3814593,"TERMINAL",0,0,"6\t",,terminal_output +1923,3815640,"TERMINAL",0,0,"83",,terminal_output +1924,3815640,"TERMINAL",0,0,"8\t",,terminal_output +1925,3816561,"TERMINAL",0,0,"94",,terminal_output +1926,3816644,"TERMINAL",0,0,"9\t",,terminal_output +1927,3817572,"TERMINAL",0,0,"505",,terminal_output +1928,3817711,"TERMINAL",0,0,"50\t",,terminal_output +1929,3820041,"TERMINAL",0,0,"16",,terminal_output +1930,3820042,"TERMINAL",0,0,"1\t",,terminal_output +1931,3820108,"TERMINAL",0,0,"27",,terminal_output +1932,3820108,"TERMINAL",0,0,"2\t",,terminal_output +1933,3849137,"TERMINAL",0,0,"38",,terminal_output +1934,3849574,"TERMINAL",0,0,"49",,terminal_output +1935,3849574,"TERMINAL",0,0,"3\t4\t",,terminal_output +1936,3853947,"TERMINAL",0,0,"5\t6\t7\t8\t",,terminal_output +1937,3853947,"TERMINAL",0,0,"520617283",,terminal_output +1938,3854015,"TERMINAL",0,0,"9\t9:00\t1\t2\t3\t",,terminal_output +1939,3854015,"TERMINAL",0,0,"949:00516273849",,terminal_output +1940,3860973,"utils/nn.py",0,0,"import math\nfrom typing import Tuple\n\nfrom flax import linen as nn\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\nclass PositionalEncoding(nn.Module):\n """"""https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/JAX/tutorial6/Transformers_and_MHAttention.html""""""\n\n d_model: int # Hidden dimensionality of the input.\n max_len: int = 5000 # Maximum length of a 
sequence to expect.\n\n def setup(self):\n # Create matrix of [SeqLen, HiddenDim] representing the positional encoding for max_len inputs\n self.pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n self.pe = self.pe.at[:, 0::2].set(jnp.sin(position * div_term))\n self.pe = self.pe.at[:, 1::2].set(jnp.cos(position * div_term))\n\n def __call__(self, x):\n x = x + self.pe[: x.shape[2]]\n return x\n\n\nclass STBlock(nn.Module):\n dim: int\n ffn_dim: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n @nn.remat\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=False\n ),\n )(z)\n x = x + z\n\n # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n causal_mask = jnp.tri(z.shape[-2])\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n # FIXME (f.srambical): check whether we should still pass the mask if we set is_causal=True\n )(z, mask=causal_mask)\n x = x + z\n x = x.swapaxes(1, 2)\n\n # --- Feedforward ---\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n z = nn.Dense(\n self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.gelu(z)\n z = nn.Dense(\n self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n x = x + z\n\n return x\n\n\nclass STTransformer(nn.Module):\n model_dim: int\n ffn_dim: int\n out_dim: int\n num_blocks: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n x = nn.Sequential(\n [\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n ]\n )(x)\n for _ in range(self.num_blocks):\n x = STBlock(\n dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )(x)\n x = nn.Dense(\n self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n return x # (B, T, E)\n\n\ndef normalize(x):\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nn.Module):\n latent_dim: int\n num_latents: int\n dropout: float\n\n def setup(self):\n self.codebook = normalize(\n self.param(\n ""codebook"",\n nn.initializers.lecun_uniform(),\n (self.num_latents, self.latent_dim),\n )\n )\n self.drop = nn.Dropout(self.dropout, deterministic=False)\n\n def __call__(\n self, x: jax.Array, training: bool\n ) -> 
Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x = normalize(x)\n codebook = normalize(self.codebook)\n distance = -jnp.matmul(x, codebook.T)\n if training:\n dropout_key = self.make_rng(""dropout"")\n distance = self.drop(distance, rng=dropout_key)\n\n # --- Get indices and embeddings ---\n indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]\n\n # --- Straight through estimator ---\n z_q = x + jax.lax.stop_gradient(z - x)\n return z_q, z, x, indices\n\n def get_codes(self, indices: jax.Array):\n return self.codebook[indices]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool):\n """"""\n Create an attention function that uses flash attention if enabled.\n\n Flax MultiHeadAttention provides tensors with shape (batch..., length, num_heads, head_dim)\n jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim).\n\n We need to reshape to ensure compatibility. cuDNN's flash attention additionally\n requires a sequence length that is a multiple of 4. We pad the sequence length to the nearest\n multiple of 4 and mask accordingly.\n """"""\n\n def attention_fn(query, key, value, bias=None, mask=None, **kwargs):\n implementation = ""cudnn"" if use_flash_attention else None\n\n def _rearrange(x):\n return einops.rearrange(x, ""... l h d -> (...) l h d"")\n\n def _pad(x):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n\n def _fuse_masks(mask: jax.Array, attention_mask: jax.Array) -> jax.Array:\n mask_bool = mask.astype(jnp.bool_)\n expanded_mask = jnp.pad(\n mask_bool, ((0, pad_size), (0, pad_size)), constant_values=False\n )\n return jnp.logical_and(attention_mask, expanded_mask)\n\n original_shape = query.shape\n original_seq_len = query.shape[-3]\n\n # Pad to nearest multiple of 4\n target_seq_len = ((original_seq_len + 3) // 4) * 4\n pad_size = target_seq_len - original_seq_len\n\n query_4d = _pad(_rearrange(query))\n key_4d = _pad(_rearrange(key))\n value_4d = _pad(_rearrange(value))\n\n attention_mask = jnp.ones((target_seq_len, target_seq_len), dtype=jnp.bool_)\n attention_mask = attention_mask.at[original_seq_len:, :].set(False)\n attention_mask = attention_mask.at[:, original_seq_len:].set(False)\n\n mask_4d = (\n _fuse_masks(mask, attention_mask) if mask is not None else attention_mask\n )\n mask_4d = mask_4d[jnp.newaxis, jnp.newaxis, :, :] # (1, 1, seq_len, seq_len)\n\n bias_4d = _pad(_rearrange(bias)) if bias is not None else None\n\n output_4d = jax.nn.dot_product_attention(\n query=query_4d,\n key=key_4d,\n value=value_4d,\n bias=bias_4d,\n mask=mask_4d,\n implementation=implementation,\n is_causal=is_causal,\n **kwargs\n )\n return output_4d[..., :original_seq_len, :, :].reshape(original_shape)\n\n return attention_fn\n",python,tab +1941,3863107,"TERMINAL",0,0,"53061",,terminal_output +1942,3863108,"TERMINAL",0,0,"4\t5\t",,terminal_output +1943,3865042,"TERMINAL",0,0,"728394",,terminal_output +1944,3865043,"TERMINAL",0,0,"6\t7\t8\t9\t",,terminal_output +1945,3865043,"TERMINAL",0,0,"1051627",,terminal_output +1946,3865043,"TERMINAL",0,0,"11\t2\t",,terminal_output +1947,3865530,"TERMINAL",0,0,"354\t5\t6\t",,terminal_output +1948,3865530,"TERMINAL",0,0,"384954061",,terminal_output +1949,3866000,"TERMINAL",0,0,"7\t8\t",,terminal_output +1950,3866001,"TERMINAL",0,0,"72",,terminal_output +1951,3866071,"TERMINAL",0,0,"9\t",,terminal_output +1952,3866072,"TERMINAL",0,0,"8394",,terminal_output +1953,3866492,"TERMINAL",0,0,"20516",,terminal_output 
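The VectorQuantizer recorded above combines an L2-normalized codebook with the straight-through estimator: the forward pass returns the nearest code, while gradients flow to the encoder output as if quantization were the identity. A minimal standalone sketch of that trick, following the same normalize/argmin/stop_gradient structure (the 1024x32 codebook shape matches the num_patch_latents and latent_patch_dim defaults recorded later):

    import jax
    import jax.numpy as jnp

    def normalize(x):
        return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)

    def straight_through_quantize(x, codebook):
        # Nearest code under cosine similarity (both sides L2-normalized).
        x_n = normalize(x)
        indices = jnp.argmin(-jnp.matmul(x_n, normalize(codebook).T), axis=-1)
        z = codebook[indices]
        # Forward: the selected code; backward: identity w.r.t. x_n.
        z_q = x_n + jax.lax.stop_gradient(z - x_n)
        return z_q, indices

    codebook = jax.random.normal(jax.random.key(0), (1024, 32))
    x = jax.random.normal(jax.random.key(1), (4, 32))
    # Gradients pass straight through the (non-differentiable) argmin:
    grads = jax.grad(lambda v: straight_through_quantize(v, codebook)[0].sum())(x)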
[rows 1954-2018, "TERMINAL": one-second watch-display refresh ticks, elided]
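The _create_flash_attention_fn wrapper in utils/nn.py above exists because jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim) inputs, and the cuDNN flash-attention path additionally requires a sequence length that is a multiple of 4. A stripped-down sketch of the same pad-mask-slice dance (dimensions illustrative; implementation="cudnn" only works on supported GPUs, so the sketch defaults to the XLA path):

    import jax
    import jax.numpy as jnp

    def padded_attention(q, k, v, implementation=None):
        # q, k, v: (batch, seq_len, num_heads, head_dim)
        seq_len = q.shape[1]
        pad = (-seq_len) % 4  # distance to the next multiple of 4
        pad_fn = lambda x: jnp.pad(x, ((0, 0), (0, pad), (0, 0), (0, 0)))
        # Mask out the padded query/key positions.
        mask = jnp.ones((seq_len + pad, seq_len + pad), dtype=jnp.bool_)
        mask = mask.at[seq_len:, :].set(False).at[:, seq_len:].set(False)
        out = jax.nn.dot_product_attention(
            pad_fn(q), pad_fn(k), pad_fn(v),
            mask=mask[None, None],  # broadcast to (batch, heads, S, S)
            implementation=implementation,
        )
        return out[:, :seq_len]  # slice the padding back off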
[rows 2019-2154, "TERMINAL": one-second watch-display refresh ticks, elided]
[rows 2155-2167, "TERMINAL": watch-display refresh ticks, elided] +2168,3996566,"TERMINAL",0,0,"salloc: Job 3380176 has exceeded its time limit and its allocation has been revoked.\nslurmstepd: error: *** STEP 3380176.interactive ON hkn0706 CANCELLED AT 2025-07-28T18:51:49 DUE TO TIME LIMIT ***\n[?1049l\r[?1l>]0;tum_cte0515@hkn0706:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0706 jafar]$ srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\r\n",,terminal_output +2170,3997860,"utils/nn.py",5456,0,"",python,selection_mouse [rows 2169 and 2171-2198, "TERMINAL": watch-display refresh ticks, elided] +2199,4026250,"TERMINAL",0,0,"srun: error: hkn0706: task 0: Killed\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;137",,terminal_output [rows 2200-2281, "TERMINAL": watch-display refresh ticks, elided]
[rows 2282-2553, "TERMINAL": one-second watch-display refresh ticks, elided]
[rows 2554-2609, "TERMINAL": one-second watch-display refresh ticks, elided] diff --git a/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-1e710288-b2c9-4a56-b520-437d0e33067b1758276663990-2025_09_19-12.11.58.76/source.csv b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-1e710288-b2c9-4a56-b520-437d0e33067b1758276663990-2025_09_19-12.11.58.76/source.csv new file mode 100644 index 0000000000000000000000000000000000000000..b2e4f44eee617bfe3210e59ddc34f6a11c76426b --- /dev/null +++ b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-1e710288-b2c9-4a56-b520-437d0e33067b1758276663990-2025_09_19-12.11.58.76/source.csv @@ -0,0
+1,83 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +1,5,"train_dynamics.py",0,0,"import os\n\n\nos.environ.setdefault(""XLA_PYTHON_CLIENT_MEM_FRACTION"", ""0.98"")\n\nfrom dataclasses import dataclass, field\nimport itertools\nfrom typing import cast, Optional\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.train_utils import (\n get_lr_schedule,\n count_parameters_by_component,\n print_mem_stats,\n print_compiled_memory_stats,\n print_compiled_cost_analysis,\n)\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_type: str = ""maskgit"" # supported options: maskgit, causal\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n use_gt_actions: bool = False\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n val_data_dir: str = """"\n val_interval: int = 20_000\n val_steps: int = 50\n eval_full_frame: bool = False\n val_maskgit_steps: int = 25\n val_temperature: float = 1\n val_sample_argmax: bool = False\n wandb_id: str = """"\n\n\ndef build_model(args: Args, rng: jax.Array) -> tuple[Genie, jax.Array]:\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n 
lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n use_gt_actions=args.use_gt_actions,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n decode=False,\n rngs=rngs,\n )\n assert not (\n args.lam_checkpoint and args.use_gt_actions\n ), ""Can not use LAM when using GT actions. Please choose either.""\n if not args.use_gt_actions:\n del genie.lam.decoder\n return genie, rng\n\n\ndef build_optimizer(genie: Genie, args: Args) -> tuple[nnx.Optimizer, optax.Schedule]:\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.param_dtype, # moments in full precision\n )\n optimizer = nnx.Optimizer(genie, tx)\n return optimizer, lr_schedule\n\n\ndef build_mesh_and_sharding(\n num_devices: int,\n) -> tuple[Mesh, NamedSharding, NamedSharding, NamedSharding]:\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n actions_sharding = NamedSharding(mesh, PartitionSpec(""data"", None))\n return mesh, replicated_sharding, videos_sharding, actions_sharding\n\n\ndef shard_optimizer_states(\n optimizer: nnx.Optimizer, replicated_sharding: NamedSharding\n) -> None:\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n\ndef build_dataloader(args: Args, data_dir: str) -> grain.DataLoaderIterator:\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(data_dir, x)\n for x in os.listdir(data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n return grain_iterator\n\n\ndef build_checkpoint_manager(args: Args) -> ocp.CheckpointManager:\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""train_dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n 
""train_dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n if args.val_data_dir:\n handler_registry.add(\n ""val_dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""val_dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n return checkpoint_manager\n\n\ndef restore_or_initialize_components(\n args: Args,\n checkpoint_manager: ocp.CheckpointManager,\n optimizer: nnx.Optimizer,\n train_iterator: grain.DataLoaderIterator,\n rng: jax.Array,\n replicated_sharding: NamedSharding,\n val_iterator: Optional[grain.DataLoaderIterator],\n restore_step: Optional[int] = None,\n) -> tuple[\n int, nnx.Optimizer, grain.DataLoaderIterator, grain.DataLoaderIterator, jax.Array\n]:\n step = 0\n if restore_step is None:\n restore_step = checkpoint_manager.latest_step()\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n if val_iterator:\n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n val_dataloader_state=grain.checkpoint.CheckpointRestore(val_iterator), # type: ignore\n )\n else:\n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(), args=restore_args\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n train_iterator = restored[""train_dataloader_state""]\n if val_iterator:\n val_iterator = restored[""val_dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n rng, _rng = jax.random.split(rng)\n optimizer = restore_genie_components(optimizer, replicated_sharding, _rng, args)\n # NOTE: We have to remove the (unused) tokenizer vq dropout due flax.nnx lazily initializing modules.\n # Specifically, the first dynamics model checkpoint will contain the vq dropout module,\n # but the first full restore will fail due to nnx not initializing the module when\n # dropout is set to 0.0.\n del optimizer.model.tokenizer.vq.drop\n return step, optimizer, train_iterator, val_iterator, rng\n\n\ndef _calculate_step_metrics(\n outputs: dict[str, jax.Array],\n gt: jax.Array,\n num_latent_actions: int,\n num_patch_latents: int,\n) -> tuple[jax.Array, dict]:\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / 
mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt_val = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt_val, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt_val, recon)).mean()\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]),\n size=num_patch_latents,\n fill_value=0,\n )\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n if ""lam_indices"" in outputs.keys():\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]),\n size=num_latent_actions,\n fill_value=0,\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n metrics[""codebook_usage_lam""] = codebook_usage_lam\n return ce_loss, metrics\n\n\ndef main(args: Args) -> None:\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n genie, rng = build_model(args, rng)\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n optimizer, lr_schedule = build_optimizer(genie, args)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n _, replicated_sharding, videos_sharding, actions_sharding = build_mesh_and_sharding(\n num_devices\n )\n\n shard_optimizer_states(optimizer, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n checkpoint_manager = build_checkpoint_manager(args)\n\n # --- Create DataLoaderIterator from dataloader ---\n train_iterator = build_dataloader(args, args.data_dir)\n val_iterator = None\n if args.val_data_dir:\n val_iterator = build_dataloader(args, args.val_data_dir)\n\n # --- Restore checkpoint ---\n step, optimizer, train_iterator, val_iterator, rng = (\n restore_or_initialize_components(\n args,\n checkpoint_manager,\n optimizer,\n train_iterator,\n rng,\n replicated_sharding,\n val_iterator,\n )\n )\n\n # --- Define loss and train step (close over args) ---\n def dynamics_loss_fn(\n model: Genie,\n inputs: dict,\n training: bool = False,\n ) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n outputs = 
model(inputs, training=training)\n ce_loss, metrics = _calculate_step_metrics(\n outputs, gt, args.num_latent_actions, args.num_patch_latents\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n @nnx.jit(donate_argnums=0)\n def train_step(\n optimizer: nnx.Optimizer, inputs: dict\n ) -> tuple[jax.Array, jax.Array, dict]:\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n model.train()\n return dynamics_loss_fn(model, inputs, training=True)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(\n optimizer.model\n )\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n @nnx.jit\n def val_step(genie: Genie, inputs: dict) -> dict:\n """"""Evaluate model and compute metrics""""""\n genie.eval()\n (loss, (recon, metrics)) = dynamics_loss_fn(genie, inputs, training=False)\n val_output = {""loss"": loss, ""recon"": recon, ""metrics"": metrics}\n\n # --- Evaluate full frame prediction (sampling) ---\n if args.eval_full_frame:\n lam_indices = genie.vq_encode(inputs, training=False)\n tokenizer_outputs = genie.tokenizer.vq_encode(\n inputs[""videos""], training=False\n )\n tokens_full_frame = tokenizer_outputs[""indices""]\n inputs[""latent_actions""] = lam_indices\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt[:, :-1].astype(\n args.dtype\n ) # remove last frame for generation\n recon_full_frame, logits_full_frame = genie.sample(\n inputs,\n args.seq_len,\n args.val_temperature,\n args.val_sample_argmax,\n args.val_maskgit_steps,\n )\n step_outputs = {\n ""recon"": recon_full_frame,\n ""token_logits"": logits_full_frame,\n ""video_tokens"": tokens_full_frame,\n ""mask"": jnp.zeros_like(tokens_full_frame).at[:, -1].set(True),\n ""lam_indices"": lam_indices,\n }\n loss_full_frame, metrics_full_frame = _calculate_step_metrics(\n step_outputs, gt, args.num_latent_actions, args.num_patch_latents\n )\n val_output.update(\n {\n ""loss_full_frame"": loss_full_frame,\n ""recon_full_frame"": recon_full_frame,\n ""metrics_full_frame"": metrics_full_frame,\n }\n )\n return val_output\n\n def calculate_validation_metrics(val_dataloader, genie, rng):\n step = 0\n loss_per_step = []\n metrics_per_step = []\n loss_full_frame_per_step = []\n metrics_full_frame_per_step = []\n batch = None\n recon = None\n recon_full_frame = None\n for batch in val_dataloader:\n rng, _rng_mask = jax.random.split(rng, 2)\n batch[""rng""] = _rng_mask\n val_outputs = val_step(genie, batch)\n loss_per_step.append(val_outputs[""loss""])\n metrics_per_step.append(val_outputs[""metrics""])\n recon = val_outputs[""recon""]\n if args.eval_full_frame:\n loss_full_frame_per_step.append(val_outputs[""loss_full_frame""])\n metrics_full_frame_per_step.append(val_outputs[""metrics_full_frame""])\n recon_full_frame = val_outputs[""recon_full_frame""]\n step += 1\n if step > args.val_steps:\n break\n\n if step < args.val_steps:\n print(\n f""Warning: Your validation dataset is too small to make val_steps many steps. 
Made {step} steps, expected {args.val_steps}""\n )\n\n val_metrics = {\n f""val_{key}"": np.mean([float(m[key]) for m in metrics_per_step])\n for key in metrics_per_step[0].keys()\n }\n val_metrics[""val_loss""] = np.mean(loss_per_step)\n if args.eval_full_frame:\n val_metrics_full_frame = {\n f""val_full_frame_{key}"": np.mean(\n [float(m[key]) for m in metrics_full_frame_per_step]\n )\n for key in metrics_full_frame_per_step[0].keys()\n }\n val_metrics.update(val_metrics_full_frame)\n val_metrics[""val_loss_full_frame""] = np.mean(loss_full_frame_per_step)\n return val_metrics, batch, recon, recon_full_frame\n\n # --- TRAIN LOOP ---\n dataloader_train = (\n {\n ""videos"": jax.make_array_from_process_local_data(\n videos_sharding, local_data=elem[""videos""]\n ),\n ""actions"": (\n jax.make_array_from_process_local_data(\n actions_sharding, elem[""actions""]\n )\n if args.use_gt_actions\n else None\n ),\n }\n for elem in train_iterator\n )\n dataloader_val = None\n if val_iterator:\n dataloader_val = (\n {\n ""videos"": jax.make_array_from_process_local_data(\n videos_sharding, elem[""videos""]\n ),\n ""actions"": (\n jax.make_array_from_process_local_data(\n actions_sharding, elem[""actions""]\n )\n if args.use_gt_actions\n else None\n ),\n }\n for elem in val_iterator\n )\n if jax.process_index() == 0:\n first_batch = next(dataloader_train)\n first_batch[""rng""] = rng # type: ignore\n compiled = train_step.lower(optimizer, first_batch).compile()\n print_compiled_memory_stats(compiled.memory_analysis())\n print_compiled_cost_analysis(compiled.cost_analysis())\n # Do not skip the first batch during training\n dataloader_train = itertools.chain([first_batch], dataloader_train)\n print(f""Starting training from step {step}..."")\n first_step = step\n while step < args.num_steps:\n for batch in dataloader_train:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n batch[""rng""] = _rng_mask\n loss, recon, metrics = train_step(optimizer, batch)\n if step == first_step:\n print_mem_stats(""After params initialized"")\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Validation loss ---\n val_results = {}\n if dataloader_val and step % args.val_interval == 0:\n rng, _rng_mask_val = jax.random.split(rng, 2)\n print(""Calculating validation metrics..."")\n val_metrics, val_gt_batch, val_recon, val_recon_full_frame = (\n calculate_validation_metrics(\n dataloader_val, optimizer.model, _rng_mask_val\n )\n )\n print(f""Step {step}, validation loss: {val_metrics['val_loss']}"")\n val_results = {\n ""metrics"": val_metrics,\n ""gt_batch"": val_gt_batch,\n ""recon"": val_recon,\n ""full_frame"": val_recon_full_frame,\n }\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n log_dict = {""loss"": loss, ""step"": step, **metrics}\n if val_results:\n log_dict.update(val_results[""metrics""])\n wandb.log(log_dict)\n if step % args.log_image_interval == 0:\n gt_seq = batch[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if val_results:\n val_results[""gt_seq_val""] = (\n val_results[""gt_batch""][""videos""][0].astype(jnp.float32)\n / 255.0\n )\n val_results[""recon_seq_val""] = val_results[""recon""][0].clip(0, 1)\n val_comparison_seq = jnp.concatenate(\n (val_results[""gt_seq_val""], 
val_results[""recon_seq_val""]),\n axis=1,\n )\n val_results[""val_comparison_seq""] = einops.rearrange(\n val_comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if args.eval_full_frame:\n val_results[""full_frame_seq_val""] = val_results[\n ""full_frame""\n ][0].clip(0, 1)\n val_results[""val_full_frame_comparison_seq""] = (\n jnp.concatenate(\n (\n val_results[""gt_seq_val""],\n val_results[""full_frame_seq_val""],\n ),\n axis=1,\n )\n )\n val_results[""val_full_frame_comparison_seq""] = (\n einops.rearrange(\n val_results[""val_full_frame_comparison_seq""] * 255,\n ""t h w c -> h (t w) c"",\n )\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n if val_results:\n log_images.update(\n dict(\n val_image=wandb.Image(\n np.asarray(\n val_results[""gt_seq_val""][args.seq_len - 1]\n )\n ),\n val_recon=wandb.Image(\n np.asarray(\n val_results[""recon_seq_val""][\n args.seq_len - 1\n ]\n )\n ),\n val_true_vs_recon=wandb.Image(\n np.asarray(\n val_results[""val_comparison_seq""].astype(\n np.uint8\n )\n )\n ),\n )\n )\n if args.eval_full_frame:\n log_images.update(\n dict(\n val_full_frame=wandb.Image(\n np.asarray(\n val_results[""full_frame_seq_val""][\n args.seq_len - 1\n ]\n )\n ),\n val_true_vs_full_frame=wandb.Image(\n np.asarray(\n val_results[\n ""val_full_frame_comparison_seq""\n ].astype(np.uint8)\n )\n ),\n )\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n if args.val_data_dir:\n ckpt_manager_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n ),\n val_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n val_iterator # type: ignore\n ),\n )\n else:\n ckpt_manager_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n ),\n )\n checkpoint_manager.save(step, args=ckpt_manager_args)\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n\n\nif __name__ == ""__main__"":\n args = tyro.cli(Args)\n main(args)\n",python,tab +2,662,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"12:11:58 PM [info] Activating crowd-code\n12:11:58 PM [info] Recording started\n12:11:58 PM [info] Initializing git provider using file system watchers...\n",Log,tab +3,903,"extension-output-pdoom-org.crowd-code-#1-crowd-code",153,0,"12:11:58 PM [info] Git repository found\n12:11:58 PM [info] Git provider initialized successfully\n12:11:58 PM [info] Initial git state: [object Object]\n",Log,content +4,2735,"train_dynamics.py",0,0,"",python,tab +5,7840,"TERMINAL",0,0,"git status",,terminal_command +6,7897,"TERMINAL",0,0,"]633;C",,terminal_output +7,7971,"TERMINAL",0,0,"On branch gt-actions\r\nYour branch is up to date with 'origin/gt-actions'.\r\n\r\nLast commands done (2 commands done):\r\n pick ba37453 feat: generate coinrun dataset with val 
split\r\n pick faadd10 feat: implemented validation loss for all three models\r\nNext commands to do (26 remaining commands):\r\n pick 9a17dbb fix: pass val data path to dataloader\r\n pick 6e69cdb fix typo in image logging\r\n (use ""git rebase --edit-todo"" to view and edit)\r\nYou are currently editing a commit while rebasing branch 'gt-actions' on 'c7522f2'.\r\n (use ""git commit --amend"" to amend the current commit)\r\n (use ""git rebase --continue"" once you are satisfied with your changes)\r\n\r\nChanges not staged for commit:\r\n (use ""git add ..."" to update what will be committed)\r\n (use ""git restore ..."" to discard changes in working directory)\r\n\tmodified: train_dynamics.py\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tdiff.diff\r\n\tinput_pipeline/generate_breakout_dataset.py\r\n\tinput_pipeline/generate_breakout_dataset_agent.py\r\n\tkiller.sh\r\n\tkiller_partition.sh\r\n\tlog.log\r\n\toverfit_dir.zip\r\n\trequirements-franz.txt\r\n\tsamples/\r\n\tscripts_cremers/\r\n\tslurm/\r\n\ttest.py\r\n\tutils/visualizer.py\r\n\r\nno changes added to commit (use ""git add"" and/or ""git commit -a"")\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output
+8,12334,"TERMINAL",0,0,"git branch",,terminal_command
+9,12408,"TERMINAL",0,0,"]633;C[?1h=\r action-mapper\r\n add-wandb-name-and-tags\r\n before-nnx\r\n causal-mem-reduce\r\n causal-spatiotemporal-kv-cache\r\n causal-st-transformer\r\n causal-transformer-dynamics-model\r\n causal-transformer-nnx-no-kv-cache\r\n coinrun-gt-actions\r\n convert-to-jax-array-in-iter\r\n correct-batched-sampling\r\n:",,terminal_output
+10,15265,"TERMINAL",0,0,"...skipping...\r\n action-mapper\r\n add-wandb-name-and-tags\r\n before-nnx\r\n causal-mem-reduce\r\n causal-spatiotemporal-kv-cache\r\n causal-st-transformer\r\n causal-transformer-dynamics-model\r\n causal-transformer-nnx-no-kv-cache\r\n coinrun-gt-actions\r\n convert-to-jax-array-in-iter\r\n correct-batched-sampling\r\n dev\r\n dont-let-tf-see-gpu\r\n feat/darkness-filter\r\n feat/explicit-image-dims\r\n fix-action-padding-lam-future-information-access\r\n fix-sampling\r\n fix-transformer-forwardpass\r\n fix/spatiotemporal-pe-once-in-STTransformer\r\n grad-norm-log-and-clip\r\n grain-dataloader\r\n* gt-actions\r\n input_pipeline/add-npy2array_record\r\n logging-variants\r\n lr-schedules\r\n:",,terminal_output
+11,16304,"TERMINAL",0,0,"\r main\r\n:",,terminal_output
+12,16521,"TERMINAL",0,0,"\r maskgit-different-maskprob-per-sample\r\n:",,terminal_output
+13,16900,"TERMINAL",0,0,"\r maskgit-sampling-iterative-unmasking-fix\r\n:",,terminal_output
+14,17211,"TERMINAL",0,0,"\r[?1l>]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output
+15,33539,"TERMINAL",0,0,"git checkout -b ""generate-minatar-breakout-dataset""",,terminal_command
+16,33582,"TERMINAL",0,0,"]633;C",,terminal_output
+17,33662,"TERMINAL",0,0,"Switched to a new branch 'generate-minatar-breakout-dataset'\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output
+18,35411,"train_dynamics.py",0,0,"Switched from branch 'gt-actions' to 'generate-minatar-breakout-dataset'",python,git_branch_checkout
+19,36353,"TERMINAL",0,0,"git status",,terminal_command
+20,36414,"TERMINAL",0,0,"]633;COn branch generate-minatar-breakout-dataset\r\nLast commands done (2 commands done):\r\n pick ba37453 feat: generate coinrun dataset with val split\r\n pick faadd10 feat: implemented validation loss for all three models\r\nNext commands to do (26 remaining commands):\r\n pick 9a17dbb fix: pass val data path to dataloader\r\n pick 6e69cdb fix typo in image logging\r\n (use ""git rebase --edit-todo"" to view and edit)\r\nYou are currently editing a commit while rebasing branch 'gt-actions' on 'c7522f2'.\r\n (use ""git commit --amend"" to amend the current commit)\r\n (use ""git rebase --continue"" once you are satisfied with your changes)\r\n\r\nChanges not staged for commit:\r\n (use ""git add ..."" to update what will be committed)\r\n (use ""git restore ..."" to discard changes in working directory)\r\n\tmodified: train_dynamics.py\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tdiff.diff\r\n\tinput_pipeline/generate_breakout_dataset_agent.py\r\n\tkiller.sh\r\n\tkiller_partition.sh\r\n\tlog.log\r\n\toverfit_dir.zip\r\n\trequirements-franz.txt\r\n\tsamples/\r\n\tscripts_cremers/\r\n\tslurm/\r\n\ttest.py\r\n\tutils/visualizer.py\r\n\r\nno changes added to commit (use ""git add"" and/or ""git commit -a"")\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output
+21,43270,"TERMINAL",0,0,"git add input_pipeline/generate_breakout_dataset.py",,terminal_command
+22,43322,"TERMINAL",0,0,"]633;C",,terminal_output
+23,43354,"TERMINAL",0,0,"]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output
+24,55113,"TERMINAL",0,0,"git diff",,terminal_command
+25,55160,"TERMINAL",0,0,"]633;C[?1h=\rdiff --git a/train_dynamics.py b/train_dynamics.py\r\nindex 7fd6f54..4ff43a3 100644\r\n--- a/train_dynamics.py\r\n+++ b/train_dynamics.py\r\n@@ -531,9 +531,11 @@ def main(args: Args) -> None:\r\n val_outputs = val_step(genie, batch)\r\n loss_per_step.append(val_outputs[""loss""])\r\n metrics_per_step.append(val_outputs[""metrics""])\r\n+ recon = val_outputs[""recon""]\r\n if args.eval_full_frame:\r\n
loss_full_frame_per_step.append(val_outputs[""loss_full_frame""])\r\n metrics_full_frame_per_step.append(val_outputs[""metrics_full_frame""])\r\n+ recon_full_frame = val_outputs[""recon_full_frame""]\r\n step += 1\r\n if step > args.val_steps:\r\n break\r\n@@ -651,7 +653,7 @@ def main(args: Args) -> None:\r\n val_results[""gt_batch""][""videos""][0].astype(jnp.float32)\r\n / 255.0\r\n )\r\n- val_results[""recon_seq_val""] = val_results[""recon""].clip(0, 1)\r\n+ val_results[""recon_seq_val""] = val_results[""recon""][0].clip(0, 1)\r\n val_comparison_seq = jnp.concatenate(\r\n (val_results[""gt_seq_val""], val_results[""recon_seq_val""]),\r\n axis=1,\r\n:",,terminal_output +26,57923,"TERMINAL",0,0,"\r\r(END)",,terminal_output +27,58537,"TERMINAL",0,0,"\r\r(END)\r\r(END)\r\r(END)\r\r(END)\r\r(END)",,terminal_output +28,58857,"TERMINAL",0,0,"\r\r(END)",,terminal_output +29,62859,"TERMINAL",0,0,"\r[?1l>]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output +30,65707,"TERMINAL",0,0,"git status",,terminal_command +31,65735,"TERMINAL",0,0,"]633;COn branch generate-minatar-breakout-dataset\r\nLast commands done (2 commands done):\r\n pick ba37453 feat: generate coinrun dataset with val split\r\n pick faadd10 feat: implemented validation loss for all three models\r\nNext commands to do (26 remaining commands):\r\n pick 9a17dbb fix: pass val data path to dataloader\r\n pick 6e69cdb fix typo in image logging\r\n (use ""git rebase --edit-todo"" to view and edit)\r\nYou are currently editing a commit while rebasing branch 'gt-actions' on 'c7522f2'.\r\n (use ""git commit --amend"" to amend the current commit)\r\n (use ""git rebase --continue"" once you are satisfied with your changes)\r\n\r\nChanges to be committed:\r\n (use ""git restore --staged ..."" to unstage)\r\n\tnew file: input_pipeline/generate_breakout_dataset.py\r\n\r\nChanges not staged for commit:\r\n (use ""git add ..."" to update what will be committed)\r\n (use ""git restore ..."" to discard changes in working directory)\r\n\tmodified: train_dynamics.py\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tdiff.diff\r\n\tinput_pipeline/generate_breakout_dataset_agent.py\r\n\tkiller.sh\r\n\tkiller_partition.sh\r\n\tlog.log\r\n\toverfit_dir.zip\r\n\trequirements-franz.txt\r\n\tsamples/\r\n\tscripts_cremers/\r\n\tslurm/\r\n\ttest.py\r\n\tutils/visualizer.py\r\n\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output +32,81320,"TERMINAL",0,0,"git commit -m ""added generate_breakout_dataset.py""",,terminal_command +33,81393,"TERMINAL",0,0,"]633;C",,terminal_output +34,83913,"TERMINAL",0,0,"[WARNING] Unstaged files detected.\r\n[INFO] Stashing unstaged files to /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cache/pre-commit/patch1758276801-311557.\r\n",,terminal_output +35,84197,"TERMINAL",0,0,"black....................................................................",,terminal_output +36,86080,"TERMINAL",0,0,"Failed\r\n- hook id: black\r\n- files were modified by this hook\r\n\r\nreformatted input_pipeline/generate_breakout_dataset.py\r\n\r\nAll done! 
✨ 🍰 ✨\r\n1 file reformatted.\r\n\r\n[INFO] Restored changes from /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cache/pre-commit/patch1758276801-311557.\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output +37,87215,"TERMINAL",0,0,"git commit -m ""added generate_breakout_dataset.py""",,terminal_command +38,87323,"TERMINAL",0,0,"]633;C",,terminal_output +39,87617,"TERMINAL",0,0,"[WARNING] Unstaged files detected.\r\n[INFO] Stashing unstaged files to /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cache/pre-commit/patch1758276805-311801.\r\n",,terminal_output +40,87822,"TERMINAL",0,0,"black....................................................................",,terminal_output +41,88326,"TERMINAL",0,0,"Failed\r\n- hook id: black\r\n- files were modified by this hook\r\n\r\nreformatted input_pipeline/generate_breakout_dataset.py\r\n\r\nAll done! ✨ 🍰 ✨\r\n1 file reformatted.\r\n\r\n[WARNING] Stashed changes conflicted with hook auto-fixes... Rolling back fixes...\r\n[INFO] Restored changes from /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cache/pre-commit/patch1758276805-311801.\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output +42,105449,"TERMINAL",0,0,"git status",,terminal_command +43,105462,"TERMINAL",0,0,"]633;COn branch generate-minatar-breakout-dataset\r\nLast commands done (2 commands done):\r\n pick ba37453 feat: generate coinrun dataset with val split\r\n pick faadd10 feat: implemented validation loss for all three models\r\nNext commands to do (26 remaining commands):\r\n pick 9a17dbb fix: pass val data path to dataloader\r\n pick 6e69cdb fix typo in image logging\r\n (use ""git rebase --edit-todo"" to view and edit)\r\nYou are currently editing a commit while rebasing branch 'gt-actions' on 'c7522f2'.\r\n (use ""git commit --amend"" to amend the current commit)\r\n (use ""git rebase --continue"" once you are satisfied with your changes)\r\n\r\nChanges to be committed:\r\n (use ""git restore --staged ..."" to unstage)\r\n\tnew file: input_pipeline/generate_breakout_dataset.py\r\n\r\nChanges not staged for commit:\r\n (use ""git add ..."" to update what will be committed)\r\n (use ""git restore ..."" to discard changes in working directory)\r\n\tmodified: input_pipeline/generate_breakout_dataset.py\r\n\tmodified: train_dynamics.py\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tdiff.diff\r\n\tinput_pipeline/generate_breakout_dataset_agent.py\r\n\tkiller.sh\r\n\tkiller_partition.sh\r\n\tlog.log\r\n\toverfit_dir.zip\r\n\trequirements-franz.txt\r\n\tsamples/\r\n\tscripts_cremers/\r\n\tslurm/\r\n\ttest.py\r\n\tutils/visualizer.py\r\n\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output +44,117545,"TERMINAL",0,0,"git add input_pipeline/generate_breakout_dataset.py",,terminal_command +45,117571,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output +46,119352,"TERMINAL",0,0,"git commit -m ""added generate_breakout_dataset.py""",,terminal_command +47,119396,"TERMINAL",0,0,"]633;C",,terminal_output +48,119827,"TERMINAL",0,0,"[WARNING] Unstaged files detected.\r\n[INFO] Stashing unstaged files to /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cache/pre-commit/patch1758276837-313382.\r\n",,terminal_output +49,119987,"TERMINAL",0,0,"black....................................................................",,terminal_output +50,120187,"TERMINAL",0,0,"Passed\r\n[INFO] Restored changes from 
/hkfs/home/project/hk-project-p0023960/tum_cte0515/.cache/pre-commit/patch1758276837-313382.\r\n",,terminal_output
+51,120284,"TERMINAL",0,0,"[generate-minatar-breakout-dataset 1699bc7] added generate_breakout_dataset.py\r\n 1 file changed, 176 insertions(+)\r\n create mode 100644 input_pipeline/generate_breakout_dataset.py\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output
+52,122806,"TERMINAL",0,0,"git push",,terminal_command
+53,126787,"TERMINAL",0,0,"git push --set-upstream origin generate-minatar-breakout-dataset",,terminal_command
+54,126861,"TERMINAL",0,0,"]633;C",,terminal_output
+55,128495,"TERMINAL",0,0,"Enumerating objects: 78, done.\r\nCounting objects: 100% (71/71), done.\r\nDelta compression using up to 152 threads\r\nCompressing objects: 100% (50/50), done.\r\nWriting objects: 100% (50/50), 12.64 KiB | 1.40 MiB/s, done.\r\nTotal 50 (delta 35), reused 0 (delta 0), pack-reused 0\r\n",,terminal_output
+56,128583,"TERMINAL",0,0,"remote: Resolving deltas: 100% (35/35), completed with 13 local objects.\r\n",,terminal_output
+57,128782,"TERMINAL",0,0,"remote: \r\nremote: Create a pull request for 'generate-minatar-breakout-dataset' on GitHub by visiting:\r\nremote: https://github.com/p-doom/jasmine/pull/new/generate-minatar-breakout-dataset\r\nremote: \r\nTo github.com:p-doom/jasmine.git\r\n * [new branch] generate-minatar-breakout-dataset -> generate-minatar-breakout-dataset\r\nbranch 'generate-minatar-breakout-dataset' set up to track 'origin/generate-minatar-breakout-dataset'.\r\n",,terminal_output
+58,128846,"TERMINAL",0,0,"]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output
+59,133163,"TERMINAL",0,0,"git checkout gt-actions",,terminal_command
+60,133200,"TERMINAL",0,0,"]633;CM\ttrain_dynamics.py\r\nSwitched to branch 'gt-actions'\r\nYour branch is up to date with 'origin/gt-actions'.\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output
+61,135441,"train_dynamics.py",0,0,"Switched from branch 'generate-minatar-breakout-dataset' to 'gt-actions'",python,git_branch_checkout
+62,136449,"TERMINAL",0,0,"git pull",,terminal_command
+63,136485,"TERMINAL",0,0,"]633;C",,terminal_output
+64,138341,"TERMINAL",0,0,"remote: Enumerating objects: 3, done.\r\nremote: Counting objects: 100% (3/3), done.\r\nremote: Compressing objects: 100% (3/3), done.\r\nremote: Total 3 (delta 0), reused 0 (delta 0), pack-reused 0 (from 0)\r\n",,terminal_output
+65,138397,"TERMINAL",0,0,"Unpacking objects: 100% (3/3), 1.56 KiB | 33.00 KiB/s, done.\r\n",,terminal_output
+66,138585,"TERMINAL",0,0,"From github.com:p-doom/jasmine\r\n 96d560e..1b6b878 gt-actions -> origin/gt-actions\r\n",,terminal_output
+67,138669,"TERMINAL",0,0,"Updating 96d560e..1b6b878\r\nFast-forward\r\n",,terminal_output
+68,138702,"TERMINAL",0,0," input_pipeline/generate_coinrun_dataset.py | 2 +-\r\n 1 file changed, 1 insertion(+), 1 deletion(-)\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output
+69,149767,"TERMINAL",0,0,"git status",,terminal_command
+70,163544,"TERMINAL",0,0,"git commit -am ""bugfixes in train dynamics""",,terminal_command
+71,163609,"TERMINAL",0,0,"]633;C",,terminal_output
+72,164153,"TERMINAL",0,0,"black....................................................................",,terminal_output
+73,165382,"TERMINAL",0,0,"Failed\r\n- hook id: black\r\n- files were modified by this hook\r\n\r\nreformatted train_dynamics.py\r\n\r\nAll done!
✨ 🍰 ✨\r\n1 file reformatted.\r\n\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output
+74,168508,"TERMINAL",0,0,"git commit -am ""bugfixes in train dynamics""",,terminal_command
+75,168578,"TERMINAL",0,0,"]633;C",,terminal_output
+76,169136,"TERMINAL",0,0,"black....................................................................",,terminal_output
+77,169401,"TERMINAL",0,0,"Passed\r\n[gt-actions 7c97398] bugfixes in train dynamics\r\n 1 file changed, 5 insertions(+), 1 deletion(-)\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output
+78,170941,"TERMINAL",0,0,"git push",,terminal_command
+79,171014,"TERMINAL",0,0,"]633;C",,terminal_output
+80,172468,"TERMINAL",0,0,"Enumerating objects: 5, done.\r\nCounting objects: 100% (5/5), done.\r\nDelta compression using up to 152 threads\r\nCompressing objects: 100% (3/3), done.\r\nWriting objects: 100% (3/3), 402 bytes | 402.00 KiB/s, done.\r\nTotal 3 (delta 2), reused 0 (delta 0), pack-reused 0\r\nremote: Resolving deltas: 100% (2/2), completed with 2 local objects.\r\n",,terminal_output
+81,172742,"TERMINAL",0,0,"To github.com:p-doom/jasmine.git\r\n 1b6b878..7c97398 gt-actions -> gt-actions\r\n]0;tum_cte0515@hkn1993:~/Projects/jasmine",,terminal_output
+82,180646,"train_dynamics.py",24058,90," val_results[""recon_seq_val""] = val_results[""recon""][0].clip(\n 0, 1\n )\n",python,content
diff --git a/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-268e2d5f-0a66-4008-8495-15de70c8a2e51751028407664-2025_06_27-14.47.06.44/source.csv b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-268e2d5f-0a66-4008-8495-15de70c8a2e51751028407664-2025_06_27-14.47.06.44/source.csv
new file mode 100644
index 0000000000000000000000000000000000000000..3bb97e1af91e409a7a29823edc5bd24af126620c
--- /dev/null
+++ b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-268e2d5f-0a66-4008-8495-15de70c8a2e51751028407664-2025_06_27-14.47.06.44/source.csv
@@ -0,0 +1,5029 @@
+Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
+2,333,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"2:47:05 PM [info] Activating crowd-code\n2:47:06 PM [info] Recording started\n2:47:06 PM [info] Initializing git provider using file system watchers...\n2:47:06 PM [info] Git repository found\n2:47:06 PM [info] Git provider initialized successfully\n2:47:06 PM [info] Initial git state: [object Object]\n",Log,tab
+3,2643,"TERMINAL",0,0,"/bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt",,terminal_command
+4,2683,"TERMINAL",0,0,"]633;E;2025-06-27 14:47:08 /bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py
/hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt;6542423c-4d64-4635-916f-9b4ada1d894e]633;C",,terminal_output +5,2728,"TERMINAL",0,0,"]0;tum_cte0515@hkn1993:/hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash]633;D;0",,terminal_output +6,119973,"scripts_horeka/overfit_sample/train_dynamics_overfit_sample.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=05:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=logs/logs_training/%x_%j.log\n#SBATCH --error=logs/logs_training/%x_%j.log\n#SBATCH --mail-user=mihir.mahajan2002@gmail.com\n#SBATCH --job-name=train_dynamics_minecraft_overfit_sample_yolo_lr\n#SBATCH --mem=50G\n#SBATCH --mail-type=ALL\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=$ws_dir/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\necho Running dynamics model overfit run. Slurm id: $slurm_job_id\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3290391/tokenizer_1750845012_50000/\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3290392/lam_1750845133_180000/\n\npython train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=100 \\n --log \\n --name=dynamics-model-size-scaling-36M-$slurm_job_id \\n --tags dynamics overfit yolo-lr \\n --log_checkpoint_interval=500 \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --lam_checkpoint=$lam_ckpt_dir \\n --data_dir $tf_records_dir\n",shellscript,tab +7,133917,"scripts_horeka/overfit_sample/train_lam_overfit_sample.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=05:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=logs/logs_training/%x_%j.log\n#SBATCH --error=logs/logs_training/%x_%j.log\n#SBATCH --mail-user=mihir.mahajan2002@gmail.com\n#SBATCH --job-name=train_tokenizer_minecraft_overfit_sample\n#SBATCH --mem=50G\n#SBATCH --mail-type=ALL\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=$ws_dir/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,tab +8,136648,"scripts_horeka/overfit_sample/train_tokenizer_overfit_sample.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=05:00:00\n#SBATCH --partition=accelerated\n#SBATCH 
--cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=logs/logs_training/%x_%j.log\n#SBATCH --error=logs/logs_training/%x_%j.log\n#SBATCH --mail-user=mihir.mahajan2002@gmail.com\n#SBATCH --job-name=train_tokenizer_minecraft_overfit_sample\n#SBATCH --mem=50G\n#SBATCH --mail-type=ALL\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=$ws_dir/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,tab +9,155604,"TERMINAL",0,0,"bash",,terminal_focus +10,156572,"TERMINAL",0,0,"bash",,terminal_focus +11,165910,"TERMINAL",0,0,"cursor ../jafar_jobs/logs/",,terminal_command +12,165992,"TERMINAL",0,0,"]633;E;2025-06-27 14:49:51 cursor ../jafar_jobs/logs/;8c62fbe5-e97b-4606-8b90-e30af988a4fa]633;C",,terminal_output +13,219874,"scripts_horeka/modelsize_scaling/dynamics/A_train_dyn_1.5M.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=10:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=logs/logs_training/%x_%j.log\n#SBATCH --error=logs/logs_training/%x_%j.log\n#SBATCH --mail-user=mihir.mahajan2002@gmail.com\n#SBATCH --job-name=train_dynamics_minecraft_overfit_sample_1.5M\n#SBATCH --mem=50G\n#SBATCH --mail-type=ALL\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\necho Running dynamics model overfit run (tiny model A, ~1.5M params). 
Slurm id: $slurm_job_id\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3290391/tokenizer_1750845012_50000/\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3290392/lam_1750845133_180000/\n\nsrun python train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=100 \\n --log \\n --name=dynamics-model-size-scaling-1.5M-$slurm_job_id \\n --tags dynamics model-size-scaling 1.5M tiny A \\n --log_checkpoint_interval=500 \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --lam_checkpoint=$lam_ckpt_dir \\n --data_dir $tf_records_dir \\n --dyna_dim=128 \\n --dyna_num_blocks=2 \\n --dyna_num_heads=4",shellscript,tab +14,232866,"scripts_horeka/modelsize_scaling/dynamics/A_train_dyn_1.5M.sbatch",902,0,"",shellscript,selection_mouse +15,233926,"scripts_horeka/modelsize_scaling/dynamics/A_train_dyn_1.5M.sbatch",901,0,"",shellscript,selection_command +16,234105,"scripts_horeka/modelsize_scaling/dynamics/A_train_dyn_1.5M.sbatch",901,1,"",shellscript,content +17,235994,"scripts_horeka/modelsize_scaling/dynamics/A_train_dyn_1.5M.sbatch",927,0,"",shellscript,selection_mouse +18,236146,"scripts_horeka/modelsize_scaling/dynamics/A_train_dyn_1.5M.sbatch",927,1,"",shellscript,content +19,239627,"scripts_horeka/modelsize_scaling/dynamics/B_train_dyn_3.5.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=10:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=logs/logs_training/%x_%j.log\n#SBATCH --error=logs/logs_training/%x_%j.log\n#SBATCH --mail-user=mihir.mahajan2002@gmail.com\n#SBATCH --job-name=train_dynamics_minecraft_overfit_sample_3.5M\n#SBATCH --mem=50G\n#SBATCH --mail-type=ALL\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\necho Running dynamics model overfit run. 
Slurm id: $slurm_job_id\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3290391/tokenizer_1750845012_50000/\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3290392/lam_1750845133_180000/\n\n# Option B: dyna_dim=256, dyna_num_blocks=2, dyna_num_heads=4 (~3.5M params)\nsrun python train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=100 \\n --log \\n --name=dynamics-model-size-scaling-3.5M-$slurm_job_id \\n --tags dynamics model-size-scaling 3.5M \\n --log_checkpoint_interval=500 \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --lam_checkpoint=$lam_ckpt_dir \\n --data_dir $tf_records_dir \\n --dyna_dim=256 \\n --dyna_num_blocks=2 \\n --dyna_num_heads=4",shellscript,tab +20,243598,"scripts_horeka/modelsize_scaling/dynamics/C_train_dyn_6M.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=10:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=logs/logs_training/%x_%j.log\n#SBATCH --error=logs/logs_training/%x_%j.log\n#SBATCH --mail-user=mihir.mahajan2002@gmail.com\n#SBATCH --job-name=train_dynamics_minecraft_overfit_sample_6M\n#SBATCH --mem=50G\n#SBATCH --mail-type=ALL\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\necho Running dynamics model overfit run (tiny model C, ~6M params). 
Slurm id: $slurm_job_id\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3290391/tokenizer_1750845012_50000/\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3290392/lam_1750845133_180000/\n\n# Option C: dyna_dim=256, dyna_num_blocks=4, dyna_num_heads=4 (~6M params)\nsrun python train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=100 \\n --log \\n --name=dynamics-model-size-scaling-6M-$slurm_job_id \\n --tags dynamics model-size-scaling 6M tiny C \\n --log_checkpoint_interval=500 \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --lam_checkpoint=$lam_ckpt_dir \\n --data_dir $tf_records_dir \\n --dyna_dim=256 \\n --dyna_num_blocks=4 \\n --dyna_num_heads=4",shellscript,tab +21,245751,"scripts_horeka/modelsize_scaling/dynamics/C_train_dyn_6M.sbatch",900,0,"",shellscript,selection_mouse +22,245967,"scripts_horeka/modelsize_scaling/dynamics/C_train_dyn_6M.sbatch",900,1,"",shellscript,content +23,247920,"scripts_horeka/modelsize_scaling/dynamics/C_train_dyn_6M.sbatch",899,1,"",shellscript,content +24,248637,"scripts_horeka/modelsize_scaling/dynamics/C_train_dyn_6M.sbatch",899,0,"t",shellscript,content +25,248638,"scripts_horeka/modelsize_scaling/dynamics/C_train_dyn_6M.sbatch",900,0,"",shellscript,selection_keyboard +26,249844,"scripts_horeka/modelsize_scaling/dynamics/C_train_dyn_6M.sbatch",924,0,"",shellscript,selection_mouse +27,250358,"scripts_horeka/modelsize_scaling/dynamics/C_train_dyn_6M.sbatch",923,1,"",shellscript,content +28,251556,"scripts_horeka/modelsize_scaling/dynamics/D_train_dyn_12M.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=10:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=logs/logs_training/%x_%j.log\n#SBATCH --error=logs/logs_training/%x_%j.log\n#SBATCH --mail-user=mihir.mahajan2002@gmail.com\n#SBATCH --job-name=train_dynamics_minecraft_overfit_sample_12M\n#SBATCH --mem=50G\n#SBATCH --mail-type=ALL\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\necho Running dynamics model overfit run (tiny model D, ~12M params). 
Slurm id: $slurm_job_id\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3290391/tokenizer_1750845012_50000/\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3290392/lam_1750845133_180000/\n\n# Option D: dyna_dim=384, dyna_num_blocks=4, dyna_num_heads=6 (~12M params)\nsrun python train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=100 \\n --log \\n --name=dynamics-model-size-scaling-12M-$slurm_job_id \\n --tags dynamics model-size-scaling 12M tiny D \\n --log_checkpoint_interval=500 \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --lam_checkpoint=$lam_ckpt_dir \\n --data_dir $tf_records_dir \\n --dyna_dim=384 \\n --dyna_num_blocks=4 \\n --dyna_num_heads=6",shellscript,tab +29,253271,"scripts_horeka/modelsize_scaling/dynamics/D_train_dyn_12M.sbatch",900,0,"",shellscript,selection_mouse +30,254086,"scripts_horeka/modelsize_scaling/dynamics/D_train_dyn_12M.sbatch",900,1,"",shellscript,content +31,255313,"scripts_horeka/modelsize_scaling/dynamics/D_train_dyn_12M.sbatch",925,0,"",shellscript,selection_mouse +32,255425,"scripts_horeka/modelsize_scaling/dynamics/D_train_dyn_12M.sbatch",925,1,"",shellscript,content +33,257016,"scripts_horeka/modelsize_scaling/dynamics/E_train_dyn_18M.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=10:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=logs/logs_training/%x_%j.log\n#SBATCH --error=logs/logs_training/%x_%j.log\n#SBATCH --mail-user=mihir.mahajan2002@gmail.com\n#SBATCH --job-name=train_dynamics_minecraft_overfit_sample_18M\n#SBATCH --mem=50G\n#SBATCH --mail-type=ALL\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\necho Running dynamics model overfit run (tiny model E, ~18M params). 
Slurm id: $slurm_job_id\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3290391/tokenizer_1750845012_50000/\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3290392/lam_1750845133_180000/\n\n# Option E: dyna_dim=512, dyna_num_blocks=4, dyna_num_heads=8 (~18M params)\nsrun python train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=100 \\n --log \\n --name=dynamics-model-size-scaling-18M-$slurm_job_id \\n --tags dynamics model-size-scaling 18M tiny E \\n --log_checkpoint_interval=500 \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --lam_checkpoint=$lam_ckpt_dir \\n --data_dir $tf_records_dir \\n --dyna_dim=512 \\n --dyna_num_blocks=4 \\n --dyna_num_heads=8",shellscript,tab +34,259327,"scripts_horeka/modelsize_scaling/dynamics/E_train_dyn_18M.sbatch",900,0,"",shellscript,selection_mouse +35,259647,"scripts_horeka/modelsize_scaling/dynamics/E_train_dyn_18M.sbatch",900,1,"",shellscript,content +36,261334,"scripts_horeka/modelsize_scaling/dynamics/E_train_dyn_18M.sbatch",925,0,"",shellscript,selection_mouse +37,261478,"scripts_horeka/modelsize_scaling/dynamics/E_train_dyn_18M.sbatch",925,1,"",shellscript,content +38,264125,"TERMINAL",0,0,"bash",,terminal_focus +39,372695,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom genie import Genie, restore_genie_components\nfrom models.tokenizer import TokenizerVQVAE\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n # Optimization\n batch_size: int = 36\n min_lr: float = 3e-6\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": 
inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args\n )\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=jnp.float32\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, 
replicated_sharding)\n\n # --- Restore checkpoint ---\n train_state = restore_genie_components(\n train_state, replicated_sharding, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n # dataloader = get_dataloader(\n # # NOTE: We deliberately pass the global batch size\n # # The dataloader shards the dataset across all processes\n # tfrecord_files,\n # args.seq_len,\n # args.batch_size,\n # *image_shape,\n # )\n step = 0\n while step < args.num_steps:\n # for videos in dataloader:\n npy_path = ""overfit_dir/single_sample_corner.npy""\n # npy_path = ""overfit_dir/single_batch_3_elems.npy""\n videos = np.load(npy_path)\n print(""batch shape: "", videos.shape)\n while(True):\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n start_time = time.time()\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n ""step_time_ms"": elapsed_time,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""genie_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +40,373147,"scripts_horeka/overfit_sample/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab +41,380347,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=05:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=logs/logs_training/%x_%j.log\n#SBATCH --error=logs/logs_training/%x_%j.log\n#SBATCH --mail-user=mihir.mahajan2002@gmail.com\n#SBATCH --job-name=train_tokenizer_minecraft_overfit_sample\n#SBATCH --mem=50G\n#SBATCH --mail-type=ALL\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=$ws_dir/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep 
SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,tab +42,386587,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5 --mem=50G --mail-user=mihir@pdoom.org --mail-type=ALL",,terminal_command +43,386660,"TERMINAL",0,0,"]633;E;2025-06-27 14:53:32 salloc --time=01:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5 --mem=50G --mail-user=mihir@pdoom.org --mail-type=ALL;77164738-3379-4db1-b4b8-ccd452227802]633;Csalloc: Pending job allocation 3298895\r\nsalloc: job 3298895 queued and waiting for resources\r\n",,terminal_output +44,387552,"TERMINAL",0,0,"bash",,terminal_focus +45,391043,"TERMINAL",0,0,"queue",,terminal_command +46,391139,"TERMINAL",0,0,"]633;E;2025-06-27 14:53:36 queue;8c62fbe5-e97b-4606-8b90-e30af988a4fa]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1993.localdomain: Fri Jun 27 14:53:37 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3298895 accelerat interact tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output +47,392184,"TERMINAL",0,0,"8",,terminal_output +48,393127,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar",,terminal_output +49,393781,"TERMINAL",0,0,"idle",,terminal_command +50,393824,"TERMINAL",0,0,"]633;E;2025-06-27 14:53:39 idle;8c62fbe5-e97b-4606-8b90-e30af988a4fa]633;CPartition dev_cpuonly : 10 nodes idle\r\nPartition cpuonly : 41 nodes idle\r\nPartition dev_accelerated : 0 nodes idle\r\nPartition accelerated : 8 nodes idle\r\nPartition dev_accelerated-h100 : 0 nodes idle\r\nPartition accelerated-h100 : 0 nodes idle\r\nPartition large : 7 nodes idle\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +51,395744,"TERMINAL",0,0,"queue",,terminal_command +52,395820,"TERMINAL",0,0,"]633;E;2025-06-27 14:53:41 queue;8c62fbe5-e97b-4606-8b90-e30af988a4fa]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1993.localdomain: Fri Jun 27 14:53:41 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3298895 accelerat interact tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output +53,396858,"TERMINAL",0,0,"2",,terminal_output +54,397900,"TERMINAL",0,0,"3",,terminal_output +55,398957,"TERMINAL",0,0,"4",,terminal_output +56,399991,"TERMINAL",0,0,"5",,terminal_output +57,401040,"TERMINAL",0,0,"6",,terminal_output +58,402103,"TERMINAL",0,0,"7",,terminal_output +59,403143,"TERMINAL",0,0,"9",,terminal_output +60,404188,"TERMINAL",0,0,"50",,terminal_output +61,405231,"TERMINAL",0,0,"1",,terminal_output +62,406336,"TERMINAL",0,0,"2",,terminal_output +63,407375,"TERMINAL",0,0,"3",,terminal_output +64,408386,"TERMINAL",0,0,"4",,terminal_output +65,409434,"TERMINAL",0,0,"5",,terminal_output +66,410517,"TERMINAL",0,0,"6",,terminal_output +67,411508,"TERMINAL",0,0,"7",,terminal_output +68,412554,"TERMINAL",0,0,"8",,terminal_output +69,413601,"TERMINAL",0,0,"9",,terminal_output +70,414647,"TERMINAL",0,0,"4:00",,terminal_output +71,415725,"TERMINAL",0,0,"1",,terminal_output +72,416742,"TERMINAL",0,0,"2",,terminal_output +73,417778,"TERMINAL",0,0,"3",,terminal_output +74,418824,"TERMINAL",0,0,"4",,terminal_output +75,419909,"TERMINAL",0,0,"5",,terminal_output +76,420927,"TERMINAL",0,0,"6",,terminal_output +77,421975,"TERMINAL",0,0,"7",,terminal_output 
+85,429757,"TERMINAL",0,0,"salloc",,terminal_focus
+87,431299,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab
+88,431300,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",663,0,"",shellscript,selection_mouse
+93,435604,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1055,0,"",shellscript,selection_mouse
+95,436237,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1054,0,"",shellscript,selection_mouse
+96,436241,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1053,0,"",shellscript,selection_command
+149,491076,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",803,0,"",shellscript,selection_mouse
+151,491853,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1048,0,"",shellscript,selection_mouse
+153,492493,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",831,0,"",shellscript,selection_mouse
+154,493198,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1001,0,"",shellscript,selection_mouse
+155,493203,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1000,0,"",shellscript,selection_command
+157,493644,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1054,0,"",shellscript,selection_mouse
+158,493646,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1053,0,"",shellscript,selection_command
+333,676193,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1054,0,"",shellscript,selection_mouse
+334,676196,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1053,0,"",shellscript,selection_command
+343,684577,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1055,0,"",shellscript,selection_mouse
+344,684816,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1023,32,"\n --data_dir $tf_records_dir\n",shellscript,selection_mouse
+345,684817,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",957,98,"al=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +346,684818,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",907,148,"e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +347,684818,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",881,174,"_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +348,684819,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",879,176,"ch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +349,684822,"TERMINAL",0,0,"30",,terminal_output +350,684836,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",843,212,"ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +351,684865,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",842,213,"-ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +352,684869,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",841,214,"--ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +353,684886,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",807,248,"n python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +354,684921,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",806,249,"un python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +355,685034,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",805,250,"run python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +356,685049,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",804,251,"srun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse 
+357,685764,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",804,0,"",shellscript,selection_command
+364,691545,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1024,0," --data_dir $tf_records_dir \\n",shellscript,content
+365,691622,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1057,0," --model_dim 384 \\n",shellscript,content
+366,691683,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1079,0," --latent_dim 32 \\n",shellscript,content
+367,691698,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1101,0," --num_latents 1024 \\n",shellscript,content
+368,691732,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1126,0," --patch_size 4 \\n",shellscript,content
+369,691752,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1147,0," --num_blocks 8 \\n",shellscript,content
+370,691834,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1168,0," --num_heads 8\n",shellscript,content
+371,691835,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1186,31,"",shellscript,content
+418,740299,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab
+419,740300,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",961,0,"",shellscript,selection_mouse
+422,741575,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",960,1,"",shellscript,content
+423,742108,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",960,0,"1",shellscript,content
+424,742109,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",961,0,"",shellscript,selection_keyboard
+425,742168,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",961,0,"0",shellscript,content
+426,742168,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",962,0,"",shellscript,selection_keyboard
+427,742292,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",962,0,"0",shellscript,content
+428,742292,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",963,0,"",shellscript,selection_keyboard
+430,743381,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",959,0,"",shellscript,selection_mouse
+432,743873,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",977,0,"",shellscript,selection_mouse
+440,751289,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=05:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=logs/logs_training/%x_%j.log\n#SBATCH --error=logs/logs_training/%x_%j.log\n#SBATCH --mail-user=mihir.mahajan2002@gmail.com\n#SBATCH --job-name=train_tokenizer_minecraft_overfit_sample\n#SBATCH --mem=50G\n#SBATCH --mail-type=ALL\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=$ws_dir/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,tab
+447,758016,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1069,0,"",shellscript,selection_mouse
+448,758017,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1068,0,"",shellscript,selection_command
+449,758120,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1068,1,"r",shellscript,selection_mouse
+450,758122,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1069,0,"",shellscript,selection_command
+451,758182,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",990,79,"\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse
+452,758185,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",919,150,"e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse
+453,758202,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",836,233,"-ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse
+454,758269,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",806,263,"un python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse
+455,758270,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",804,265,"srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse
+456,758281,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",803,266,"\nsrun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse
+458,758612,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",804,265,"srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse
+460,759392,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",804,0,"",shellscript,selection_command
+467,766165,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1039,0," --data_dir $tf_records_dir \\n",shellscript,content
+468,766183,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1072,0," --model_dim=384 \\n",shellscript,content
+469,766200,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1094,0," --latent_dim=32 \\n",shellscript,content
+470,766308,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1116,0," --num_latents=6 \\n",shellscript,content
+471,766467,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1138,0," --patch_size=16 \\n",shellscript,content
+472,766558,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1160,0," --num_blocks=8 \\n",shellscript,content
+474,766676,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1181,0," --num_heads=8 \\n",shellscript,content
+475,766718,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1201,0," --codebook_dropout=0.0\n",shellscript,content
+476,766719,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1227,31,"",shellscript,content
+485,775043,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab
+494,783385,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1187,0,"",shellscript,selection_mouse
+495,784363,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1187,0," ",shellscript,content
+496,784364,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1188,0,"",shellscript,selection_keyboard
+498,784505,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1188,0,"\",shellscript,content
+499,784506,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1189,0,"",shellscript,selection_keyboard
+500,784755,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1189,0,"\n ",shellscript,content
+501,785137,"TERMINAL",0,0,"salloc: job 3298895 has been allocated resources\r\nsalloc: Granted job allocation 3298895\r\n",,terminal_output
+502,785284,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output
+503,785375,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1194,0,"codebook_dropout = 0.0",shellscript,content
+505,786501,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1193,0,"",shellscript,selection_mouse
+508,788063,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1194,0,"",shellscript,selection_mouse
+510,788802,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1194,0,"-",shellscript,content
+511,788804,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1195,0,"",shellscript,selection_keyboard
+512,788909,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1195,0,"-",shellscript,content
+513,788910,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1196,0,"",shellscript,selection_keyboard
+515,790111,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1214,0,"",shellscript,selection_mouse
+516,790494,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1213,1,"",shellscript,content
+517,790698,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1212,1,"",shellscript,content
+521,793179,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab
+522,793180,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1217,0,"",shellscript,selection_mouse
+523,793707,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1216,0,"",shellscript,selection_mouse
+526,795457,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",0,0,"",shellscript,tab
+529,797665,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1224,0,"",shellscript,selection_mouse
+532,799973,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1090,0,"",shellscript,selection_mouse
+536,803187,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab
+539,804690,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1217,0,"",shellscript,selection_mouse
+552,817810,"TERMINAL",0,0,"salloc: Nodes hkn0507 are ready for job\r\n",,terminal_output
+554,818666,"TERMINAL",0,0,"]0;tum_cte0515@hkn0507:~/Projects/jafar[?2004h[tum_cte0515@hkn0507 jafar]$ ",,terminal_output
+559,822256,"TERMINAL",0,0,"s",,terminal_output
+560,822459,"TERMINAL",0,0,"ou",,terminal_output
+561,822602,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output
+562,822694,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output
+563,823059,"TERMINAL",0,0,"[?25le[?25h",,terminal_output
+565,823194,"TERMINAL",0,0,"[?25l .[?25h",,terminal_output
+566,823291,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output
+567,823486,"TERMINAL",0,0,"env/",,terminal_output
+568,823988,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output
+569,824114,"TERMINAL",0,0,"in/",,terminal_output
+571,824340,"TERMINAL",0,0,"[?25la[?25h",,terminal_output
+572,824661,"TERMINAL",0,0,"ctivate",,terminal_output
+574,825315,"TERMINAL",0,0,"[?25l[?2004l\r[?25h]0;tum_cte0515@hkn0507:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0507 jafar]$ ",,terminal_output
+589,840204,"scripts_horeka/overfit_sample_tiny/tester.sh",0,0,"",shellscript,tab
+596,846349,"scripts_horeka/train_tokenizer.sh",0,0,"#!/usr/bin/env bash\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=$ws_dir/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""0000""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\npython train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,tab
+598,847335,"scripts_horeka/train_tokenizer.sh",691,0,"",shellscript,selection_mouse
+599,847336,"scripts_horeka/train_tokenizer.sh",690,0,"",shellscript,selection_command
+600,847456,"scripts_horeka/train_tokenizer.sh",690,1,"r",shellscript,selection_mouse
+601,847457,"scripts_horeka/train_tokenizer.sh",691,0,"",shellscript,selection_command
+602,847496,"scripts_horeka/train_tokenizer.sh",557,134,"-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse
+603,847506,"scripts_horeka/train_tokenizer.sh",224,467,"\njob_name=""debug""\nslurm_job_id=""0000""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\npython train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +604,847526,"scripts_horeka/train_tokenizer.sh",0,691,"#!/usr/bin/env bash\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=$ws_dir/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""0000""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\npython train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +605,848309,"TERMINAL",0,0,"43",,terminal_output +606,849311,"TERMINAL",0,0,"54",,terminal_output +607,850311,"TERMINAL",0,0,"65",,terminal_output +608,850786,"scripts_horeka/overfit_sample_tiny/tester.sh",0,0,"",shellscript,tab +609,851341,"TERMINAL",0,0,"76",,terminal_output +610,852248,"scripts_horeka/overfit_sample_tiny/tester.sh",0,0,"#!/usr/bin/env bash\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=$ws_dir/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""0000""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\npython train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,content +611,852384,"TERMINAL",0,0,"87",,terminal_output +612,853421,"TERMINAL",0,0,"98",,terminal_output +613,854143,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab +614,854483,"TERMINAL",0,0,"209",,terminal_output +615,855521,"TERMINAL",0,0,"110",,terminal_output +616,855807,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1216,1,"\n",shellscript,selection_mouse +617,855832,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1189,28,"\n --codebook_dropout 0.0\n",shellscript,selection_mouse +618,855861,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1148,69,"\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +619,855863,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1100,117," \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +620,855890,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",943,274,"g_image_interval=100 \\n --log \\n --entity instant-uv \\n --project 
jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +621,855900,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",873,344," --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +622,855927,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",839,378," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +623,855937,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",838,379," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +624,855964,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",837,380," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +625,856079,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",804,413,"srun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +626,856564,"TERMINAL",0,0,"21",,terminal_output +627,857605,"TERMINAL",0,0,"32",,terminal_output +628,858389,"scripts_horeka/overfit_sample_tiny/tester.sh",0,0,"",shellscript,tab +629,858776,"TERMINAL",0,0,"43",,terminal_output +630,859382,"scripts_horeka/overfit_sample_tiny/tester.sh",638,53,"\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +631,859422,"scripts_horeka/overfit_sample_tiny/tester.sh",602,89,"er debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +632,859423,"scripts_horeka/overfit_sample_tiny/tester.sh",535,156,"\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +633,859479,"scripts_horeka/overfit_sample_tiny/tester.sh",485,206,"4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags 
test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +634,859480,"scripts_horeka/overfit_sample_tiny/tester.sh",459,232,"lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +635,859480,"scripts_horeka/overfit_sample_tiny/tester.sh",457,234,"n_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +636,859491,"scripts_horeka/overfit_sample_tiny/tester.sh",456,235,"in_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +637,859525,"scripts_horeka/overfit_sample_tiny/tester.sh",455,236,"min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +638,859526,"scripts_horeka/overfit_sample_tiny/tester.sh",454,237,"-min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +639,859565,"scripts_horeka/overfit_sample_tiny/tester.sh",431,260,"--batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +640,859605,"scripts_horeka/overfit_sample_tiny/tester.sh",430,261," --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +641,859645,"scripts_horeka/overfit_sample_tiny/tester.sh",429,262," --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +642,859689,"scripts_horeka/overfit_sample_tiny/tester.sh",428,263," --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +643,859690,"scripts_horeka/overfit_sample_tiny/tester.sh",394,297," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +644,859779,"TERMINAL",0,0,"54",,terminal_output 
+645,860108,"scripts_horeka/overfit_sample_tiny/tester.sh",366,325,"python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +646,860613,"scripts_horeka/overfit_sample_tiny/tester.sh",367,324,"ython train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +647,860634,"scripts_horeka/overfit_sample_tiny/tester.sh",368,323,"thon train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +648,860672,"scripts_horeka/overfit_sample_tiny/tester.sh",369,322,"hon train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +649,860804,"TERMINAL",0,0,"65",,terminal_output +650,860984,"scripts_horeka/overfit_sample_tiny/tester.sh",370,321,"on train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +651,861042,"scripts_horeka/overfit_sample_tiny/tester.sh",371,320,"n train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +652,861326,"scripts_horeka/overfit_sample_tiny/tester.sh",370,321,"on train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +653,861356,"scripts_horeka/overfit_sample_tiny/tester.sh",368,323,"thon train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +654,861393,"scripts_horeka/overfit_sample_tiny/tester.sh",367,324,"ython train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity 
instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +655,861431,"scripts_horeka/overfit_sample_tiny/tester.sh",366,325,"python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +656,861849,"scripts_horeka/overfit_sample_tiny/tester.sh",394,297," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=12 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=3 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse +657,861857,"TERMINAL",0,0,"76",,terminal_output +658,862896,"TERMINAL",0,0,"87",,terminal_output +659,863949,"TERMINAL",0,0,"98",,terminal_output +660,864983,"TERMINAL",0,0,"309",,terminal_output +661,866034,"TERMINAL",0,0,"120",,terminal_output +662,866866,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",0,0,"",shellscript,tab +663,867164,"TERMINAL",0,0,"22",,terminal_output +664,868124,"TERMINAL",0,0,"43",,terminal_output +665,869178,"TERMINAL",0,0,"54",,terminal_output +666,869271,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab +667,870237,"TERMINAL",0,0,"65",,terminal_output +668,871281,"TERMINAL",0,0,"76",,terminal_output +669,872315,"TERMINAL",0,0,"87",,terminal_output +670,873362,"TERMINAL",0,0,"98",,terminal_output +671,873414,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1217,0,"",shellscript,selection_mouse +672,873889,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1216,0,"",shellscript,selection_mouse +673,873965,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1189,27,"\n --codebook_dropout 0.0",shellscript,selection_mouse +674,873999,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1169,47,"\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +675,874016,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1168,48,"\\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +676,874031,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1144,72," 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +677,874047,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1141,75,"ize 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +678,874085,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1114,102,"atents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +679,874086,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1090,126,"ent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +680,874102,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1066,150,"odel_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse 
+681,874117,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1032,184,"data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +682,874151,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1031,185,"-data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +683,874170,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1008,208,"--project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +684,874170,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1007,209," --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +685,874182,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",981,235," --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +686,874200,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",968,248," --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +687,874225,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",967,249," --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +688,874268,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",966,250," --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +689,874268,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",935,281," --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +690,874370,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",913,303," --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +691,874439,"TERMINAL",0,0,"409",,terminal_output 
+692,874481,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",891,325," --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +693,874576,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",870,346," --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +694,874813,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",837,379," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +695,875478,"TERMINAL",0,0,"130",,terminal_output +696,876516,"TERMINAL",0,0,"21",,terminal_output +697,877165,"scripts_horeka/overfit_sample_tiny/tester.sh",0,0,"",shellscript,tab +698,877559,"TERMINAL",0,0,"32",,terminal_output +699,878307,"scripts_horeka/overfit_sample_tiny/tester.sh",394,297,"",shellscript,content +700,878672,"TERMINAL",0,0,"43",,terminal_output +701,878932,"scripts_horeka/overfit_sample_tiny/tester.sh",394,0," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,content +702,879651,"TERMINAL",0,0,"54",,terminal_output +703,880700,"TERMINAL",0,0,"65",,terminal_output +704,881558,"TERMINAL",0,0,"[?25lsh[?25h",,terminal_output +705,881674,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +706,881768,"TERMINAL",0,0,"76",,terminal_output +707,881978,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +708,882495,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +709,882797,"TERMINAL",0,0,"87",,terminal_output +710,883360,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +711,883838,"TERMINAL",0,0,"98",,terminal_output +712,884877,"TERMINAL",0,0,"509",,terminal_output +713,885322,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +714,885395,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +715,885654,"TERMINAL",0,0,"ripts_",,terminal_output +716,885934,"TERMINAL",0,0,"140",,terminal_output +717,885977,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +718,886030,"TERMINAL",0,0,"oreka/",,terminal_output +719,886977,"TERMINAL",0,0,"21",,terminal_output +720,887741,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +721,887915,"TERMINAL",0,0,"",,terminal_output +722,888014,"TERMINAL",0,0,"32",,terminal_output +723,889056,"TERMINAL",0,0,"43",,terminal_output +724,889927,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +725,890023,"TERMINAL",0,0,"verfit_",,terminal_output +726,890136,"TERMINAL",0,0,"65",,terminal_output +727,890487,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output 
+728,891144,"TERMINAL",0,0,"76",,terminal_output +729,891216,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +730,891704,"TERMINAL",0,0,"ample",,terminal_output +731,892189,"TERMINAL",0,0,"87",,terminal_output +732,893241,"TERMINAL",0,0,"98",,terminal_output +733,893870,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +734,894062,"TERMINAL",0,0,"tiny/",,terminal_output +735,894296,"TERMINAL",0,0,"2:009",,terminal_output +736,895338,"TERMINAL",0,0,"150",,terminal_output +737,896383,"TERMINAL",0,0,"21",,terminal_output +738,896482,"TERMINAL",0,0,"t",,terminal_output +739,897088,"TERMINAL",0,0,"ester.sh ",,terminal_output +740,897462,"TERMINAL",0,0,"32",,terminal_output +741,897607,"TERMINAL",0,0,"[?25l[?2004l\r[?25h",,terminal_output +742,897755,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=1721172\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0507\r\nSLURM_JOB_START_TIME=1751029211\r\nSLURM_STEP_NODELIST=hkn0507\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751032811\r\nSLURM_PMI2_SRUN_PORT=35623\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3298895\r\nSLURM_PTY_PORT=42613\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=35\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e10.hkn0507\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_MEM_PER_NODE=51200\r\nSLURM_PTY_WIN_COL=73\r\nSLURM_NODELIST=hkn0507\r\nSLURM_SRUN_COMM_PORT=37257\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3298895\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0507\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=37257\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0507\r\n",,terminal_output +743,898480,"TERMINAL",0,0,"43",,terminal_output +744,899527,"TERMINAL",0,0,"54",,terminal_output +745,900569,"TERMINAL",0,0,"65",,terminal_output +746,901617,"TERMINAL",0,0,"76",,terminal_output +747,902719,"TERMINAL",0,0,"87",,terminal_output +748,903704,"TERMINAL",0,0,"98",,terminal_output +749,904752,"TERMINAL",0,0,"109",,terminal_output +750,905809,"TERMINAL",0,0,"12:00",,terminal_output +751,906840,"TERMINAL",0,0,"21",,terminal_output +752,907879,"TERMINAL",0,0,"32",,terminal_output +753,908922,"TERMINAL",0,0,"43",,terminal_output +754,909975,"TERMINAL",0,0,"54",,terminal_output +755,911017,"TERMINAL",0,0,"65",,terminal_output +756,912061,"TERMINAL",0,0,"77",,terminal_output +757,913107,"TERMINAL",0,0,"98",,terminal_output +758,914204,"TERMINAL",0,0,"209",,terminal_output +759,915202,"TERMINAL",0,0,"110",,terminal_output +760,916295,"TERMINAL",0,0,"21",,terminal_output +761,917302,"TERMINAL",0,0,"32",,terminal_output +762,918350,"TERMINAL",0,0,"43",,terminal_output +763,919461,"TERMINAL",0,0,"54",,terminal_output 
+764,920475,"TERMINAL",0,0,"65",,terminal_output +765,921484,"TERMINAL",0,0,"76",,terminal_output +766,922515,"TERMINAL",0,0,"87",,terminal_output +767,923552,"TERMINAL",0,0,"98",,terminal_output +768,924040,"TERMINAL",0,0,"2025-06-27 15:02:29.895077: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n",,terminal_output +769,924202,"TERMINAL",0,0,"WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751029350.018142 1721680 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751029350.023830 1721680 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\n",,terminal_output +770,924599,"TERMINAL",0,0,"W0000 00:00:1751029350.523733 1721680 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751029350.523782 1721680 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751029350.523784 1721680 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751029350.523786 1721680 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +771,924600,"TERMINAL",0,0,"309",,terminal_output +772,925671,"TERMINAL",0,0,"120",,terminal_output +773,926691,"TERMINAL",0,0,"21",,terminal_output +774,927735,"TERMINAL",0,0,"32",,terminal_output +775,928810,"TERMINAL",0,0,"43",,terminal_output +776,929823,"TERMINAL",0,0,"54",,terminal_output +777,930955,"TERMINAL",0,0,"65",,terminal_output +778,931922,"TERMINAL",0,0,"76",,terminal_output +779,933048,"TERMINAL",0,0,"87",,terminal_output +780,934075,"TERMINAL",0,0,"98",,terminal_output +781,935074,"TERMINAL",0,0,"4030",,terminal_output +782,936123,"TERMINAL",0,0,"21",,terminal_output +783,937179,"TERMINAL",0,0,"32",,terminal_output +784,938254,"TERMINAL",0,0,"43",,terminal_output +785,939334,"TERMINAL",0,0,"54",,terminal_output +786,940352,"TERMINAL",0,0,"65",,terminal_output +787,941507,"TERMINAL",0,0,"76",,terminal_output +788,942427,"TERMINAL",0,0,"87",,terminal_output +789,943515,"TERMINAL",0,0,"98",,terminal_output +790,944579,"TERMINAL",0,0,"509",,terminal_output +791,945567,"TERMINAL",0,0,"140",,terminal_output +792,946604,"TERMINAL",0,0,"21",,terminal_output +793,947675,"TERMINAL",0,0,"32",,terminal_output +794,948731,"TERMINAL",0,0,"43",,terminal_output +795,949761,"TERMINAL",0,0,"54",,terminal_output +796,950786,"TERMINAL",0,0,"65",,terminal_output +797,951933,"TERMINAL",0,0,"76",,terminal_output +798,952871,"TERMINAL",0,0,"87",,terminal_output +799,953912,"TERMINAL",0,0,"98",,terminal_output +800,954967,"TERMINAL",0,0,"3:009",,terminal_output +801,956013,"TERMINAL",0,0,"150",,terminal_output +802,957056,"TERMINAL",0,0,"21",,terminal_output +803,957123,"TERMINAL",0,0,"W0000 00:00:1751029383.058469 1721680 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. 
Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +804,958097,"TERMINAL",0,0,"43",,terminal_output +805,958231,"TERMINAL",0,0,"Running on 1 devices.\r\n",,terminal_output +806,959146,"TERMINAL",0,0,"54",,terminal_output +807,959406,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +808,960240,"TERMINAL",0,0,"65",,terminal_output +809,960328,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250627_150305-7mhnpbtw\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run train_tokenizer\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/7mhnpbtw\r\n",,terminal_output +810,961254,"TERMINAL",0,0,"76",,terminal_output +811,962298,"TERMINAL",0,0,"87",,terminal_output +812,962970,"TERMINAL",0,0,"2025-06-27 15:03:08.836946: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +813,963394,"TERMINAL",0,0,"98",,terminal_output +814,964417,"TERMINAL",0,0,"109",,terminal_output +815,965472,"TERMINAL",0,0,"13:00",,terminal_output +816,966511,"TERMINAL",0,0,"21",,terminal_output +817,967558,"TERMINAL",0,0,"32",,terminal_output +818,968592,"TERMINAL",0,0,"43",,terminal_output +819,969642,"TERMINAL",0,0,"54",,terminal_output +820,970731,"TERMINAL",0,0,"65",,terminal_output +821,971756,"TERMINAL",0,0,"76",,terminal_output +822,972806,"TERMINAL",0,0,"87",,terminal_output +823,973851,"TERMINAL",0,0,"98",,terminal_output +824,974900,"TERMINAL",0,0,"209",,terminal_output +825,975992,"TERMINAL",0,0,"110",,terminal_output +826,976256,"TERMINAL",0,0,"2025-06-27 15:03:22.183846: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +827,976987,"TERMINAL",0,0,"21",,terminal_output +828,978017,"TERMINAL",0,0,"32",,terminal_output +829,978238,"TERMINAL",0,0,"2025-06-27 15:03:24.164259: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +830,979090,"TERMINAL",0,0,"44",,terminal_output +831,980113,"TERMINAL",0,0,"65",,terminal_output +832,981162,"TERMINAL",0,0,"76",,terminal_output +833,982190,"TERMINAL",0,0,"Starting training from step 0...\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 219, in \r\n print(""batch shape: "", videos.shape)\r\nNameError: name 'videos' is not defined\r\n",,terminal_output +834,982228,"TERMINAL",0,0,"87",,terminal_output +835,983323,"TERMINAL",0,0,"98",,terminal_output +836,983435,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run train_tokenizer at: https://wandb.ai/instant-uv/jafar/runs/7mhnpbtw\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250627_150305-7mhnpbtw/logs\r\n",,terminal_output +837,984293,"TERMINAL",0,0,"309",,terminal_output +838,985301,"TERMINAL",0,0,"]0;tum_cte0515@hkn0507:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0507 jafar]$ ",,terminal_output +839,985340,"TERMINAL",0,0,"120",,terminal_output +840,986431,"TERMINAL",0,0,"21",,terminal_output +841,987495,"TERMINAL",0,0,"32",,terminal_output +842,988555,"TERMINAL",0,0,"43",,terminal_output +843,989564,"TERMINAL",0,0,"54",,terminal_output +844,990579,"TERMINAL",0,0,"65",,terminal_output +845,991629,"TERMINAL",0,0,"76",,terminal_output +846,992706,"TERMINAL",0,0,"87",,terminal_output +847,993719,"TERMINAL",0,0,"98",,terminal_output +848,994805,"TERMINAL",0,0,"409",,terminal_output +849,995807,"TERMINAL",0,0,"130",,terminal_output +850,996072,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = 
jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args\n )\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 
0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer__\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n # for videos in dataloader:\n npy_path = ""overfit_dir/single_sample_corner.npy""\n # npy_path = ""overfit_dir/single_batch_12_elems.npy""\n # videos = np.load(npy_path)\n print(""batch shape: "", videos.shape)\n while(True):\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n start_time = time.time()\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n jax.block_until_ready(loss)\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n ""step_time_ms"": elapsed_time,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +851,996858,"TERMINAL",0,0,"21",,terminal_output +852,997903,"TERMINAL",0,0,"32",,terminal_output +853,999006,"TERMINAL",0,0,"43",,terminal_output +854,1000006,"TERMINAL",0,0,"54",,terminal_output +855,1001061,"TERMINAL",0,0,"65",,terminal_output +856,1002072,"TERMINAL",0,0,"77",,terminal_output +857,1003143,"TERMINAL",0,0,"98",,terminal_output +858,1003932,"train_tokenizer.py",7012,0,"",python,selection_mouse +859,1004200,"TERMINAL",0,0,"509",,terminal_output +860,1004605,"train_tokenizer.py",6767,0,"",python,selection_mouse +861,1005366,"TERMINAL",0,0,"140",,terminal_output 
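The `NameError: name 'videos' is not defined` traceback in row 833 follows directly from the `train_tokenizer.py` payload above: inside the overfit loop, `videos = np.load(npy_path)` is commented out while the very next line prints `videos.shape`. Rows 869-870 record the fix as two single-character deletions at offset 6907, i.e. removing the `# ` in front of the load, after which the rerun reports `batch shape: (1, 16, 90, 160, 3)` (row 916). A minimal sketch of the corrected loop head, assuming `overfit_dir/single_sample_corner.npy` holds that array:

```python
import numpy as np

# Overfit on one saved sample instead of iterating the tfrecord dataloader.
npy_path = "overfit_dir/single_sample_corner.npy"
# npy_path = "overfit_dir/single_batch_12_elems.npy"
videos = np.load(npy_path)  # previously commented out, hence the NameError
print("batch shape: ", videos.shape)  # e.g. (1, 16, 90, 160, 3)
```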
+862,1006011,"train_tokenizer.py",6908,0,"",python,selection_mouse +863,1006264,"TERMINAL",0,0,"21",,terminal_output +864,1007080,"train_tokenizer.py",6945,0,"",python,selection_command +865,1007310,"TERMINAL",0,0,"32",,terminal_output +866,1007987,"train_tokenizer.py",6908,0,"",python,selection_command +867,1008194,"train_tokenizer.py",6907,0,"",python,selection_command +868,1008356,"TERMINAL",0,0,"43",,terminal_output +869,1008451,"train_tokenizer.py",6907,1,"",python,content +870,1008591,"train_tokenizer.py",6907,1,"",python,content +871,1009421,"TERMINAL",0,0,"54",,terminal_output +872,1010487,"TERMINAL",0,0,"65",,terminal_output +873,1011083,"TERMINAL",0,0,"sh scripts_horeka/overfit_sample_tiny/tester.sh ",,terminal_output +874,1011521,"TERMINAL",0,0,"76",,terminal_output +875,1012539,"TERMINAL",0,0,"87",,terminal_output +876,1013591,"TERMINAL",0,0,"98",,terminal_output +877,1014665,"TERMINAL",0,0,"4:009",,terminal_output +878,1015669,"TERMINAL",0,0,"150",,terminal_output +879,1015838,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +880,1015972,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=1721172\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0507\r\nSLURM_JOB_START_TIME=1751029211\r\nSLURM_STEP_NODELIST=hkn0507\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751032811\r\nSLURM_PMI2_SRUN_PORT=35623\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3298895\r\nSLURM_PTY_PORT=42613\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=35\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e10.hkn0507\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_MEM_PER_NODE=51200\r\nSLURM_PTY_WIN_COL=73\r\nSLURM_NODELIST=hkn0507\r\nSLURM_SRUN_COMM_PORT=37257\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3298895\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0507\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=37257\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0507\r\n",,terminal_output +881,1016751,"TERMINAL",0,0,"21",,terminal_output +882,1017780,"TERMINAL",0,0,"32",,terminal_output +883,1018410,"TERMINAL",0,0,"2025-06-27 15:04:04.322955: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751029444.336065 1722627 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751029444.340426 1722627 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register 
factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1751029444.352659 1722627 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751029444.352677 1722627 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751029444.352680 1722627 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751029444.352682 1722627 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +884,1018801,"TERMINAL",0,0,"43",,terminal_output +885,1019853,"TERMINAL",0,0,"54",,terminal_output +886,1020887,"TERMINAL",0,0,"65",,terminal_output +887,1021661,"TERMINAL",0,0,"W0000 00:00:1751029447.606913 1722627 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +888,1021964,"TERMINAL",0,0,"76",,terminal_output +889,1022003,"TERMINAL",0,0,"Running on 1 devices.\r\n",,terminal_output +890,1022788,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +891,1023080,"TERMINAL",0,0,"87",,terminal_output +892,1023764,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250627_150408-sf5s8uy4\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run train_tokenizer\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/sf5s8uy4\r\n",,terminal_output +893,1024067,"TERMINAL",0,0,"98",,terminal_output +894,1025053,"TERMINAL",0,0,"109",,terminal_output +895,1025122,"TERMINAL",0,0,"2025-06-27 15:04:11.064711: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +896,1026092,"TERMINAL",0,0,"14:01",,terminal_output +897,1027127,"TERMINAL",0,0,"32",,terminal_output +898,1028169,"TERMINAL",0,0,"43",,terminal_output +899,1029208,"TERMINAL",0,0,"54",,terminal_output +900,1030257,"TERMINAL",0,0,"65",,terminal_output +901,1031305,"TERMINAL",0,0,"76",,terminal_output +902,1032359,"TERMINAL",0,0,"87",,terminal_output +903,1033452,"TERMINAL",0,0,"98",,terminal_output +904,1034478,"TERMINAL",0,0,"209",,terminal_output +905,1035511,"TERMINAL",0,0,"110",,terminal_output +906,1036545,"TERMINAL",0,0,"21",,terminal_output +907,1037560,"TERMINAL",0,0,"32",,terminal_output +908,1038286,"TERMINAL",0,0,"2025-06-27 15:04:24.225466: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +909,1038608,"TERMINAL",0,0,"43",,terminal_output +910,1039671,"TERMINAL",0,0,"54",,terminal_output +911,1040399,"TERMINAL",0,0,"2025-06-27 15:04:26.343314: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +912,1040701,"TERMINAL",0,0,"65",,terminal_output +913,1041743,"TERMINAL",0,0,"76",,terminal_output +914,1042821,"TERMINAL",0,0,"87",,terminal_output +915,1043840,"TERMINAL",0,0,"98",,terminal_output +916,1044101,"TERMINAL",0,0,"Starting training from step 0...\r\nbatch shape: (1, 16, 90, 160, 3)\r\n",,terminal_output +917,1044863,"TERMINAL",0,0,"309",,terminal_output +918,1045922,"TERMINAL",0,0,"120",,terminal_output +919,1046953,"TERMINAL",0,0,"21",,terminal_output +920,1048006,"TERMINAL",0,0,"32",,terminal_output +921,1049111,"TERMINAL",0,0,"43",,terminal_output +922,1050087,"TERMINAL",0,0,"55",,terminal_output +923,1051134,"TERMINAL",0,0,"76",,terminal_output +924,1052173,"TERMINAL",0,0,"87",,terminal_output +925,1053207,"TERMINAL",0,0,"98",,terminal_output +926,1054246,"TERMINAL",0,0,"409",,terminal_output +927,1055301,"TERMINAL",0,0,"130",,terminal_output +928,1056153,"TERMINAL",0,0,"2025-06-27 15:04:42.061083: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-27 15:04:42.061649: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-27 15:04:42.061671: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-27 15:04:42.061783: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-27 15:04:42.063490: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +929,1056406,"TERMINAL",0,0,"21",,terminal_output +930,1057372,"TERMINAL",0,0,"32",,terminal_output +931,1058397,"TERMINAL",0,0,"43",,terminal_output +932,1059444,"TERMINAL",0,0,"54",,terminal_output +933,1060540,"TERMINAL",0,0,"65",,terminal_output +934,1061598,"TERMINAL",0,0,"76",,terminal_output +935,1062762,"TERMINAL",0,0,"87",,terminal_output +936,1064025,"TERMINAL",0,0,"98",,terminal_output +937,1064824,"TERMINAL",0,0,"509",,terminal_output +938,1065914,"TERMINAL",0,0,"140",,terminal_output +939,1067101,"TERMINAL",0,0,"21",,terminal_output +940,1068470,"TERMINAL",0,0,"32",,terminal_output +941,1069395,"TERMINAL",0,0,"43",,terminal_output +942,1070050,"TERMINAL",0,0,"54",,terminal_output +943,1071428,"TERMINAL",0,0,"65",,terminal_output +944,1072774,"TERMINAL",0,0,"76",,terminal_output +945,1073425,"TERMINAL",0,0,"88",,terminal_output +946,1074541,"TERMINAL",0,0,"5:009",,terminal_output +947,1075397,"TERMINAL",0,0,"150",,terminal_output +948,1076478,"TERMINAL",0,0,"21",,terminal_output +949,1077528,"TERMINAL",0,0,"32",,terminal_output +950,1078424,"TERMINAL",0,0,"43",,terminal_output +951,1079530,"TERMINAL",0,0,"54",,terminal_output +952,1080845,"TERMINAL",0,0,"65",,terminal_output +953,1081763,"TERMINAL",0,0,"76",,terminal_output +954,1082750,"TERMINAL",0,0,"87",,terminal_output +955,1083846,"TERMINAL",0,0,"98",,terminal_output +956,1085243,"TERMINAL",0,0,"109",,terminal_output +957,1086242,"TERMINAL",0,0,"15:00",,terminal_output +958,1087345,"TERMINAL",0,0,"21",,terminal_output +959,1088410,"TERMINAL",0,0,"32",,terminal_output +960,1089279,"TERMINAL",0,0,"43",,terminal_output +961,1090440,"TERMINAL",0,0,"54",,terminal_output +962,1091292,"TERMINAL",0,0,"65",,terminal_output +963,1092320,"TERMINAL",0,0,"76",,terminal_output +964,1093137,"TERMINAL",0,0,"87",,terminal_output +965,1094463,"TERMINAL",0,0,"98",,terminal_output +966,1095389,"TERMINAL",0,0,"209",,terminal_output +967,1096151,"TERMINAL",0,0,"110",,terminal_output +968,1097346,"TERMINAL",0,0,"22",,terminal_output +969,1098511,"TERMINAL",0,0,"43",,terminal_output +970,1099497,"TERMINAL",0,0,"54",,terminal_output +971,1100405,"TERMINAL",0,0,"65",,terminal_output +972,1101812,"TERMINAL",0,0,"76",,terminal_output +973,1102320,"TERMINAL",0,0,"87",,terminal_output +974,1103792,"TERMINAL",0,0,"98",,terminal_output +975,1104512,"TERMINAL",0,0,"309",,terminal_output +976,1105120,"TERMINAL",0,0,"Step 0, loss: 0.27132558822631836, step time: 60433.52794647217ms\r\n",,terminal_output +977,1105401,"TERMINAL",0,0,"Step 1, loss: 0.2122431993484497, step time: 177.60229110717773ms\r\n",,terminal_output +978,1105738,"TERMINAL",0,0,"Step 2, loss: 0.1855737268924713, step time: 176.76377296447754ms\r\n",,terminal_output +979,1105966,"TERMINAL",0,0,"Step 3, loss: 0.177537739276886, step time: 176.6374111175537ms\r\n",,terminal_output +980,1106177,"TERMINAL",0,0,"120",,terminal_output +981,1106276,"TERMINAL",0,0,"Step 4, loss: 0.17784759402275085, step time: 176.79548263549805ms\r\n",,terminal_output +982,1106410,"TERMINAL",0,0,"Step 5, loss: 0.1761018931865692, step time: 175.90022087097168ms\r\n",,terminal_output +983,1106568,"TERMINAL",0,0,"Step 6, loss: 0.1710786521434784, step time: 176.6335964202881ms\r\n",,terminal_output +984,1106704,"TERMINAL",0,0,"Step 7, loss: 0.1691059023141861, step time: 176.7139434814453ms\r\nStep 8, loss: 0.1638535112142563, step time: 
176.24163627624512ms\r\n",,terminal_output +985,1106790,"TERMINAL",0,0,"Step 9, loss: 0.15699408948421478, step time: 176.90730094909668ms\r\n",,terminal_output +986,1106799,"TERMINAL",0,0,"21",,terminal_output +987,1106816,"TERMINAL",0,0,"Step 10, loss: 0.14780470728874207, step time: 176.133394241333ms\r\n",,terminal_output +988,1106868,"TERMINAL",0,0,"Step 11, loss: 0.14296160638332367, step time: 176.53322219848633ms\r\n",,terminal_output +989,1106953,"TERMINAL",0,0,"Step 12, loss: 0.1376754343509674, step time: 176.45883560180664ms\r\n",,terminal_output +990,1107108,"TERMINAL",0,0,"Step 13, loss: 0.1328338235616684, step time: 175.62150955200195ms\r\n",,terminal_output +991,1107373,"TERMINAL",0,0,"Step 14, loss: 0.12999925017356873, step time: 175.83155632019043ms\r\n",,terminal_output +992,1107715,"TERMINAL",0,0,"Step 15, loss: 0.12699566781520844, step time: 175.23789405822754ms\r\n",,terminal_output +993,1107806,"TERMINAL",0,0,"32",,terminal_output +994,1108012,"TERMINAL",0,0,"Step 16, loss: 0.12324760109186172, step time: 175.6739616394043ms\r\n",,terminal_output +995,1108183,"TERMINAL",0,0,"Step 17, loss: 0.11883314698934555, step time: 175.44054985046387ms\r\n",,terminal_output +996,1108339,"TERMINAL",0,0,"Step 18, loss: 0.1147085577249527, step time: 176.1617660522461ms\r\n",,terminal_output +997,1108385,"TERMINAL",0,0,"Step 19, loss: 0.11075019091367722, step time: 175.81701278686523ms\r\nStep 20, loss: 0.10760579258203506, step time: 175.59814453125ms\r\n",,terminal_output +998,1108517,"TERMINAL",0,0,"Step 21, loss: 0.10483525693416595, step time: 175.81748962402344ms\r\n",,terminal_output +999,1108575,"TERMINAL",0,0,"43",,terminal_output +1000,1108796,"TERMINAL",0,0,"Step 22, loss: 0.10189395397901535, step time: 175.7051944732666ms\r\n",,terminal_output +1001,1109159,"TERMINAL",0,0,"Step 23, loss: 0.09971645474433899, step time: 175.9958267211914ms\r\n",,terminal_output +1002,1109493,"TERMINAL",0,0,"Step 24, loss: 0.09809523820877075, step time: 175.933837890625ms\r\n",,terminal_output +1003,1109689,"TERMINAL",0,0,"Step 25, loss: 0.0949634462594986, step time: 175.77457427978516ms\r\nStep 26, loss: 0.09226183593273163, step time: 175.88090896606445ms\r\nStep 27, loss: 0.0907188430428505, step time: 175.75550079345703ms\r\n",,terminal_output +1004,1109689,"TERMINAL",0,0,"54",,terminal_output +1005,1109789,"TERMINAL",0,0,"Step 28, loss: 0.09019973874092102, step time: 176.10573768615723ms\r\n",,terminal_output +1006,1109973,"TERMINAL",0,0,"Step 29, loss: 0.08942239731550217, step time: 175.73213577270508ms\r\n",,terminal_output +1007,1110227,"TERMINAL",0,0,"Step 30, loss: 0.08679584413766861, step time: 175.31275749206543ms\r\n",,terminal_output +1008,1110553,"TERMINAL",0,0,"Step 31, loss: 0.08453794568777084, step time: 175.7657527923584ms\r\n",,terminal_output +1009,1110871,"TERMINAL",0,0,"Step 32, loss: 0.08319561183452606, step time: 176.01680755615234ms\r\n",,terminal_output +1010,1110987,"TERMINAL",0,0,"65",,terminal_output +1011,1111018,"TERMINAL",0,0,"Step 33, loss: 0.08193986117839813, step time: 175.80604553222656ms\r\nStep 34, loss: 0.08053138852119446, step time: 175.97055435180664ms\r\nStep 35, loss: 0.07890529930591583, step time: 175.53353309631348ms\r\n",,terminal_output +1012,1111186,"TERMINAL",0,0,"Step 36, loss: 0.07825177907943726, step time: 175.77052116394043ms\r\n",,terminal_output +1013,1111482,"TERMINAL",0,0,"Step 37, loss: 0.0770043209195137, step time: 175.80819129943848ms\r\n",,terminal_output +1014,1111681,"TERMINAL",0,0,"Step 38, loss: 
0.07683085650205612, step time: 175.97126960754395ms\r\n",,terminal_output +1015,1111890,"TERMINAL",0,0,"Step 39, loss: 0.07636699825525284, step time: 175.97341537475586ms\r\n",,terminal_output +1016,1111896,"TERMINAL",0,0,"76",,terminal_output +1017,1111942,"TERMINAL",0,0,"Step 40, loss: 0.07608424872159958, step time: 175.3714084625244ms\r\n",,terminal_output +1018,1112079,"TERMINAL",0,0,"Step 41, loss: 0.0751199722290039, step time: 175.77695846557617ms\r\n",,terminal_output +1019,1112274,"TERMINAL",0,0,"Step 42, loss: 0.07546970248222351, step time: 175.86374282836914ms\r\n",,terminal_output +1020,1112660,"TERMINAL",0,0,"Step 43, loss: 0.07713846117258072, step time: 175.79269409179688ms\r\n",,terminal_output +1021,1112927,"TERMINAL",0,0,"Step 44, loss: 0.07893960177898407, step time: 175.94170570373535ms\r\n",,terminal_output +1022,1113067,"TERMINAL",0,0,"87",,terminal_output +1023,1113068,"TERMINAL",0,0,"Step 45, loss: 0.08062536269426346, step time: 175.91619491577148ms\r\n",,terminal_output +1024,1113155,"TERMINAL",0,0,"Step 46, loss: 0.0867118090391159, step time: 175.8897304534912ms\r\n",,terminal_output +1025,1113279,"TERMINAL",0,0,"Step 47, loss: 0.09151791036128998, step time: 175.811767578125ms\r\n",,terminal_output +1026,1113363,"TERMINAL",0,0,"Step 48, loss: 0.09612606465816498, step time: 175.95911026000977ms\r\n",,terminal_output +1027,1113519,"TERMINAL",0,0,"Step 49, loss: 0.1006508618593216, step time: 176.29003524780273ms\r\n",,terminal_output +1028,1113726,"TERMINAL",0,0,"Step 50, loss: 0.1029004454612732, step time: 175.4295825958252ms\r\n",,terminal_output +1029,1113933,"TERMINAL",0,0,"98",,terminal_output +1030,1113998,"TERMINAL",0,0,"Step 51, loss: 0.10312817245721817, step time: 176.0854721069336ms\r\n",,terminal_output +1031,1114355,"TERMINAL",0,0,"Step 52, loss: 0.10193965584039688, step time: 175.9018898010254ms\r\n",,terminal_output +1032,1114444,"TERMINAL",0,0,"Step 53, loss: 0.0996636226773262, step time: 175.63748359680176ms\r\n",,terminal_output +1033,1114596,"TERMINAL",0,0,"Step 54, loss: 0.09613282978534698, step time: 175.8286952972412ms\r\n",,terminal_output +1034,1114941,"TERMINAL",0,0,"Step 55, loss: 0.0933603048324585, step time: 180.09209632873535ms\r\n",,terminal_output +1035,1115018,"TERMINAL",0,0,"409",,terminal_output +1036,1115018,"TERMINAL",0,0,"Step 56, loss: 0.08963107317686081, step time: 176.29098892211914ms\r\nStep 57, loss: 0.08638574182987213, step time: 176.38778686523438ms\r\n",,terminal_output +1037,1115228,"TERMINAL",0,0,"Step 58, loss: 0.08353684842586517, step time: 176.20515823364258ms\r\n",,terminal_output +1038,1115319,"TERMINAL",0,0,"Step 59, loss: 0.0808611512184143, step time: 176.22780799865723ms\r\n",,terminal_output +1039,1115473,"TERMINAL",0,0,"Step 60, loss: 0.07879140228033066, step time: 175.57501792907715ms\r\n",,terminal_output +1040,1115681,"TERMINAL",0,0,"Step 61, loss: 0.07702076435089111, step time: 176.1631965637207ms\r\n",,terminal_output +1041,1115830,"TERMINAL",0,0,"Step 62, loss: 0.07482707500457764, step time: 176.0258674621582ms\r\n",,terminal_output +1042,1116000,"TERMINAL",0,0,"130",,terminal_output +1043,1116026,"TERMINAL",0,0,"Step 63, loss: 0.07237778604030609, step time: 176.1801242828369ms\r\n",,terminal_output +1044,1116228,"TERMINAL",0,0,"Step 64, loss: 0.07102444022893906, step time: 175.90689659118652ms\r\n",,terminal_output +1045,1116541,"TERMINAL",0,0,"Step 65, loss: 0.07058537751436234, step time: 176.14102363586426ms\r\n",,terminal_output +1046,1116593,"TERMINAL",0,0,"Step 66, loss: 
0.06959977746009827, step time: 176.36942863464355ms\r\n",,terminal_output +1047,1116731,"TERMINAL",0,0,"Step 67, loss: 0.06864778697490692, step time: 176.90610885620117ms\r\n",,terminal_output +1048,1116879,"TERMINAL",0,0,"Step 68, loss: 0.06804705411195755, step time: 176.62930488586426ms\r\n",,terminal_output +1049,1116962,"TERMINAL",0,0,"21",,terminal_output +1050,1117084,"TERMINAL",0,0,"Step 69, loss: 0.06743714213371277, step time: 175.9622097015381ms\r\n",,terminal_output +1051,1117272,"TERMINAL",0,0,"Step 70, loss: 0.06703907251358032, step time: 176.27453804016113ms\r\n",,terminal_output +1052,1117437,"TERMINAL",0,0,"Step 71, loss: 0.06686172634363174, step time: 175.98295211791992ms\r\n",,terminal_output +1053,1117597,"TERMINAL",0,0,"Step 72, loss: 0.06618412584066391, step time: 176.33771896362305ms\r\n",,terminal_output +1054,1117809,"TERMINAL",0,0,"Step 73, loss: 0.06566116213798523, step time: 176.30267143249512ms\r\n",,terminal_output +1055,1118016,"TERMINAL",0,0,"Step 74, loss: 0.06580423563718796, step time: 176.79667472839355ms\r\n",,terminal_output +1056,1118183,"TERMINAL",0,0,"32",,terminal_output +1057,1118360,"TERMINAL",0,0,"Step 75, loss: 0.06610541045665741, step time: 176.24807357788086ms\r\n",,terminal_output +1058,1118682,"TERMINAL",0,0,"Step 76, loss: 0.06611763685941696, step time: 176.1474609375ms\r\nStep 77, loss: 0.06606844067573547, step time: 177.24871635437012ms\r\nStep 78, loss: 0.06610164791345596, step time: 176.11002922058105ms\r\n",,terminal_output +1059,1118857,"TERMINAL",0,0,"Step 79, loss: 0.06607328355312347, step time: 176.19061470031738ms\r\n",,terminal_output +1060,1119065,"TERMINAL",0,0,"Step 80, loss: 0.06583497673273087, step time: 175.77672004699707ms\r\n",,terminal_output +1061,1119161,"TERMINAL",0,0,"44",,terminal_output +1062,1119368,"TERMINAL",0,0,"Step 81, loss: 0.06564266234636307, step time: 176.20539665222168ms\r\n",,terminal_output +1063,1119688,"TERMINAL",0,0,"Step 82, loss: 0.06512859463691711, step time: 176.06043815612793ms\r\n",,terminal_output +1064,1119955,"TERMINAL",0,0,"Step 83, loss: 0.06433307379484177, step time: 175.86827278137207ms\r\nStep 84, loss: 0.06344572454690933, step time: 176.3460636138916ms\r\n",,terminal_output +1065,1120007,"TERMINAL",0,0,"Step 85, loss: 0.06259559839963913, step time: 175.71210861206055ms\r\n",,terminal_output +1066,1120124,"TERMINAL",0,0,"Step 86, loss: 0.06183755397796631, step time: 176.6047477722168ms\r\n",,terminal_output +1067,1120145,"TERMINAL",0,0,"65",,terminal_output +1068,1120286,"TERMINAL",0,0,"Step 87, loss: 0.06088436394929886, step time: 176.49531364440918ms\r\n",,terminal_output +1069,1120457,"TERMINAL",0,0,"Step 88, loss: 0.060280438512563705, step time: 176.25761032104492ms\r\n",,terminal_output +1070,1120702,"TERMINAL",0,0,"Step 89, loss: 0.059749897569417953, step time: 176.08070373535156ms\r\n",,terminal_output +1071,1121017,"TERMINAL",0,0,"Step 90, loss: 0.059278443455696106, step time: 175.54426193237305ms\r\n",,terminal_output +1072,1121305,"TERMINAL",0,0,"Step 91, loss: 0.05879035219550133, step time: 176.0549545288086ms\r\n",,terminal_output +1073,1121475,"TERMINAL",0,0,"76",,terminal_output +1074,1121494,"TERMINAL",0,0,"Step 92, loss: 0.0587039589881897, step time: 176.27882957458496ms\r\nStep 93, loss: 0.058253005146980286, step time: 176.35655403137207ms\r\n",,terminal_output +1075,1121549,"TERMINAL",0,0,"Step 94, loss: 0.057881515473127365, step time: 176.38635635375977ms\r\n",,terminal_output +1076,1121707,"TERMINAL",0,0,"Step 95, loss: 
0.05756949633359909, step time: 175.6420135498047ms\r\n",,terminal_output +1077,1121914,"TERMINAL",0,0,"Step 96, loss: 0.057405710220336914, step time: 176.16891860961914ms\r\n",,terminal_output +1078,1122189,"TERMINAL",0,0,"Step 97, loss: 0.05735847353935242, step time: 176.0387420654297ms\r\n",,terminal_output +1079,1122457,"TERMINAL",0,0,"87",,terminal_output +1080,1122460,"TERMINAL",0,0,"Step 98, loss: 0.05750742554664612, step time: 176.2840747833252ms\r\n",,terminal_output +1081,1122760,"TERMINAL",0,0,"Step 99, loss: 0.057885438203811646, step time: 184.22293663024902ms\r\n",,terminal_output +1082,1123272,"TERMINAL",0,0,"98",,terminal_output +1083,1123542,"TERMINAL",0,0,"Step 100, loss: 0.05774573236703873, step time: 178.02667617797852ms\r\n",,terminal_output +1084,1123783,"TERMINAL",0,0,"Step 101, loss: 0.057680703699588776, step time: 177.04463005065918ms\r\n",,terminal_output +1085,1124092,"TERMINAL",0,0,"Step 102, loss: 0.05789361894130707, step time: 177.61635780334473ms\r\n",,terminal_output +1086,1124417,"TERMINAL",0,0,"Step 103, loss: 0.05774941295385361, step time: 177.32572555541992ms\r\n",,terminal_output +1087,1124628,"TERMINAL",0,0,"Step 104, loss: 0.05770348012447357, step time: 177.22725868225098ms\r\n",,terminal_output +1088,1124752,"TERMINAL",0,0,"509",,terminal_output +1089,1124757,"TERMINAL",0,0,"Step 105, loss: 0.05775327980518341, step time: 176.30505561828613ms\r\n",,terminal_output +1090,1124898,"TERMINAL",0,0,"Step 106, loss: 0.05738389864563942, step time: 177.38819122314453ms\r\n",,terminal_output +1091,1125018,"TERMINAL",0,0,"Step 107, loss: 0.05697479471564293, step time: 176.0098934173584ms\r\n",,terminal_output +1092,1125069,"TERMINAL",0,0,"Step 108, loss: 0.05702221021056175, step time: 176.76758766174316ms\r\n",,terminal_output +1093,1125179,"TERMINAL",0,0,"Step 109, loss: 0.05668998882174492, step time: 177.10518836975098ms\r\n",,terminal_output +1094,1125689,"TERMINAL",0,0,"Step 110, loss: 0.0565614253282547, step time: 175.89497566223145ms\r\n",,terminal_output +1095,1125776,"TERMINAL",0,0,"140",,terminal_output +1096,1125986,"TERMINAL",0,0,"Step 111, loss: 0.05673818290233612, step time: 176.33366584777832ms\r\nStep 112, loss: 0.05629785358905792, step time: 176.53656005859375ms\r\nStep 113, loss: 0.056037578731775284, step time: 176.59854888916016ms\r\nStep 114, loss: 0.055849432945251465, step time: 176.7432689666748ms\r\n",,terminal_output +1097,1126158,"TERMINAL",0,0,"Step 115, loss: 0.05564339458942413, step time: 176.05280876159668ms\r\n",,terminal_output +1098,1126433,"TERMINAL",0,0,"Step 116, loss: 0.0554083026945591, step time: 176.34344100952148ms\r\n",,terminal_output +1099,1126504,"TERMINAL",0,0,"21",,terminal_output +1100,1126774,"TERMINAL",0,0,"Step 117, loss: 0.054904110729694366, step time: 176.6371726989746ms\r\n",,terminal_output +1101,1126963,"TERMINAL",0,0,"Step 118, loss: 0.05438864603638649, step time: 176.4850616455078ms\r\n",,terminal_output +1102,1127227,"TERMINAL",0,0,"Step 119, loss: 0.05379579961299896, step time: 176.58615112304688ms\r\nStep 120, loss: 0.05320160463452339, step time: 175.8434772491455ms\r\nStep 121, loss: 0.05257153883576393, step time: 176.15151405334473ms\r\n",,terminal_output +1103,1127415,"TERMINAL",0,0,"Step 122, loss: 0.05205819383263588, step time: 176.32389068603516ms\r\n",,terminal_output +1104,1127500,"TERMINAL",0,0,"32",,terminal_output +1105,1127645,"TERMINAL",0,0,"Step 123, loss: 0.05167492479085922, step time: 176.1457920074463ms\r\n",,terminal_output +1106,1127896,"TERMINAL",0,0,"Step 
124, loss: 0.05193770304322243, step time: 176.5458583831787ms\r\n",,terminal_output +1107,1128108,"TERMINAL",0,0,"Step 125, loss: 0.052667971700429916, step time: 176.37038230895996ms\r\nStep 126, loss: 0.0540897473692894, step time: 176.55658721923828ms\r\n",,terminal_output +1108,1128312,"TERMINAL",0,0,"Step 127, loss: 0.05362989753484726, step time: 176.2838363647461ms\r\n",,terminal_output +1109,1128538,"TERMINAL",0,0,"Step 128, loss: 0.052419163286685944, step time: 176.48911476135254ms\r\n",,terminal_output +1110,1128586,"TERMINAL",0,0,"43",,terminal_output +1111,1128873,"TERMINAL",0,0,"Step 129, loss: 0.05107907950878143, step time: 176.31030082702637ms\r\n",,terminal_output +1112,1128986,"TERMINAL",0,0,"Step 130, loss: 0.05027181655168533, step time: 176.26667022705078ms\r\n",,terminal_output +1113,1129038,"TERMINAL",0,0,"Step 131, loss: 0.049731552600860596, step time: 176.61094665527344ms\r\n",,terminal_output +1114,1129181,"TERMINAL",0,0,"Step 132, loss: 0.049295783042907715, step time: 176.3620376586914ms\r\n",,terminal_output +1115,1129380,"TERMINAL",0,0,"Step 133, loss: 0.04872075468301773, step time: 176.56421661376953ms\r\n",,terminal_output +1116,1129696,"TERMINAL",0,0,"54",,terminal_output +1117,1129697,"TERMINAL",0,0,"Step 134, loss: 0.04789736121892929, step time: 176.4209270477295ms\r\n",,terminal_output +1118,1129923,"TERMINAL",0,0,"Step 135, loss: 0.04730769246816635, step time: 175.72546005249023ms\r\n",,terminal_output +1119,1129982,"TERMINAL",0,0,"Step 136, loss: 0.04662038013339043, step time: 176.497220993042ms\r\n",,terminal_output +1120,1130093,"TERMINAL",0,0,"Step 137, loss: 0.04616506025195122, step time: 175.9648323059082ms\r\n",,terminal_output +1121,1130316,"TERMINAL",0,0,"Step 138, loss: 0.045688845217227936, step time: 176.3606071472168ms\r\n",,terminal_output +1122,1130486,"TERMINAL",0,0,"Step 139, loss: 0.04532141238451004, step time: 176.30481719970703ms\r\n",,terminal_output +1123,1130644,"TERMINAL",0,0,"65",,terminal_output +1124,1130644,"TERMINAL",0,0,"Step 140, loss: 0.0449722521007061, step time: 176.11336708068848ms\r\n",,terminal_output +1125,1130846,"TERMINAL",0,0,"Step 141, loss: 0.044836945831775665, step time: 176.5122413635254ms\r\n",,terminal_output +1126,1131053,"TERMINAL",0,0,"Step 142, loss: 0.04459158703684807, step time: 176.29575729370117ms\r\n",,terminal_output +1127,1131274,"TERMINAL",0,0,"Step 143, loss: 0.04454885050654411, step time: 176.0995388031006ms\r\n",,terminal_output +1128,1131612,"TERMINAL",0,0,"Step 144, loss: 0.044445332139730453, step time: 176.40995979309082ms\r\n",,terminal_output +1129,1131832,"TERMINAL",0,0,"Step 145, loss: 0.044324230402708054, step time: 175.8592128753662ms\r\n",,terminal_output +1130,1131872,"TERMINAL",0,0,"76",,terminal_output +1131,1131917,"TERMINAL",0,0,"Step 146, loss: 0.04433991014957428, step time: 176.24568939208984ms\r\nStep 147, loss: 0.044048015028238297, step time: 176.18274688720703ms\r\n",,terminal_output +1132,1132059,"TERMINAL",0,0,"Step 148, loss: 0.04409484565258026, step time: 176.25856399536133ms\r\n",,terminal_output +1133,1132235,"TERMINAL",0,0,"Step 149, loss: 0.04351648688316345, step time: 176.08094215393066ms\r\n",,terminal_output +1134,1132545,"TERMINAL",0,0,"Step 150, loss: 0.04330090433359146, step time: 176.0234832763672ms\r\n",,terminal_output +1135,1132616,"TERMINAL",0,0,"Step 151, loss: 0.04325666278600693, step time: 176.3937473297119ms\r\n",,terminal_output +1136,1132714,"TERMINAL",0,0,"87",,terminal_output +1137,1132747,"TERMINAL",0,0,"Step 152, loss: 
0.04297052323818207, step time: 176.53393745422363ms\r\n",,terminal_output +1138,1132930,"TERMINAL",0,0,"Step 153, loss: 0.04261509329080582, step time: 176.52511596679688ms\r\n",,terminal_output +1139,1133240,"TERMINAL",0,0,"Step 154, loss: 0.04261152446269989, step time: 176.4073371887207ms\r\n",,terminal_output +1140,1133326,"TERMINAL",0,0,"Step 155, loss: 0.04247554391622543, step time: 175.74238777160645ms\r\n",,terminal_output +1141,1133522,"TERMINAL",0,0,"Step 156, loss: 0.042471520602703094, step time: 176.45692825317383ms\r\n",,terminal_output +1142,1133697,"TERMINAL",0,0,"Step 157, loss: 0.042391337454319, step time: 176.25117301940918ms\r\n",,terminal_output +1143,1133797,"TERMINAL",0,0,"98",,terminal_output +1144,1133952,"TERMINAL",0,0,"Step 158, loss: 0.04249439388513565, step time: 176.4395236968994ms\r\n",,terminal_output +1145,1134197,"TERMINAL",0,0,"Step 159, loss: 0.042769066989421844, step time: 176.38802528381348ms\r\n",,terminal_output +1146,1134430,"TERMINAL",0,0,"Step 160, loss: 0.04294799640774727, step time: 177.6585578918457ms\r\n",,terminal_output +1147,1134519,"TERMINAL",0,0,"Step 161, loss: 0.043109700083732605, step time: 178.18665504455566ms\r\n",,terminal_output +1148,1134599,"TERMINAL",0,0,"Step 162, loss: 0.043095994740724564, step time: 178.1909465789795ms\r\n",,terminal_output +1149,1134746,"TERMINAL",0,0,"Step 163, loss: 0.04317944496870041, step time: 177.99711227416992ms\r\n",,terminal_output +1150,1134842,"TERMINAL",0,0,"6:009",,terminal_output +1151,1134990,"TERMINAL",0,0,"Step 164, loss: 0.04342811554670334, step time: 177.88362503051758ms\r\n",,terminal_output +1152,1135273,"TERMINAL",0,0,"Step 165, loss: 0.043258603662252426, step time: 176.53179168701172ms\r\nStep 166, loss: 0.043432075530290604, step time: 176.53775215148926ms\r\n",,terminal_output +1153,1135446,"TERMINAL",0,0,"Step 167, loss: 0.04339979961514473, step time: 176.5458583831787ms\r\n",,terminal_output +1154,1135659,"TERMINAL",0,0,"Step 168, loss: 0.0435456857085228, step time: 176.8786907196045ms\r\n",,terminal_output +1155,1135946,"TERMINAL",0,0,"Step 169, loss: 0.04357048124074936, step time: 176.2096881866455ms\r\n",,terminal_output +1156,1135951,"TERMINAL",0,0,"150",,terminal_output +1157,1135996,"TERMINAL",0,0,"Step 170, loss: 0.04347694292664528, step time: 176.0842800140381ms\r\n",,terminal_output +1158,1136275,"TERMINAL",0,0,"Step 171, loss: 0.043308962136507034, step time: 176.2862205505371ms\r\n",,terminal_output +1159,1136737,"TERMINAL",0,0,"Step 172, loss: 0.04309915006160736, step time: 176.49269104003906ms\r\nStep 173, loss: 0.04309350624680519, step time: 176.16677284240723ms\r\nStep 174, loss: 0.04272361472249031, step time: 177.2937774658203ms\r\n",,terminal_output +1160,1137123,"TERMINAL",0,0,"Step 175, loss: 0.04291016608476639, step time: 176.0849952697754ms\r\n",,terminal_output +1161,1137123,"TERMINAL",0,0,"21",,terminal_output +1162,1137123,"TERMINAL",0,0,"Step 176, loss: 0.04243340343236923, step time: 176.53465270996094ms\r\n",,terminal_output +1163,1137315,"TERMINAL",0,0,"Step 177, loss: 0.04237191379070282, step time: 176.08332633972168ms\r\n",,terminal_output +1164,1137696,"TERMINAL",0,0,"Step 178, loss: 0.042027197778224945, step time: 176.2256622314453ms\r\n",,terminal_output +1165,1137908,"TERMINAL",0,0,"Step 179, loss: 0.04190116375684738, step time: 176.4242649078369ms\r\n",,terminal_output +1166,1137909,"TERMINAL",0,0,"Step 180, loss: 0.04152737930417061, step time: 175.96149444580078ms\r\n",,terminal_output 
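The step log above shows the canonical JAX profile: Step 0 takes roughly 60.4 s because the jitted train step is compiled on first use, after which every step settles near 176 ms. A minimal sketch of a loop that would emit this kind of "Step N, loss: ..., step time: ...ms" line (the names train_step, params, and batch are illustrative assumptions, not the actual train_tokenizer.py API):

    import time
    import jax
    import jax.numpy as jnp

    @jax.jit  # compiled on the first call, which is why Step 0 dwarfs the later steps
    def train_step(params, batch):
        loss_fn = lambda p: jnp.mean((p - batch) ** 2)  # placeholder loss
        loss, grads = jax.value_and_grad(loss_fn)(params)
        return params - 1e-2 * grads, loss

    params, batch = jnp.zeros((4,)), jnp.ones((4,))
    for step in range(5):
        t0 = time.time()
        params, loss = train_step(params, batch)
        jax.block_until_ready(loss)  # wait for the device so the timing is honest
        print(f"Step {step}, loss: {loss}, step time: {(time.time() - t0) * 1000}ms")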
+1167,1138035,"TERMINAL",0,0,"32",,terminal_output +1168,1138057,"TERMINAL",0,0,"Step 181, loss: 0.04178933426737785, step time: 176.66196823120117ms\r\n",,terminal_output +1169,1138212,"TERMINAL",0,0,"Step 182, loss: 0.041615452617406845, step time: 175.32801628112793ms\r\n",,terminal_output +1170,1138306,"TERMINAL",0,0,"Step 183, loss: 0.041750840842723846, step time: 174.68500137329102ms\r\n",,terminal_output +1171,1138499,"TERMINAL",0,0,"Step 184, loss: 0.04167359694838524, step time: 174.7758388519287ms\r\n",,terminal_output +1172,1138687,"TERMINAL",0,0,"Step 185, loss: 0.041587650775909424, step time: 175.10724067687988ms\r\n",,terminal_output +1173,1138913,"TERMINAL",0,0,"Step 186, loss: 0.04161107912659645, step time: 174.56674575805664ms\r\n",,terminal_output +1174,1139163,"TERMINAL",0,0,"43",,terminal_output +1175,1139193,"TERMINAL",0,0,"Step 187, loss: 0.04179903492331505, step time: 174.71957206726074ms\r\n",,terminal_output +1176,1139468,"TERMINAL",0,0,"Step 188, loss: 0.041835859417915344, step time: 175.18043518066406ms\r\n",,terminal_output +1177,1139967,"TERMINAL",0,0,"Step 189, loss: 0.04137205705046654, step time: 174.51906204223633ms\r\n",,terminal_output +1178,1140117,"TERMINAL",0,0,"Step 190, loss: 0.04181048274040222, step time: 174.47662353515625ms\r\n",,terminal_output +1179,1140214,"TERMINAL",0,0,"Step 191, loss: 0.041652802377939224, step time: 174.95417594909668ms\r\nStep 192, loss: 0.04126514121890068, step time: 174.699068069458ms\r\n",,terminal_output +1180,1140268,"TERMINAL",0,0,"54",,terminal_output +1181,1140283,"TERMINAL",0,0,"Step 193, loss: 0.04120107740163803, step time: 174.6084690093994ms\r\nStep 194, loss: 0.04107694327831268, step time: 174.86190795898438ms\r\n",,terminal_output +1182,1140433,"TERMINAL",0,0,"Step 195, loss: 0.04132668673992157, step time: 174.760103225708ms\r\n",,terminal_output +1183,1140614,"TERMINAL",0,0,"Step 196, loss: 0.04093864560127258, step time: 174.96466636657715ms\r\n",,terminal_output +1184,1140891,"TERMINAL",0,0,"Step 197, loss: 0.04066282510757446, step time: 175.12774467468262ms\r\n",,terminal_output +1185,1141237,"TERMINAL",0,0,"Step 198, loss: 0.040804311633110046, step time: 174.8638153076172ms\r\n",,terminal_output +1186,1141498,"TERMINAL",0,0,"66",,terminal_output +1187,1141566,"TERMINAL",0,0,"Step 199, loss: 0.04054825007915497, step time: 174.58510398864746ms\r\n",,terminal_output +1188,1141709,"TERMINAL",0,0,"Step 200, loss: 0.04064921662211418, step time: 176.01895332336426ms\r\n",,terminal_output +1189,1141785,"TERMINAL",0,0,"Step 201, loss: 0.04070571810007095, step time: 175.9638786315918ms\r\n",,terminal_output +1190,1141862,"TERMINAL",0,0,"Step 202, loss: 0.04020712897181511, step time: 174.8640537261963ms\r\n",,terminal_output +1191,1142033,"TERMINAL",0,0,"Step 203, loss: 0.04001910984516144, step time: 174.7875213623047ms\r\n",,terminal_output +1192,1142291,"TERMINAL",0,0,"Step 204, loss: 0.03987262398004532, step time: 174.6664047241211ms\r\n",,terminal_output +1193,1142291,"TERMINAL",0,0,"87",,terminal_output +1194,1142365,"TERMINAL",0,0,"Step 205, loss: 0.0398213230073452, step time: 175.46415328979492ms\r\n",,terminal_output +1195,1142531,"TERMINAL",0,0,"Step 206, loss: 0.03978770971298218, step time: 174.51000213623047ms\r\n",,terminal_output +1196,1142697,"TERMINAL",0,0,"Step 207, loss: 0.039507023990154266, step time: 174.55267906188965ms\r\n",,terminal_output +1197,1142876,"TERMINAL",0,0,"Step 208, loss: 0.03941311314702034, step time: 175.27294158935547ms\r\n",,terminal_output 
+1198,1143083,"TERMINAL",0,0,"Step 209, loss: 0.039387550204992294, step time: 174.544095993042ms\r\n",,terminal_output +1199,1143267,"TERMINAL",0,0,"98",,terminal_output +1200,1143268,"TERMINAL",0,0,"Step 210, loss: 0.04033172130584717, step time: 174.45969581604004ms\r\n",,terminal_output +1201,1143346,"TERMINAL",0,0,"Step 211, loss: 0.04027415066957474, step time: 175.05574226379395ms\r\n",,terminal_output +1202,1143768,"TERMINAL",0,0,"Step 212, loss: 0.03988594561815262, step time: 174.93391036987305ms\r\n",,terminal_output +1203,1144068,"TERMINAL",0,0,"Step 213, loss: 0.040004514157772064, step time: 174.43227767944336ms\r\n",,terminal_output +1204,1144481,"TERMINAL",0,0,"Step 214, loss: 0.04011611267924309, step time: 174.6821403503418ms\r\n",,terminal_output +1205,1144701,"TERMINAL",0,0,"Step 215, loss: 0.039371978491544724, step time: 174.7748851776123ms\r\n",,terminal_output +1206,1145026,"TERMINAL",0,0,"109",,terminal_output +1207,1145060,"TERMINAL",0,0,"Step 216, loss: 0.04050193354487419, step time: 174.73316192626953ms\r\nStep 217, loss: 0.03938000276684761, step time: 176.438570022583ms\r\n",,terminal_output +1208,1145117,"TERMINAL",0,0,"Step 218, loss: 0.03949068486690521, step time: 176.81121826171875ms\r\n",,terminal_output +1209,1145379,"TERMINAL",0,0,"Step 219, loss: 0.03960271179676056, step time: 176.21254920959473ms\r\nStep 220, loss: 0.0396963432431221, step time: 175.8286952972412ms\r\n",,terminal_output +1210,1145427,"TERMINAL",0,0,"16:00",,terminal_output +1211,1145516,"TERMINAL",0,0,"Step 221, loss: 0.03917435184121132, step time: 176.78022384643555ms\r\nStep 222, loss: 0.03895251080393791, step time: 176.45955085754395ms\r\nStep 223, loss: 0.03934742510318756, step time: 177.3972511291504ms\r\n",,terminal_output +1212,1145667,"TERMINAL",0,0,"Step 224, loss: 0.03951779007911682, step time: 176.76401138305664ms\r\n",,terminal_output +1213,1145832,"TERMINAL",0,0,"Step 225, loss: 0.03894076123833656, step time: 176.09333992004395ms\r\n",,terminal_output +1214,1146022,"TERMINAL",0,0,"Step 226, loss: 0.03872039169073105, step time: 176.40209197998047ms\r\n",,terminal_output +1215,1146242,"TERMINAL",0,0,"Step 227, loss: 0.03863447532057762, step time: 176.14483833312988ms\r\n",,terminal_output +1216,1146378,"TERMINAL",0,0,"21",,terminal_output +1217,1146474,"TERMINAL",0,0,"Step 228, loss: 0.03872453421354294, step time: 176.33700370788574ms\r\n",,terminal_output +1218,1146743,"TERMINAL",0,0,"Step 229, loss: 0.03858088701963425, step time: 176.32532119750977ms\r\n",,terminal_output +1219,1147016,"TERMINAL",0,0,"Step 230, loss: 0.038399968296289444, step time: 177.3054599761963ms\r\n",,terminal_output +1220,1147349,"TERMINAL",0,0,"Step 231, loss: 0.038291022181510925, step time: 176.74851417541504ms\r\n",,terminal_output +1221,1147674,"TERMINAL",0,0,"Step 232, loss: 0.038084760308265686, step time: 176.8949031829834ms\r\n",,terminal_output +1222,1147839,"TERMINAL",0,0,"Step 233, loss: 0.03831220045685768, step time: 176.45502090454102ms\r\n",,terminal_output +1223,1147894,"TERMINAL",0,0,"32",,terminal_output +1224,1147952,"TERMINAL",0,0,"Step 234, loss: 0.03822682052850723, step time: 176.7575740814209ms\r\nStep 235, loss: 0.03798821568489075, step time: 177.05607414245605ms\r\n",,terminal_output +1225,1148012,"TERMINAL",0,0,"Step 236, loss: 0.03772532567381859, step time: 177.1249771118164ms\r\nStep 237, loss: 0.037198059260845184, step time: 177.42490768432617ms\r\n",,terminal_output +1226,1148154,"TERMINAL",0,0,"Step 238, loss: 0.03647364303469658, step time: 
177.2482395172119ms\r\n",,terminal_output +1227,1148361,"TERMINAL",0,0,"Step 239, loss: 0.035684984177351, step time: 176.67460441589355ms\r\n",,terminal_output +1228,1148426,"TERMINAL",0,0,"43",,terminal_output +1229,1148598,"TERMINAL",0,0,"Step 240, loss: 0.03581337258219719, step time: 176.74994468688965ms\r\n",,terminal_output +1230,1148897,"TERMINAL",0,0,"Step 241, loss: 0.03635523468255997, step time: 176.6643524169922ms\r\n",,terminal_output +1231,1149391,"TERMINAL",0,0,"Step 242, loss: 0.035823218524456024, step time: 176.85317993164062ms\r\n",,terminal_output +1232,1149548,"TERMINAL",0,0,"Step 243, loss: 0.03544267266988754, step time: 176.69939994812012ms\r\n",,terminal_output +1233,1149655,"TERMINAL",0,0,"Step 244, loss: 0.03557848930358887, step time: 176.80668830871582ms\r\nStep 245, loss: 0.035736117511987686, step time: 175.93932151794434ms\r\n",,terminal_output +1234,1149673,"TERMINAL",0,0,"54",,terminal_output +1235,1149718,"TERMINAL",0,0,"Step 246, loss: 0.03590724989771843, step time: 176.4979362487793ms\r\n",,terminal_output +1236,1149813,"TERMINAL",0,0,"Step 247, loss: 0.03548513725399971, step time: 177.0181655883789ms\r\n",,terminal_output +1237,1149988,"TERMINAL",0,0,"Step 248, loss: 0.03526565805077553, step time: 176.55444145202637ms\r\n",,terminal_output +1238,1150188,"TERMINAL",0,0,"Step 249, loss: 0.03510245308279991, step time: 176.41830444335938ms\r\n",,terminal_output +1239,1150450,"TERMINAL",0,0,"Step 250, loss: 0.03501562029123306, step time: 176.13935470581055ms\r\n",,terminal_output +1240,1150820,"TERMINAL",0,0,"65",,terminal_output +1241,1150850,"TERMINAL",0,0,"Step 251, loss: 0.03507089242339134, step time: 176.35440826416016ms\r\n",,terminal_output +1242,1151085,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab +1243,1151125,"TERMINAL",0,0,"Step 252, loss: 0.034611061215400696, step time: 176.49006843566895ms\r\n",,terminal_output +1244,1151190,"TERMINAL",0,0,"Step 253, loss: 0.03444712609052658, step time: 176.37062072753906ms\r\nStep 254, loss: 0.03394879400730133, step time: 176.42831802368164ms\r\n",,terminal_output +1245,1151333,"TERMINAL",0,0,"Step 255, loss: 0.033820874989032745, step time: 175.98295211791992ms\r\n",,terminal_output +1246,1151430,"TERMINAL",0,0,"Step 256, loss: 0.033662982285022736, step time: 176.76782608032227ms\r\n",,terminal_output +1247,1151665,"TERMINAL",0,0,"76",,terminal_output +1248,1151682,"TERMINAL",0,0,"Step 257, loss: 0.03341974318027496, step time: 176.35440826416016ms\r\n",,terminal_output +1249,1152040,"TERMINAL",0,0,"Step 258, loss: 0.033348310738801956, step time: 176.47123336791992ms\r\n",,terminal_output +1250,1152072,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1025,0,"",shellscript,selection_mouse +1251,1152396,"TERMINAL",0,0,"Step 259, loss: 0.033450137823820114, step time: 176.2101650238037ms\r\n",,terminal_output +1252,1152553,"TERMINAL",0,0,"Step 260, loss: 0.03448721393942833, step time: 176.3174533843994ms\r\nStep 261, loss: 0.033755332231521606, step time: 176.77950859069824ms\r\n",,terminal_output +1253,1152653,"TERMINAL",0,0,"87",,terminal_output +1254,1152653,"TERMINAL",0,0,"Step 262, loss: 0.0335431769490242, step time: 176.3594150543213ms\r\nStep 263, loss: 0.035149332135915756, step time: 176.32079124450684ms\r\n",,terminal_output +1255,1152705,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1216,0,"",shellscript,selection_mouse +1256,1152795,"TERMINAL",0,0,"Step 264, loss: 
0.033425670117139816, step time: 176.4204502105713ms\r\n",,terminal_output +1257,1152983,"TERMINAL",0,0,"Step 265, loss: 0.033679597079753876, step time: 176.01561546325684ms\r\n",,terminal_output +1258,1153200,"TERMINAL",0,0,"Step 266, loss: 0.033478446304798126, step time: 176.5589714050293ms\r\n",,terminal_output +1259,1153420,"TERMINAL",0,0,"Step 267, loss: 0.03405001387000084, step time: 176.38278007507324ms\r\n",,terminal_output +1260,1153925,"TERMINAL",0,0,"Step 268, loss: 0.0329088494181633, step time: 176.41401290893555ms\r\n",,terminal_output +1261,1154044,"TERMINAL",0,0,"98",,terminal_output +1262,1154111,"TERMINAL",0,0,"Step 269, loss: 0.03311232477426529, step time: 176.29718780517578ms\r\n",,terminal_output +1263,1154253,"TERMINAL",0,0,"Step 270, loss: 0.03279019147157669, step time: 176.14197731018066ms\r\nStep 271, loss: 0.03351588919758797, step time: 178.09772491455078ms\r\nStep 272, loss: 0.03246379643678665, step time: 176.7728328704834ms\r\n",,terminal_output +1264,1154441,"TERMINAL",0,0,"Step 273, loss: 0.032907165586948395, step time: 176.34105682373047ms\r\n",,terminal_output +1265,1154583,"TERMINAL",0,0,"Step 274, loss: 0.032622937113046646, step time: 176.73611640930176ms\r\n",,terminal_output +1266,1154677,"TERMINAL",0,0,"209",,terminal_output +1267,1154772,"TERMINAL",0,0,"Step 275, loss: 0.033427681773900986, step time: 175.9483814239502ms\r\n",,terminal_output +1268,1155003,"TERMINAL",0,0,"Step 276, loss: 0.03262808173894882, step time: 176.513671875ms\r\n",,terminal_output +1269,1155121,"TERMINAL",0,0,"Step 277, loss: 0.03259911388158798, step time: 176.36895179748535ms\r\n",,terminal_output +1270,1155308,"TERMINAL",0,0,"Step 278, loss: 0.03238487243652344, step time: 176.60903930664062ms\r\n",,terminal_output +1271,1155588,"TERMINAL",0,0,"Step 279, loss: 0.03293538838624954, step time: 176.50318145751953ms\r\n",,terminal_output +1272,1155696,"TERMINAL",0,0,"Step 280, loss: 0.03277508169412613, step time: 176.6953468322754ms\r\n",,terminal_output +1273,1155702,"TERMINAL",0,0,"110",,terminal_output +1274,1156000,"TERMINAL",0,0,"Step 281, loss: 0.032379720360040665, step time: 176.6200065612793ms\r\n",,terminal_output +1275,1156341,"TERMINAL",0,0,"Step 282, loss: 0.03234321251511574, step time: 176.8357753753662ms\r\n",,terminal_output +1276,1156582,"TERMINAL",0,0,"Step 283, loss: 0.03237415850162506, step time: 176.24568939208984ms\r\n",,terminal_output +1277,1156698,"TERMINAL",0,0,"Step 284, loss: 0.032687924802303314, step time: 177.19459533691406ms\r\n",,terminal_output +1278,1156843,"TERMINAL",0,0,"Step 285, loss: 0.03247038647532463, step time: 296.04387283325195ms\r\n",,terminal_output +1279,1156933,"TERMINAL",0,0,"21",,terminal_output +1280,1156933,"TERMINAL",0,0,"Step 286, loss: 0.03206229954957962, step time: 176.70774459838867ms\r\n",,terminal_output +1281,1157132,"TERMINAL",0,0,"Step 287, loss: 0.03237137570977211, step time: 176.50461196899414ms\r\n",,terminal_output +1282,1157344,"TERMINAL",0,0,"Step 288, loss: 0.03216230496764183, step time: 176.38421058654785ms\r\n",,terminal_output +1283,1157545,"TERMINAL",0,0,"Step 289, loss: 0.032340068370103836, step time: 176.05185508728027ms\r\n",,terminal_output +1284,1157583,"TERMINAL",0,0,"Step 290, loss: 0.031753700226545334, step time: 176.24235153198242ms\r\n",,terminal_output +1285,1157866,"TERMINAL",0,0,"Step 291, loss: 0.03185480460524559, step time: 176.76210403442383ms\r\n",,terminal_output +1286,1157868,"TERMINAL",0,0,"32",,terminal_output +1287,1158152,"TERMINAL",0,0,"Step 292, loss: 
0.03168875724077225, step time: 177.05035209655762ms\r\n",,terminal_output +1288,1158250,"TERMINAL",0,0,"Step 293, loss: 0.03158101812005043, step time: 176.42498016357422ms\r\n",,terminal_output +1289,1158469,"TERMINAL",0,0,"Step 294, loss: 0.03170286864042282, step time: 176.44810676574707ms\r\n",,terminal_output +1290,1158820,"TERMINAL",0,0,"Step 295, loss: 0.03154695779085159, step time: 176.07951164245605ms\r\n",,terminal_output +1291,1158917,"TERMINAL",0,0,"Step 296, loss: 0.03158295154571533, step time: 176.4826774597168ms\r\n",,terminal_output +1292,1158973,"TERMINAL",0,0,"43",,terminal_output +1293,1158989,"TERMINAL",0,0,"Step 297, loss: 0.03138601407408714, step time: 176.67245864868164ms\r\n",,terminal_output +1294,1159052,"TERMINAL",0,0,"Step 298, loss: 0.031488124281167984, step time: 176.7578125ms\r\n",,terminal_output +1295,1159253,"TERMINAL",0,0,"Step 299, loss: 0.03143727034330368, step time: 176.47099494934082ms\r\n",,terminal_output +1296,1159524,"TERMINAL",0,0,"Step 300, loss: 0.03140614554286003, step time: 178.53879928588867ms\r\n",,terminal_output +1297,1159700,"TERMINAL",0,0,"Step 301, loss: 0.03126083314418793, step time: 177.37960815429688ms\r\n",,terminal_output +1298,1159869,"TERMINAL",0,0,"Step 302, loss: 0.03129999712109566, step time: 176.3589382171631ms\r\n",,terminal_output +1299,1159876,"TERMINAL",0,0,"54",,terminal_output +1300,1160021,"TERMINAL",0,0,"Step 303, loss: 0.03141164407134056, step time: 176.27835273742676ms\r\n",,terminal_output +1301,1160344,"TERMINAL",0,0,"Step 304, loss: 0.03113589994609356, step time: 176.44071578979492ms\r\n",,terminal_output +1302,1160611,"TERMINAL",0,0,"Step 305, loss: 0.031039975583553314, step time: 175.97150802612305ms\r\n",,terminal_output +1303,1160844,"TERMINAL",0,0,"Step 306, loss: 0.031163059175014496, step time: 176.36752128601074ms\r\n",,terminal_output +1304,1161119,"TERMINAL",0,0,"Step 307, loss: 0.030971957370638847, step time: 176.35846138000488ms\r\n",,terminal_output +1305,1161246,"TERMINAL",0,0,"65",,terminal_output +1306,1161303,"TERMINAL",0,0,"Step 308, loss: 0.031104084104299545, step time: 176.33652687072754ms\r\nStep 309, loss: 0.030965279787778854, step time: 176.38301849365234ms\r\nStep 310, loss: 0.030898509547114372, step time: 176.14102363586426ms\r\n",,terminal_output +1307,1161434,"TERMINAL",0,0,"Step 311, loss: 0.031535279005765915, step time: 176.4090061187744ms\r\n",,terminal_output +1308,1161634,"TERMINAL",0,0,"Step 312, loss: 0.03088461235165596, step time: 176.56970024108887ms\r\n",,terminal_output +1309,1161796,"TERMINAL",0,0,"Step 313, loss: 0.03067302703857422, step time: 176.3131618499756ms\r\n",,terminal_output +1310,1161935,"TERMINAL",0,0,"76",,terminal_output +1311,1161999,"TERMINAL",0,0,"Step 314, loss: 0.031512998044490814, step time: 176.61499977111816ms\r\n",,terminal_output +1312,1162241,"TERMINAL",0,0,"Step 315, loss: 0.03069738857448101, step time: 175.7516860961914ms\r\n",,terminal_output +1313,1162387,"TERMINAL",0,0,"Step 316, loss: 0.030641045421361923, step time: 176.4376163482666ms\r\n",,terminal_output +1314,1162593,"TERMINAL",0,0,"Step 317, loss: 0.030918054282665253, step time: 176.344633102417ms\r\n",,terminal_output +1315,1162724,"TERMINAL",0,0,"Step 318, loss: 0.03043203055858612, step time: 176.4845848083496ms\r\n",,terminal_output +1316,1162894,"TERMINAL",0,0,"Step 319, loss: 0.03060261160135269, step time: 176.3286590576172ms\r\n",,terminal_output +1317,1162973,"TERMINAL",0,0,"87",,terminal_output +1318,1163073,"TERMINAL",0,0,"Step 320, loss: 
0.030279560014605522, step time: 176.15723609924316ms\r\n",,terminal_output +1319,1163221,"TERMINAL",0,0,"Step 321, loss: 0.030873022973537445, step time: 176.2869358062744ms\r\n",,terminal_output +1320,1163866,"TERMINAL",0,0,"Step 322, loss: 0.03013775497674942, step time: 176.39946937561035ms\r\n",,terminal_output +1321,1163999,"TERMINAL",0,0,"Step 323, loss: 0.030293110758066177, step time: 176.2521266937256ms\r\nStep 324, loss: 0.030277017503976822, step time: 176.37324333190918ms\r\nStep 325, loss: 0.030467061325907707, step time: 176.08261108398438ms\r\n",,terminal_output +1322,1164063,"TERMINAL",0,0,"98",,terminal_output +1323,1164177,"TERMINAL",0,0,"Step 326, loss: 0.03014976903796196, step time: 178.09820175170898ms\r\n",,terminal_output +1324,1164319,"TERMINAL",0,0,"Step 327, loss: 0.029986228793859482, step time: 177.57940292358398ms\r\n",,terminal_output +1325,1164521,"TERMINAL",0,0,"Step 328, loss: 0.030116138979792595, step time: 181.4863681793213ms\r\n",,terminal_output +1326,1164662,"TERMINAL",0,0,"Step 329, loss: 0.030010031536221504, step time: 178.0991554260254ms\r\n",,terminal_output +1327,1164890,"TERMINAL",0,0,"Step 330, loss: 0.029764581471681595, step time: 177.67667770385742ms\r\n",,terminal_output +1328,1165193,"TERMINAL",0,0,"Step 331, loss: 0.02994643524289131, step time: 177.25038528442383ms\r\n",,terminal_output +1329,1165193,"TERMINAL",0,0,"3020",,terminal_output +1330,1165400,"TERMINAL",0,0,"Step 332, loss: 0.02979125641286373, step time: 177.06298828125ms\r\n",,terminal_output +1331,1165493,"TERMINAL",0,0,"Step 333, loss: 0.029617097228765488, step time: 176.44095420837402ms\r\n",,terminal_output +1332,1165626,"TERMINAL",0,0,"Step 334, loss: 0.02953311800956726, step time: 176.74493789672852ms\r\n",,terminal_output +1333,1165770,"TERMINAL",0,0,"Step 335, loss: 0.02966519445180893, step time: 176.2409210205078ms\r\n",,terminal_output +1334,1165913,"TERMINAL",0,0,"Step 336, loss: 0.029331130906939507, step time: 176.61237716674805ms\r\n",,terminal_output +1335,1166088,"TERMINAL",0,0,"Step 337, loss: 0.02943793497979641, step time: 176.56946182250977ms\r\n",,terminal_output +1336,1166104,"TERMINAL",0,0,"21",,terminal_output +1337,1166264,"TERMINAL",0,0,"Step 338, loss: 0.029571348801255226, step time: 176.82743072509766ms\r\n",,terminal_output +1338,1166529,"TERMINAL",0,0,"Step 339, loss: 0.029308238998055458, step time: 176.36537551879883ms\r\n",,terminal_output +1339,1166860,"TERMINAL",0,0,"Step 340, loss: 0.029208358377218246, step time: 176.58638954162598ms\r\nStep 341, loss: 0.029464077204465866, step time: 176.51009559631348ms\r\n",,terminal_output +1340,1167113,"TERMINAL",0,0,"Step 342, loss: 0.02913069725036621, step time: 176.81884765625ms\r\n",,terminal_output +1341,1167209,"TERMINAL",0,0,"32",,terminal_output +1342,1167221,"TERMINAL",0,0,"Step 343, loss: 0.029079051688313484, step time: 176.62525177001953ms\r\n",,terminal_output +1343,1167404,"TERMINAL",0,0,"Step 344, loss: 0.029546314850449562, step time: 176.58042907714844ms\r\n",,terminal_output +1344,1167594,"TERMINAL",0,0,"Step 345, loss: 0.02903992123901844, step time: 176.02181434631348ms\r\n",,terminal_output +1345,1167758,"TERMINAL",0,0,"Step 346, loss: 0.02893538773059845, step time: 176.8045425415039ms\r\n",,terminal_output +1346,1168002,"TERMINAL",0,0,"Step 347, loss: 0.0291603934019804, step time: 176.4214038848877ms\r\n",,terminal_output +1347,1168369,"TERMINAL",0,0,"Step 348, loss: 0.028926163911819458, step time: 177.08230018615723ms\r\n",,terminal_output 
+1348,1168429,"TERMINAL",0,0,"43",,terminal_output +1349,1168430,"TERMINAL",0,0,"Step 349, loss: 0.02880910411477089, step time: 176.48768424987793ms\r\n",,terminal_output +1350,1168483,"TERMINAL",0,0,"Step 350, loss: 0.028964484110474586, step time: 176.12648010253906ms\r\n",,terminal_output +1351,1168590,"TERMINAL",0,0,"Step 351, loss: 0.0287709329277277, step time: 176.3467788696289ms\r\n",,terminal_output +1352,1168820,"TERMINAL",0,0,"Step 352, loss: 0.028727971017360687, step time: 176.49507522583008ms\r\n",,terminal_output +1353,1169010,"TERMINAL",0,0,"Step 353, loss: 0.028820332139730453, step time: 176.79977416992188ms\r\n",,terminal_output +1354,1169397,"TERMINAL",0,0,"54",,terminal_output +1355,1169406,"TERMINAL",0,0,"Step 354, loss: 0.028667869046330452, step time: 176.81360244750977ms\r\nStep 355, loss: 0.028697045519948006, step time: 176.09238624572754ms\r\n",,terminal_output +1356,1169486,"TERMINAL",0,0,"Step 356, loss: 0.02858535759150982, step time: 176.5754222869873ms\r\n",,terminal_output +1357,1169660,"TERMINAL",0,0,"Step 357, loss: 0.028393715620040894, step time: 176.4686107635498ms\r\n",,terminal_output +1358,1169844,"TERMINAL",0,0,"Step 358, loss: 0.028841663151979446, step time: 176.73563957214355ms\r\n",,terminal_output +1359,1170023,"TERMINAL",0,0,"Step 359, loss: 0.028626158833503723, step time: 176.55420303344727ms\r\n",,terminal_output +1360,1170208,"TERMINAL",0,0,"Step 360, loss: 0.028276506811380386, step time: 176.2537956237793ms\r\n",,terminal_output +1361,1170313,"TERMINAL",0,0,"65",,terminal_output +1362,1170424,"TERMINAL",0,0,"Step 361, loss: 0.02875973843038082, step time: 176.3741970062256ms\r\n",,terminal_output +1363,1170566,"TERMINAL",0,0,"Step 362, loss: 0.028405044227838516, step time: 176.58567428588867ms\r\n",,terminal_output +1364,1170799,"TERMINAL",0,0,"Step 363, loss: 0.028444552794098854, step time: 176.73707008361816ms\r\n",,terminal_output +1365,1170950,"TERMINAL",0,0,"Step 364, loss: 0.028371674939990044, step time: 176.56230926513672ms\r\n",,terminal_output +1366,1171137,"TERMINAL",0,0,"Step 365, loss: 0.02857821248471737, step time: 176.08189582824707ms\r\n",,terminal_output +1367,1171324,"TERMINAL",0,0,"Step 366, loss: 0.028105318546295166, step time: 176.4230728149414ms\r\n",,terminal_output +1368,1171366,"TERMINAL",0,0,"76",,terminal_output +1369,1171481,"TERMINAL",0,0,"Step 367, loss: 0.028348183259367943, step time: 176.34344100952148ms\r\n",,terminal_output +1370,1171771,"TERMINAL",0,0,"Step 368, loss: 0.028110872954130173, step time: 176.6664981842041ms\r\n",,terminal_output +1371,1172005,"TERMINAL",0,0,"Step 369, loss: 0.027938362210989, step time: 176.41639709472656ms\r\n",,terminal_output +1372,1172152,"TERMINAL",0,0,"Step 370, loss: 0.028404945507645607, step time: 176.02014541625977ms\r\n",,terminal_output +1373,1172407,"TERMINAL",0,0,"Step 371, loss: 0.028040066361427307, step time: 176.4695644378662ms\r\n",,terminal_output +1374,1172715,"TERMINAL",0,0,"87",,terminal_output +1375,1172747,"TERMINAL",0,0,"Step 372, loss: 0.028053948655724525, step time: 176.58734321594238ms\r\nStep 373, loss: 0.027894360944628716, step time: 176.5151023864746ms\r\nStep 374, loss: 0.028107739984989166, step time: 176.48792266845703ms\r\n",,terminal_output +1376,1172883,"TERMINAL",0,0,"Step 375, loss: 0.02781219221651554, step time: 175.98581314086914ms\r\n",,terminal_output +1377,1173070,"TERMINAL",0,0,"Step 376, loss: 0.027857379987835884, step time: 176.49555206298828ms\r\n",,terminal_output +1378,1173263,"TERMINAL",0,0,"Step 377, loss: 
0.028091998770833015, step time: 176.6676902770996ms\r\n",,terminal_output +1379,1173482,"TERMINAL",0,0,"Step 378, loss: 0.027665769681334496, step time: 176.44953727722168ms\r\n",,terminal_output +1380,1173483,"TERMINAL",0,0,"98",,terminal_output +1381,1173666,"TERMINAL",0,0,"Step 379, loss: 0.02745053544640541, step time: 176.40948295593262ms\r\n",,terminal_output +1382,1173913,"TERMINAL",0,0,"Step 380, loss: 0.027916010469198227, step time: 176.8810749053955ms\r\n",,terminal_output +1383,1174021,"TERMINAL",0,0,"Step 381, loss: 0.027723003178834915, step time: 176.8929958343506ms\r\n",,terminal_output +1384,1174182,"TERMINAL",0,0,"Step 382, loss: 0.027486441656947136, step time: 178.18522453308105ms\r\n",,terminal_output +1385,1174388,"TERMINAL",0,0,"Step 383, loss: 0.027656180784106255, step time: 176.74756050109863ms\r\n",,terminal_output +1386,1174507,"TERMINAL",0,0,"409",,terminal_output +1387,1174514,"TERMINAL",0,0,"Step 384, loss: 0.027606580406427383, step time: 176.8789291381836ms\r\n",,terminal_output +1388,1174918,"TERMINAL",0,0,"Step 385, loss: 0.02748056687414646, step time: 176.48911476135254ms\r\n",,terminal_output +1389,1175140,"TERMINAL",0,0,"Step 386, loss: 0.0274663008749485, step time: 176.90134048461914ms\r\n",,terminal_output +1390,1175263,"TERMINAL",0,0,"Step 387, loss: 0.027559010311961174, step time: 176.3918399810791ms\r\nStep 388, loss: 0.02768154814839363, step time: 176.58090591430664ms\r\n",,terminal_output +1391,1175484,"TERMINAL",0,0,"Step 389, loss: 0.027176247909665108, step time: 176.436185836792ms\r\n",,terminal_output +1392,1175629,"TERMINAL",0,0,"130",,terminal_output +1393,1175629,"TERMINAL",0,0,"Step 390, loss: 0.02758224867284298, step time: 176.21493339538574ms\r\n",,terminal_output +1394,1175860,"TERMINAL",0,0,"^CTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 232, in <module>\r\n jax.block_until_ready(loss)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/api.py"", line 3117, in block_until_ready\r\n try_to_block(arrays[0])\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/api.py"", line 3100, in try_to_block\r\n return x.block_until_ready()\r\nKeyboardInterrupt\r\n",,terminal_output +1395,1176104,"TERMINAL",0,0,"^CException ignored in atexit callback: .teardown_atexit at 0x15155014f400>\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/service_connection.py"", line 94, in teardown_atexit\r\n conn.teardown(hooks.exit_code)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/service_connection.py"", line 226, in teardown\r\n self._router.join()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/interface/router.py"", line 75, in join\r\n self._thread.join()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/threading.py"", line 1096, in join\r\n self._wait_for_tstate_lock()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/threading.py"", line 1116, in _wait_for_tstate_lock\r\n if lock.acquire(block, timeout):\r\nKeyboardInterrupt: 
\r\n",,terminal_output +1396,1176203,"TERMINAL",0,0,"^CException ignored in: <function WeakKeyDictionary.__init__.<locals>.remove at 0x1515ac98a680>\r\nTraceback (most recent call last):\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/weakref.py"", line 370, in remove\r\n def remove(k, selfref=ref(self)):\r\nKeyboardInterrupt: \r\n",,terminal_output +1397,1176662,"TERMINAL",0,0,"21",,terminal_output +1398,1176790,"TERMINAL",0,0,"^C",,terminal_output +1399,1176970,"TERMINAL",0,0,"^C",,terminal_output +1400,1177139,"TERMINAL",0,0,"\r\n]0;tum_cte0515@hkn0507:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0507 jafar]$ ",,terminal_output +1401,1177739,"TERMINAL",0,0,"32",,terminal_output +1402,1177930,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab +1403,1177931,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1217,0,"",shellscript,selection_mouse +1404,1178397,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1124,0,"",shellscript,selection_mouse +1405,1178803,"TERMINAL",0,0,"43",,terminal_output +1406,1178942,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1217,0,"",shellscript,selection_mouse +1407,1179463,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1216,1,"\n",shellscript,selection_mouse +1408,1179484,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1121,96,"1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1409,1179515,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1017,200," jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1410,1179515,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",919,298,"max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1411,1179528,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",896,321,"-min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1412,1179560,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",895,322,"--min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1413,1179561,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",894,323," --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n
--codebook_dropout 0.0\n",shellscript,selection_mouse +1414,1179601,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",893,324," --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1415,1179602,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",872,345," --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1416,1179648,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",871,346," --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1417,1179682,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",838,379," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1418,1179733,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",837,380," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1419,1179735,"TERMINAL",0,0,"54",,terminal_output +1420,1180015,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",804,413,"srun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1421,1180808,"TERMINAL",0,0,"65",,terminal_output +1422,1180981,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",819,0,"",shellscript,selection_mouse +1423,1182125,"TERMINAL",0,0,"76",,terminal_output +1424,1182828,"TERMINAL",0,0,"87",,terminal_output +1425,1183947,"TERMINAL",0,0,"98",,terminal_output +1426,1184927,"TERMINAL",0,0,"509",,terminal_output +1427,1186008,"TERMINAL",0,0,"140",,terminal_output +1428,1186487,"scripts_horeka/overfit_sample/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab +1429,1187071,"TERMINAL",0,0,"21",,terminal_output +1430,1188156,"TERMINAL",0,0,"32",,terminal_output 
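The Ctrl-C traceback recorded above lands inside jax.block_until_ready(loss) (train_tokenizer.py, line 232) because JAX dispatches device work asynchronously: the Python thread is parked waiting for the GPU to finish, so that is where the KeyboardInterrupt surfaces. A small illustration of the same asynchronous-dispatch behaviour:

    import jax
    import jax.numpy as jnp

    x = jnp.ones((8192, 8192))
    y = x @ x                 # returns immediately; the matmul runs asynchronously on the device
    jax.block_until_ready(y)  # the Python thread blocks here until the device finishes,
                              # which is where a Ctrl-C would be raised

The second ^C then interrupts wandb's atexit teardown, which is itself blocked in a thread join while the run tries to flush.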
+1431,1188574,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab +1432,1189505,"TERMINAL",0,0,"44",,terminal_output +1433,1190338,"TERMINAL",0,0,"65",,terminal_output +1434,1191337,"TERMINAL",0,0,"76",,terminal_output +1435,1191734,"scripts_horeka/modelsize_scaling/dynamics/3_train_dyn_180M.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=10:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=logs/logs_training/%x_%j.log\n#SBATCH --error=logs/logs_training/%x_%j.log\n#SBATCH --mail-user=mihir.mahajan2002@gmail.com\n#SBATCH --job-name=train_dynamics_minecraft_overfit_sample_180M\n#SBATCH --mem=50G\n#SBATCH --mail-type=ALL\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=$ws_dir/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\necho Running dynamics model overfit run. Slurm id: $slurm_job_id\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3290391/tokenizer_1750845012_50000/\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3290392/lam_1750845133_180000/\n\nsrun python train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=100 \\n --log \\n --name=dynamics-model-size-scaling-180M-$slurm_job_id \\n --tags dynamics model-size-scaling 180M \\n --log_checkpoint_interval=500 \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --lam_checkpoint=$lam_ckpt_dir \\n --data_dir $tf_records_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16\n",shellscript,tab +1436,1192574,"TERMINAL",0,0,"87",,terminal_output +1437,1193286,"TERMINAL",0,0,"98",,terminal_output +1438,1193710,"scripts_horeka/modelsize_scaling/dynamics/3_train_dyn_180M.sbatch",1330,0,"",shellscript,selection_mouse +1439,1194358,"TERMINAL",0,0,"7:009",,terminal_output +1440,1195139,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab +1441,1195434,"TERMINAL",0,0,"150",,terminal_output +1442,1196443,"TERMINAL",0,0,"21",,terminal_output +1443,1197516,"TERMINAL",0,0,"32",,terminal_output +1444,1198252,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1217,0,"",shellscript,selection_mouse +1445,1198402,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1216,1,"\n",shellscript,selection_mouse +1446,1198411,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1169,48,"\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1447,1198435,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1043,174,"f_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1448,1198446,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",977,240,"\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 
\\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1449,1198462,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",922,295,"_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1450,1198482,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",876,341,"batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1451,1198522,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",841,376,"--ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1452,1198523,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",840,377," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1453,1198604,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",806,411,"un python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1454,1198605,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",805,412,"run python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse +1455,1198606,"TERMINAL",0,0,"43",,terminal_output +1456,1198671,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",804,413,"srun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0\n",shellscript,selection_mouse 
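Two details of the recorded 3_train_dyn_180M.sbatch are worth flagging: tf_records_dir references $ws_dir one line before ws_dir is assigned (so it expands empty), and CHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id expands the undefined variable job_name_ rather than job_name; bash needs ${job_name}_${slurm_job_id} for that path to come out as intended. As for the learning-rate flags (--min_lr, --max_lr, --warmup_steps), the recording does not show how the training scripts build their optimizer; one plausible optax wiring, using the dynamics run's values and a hypothetical total step budget, would be:

    import optax

    min_lr, max_lr, warmup_steps = 5e-7, 5e-6, 125  # --min_lr, --max_lr, --warmup_steps
    decay_steps = 200_000                           # hypothetical total step budget

    schedule = optax.warmup_cosine_decay_schedule(
        init_value=min_lr,
        peak_value=max_lr,
        warmup_steps=warmup_steps,
        decay_steps=decay_steps,
        end_value=min_lr,
    )
    optimizer = optax.adamw(learning_rate=schedule)

With --min_lr=4.3e-5 --max_lr=4.3e-5, as in the tokenizer script, such a schedule degenerates to a constant learning rate.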
+1457,1199642,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",804,0,"",shellscript,selection_command +1458,1199643,"TERMINAL",0,0,"54",,terminal_output +1459,1200737,"TERMINAL",0,0,"65",,terminal_output +1460,1201672,"TERMINAL",0,0,"76",,terminal_output +1461,1202769,"TERMINAL",0,0,"87",,terminal_output +1462,1203811,"TERMINAL",0,0,"98",,terminal_output +1463,1204886,"TERMINAL",0,0,"109",,terminal_output +1464,1205873,"TERMINAL",0,0,"17:00",,terminal_output +1465,1206956,"TERMINAL",0,0,"21",,terminal_output +1466,1207974,"TERMINAL",0,0,"32",,terminal_output +1467,1209052,"TERMINAL",0,0,"43",,terminal_output +1468,1210058,"TERMINAL",0,0,"55",,terminal_output +1469,1211187,"TERMINAL",0,0,"76",,terminal_output +1470,1212151,"TERMINAL",0,0,"87",,terminal_output +1471,1213199,"TERMINAL",0,0,"98",,terminal_output +1472,1214258,"TERMINAL",0,0,"209",,terminal_output +1473,1215303,"TERMINAL",0,0,"110",,terminal_output +1474,1216367,"TERMINAL",0,0,"21",,terminal_output +1475,1217411,"TERMINAL",0,0,"32",,terminal_output +1476,1218463,"TERMINAL",0,0,"43",,terminal_output +1477,1219520,"TERMINAL",0,0,"54",,terminal_output +1478,1220577,"TERMINAL",0,0,"65",,terminal_output +1479,1221596,"TERMINAL",0,0,"76",,terminal_output +1480,1222644,"TERMINAL",0,0,"87",,terminal_output +1481,1223695,"TERMINAL",0,0,"98",,terminal_output +1482,1224740,"TERMINAL",0,0,"309",,terminal_output +1483,1225787,"TERMINAL",0,0,"120",,terminal_output +1484,1226859,"TERMINAL",0,0,"21",,terminal_output +1485,1227890,"TERMINAL",0,0,"32",,terminal_output +1486,1228996,"TERMINAL",0,0,"43",,terminal_output +1487,1229989,"TERMINAL",0,0,"54",,terminal_output +1488,1231037,"TERMINAL",0,0,"65",,terminal_output +1489,1232092,"TERMINAL",0,0,"77",,terminal_output +1490,1233144,"TERMINAL",0,0,"98",,terminal_output +1491,1234189,"TERMINAL",0,0,"409",,terminal_output +1492,1234778,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",978,0," --name=tokenizer-tiny-overfit-$slurm_job_id \\n",shellscript,content +1493,1234835,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1028,0," --tags tokenizer overfit tiny 1.5M \\n",shellscript,content +1494,1235285,"TERMINAL",0,0,"130",,terminal_output +1495,1236302,"TERMINAL",0,0,"21",,terminal_output +1496,1237319,"TERMINAL",0,0,"32",,terminal_output +1497,1238385,"TERMINAL",0,0,"43",,terminal_output +1498,1239420,"TERMINAL",0,0,"54",,terminal_output +1499,1240458,"TERMINAL",0,0,"65",,terminal_output +1500,1241538,"TERMINAL",0,0,"76",,terminal_output +1501,1242552,"TERMINAL",0,0,"87",,terminal_output +1502,1243073,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1063,0,"",shellscript,selection_mouse +1503,1243390,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1063,1,".",shellscript,selection_mouse +1504,1243618,"TERMINAL",0,0,"98",,terminal_output +1505,1243669,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1063,2,".5",shellscript,selection_mouse +1506,1244639,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1063,2,"",shellscript,content +1507,1244692,"TERMINAL",0,0,"509",,terminal_output +1508,1245027,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1062,1,"",shellscript,content +1509,1245437,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1062,1,"",shellscript,content 
+1510,1245810,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1061,1,"",shellscript,content +1511,1245853,"TERMINAL",0,0,"140",,terminal_output +1512,1246540,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1213,0,"",shellscript,selection_mouse +1513,1246780,"TERMINAL",0,0,"21",,terminal_output +1514,1247047,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1255,0,"",shellscript,selection_mouse +1515,1247812,"TERMINAL",0,0,"32",,terminal_output +1516,1248862,"TERMINAL",0,0,"43",,terminal_output +1517,1249950,"TERMINAL",0,0,"54",,terminal_output +1518,1250950,"TERMINAL",0,0,"65",,terminal_output +1519,1251998,"TERMINAL",0,0,"76",,terminal_output +1520,1253038,"TERMINAL",0,0,"87",,terminal_output +1521,1254104,"TERMINAL",0,0,"99",,terminal_output +1522,1254757,"scripts_horeka/modelsize_scaling/dynamics/3_train_dyn_180M.sbatch",0,0,"",shellscript,tab +1523,1255174,"TERMINAL",0,0,"8:0150",,terminal_output +1524,1255461,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab +1525,1256192,"TERMINAL",0,0,"21",,terminal_output +1526,1257278,"train_tokenizer.py",0,0,"",python,tab +1527,1257315,"TERMINAL",0,0,"32",,terminal_output +1528,1258295,"TERMINAL",0,0,"43",,terminal_output +1529,1259351,"TERMINAL",0,0,"54",,terminal_output +1530,1259720,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab +1531,1260376,"TERMINAL",0,0,"65",,terminal_output +1532,1261428,"TERMINAL",0,0,"76",,terminal_output +1533,1261953,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1302,0,"",shellscript,selection_mouse +1534,1262219,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1275,27,"\n --codebook_dropout 0.0",shellscript,selection_mouse +1535,1262220,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1255,47,"\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1536,1262220,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1160,142," 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1537,1262221,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1125,177,"r $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1538,1262221,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1075,227,"y instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1539,1262221,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",1037,265,"s tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1540,1262235,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",987,315,"e=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 
384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1541,1262274,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",974,328,"g \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1542,1262275,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",973,329,"og \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1543,1262282,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",941,361,"log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1544,1262347,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",919,383,"max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1545,1262392,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",896,406,"-min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1546,1262393,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",895,407,"--min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1547,1262424,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",873,429," --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse 
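The repeated mouse selections here sweep the whole srun python train_tokenizer.py argument block, whose trailing-backslash continuations are easy to break while editing. A minimal sketch of an alternative layout, using the same flags as the captured script (a hypothetical refactor, not what the recording contains):

    # Collect the flags in a bash array; no backslash continuations to keep intact.
    train_args=(
      --ckpt_dir "$CHECKPOINT_DIR"
      --batch_size=1
      --min_lr=4.3e-5
      --max_lr=4.3e-5
      --log_image_interval=100
      --log
      --name="tokenizer-tiny-overfit-$slurm_job_id"
      --tags tokenizer overfit tiny
      --entity instant-uv
      --project jafar
      --data_dir "$tf_records_dir"
      --model_dim 384
      --latent_dim 32
      --num_latents 1024
      --patch_size 4
      --num_blocks 8
      --num_heads 8
      --codebook_dropout 0.0
    )
    srun python train_tokenizer.py "${train_args[@]}"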
+1548,1262495,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",872,430," --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1549,1262529,"TERMINAL",0,0,"87",,terminal_output +1550,1262539,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",871,431," --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1551,1262732,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",838,464," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1552,1262845,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",837,465," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1553,1263574,"TERMINAL",0,0,"98",,terminal_output +1554,1264586,"TERMINAL",0,0,"109",,terminal_output +1555,1265175,"scripts_horeka/overfit_sample_tiny/tester.sh",0,0,"",shellscript,tab +1556,1265628,"TERMINAL",0,0,"18:00",,terminal_output +1557,1266282,"scripts_horeka/overfit_sample_tiny/tester.sh",726,47,"\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1558,1266301,"scripts_horeka/overfit_sample_tiny/tester.sh",680,93,"24 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1559,1266314,"scripts_horeka/overfit_sample_tiny/tester.sh",598,175,"$tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1560,1266340,"scripts_horeka/overfit_sample_tiny/tester.sh",533,240,"\\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1561,1266353,"scripts_horeka/overfit_sample_tiny/tester.sh",532,241," \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n 
--num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1562,1266381,"scripts_horeka/overfit_sample_tiny/tester.sh",501,272,"_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1563,1266393,"scripts_horeka/overfit_sample_tiny/tester.sh",500,273,"g_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1564,1266420,"scripts_horeka/overfit_sample_tiny/tester.sh",499,274,"og_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1565,1266421,"scripts_horeka/overfit_sample_tiny/tester.sh",477,296,"ax_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1566,1266433,"scripts_horeka/overfit_sample_tiny/tester.sh",476,297,"max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1567,1266483,"scripts_horeka/overfit_sample_tiny/tester.sh",475,298,"-max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1568,1266483,"scripts_horeka/overfit_sample_tiny/tester.sh",452,321,"--min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1569,1266523,"scripts_horeka/overfit_sample_tiny/tester.sh",451,322," --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1570,1266560,"scripts_horeka/overfit_sample_tiny/tester.sh",429,344," --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse 
+1571,1266561,"scripts_horeka/overfit_sample_tiny/tester.sh",428,345," --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1572,1266596,"scripts_horeka/overfit_sample_tiny/tester.sh",427,346," --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1573,1266633,"scripts_horeka/overfit_sample_tiny/tester.sh",394,379," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +1574,1266705,"TERMINAL",0,0,"21",,terminal_output +1575,1267189,"scripts_horeka/overfit_sample_tiny/tester.sh",394,379,"",shellscript,content +1576,1267728,"TERMINAL",0,0,"32",,terminal_output +1577,1267968,"scripts_horeka/overfit_sample_tiny/tester.sh",394,0," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,content +1578,1268798,"TERMINAL",0,0,"43",,terminal_output +1579,1269352,"TERMINAL",0,0,"sh scripts_horeka/overfit_sample_tiny/tester.sh ",,terminal_output +1580,1269823,"TERMINAL",0,0,"54",,terminal_output +1581,1269932,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output 
+1582,1270077,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=1721172\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0507\r\nSLURM_JOB_START_TIME=1751029211\r\nSLURM_STEP_NODELIST=hkn0507\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751032811\r\nSLURM_PMI2_SRUN_PORT=35623\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3298895\r\nSLURM_PTY_PORT=42613\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=35\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e10.hkn0507\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_MEM_PER_NODE=51200\r\nSLURM_PTY_WIN_COL=73\r\nSLURM_NODELIST=hkn0507\r\nSLURM_SRUN_COMM_PORT=37257\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3298895\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0507\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=37257\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0507\r\n",,terminal_output +1583,1270872,"TERMINAL",0,0,"65",,terminal_output +1584,1271929,"TERMINAL",0,0,"76",,terminal_output +1585,1272269,"TERMINAL",0,0,"2025-06-27 15:08:18.171423: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751029698.184763 1724518 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751029698.188979 1724518 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1751029698.201342 1724518 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751029698.201362 1724518 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751029698.201365 1724518 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751029698.201367 1724518 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +1586,1273006,"TERMINAL",0,0,"87",,terminal_output +1587,1274016,"TERMINAL",0,0,"98",,terminal_output +1588,1275061,"TERMINAL",0,0,"2010",,terminal_output +1589,1275298,"TERMINAL",0,0,"W0000 00:00:1751029701.215556 1724518 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +1590,1275605,"TERMINAL",0,0,"Running on 1 devices.\r\n",,terminal_output +1591,1276109,"TERMINAL",0,0,"21",,terminal_output +1592,1276365,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +1593,1277073,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250627_150822-hyj7gmzn\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run tokenizer-tiny-overfit-0000\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/hyj7gmzn\r\n",,terminal_output +1594,1277171,"TERMINAL",0,0,"32",,terminal_output +1595,1278211,"TERMINAL",0,0,"43",,terminal_output +1596,1278449,"TERMINAL",0,0,"2025-06-27 15:08:24.393323: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1597,1279261,"TERMINAL",0,0,"54",,terminal_output +1598,1280321,"TERMINAL",0,0,"65",,terminal_output +1599,1281350,"TERMINAL",0,0,"76",,terminal_output +1600,1282387,"TERMINAL",0,0,"87",,terminal_output +1601,1283437,"TERMINAL",0,0,"98",,terminal_output +1602,1284471,"TERMINAL",0,0,"309",,terminal_output +1603,1285521,"TERMINAL",0,0,"120",,terminal_output +1604,1286583,"TERMINAL",0,0,"21",,terminal_output +1605,1287620,"TERMINAL",0,0,"32",,terminal_output +1606,1288699,"TERMINAL",0,0,"43",,terminal_output +1607,1289711,"TERMINAL",0,0,"54",,terminal_output +1608,1290766,"TERMINAL",0,0,"65",,terminal_output +1609,1291505,"TERMINAL",0,0,"2025-06-27 15:08:37.449966: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1610,1291818,"TERMINAL",0,0,"76",,terminal_output +1611,1292926,"TERMINAL",0,0,"87",,terminal_output +1612,1293479,"TERMINAL",0,0,"2025-06-27 15:08:39.422322: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1613,1293921,"TERMINAL",0,0,"98",,terminal_output +1614,1294973,"TERMINAL",0,0,"409",,terminal_output +1615,1296022,"TERMINAL",0,0,"130",,terminal_output +1616,1297070,"TERMINAL",0,0,"22",,terminal_output +1617,1297212,"TERMINAL",0,0,"Starting training from step 0...\r\nbatch shape: (1, 16, 90, 160, 3)\r\n",,terminal_output +1618,1298119,"TERMINAL",0,0,"43",,terminal_output +1619,1299158,"TERMINAL",0,0,"54",,terminal_output +1620,1300208,"TERMINAL",0,0,"65",,terminal_output +1621,1301253,"TERMINAL",0,0,"76",,terminal_output +1622,1302333,"TERMINAL",0,0,"87",,terminal_output +1623,1303363,"TERMINAL",0,0,"98",,terminal_output +1624,1304407,"TERMINAL",0,0,"509",,terminal_output +1625,1305475,"TERMINAL",0,0,"140",,terminal_output +1626,1306531,"TERMINAL",0,0,"21",,terminal_output +1627,1307578,"TERMINAL",0,0,"32",,terminal_output +1628,1308610,"TERMINAL",0,0,"43",,terminal_output +1629,1308989,"TERMINAL",0,0,"2025-06-27 15:08:54.921548: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-27 15:08:54.922157: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-27 15:08:54.922181: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-27 15:08:54.922306: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-27 15:08:54.924016: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1630,1309869,"TERMINAL",0,0,"54",,terminal_output +1631,1310705,"TERMINAL",0,0,"65",,terminal_output +1632,1311737,"TERMINAL",0,0,"76",,terminal_output +1633,1312787,"TERMINAL",0,0,"87",,terminal_output +1634,1313840,"TERMINAL",0,0,"98",,terminal_output +1635,1314978,"TERMINAL",0,0,"9:009",,terminal_output +1636,1315929,"TERMINAL",0,0,"150",,terminal_output +1637,1316980,"TERMINAL",0,0,"21",,terminal_output +1638,1318022,"TERMINAL",0,0,"32",,terminal_output +1639,1319112,"TERMINAL",0,0,"44",,terminal_output +1640,1320118,"TERMINAL",0,0,"65",,terminal_output +1641,1321190,"TERMINAL",0,0,"76",,terminal_output +1642,1322220,"TERMINAL",0,0,"87",,terminal_output +1643,1323294,"TERMINAL",0,0,"98",,terminal_output +1644,1324353,"TERMINAL",0,0,"109",,terminal_output +1645,1325367,"TERMINAL",0,0,"19:00",,terminal_output +1646,1326425,"TERMINAL",0,0,"21",,terminal_output +1647,1327470,"TERMINAL",0,0,"32",,terminal_output +1648,1328548,"TERMINAL",0,0,"43",,terminal_output +1649,1329571,"TERMINAL",0,0,"54",,terminal_output +1650,1330606,"TERMINAL",0,0,"65",,terminal_output +1651,1331646,"TERMINAL",0,0,"76",,terminal_output +1652,1332753,"TERMINAL",0,0,"87",,terminal_output +1653,1333743,"TERMINAL",0,0,"98",,terminal_output +1654,1334795,"TERMINAL",0,0,"209",,terminal_output +1655,1335857,"TERMINAL",0,0,"110",,terminal_output +1656,1336901,"TERMINAL",0,0,"21",,terminal_output +1657,1337966,"TERMINAL",0,0,"32",,terminal_output +1658,1338985,"TERMINAL",0,0,"43",,terminal_output +1659,1340037,"TERMINAL",0,0,"54",,terminal_output +1660,1341107,"TERMINAL",0,0,"66",,terminal_output +1661,1342131,"TERMINAL",0,0,"87",,terminal_output +1662,1343186,"TERMINAL",0,0,"98",,terminal_output +1663,1344211,"TERMINAL",0,0,"309",,terminal_output +1664,1345263,"TERMINAL",0,0,"120",,terminal_output +1665,1346308,"TERMINAL",0,0,"21",,terminal_output +1666,1347426,"TERMINAL",0,0,"32",,terminal_output +1667,1348408,"TERMINAL",0,0,"43",,terminal_output +1668,1349466,"TERMINAL",0,0,"54",,terminal_output +1669,1350506,"TERMINAL",0,0,"65",,terminal_output +1670,1351586,"TERMINAL",0,0,"76",,terminal_output +1671,1352617,"TERMINAL",0,0,"87",,terminal_output +1672,1353644,"TERMINAL",0,0,"98",,terminal_output +1673,1354695,"TERMINAL",0,0,"409",,terminal_output +1674,1355256,"TERMINAL",0,0,"Step 0, loss: 0.27132558822631836, step time: 57831.263303756714ms\r\n",,terminal_output +1675,1355437,"TERMINAL",0,0,"Step 1, loss: 0.2122431993484497, step time: 177.6125431060791ms\r\n",,terminal_output +1676,1355618,"TERMINAL",0,0,"Step 2, loss: 0.1855737268924713, step time: 176.5158176422119ms\r\n",,terminal_output +1677,1355739,"TERMINAL",0,0,"130",,terminal_output +1678,1355828,"TERMINAL",0,0,"Step 3, loss: 0.17754331231117249, step time: 176.58114433288574ms\r\n",,terminal_output +1679,1355983,"TERMINAL",0,0,"Step 4, loss: 0.1778324395418167, step time: 176.67317390441895ms\r\n",,terminal_output +1680,1356157,"TERMINAL",0,0,"Step 5, loss: 0.1761087030172348, step time: 176.38611793518066ms\r\n",,terminal_output +1681,1356328,"TERMINAL",0,0,"Step 6, loss: 0.17107035219669342, step time: 176.67198181152344ms\r\n",,terminal_output +1682,1356510,"TERMINAL",0,0,"Step 7, loss: 0.1691201776266098, step time: 176.7590045928955ms\r\n",,terminal_output +1683,1356705,"TERMINAL",0,0,"Step 8, loss: 0.16386333107948303, step time: 177.32691764831543ms\r\n",,terminal_output 
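For reference, the printed batch shape (1, 16, 90, 160, 3) is (batch, frames, height, width, channels): --batch_size=1 with 16-frame 90x160 RGB clips. The earlier "Cannot dlopen some GPU libraries ... Skipping registering GPU devices" warnings come from TensorFlow, which here serves the TFRecord input pipeline; the roughly 177 ms steady-state step times below suggest the JAX side is still running on the GPU.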
+1684,1356836,"TERMINAL",0,0,"21",,terminal_output +1685,1356883,"TERMINAL",0,0,"Step 9, loss: 0.1569884866476059, step time: 176.60999298095703ms\r\n",,terminal_output +1686,1357053,"TERMINAL",0,0,"Step 10, loss: 0.14781567454338074, step time: 176.50222778320312ms\r\n",,terminal_output +1687,1357230,"TERMINAL",0,0,"Step 11, loss: 0.14293907582759857, step time: 176.76329612731934ms\r\n",,terminal_output +1688,1357407,"TERMINAL",0,0,"Step 12, loss: 0.13770121335983276, step time: 176.66053771972656ms\r\n",,terminal_output +1689,1357581,"TERMINAL",0,0,"Step 13, loss: 0.1328345537185669, step time: 177.1833896636963ms\r\n",,terminal_output +1690,1357779,"TERMINAL",0,0,"Step 14, loss: 0.1299932599067688, step time: 177.0493984222412ms\r\n",,terminal_output +1691,1357831,"TERMINAL",0,0,"32",,terminal_output +1692,1357938,"TERMINAL",0,0,"Step 15, loss: 0.1270066648721695, step time: 176.1023998260498ms\r\n",,terminal_output +1693,1358186,"TERMINAL",0,0,"Step 16, loss: 0.12330175936222076, step time: 176.6357421875ms\r\n",,terminal_output +1694,1358322,"TERMINAL",0,0,"Step 17, loss: 0.11889254301786423, step time: 176.1317253112793ms\r\n",,terminal_output +1695,1358473,"TERMINAL",0,0,"Step 18, loss: 0.11472553014755249, step time: 176.24354362487793ms\r\n",,terminal_output +1696,1358652,"TERMINAL",0,0,"Step 19, loss: 0.11073906719684601, step time: 176.2528419494629ms\r\n",,terminal_output +1697,1358828,"TERMINAL",0,0,"Step 20, loss: 0.10764200985431671, step time: 175.99821090698242ms\r\n",,terminal_output +1698,1358881,"TERMINAL",0,0,"43",,terminal_output +1699,1359044,"TERMINAL",0,0,"Step 21, loss: 0.1047714501619339, step time: 176.46193504333496ms\r\n",,terminal_output +1700,1359200,"TERMINAL",0,0,"Step 22, loss: 0.10184679180383682, step time: 176.52320861816406ms\r\n",,terminal_output +1701,1359363,"TERMINAL",0,0,"Step 23, loss: 0.09961242228746414, step time: 176.25689506530762ms\r\n",,terminal_output +1702,1359542,"TERMINAL",0,0,"Step 24, loss: 0.09799212217330933, step time: 176.26237869262695ms\r\n",,terminal_output +1703,1359720,"TERMINAL",0,0,"Step 25, loss: 0.09485970437526703, step time: 175.9178638458252ms\r\n",,terminal_output +1704,1359896,"TERMINAL",0,0,"Step 26, loss: 0.09220339357852936, step time: 176.54085159301758ms\r\n",,terminal_output +1705,1359935,"TERMINAL",0,0,"54",,terminal_output +1706,1360074,"TERMINAL",0,0,"Step 27, loss: 0.09088454395532608, step time: 176.9263744354248ms\r\n",,terminal_output +1707,1360306,"TERMINAL",0,0,"Step 28, loss: 0.09031874686479568, step time: 176.42641067504883ms\r\n",,terminal_output +1708,1360461,"TERMINAL",0,0,"Step 29, loss: 0.08922387659549713, step time: 176.4531135559082ms\r\n",,terminal_output +1709,1360629,"TERMINAL",0,0,"Step 30, loss: 0.08666373789310455, step time: 176.06353759765625ms\r\n",,terminal_output +1710,1360795,"TERMINAL",0,0,"Step 31, loss: 0.08457058668136597, step time: 176.45955085754395ms\r\n",,terminal_output +1711,1360970,"TERMINAL",0,0,"Step 32, loss: 0.08322861790657043, step time: 176.4392852783203ms\r\n",,terminal_output +1712,1360985,"TERMINAL",0,0,"65",,terminal_output +1713,1361144,"TERMINAL",0,0,"Step 33, loss: 0.08199231326580048, step time: 176.1765480041504ms\r\n",,terminal_output +1714,1361336,"TERMINAL",0,0,"Step 34, loss: 0.08048229664564133, step time: 176.39493942260742ms\r\n",,terminal_output +1715,1361514,"TERMINAL",0,0,"Step 35, loss: 0.0788225382566452, step time: 175.8561134338379ms\r\n",,terminal_output +1716,1361693,"TERMINAL",0,0,"Step 36, loss: 0.07813743501901627, step time: 
176.51104927062988ms\r\n",,terminal_output +1717,1361858,"TERMINAL",0,0,"Step 37, loss: 0.07707574963569641, step time: 176.46336555480957ms\r\n",,terminal_output +1718,1362073,"TERMINAL",0,0,"76",,terminal_output +1719,1362074,"TERMINAL",0,0,"Step 38, loss: 0.07681620121002197, step time: 176.55634880065918ms\r\n",,terminal_output +1720,1362213,"TERMINAL",0,0,"Step 39, loss: 0.07643307000398636, step time: 176.38611793518066ms\r\n",,terminal_output +1721,1362392,"TERMINAL",0,0,"Step 40, loss: 0.07608877867460251, step time: 176.0578155517578ms\r\n",,terminal_output +1722,1362572,"TERMINAL",0,0,"Step 41, loss: 0.07514745742082596, step time: 176.69200897216797ms\r\n",,terminal_output +1723,1362752,"TERMINAL",0,0,"Step 42, loss: 0.0755932405591011, step time: 177.0625114440918ms\r\n",,terminal_output +1724,1362932,"TERMINAL",0,0,"Step 43, loss: 0.0772780030965805, step time: 177.89340019226074ms\r\n",,terminal_output +1725,1363076,"TERMINAL",0,0,"88",,terminal_output +1726,1363109,"TERMINAL",0,0,"Step 44, loss: 0.07913679629564285, step time: 176.62549018859863ms\r\n",,terminal_output +1727,1363302,"TERMINAL",0,0,"Step 45, loss: 0.08079638332128525, step time: 175.62246322631836ms\r\n",,terminal_output +1728,1363469,"TERMINAL",0,0,"Step 46, loss: 0.08581084758043289, step time: 176.5437126159668ms\r\n",,terminal_output +1729,1363644,"TERMINAL",0,0,"Step 47, loss: 0.0921228677034378, step time: 176.56326293945312ms\r\n",,terminal_output +1730,1363828,"TERMINAL",0,0,"Step 48, loss: 0.09705103188753128, step time: 176.41711235046387ms\r\n",,terminal_output +1731,1364011,"TERMINAL",0,0,"Step 49, loss: 0.10086013376712799, step time: 176.50151252746582ms\r\n",,terminal_output +1732,1364180,"TERMINAL",0,0,"509",,terminal_output +1733,1364200,"TERMINAL",0,0,"Step 50, loss: 0.10333701223134995, step time: 176.08928680419922ms\r\n",,terminal_output +1734,1364378,"TERMINAL",0,0,"Step 51, loss: 0.1035444438457489, step time: 176.19967460632324ms\r\n",,terminal_output +1735,1364542,"TERMINAL",0,0,"Step 52, loss: 0.10211873054504395, step time: 176.58281326293945ms\r\n",,terminal_output +1736,1364751,"TERMINAL",0,0,"Step 53, loss: 0.09968838840723038, step time: 176.4533519744873ms\r\n",,terminal_output +1737,1364932,"TERMINAL",0,0,"Step 54, loss: 0.09630117565393448, step time: 176.36704444885254ms\r\n",,terminal_output +1738,1365084,"TERMINAL",0,0,"Step 55, loss: 0.09330357611179352, step time: 176.5599250793457ms\r\n",,terminal_output +1739,1365167,"TERMINAL",0,0,"140",,terminal_output +1740,1365262,"TERMINAL",0,0,"Step 56, loss: 0.08997335284948349, step time: 176.51891708374023ms\r\n",,terminal_output +1741,1365441,"TERMINAL",0,0,"Step 57, loss: 0.08625142276287079, step time: 176.37324333190918ms\r\n",,terminal_output +1742,1365611,"TERMINAL",0,0,"Step 58, loss: 0.08346395939588547, step time: 176.8357753753662ms\r\n",,terminal_output +1743,1365788,"TERMINAL",0,0,"Step 59, loss: 0.08073443174362183, step time: 176.70845985412598ms\r\n",,terminal_output +1744,1365968,"TERMINAL",0,0,"Step 60, loss: 0.07872174680233002, step time: 176.1937141418457ms\r\n",,terminal_output +1745,1366147,"TERMINAL",0,0,"Step 61, loss: 0.07681784778833389, step time: 176.5766143798828ms\r\n",,terminal_output +1746,1366216,"TERMINAL",0,0,"21",,terminal_output +1747,1366322,"TERMINAL",0,0,"Step 62, loss: 0.07474932819604874, step time: 176.59997940063477ms\r\n",,terminal_output +1748,1366500,"TERMINAL",0,0,"Step 63, loss: 0.07233552634716034, step time: 176.3894557952881ms\r\n",,terminal_output 
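Step 0 reports roughly 57.8 s against roughly 177 ms for every subsequent step: the first step includes XLA tracing and compilation, so steady-state throughput should be measured excluding it. A minimal sketch, assuming the run output were captured to a file (train.log is a hypothetical name):

    # Average steady-state step time, skipping step 0 (which includes JIT compilation).
    grep -o 'step time: [0-9.]*' train.log \
      | awk 'NR > 1 { s += $3; n++ } END { if (n) printf "%.1f ms over %d steps\n", s / n, n }'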
+1749,1366684,"TERMINAL",0,0,"Step 64, loss: 0.0711909607052803, step time: 176.9266128540039ms\r\n",,terminal_output +1750,1366883,"TERMINAL",0,0,"Step 65, loss: 0.07081569731235504, step time: 176.09357833862305ms\r\n",,terminal_output +1751,1367035,"TERMINAL",0,0,"Step 66, loss: 0.07002189010381699, step time: 176.28836631774902ms\r\n",,terminal_output +1752,1367215,"TERMINAL",0,0,"Step 67, loss: 0.06906553357839584, step time: 177.13642120361328ms\r\n",,terminal_output +1753,1367258,"TERMINAL",0,0,"32",,terminal_output +1754,1367393,"TERMINAL",0,0,"Step 68, loss: 0.06828068196773529, step time: 176.42903327941895ms\r\n",,terminal_output +1755,1367580,"TERMINAL",0,0,"Step 69, loss: 0.06786394864320755, step time: 176.8360137939453ms\r\n",,terminal_output +1756,1367750,"TERMINAL",0,0,"Step 70, loss: 0.0673893466591835, step time: 176.18489265441895ms\r\n",,terminal_output +1757,1367932,"TERMINAL",0,0,"Step 71, loss: 0.06713508814573288, step time: 177.15930938720703ms\r\n",,terminal_output +1758,1368132,"TERMINAL",0,0,"Step 72, loss: 0.0665171891450882, step time: 176.65910720825195ms\r\n",,terminal_output +1759,1368280,"TERMINAL",0,0,"Step 73, loss: 0.06593520194292068, step time: 176.70798301696777ms\r\n",,terminal_output +1760,1368308,"TERMINAL",0,0,"43",,terminal_output +1761,1368461,"TERMINAL",0,0,"Step 74, loss: 0.06595632433891296, step time: 176.7578125ms\r\n",,terminal_output +1762,1368643,"TERMINAL",0,0,"Step 75, loss: 0.06618306040763855, step time: 175.95410346984863ms\r\n",,terminal_output +1763,1368820,"TERMINAL",0,0,"Step 76, loss: 0.06599466502666473, step time: 176.81002616882324ms\r\n",,terminal_output +1764,1369010,"TERMINAL",0,0,"Step 77, loss: 0.06574900448322296, step time: 176.774263381958ms\r\n",,terminal_output +1765,1369176,"TERMINAL",0,0,"Step 78, loss: 0.06571226567029953, step time: 176.6359806060791ms\r\n",,terminal_output +1766,1369354,"TERMINAL",0,0,"Step 79, loss: 0.06565368920564651, step time: 176.8784523010254ms\r\n",,terminal_output +1767,1369354,"TERMINAL",0,0,"54",,terminal_output +1768,1369533,"TERMINAL",0,0,"Step 80, loss: 0.06554509699344635, step time: 176.09620094299316ms\r\n",,terminal_output +1769,1369714,"TERMINAL",0,0,"Step 81, loss: 0.06555277109146118, step time: 176.42879486083984ms\r\n",,terminal_output +1770,1369889,"TERMINAL",0,0,"Step 82, loss: 0.0653831884264946, step time: 176.5275001525879ms\r\n",,terminal_output +1771,1370069,"TERMINAL",0,0,"Step 83, loss: 0.06484632194042206, step time: 176.6524314880371ms\r\n",,terminal_output +1772,1370250,"TERMINAL",0,0,"Step 84, loss: 0.06406665593385696, step time: 176.46121978759766ms\r\n",,terminal_output +1773,1370402,"TERMINAL",0,0,"65",,terminal_output +1774,1370431,"TERMINAL",0,0,"Step 85, loss: 0.06346739828586578, step time: 176.2869358062744ms\r\n",,terminal_output +1775,1370604,"TERMINAL",0,0,"Step 86, loss: 0.06267248839139938, step time: 176.61213874816895ms\r\n",,terminal_output +1776,1370787,"TERMINAL",0,0,"Step 87, loss: 0.06163487955927849, step time: 176.7432689666748ms\r\n",,terminal_output +1777,1371002,"TERMINAL",0,0,"Step 88, loss: 0.06101818010210991, step time: 176.63931846618652ms\r\n",,terminal_output +1778,1371183,"TERMINAL",0,0,"Step 89, loss: 0.0605541467666626, step time: 176.6951084136963ms\r\n",,terminal_output +1779,1371351,"TERMINAL",0,0,"Step 90, loss: 0.06001419946551323, step time: 176.00369453430176ms\r\n",,terminal_output +1780,1371458,"TERMINAL",0,0,"76",,terminal_output +1781,1371511,"TERMINAL",0,0,"Step 91, loss: 0.05947208032011986, step time: 
176.98073387145996ms\r\n",,terminal_output +1782,1371705,"TERMINAL",0,0,"Step 92, loss: 0.05895468592643738, step time: 176.5885353088379ms\r\n",,terminal_output +1783,1371851,"TERMINAL",0,0,"Step 93, loss: 0.05862986668944359, step time: 176.61643028259277ms\r\n",,terminal_output +1784,1372060,"TERMINAL",0,0,"Step 94, loss: 0.05786709114909172, step time: 176.5730381011963ms\r\n",,terminal_output +1785,1372209,"TERMINAL",0,0,"Step 95, loss: 0.05756083130836487, step time: 176.24759674072266ms\r\n",,terminal_output +1786,1372388,"TERMINAL",0,0,"Step 96, loss: 0.05741829425096512, step time: 178.41601371765137ms\r\n",,terminal_output +1787,1372530,"TERMINAL",0,0,"87",,terminal_output +1788,1372569,"TERMINAL",0,0,"Step 97, loss: 0.057192590087652206, step time: 176.88417434692383ms\r\n",,terminal_output +1789,1372746,"TERMINAL",0,0,"Step 98, loss: 0.05720604956150055, step time: 176.48768424987793ms\r\n",,terminal_output +1790,1372934,"TERMINAL",0,0,"Step 99, loss: 0.0572572723031044, step time: 176.4998435974121ms\r\n",,terminal_output +1791,1373596,"TERMINAL",0,0,"98",,terminal_output +1792,1373838,"TERMINAL",0,0,"Step 100, loss: 0.05731266364455223, step time: 177.83498764038086ms\r\n",,terminal_output +1793,1374013,"TERMINAL",0,0,"Step 101, loss: 0.05745572969317436, step time: 178.0400276184082ms\r\n",,terminal_output +1794,1374195,"TERMINAL",0,0,"Step 102, loss: 0.05786733329296112, step time: 178.80558967590332ms\r\n",,terminal_output +1795,1374374,"TERMINAL",0,0,"Step 103, loss: 0.05805906653404236, step time: 177.8566837310791ms\r\n",,terminal_output +1796,1374550,"TERMINAL",0,0,"Step 104, loss: 0.058420851826667786, step time: 177.42609977722168ms\r\n",,terminal_output +1797,1374627,"TERMINAL",0,0,"10:009",,terminal_output +1798,1374762,"TERMINAL",0,0,"Step 105, loss: 0.057759881019592285, step time: 176.8336296081543ms\r\n",,terminal_output +1799,1374915,"TERMINAL",0,0,"Step 106, loss: 0.05777483433485031, step time: 178.25984954833984ms\r\n",,terminal_output +1800,1375104,"TERMINAL",0,0,"Step 107, loss: 0.057530466467142105, step time: 177.95372009277344ms\r\n",,terminal_output +1801,1375273,"TERMINAL",0,0,"Step 108, loss: 0.05720403417944908, step time: 178.0402660369873ms\r\n",,terminal_output +1802,1375453,"TERMINAL",0,0,"Step 109, loss: 0.057012055069208145, step time: 177.67763137817383ms\r\n",,terminal_output +1803,1375664,"TERMINAL",0,0,"Step 110, loss: 0.056543152779340744, step time: 177.41847038269043ms\r\n",,terminal_output +1804,1375683,"TERMINAL",0,0,"150",,terminal_output +1805,1375831,"TERMINAL",0,0,"Step 111, loss: 0.05649379640817642, step time: 176.38444900512695ms\r\n",,terminal_output +1806,1375992,"TERMINAL",0,0,"Step 112, loss: 0.05634010583162308, step time: 176.804780960083ms\r\n",,terminal_output +1807,1376170,"TERMINAL",0,0,"Step 113, loss: 0.05608486756682396, step time: 176.87606811523438ms\r\n",,terminal_output +1808,1376350,"TERMINAL",0,0,"Step 114, loss: 0.055736929178237915, step time: 176.71465873718262ms\r\n",,terminal_output +1809,1376535,"TERMINAL",0,0,"Step 115, loss: 0.05542559549212456, step time: 176.35822296142578ms\r\n",,terminal_output +1810,1376754,"TERMINAL",0,0,"Step 116, loss: 0.05519620329141617, step time: 176.95379257202148ms\r\n",,terminal_output +1811,1376830,"TERMINAL",0,0,"21",,terminal_output +1812,1376908,"TERMINAL",0,0,"Step 117, loss: 0.054847605526447296, step time: 176.4070987701416ms\r\n",,terminal_output +1813,1377062,"TERMINAL",0,0,"Step 118, loss: 0.054564058780670166, step time: 
176.62906646728516ms\r\n",,terminal_output +1814,1377238,"TERMINAL",0,0,"Step 119, loss: 0.053967755287885666, step time: 176.54013633728027ms\r\n",,terminal_output +1815,1377453,"TERMINAL",0,0,"Step 120, loss: 0.05331188440322876, step time: 176.31912231445312ms\r\n",,terminal_output +1816,1377633,"TERMINAL",0,0,"Step 121, loss: 0.05274002254009247, step time: 176.77760124206543ms\r\n",,terminal_output +1817,1377809,"TERMINAL",0,0,"32",,terminal_output +1818,1377810,"TERMINAL",0,0,"Step 122, loss: 0.052078623324632645, step time: 177.04105377197266ms\r\n",,terminal_output +1819,1377956,"TERMINAL",0,0,"Step 123, loss: 0.05172644928097725, step time: 177.11925506591797ms\r\n",,terminal_output +1820,1378136,"TERMINAL",0,0,"Step 124, loss: 0.05152444541454315, step time: 177.04248428344727ms\r\n",,terminal_output +1821,1378340,"TERMINAL",0,0,"Step 125, loss: 0.05154119431972504, step time: 176.52130126953125ms\r\n",,terminal_output +1822,1378522,"TERMINAL",0,0,"Step 126, loss: 0.05203219875693321, step time: 176.99909210205078ms\r\n",,terminal_output +1823,1378681,"TERMINAL",0,0,"Step 127, loss: 0.05245625600218773, step time: 176.6977310180664ms\r\n",,terminal_output +1824,1378816,"TERMINAL",0,0,"43",,terminal_output +1825,1378849,"TERMINAL",0,0,"Step 128, loss: 0.05204249173402786, step time: 176.76210403442383ms\r\n",,terminal_output +1826,1379023,"TERMINAL",0,0,"Step 129, loss: 0.051020946353673935, step time: 178.47156524658203ms\r\n",,terminal_output +1827,1379216,"TERMINAL",0,0,"Step 130, loss: 0.050114307552576065, step time: 176.56803131103516ms\r\n",,terminal_output +1828,1379523,"TERMINAL",0,0,"Step 131, loss: 0.049363039433956146, step time: 176.96189880371094ms\r\n",,terminal_output +1829,1379576,"TERMINAL",0,0,"Step 132, loss: 0.048819832503795624, step time: 176.97620391845703ms\r\n",,terminal_output +1830,1379743,"TERMINAL",0,0,"Step 133, loss: 0.048255644738674164, step time: 176.5730381011963ms\r\n",,terminal_output +1831,1379907,"TERMINAL",0,0,"54",,terminal_output +1832,1379946,"TERMINAL",0,0,"Step 134, loss: 0.04762961342930794, step time: 176.90300941467285ms\r\n",,terminal_output +1833,1380135,"TERMINAL",0,0,"Step 135, loss: 0.04718654975295067, step time: 176.31030082702637ms\r\n",,terminal_output +1834,1380279,"TERMINAL",0,0,"Step 136, loss: 0.04675830155611038, step time: 176.7878532409668ms\r\n",,terminal_output +1835,1380457,"TERMINAL",0,0,"Step 137, loss: 0.04618992283940315, step time: 176.82480812072754ms\r\n",,terminal_output +1836,1380637,"TERMINAL",0,0,"Step 138, loss: 0.0455242283642292, step time: 176.65934562683105ms\r\n",,terminal_output +1837,1380820,"TERMINAL",0,0,"Step 139, loss: 0.045184142887592316, step time: 176.51700973510742ms\r\n",,terminal_output +1838,1380882,"TERMINAL",0,0,"^C",,terminal_output +1839,1380924,"TERMINAL",0,0,"65",,terminal_output +1840,1381019,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 232, in \r\n jax.block_until_ready(loss)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/api.py"", line 3117, in block_until_ready\r\n try_to_block(arrays[0])\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/api.py"", line 3100, in try_to_block\r\n return x.block_until_ready()\r\nKeyboardInterrupt\r\n",,terminal_output +1841,1381077,"TERMINAL",0,0,"^CException ignored in atexit callback: 
.teardown_atexit at 0x14985419be20>\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/service_connection.py"", line 94, in teardown_atexit\r\n conn.teardown(hooks.exit_code)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/service_connection.py"", line 226, in teardown\r\n self._router.join()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/interface/router.py"", line 75, in join\r\n self._thread.join()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/threading.py"", line 1096, in join\r\n self._wait_for_tstate_lock()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/threading.py"", line 1116, in _wait_for_tstate_lock\r\n if lock.acquire(block, timeout):\r\nKeyboardInterrupt: \r\n",,terminal_output +1842,1381303,"TERMINAL",0,0,"^CException ignored in atexit callback: \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/api.py"", line 3168, in clean_up\r\n clear_backends()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/api.py"", line 3158, in clear_backends\r\n pjit._infer_params_cached.cache_clear()\r\nKeyboardInterrupt: \r\n",,terminal_output +1843,1381956,"TERMINAL",0,0,"76",,terminal_output +1844,1382978,"TERMINAL",0,0,"87",,terminal_output +1845,1384068,"TERMINAL",0,0,"98",,terminal_output +1846,1385072,"TERMINAL",0,0,"1010:00",,terminal_output +1847,1386126,"TERMINAL",0,0,"21",,terminal_output +1848,1387115,"TERMINAL",0,0,"2025-06-27 15:10:13.056037: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:427] Polled an error from coordination service (this can be an error from this or another task).\r\n2025-06-27 15:10:13.056080: F external/xla/xla/pjrt/distributed/client.h:80] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: UNAVAILABLE: Failed to send RPC to coordination service. Either the leader task was preempted/died/restarted unexpectedly or this task is experiencing network issues. 
Check earlier logs from 1) this task, 2) the leader (usually slice 0 task 0), and 3) cluster scheduler to debug further.\r\nAdditional GRPC error information from remote target coordination_service while calling /tensorflow.CoordinationService/PollForError:\r\n:{""created"":""@1751029813.055940162"",""description"":""Error received from peer ipv4:10.0.1.75:63055"",""file"":""external/com_github_grpc_grpc/src/core/lib/surface/call.cc"",""file_line"":1056,""grpc_message"":""Cancelling all calls"",""grpc_status"":14}\r\n",,terminal_output +1849,1387175,"TERMINAL",0,0,"32",,terminal_output +1850,1387592,"TERMINAL",0,0,"scripts_horeka/overfit_sample_tiny/tester.sh: line 36: 1724518 Aborted (core dumped) python train_tokenizer.py --ckpt_dir $CHECKPOINT_DIR --batch_size=1 --min_lr=4.3e-5 --max_lr=4.3e-5 --log_image_interval=100 --log --name=tokenizer-tiny-overfit-$slurm_job_id --tags tokenizer overfit tiny --entity instant-uv --project jafar --data_dir $tf_records_dir --model_dim 384 --latent_dim 32 --num_latents 1024 --patch_size 4 --num_blocks 8 --num_heads 8 --codebook_dropout 0.0\r\n]0;tum_cte0515@hkn0507:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0507 jafar]$ ",,terminal_output +1851,1388218,"TERMINAL",0,0,"43",,terminal_output +1852,1389265,"TERMINAL",0,0,"54",,terminal_output +1853,1389417,"scripts_horeka/overfit_sample_tiny/tester.sh",0,0,"",shellscript,tab +1854,1389418,"scripts_horeka/overfit_sample_tiny/tester.sh",635,0,"",shellscript,selection_mouse +1855,1389879,"scripts_horeka/overfit_sample_tiny/tester.sh",329,0,"",shellscript,selection_mouse +1856,1390322,"TERMINAL",0,0,"65",,terminal_output +1857,1391362,"TERMINAL",0,0,"76",,terminal_output +1858,1391648,"train_tokenizer.py",0,0,"",python,tab +1859,1392414,"TERMINAL",0,0,"87",,terminal_output +1860,1393501,"TERMINAL",0,0,"98",,terminal_output +1861,1394540,"TERMINAL",0,0,"209",,terminal_output +1862,1395579,"TERMINAL",0,0,"110",,terminal_output +1863,1396650,"TERMINAL",0,0,"21",,terminal_output +1864,1397651,"TERMINAL",0,0,"32",,terminal_output +1865,1398725,"TERMINAL",0,0,"43",,terminal_output +1866,1399124,"train_tokenizer.py",7524,0,"",python,selection_mouse +1867,1399777,"TERMINAL",0,0,"54",,terminal_output +1868,1400790,"TERMINAL",0,0,"65",,terminal_output +1869,1401848,"TERMINAL",0,0,"76",,terminal_output +1870,1402930,"TERMINAL",0,0,"87",,terminal_output +1871,1403945,"TERMINAL",0,0,"98",,terminal_output +1872,1404994,"TERMINAL",0,0,"309",,terminal_output +1873,1406057,"TERMINAL",0,0,"120",,terminal_output +1874,1407199,"TERMINAL",0,0,"22",,terminal_output +1875,1408146,"TERMINAL",0,0,"43",,terminal_output +1876,1409196,"TERMINAL",0,0,"54",,terminal_output +1877,1410253,"TERMINAL",0,0,"65",,terminal_output +1878,1411308,"TERMINAL",0,0,"76",,terminal_output +1879,1412368,"TERMINAL",0,0,"87",,terminal_output +1880,1413409,"TERMINAL",0,0,"98",,terminal_output +1881,1413435,"train_tokenizer.py",7516,0,"",python,selection_command +1882,1414455,"TERMINAL",0,0,"409",,terminal_output +1883,1414735,"train_tokenizer.py",7516,0,"#",python,content +1884,1414737,"train_tokenizer.py",7517,0,"",python,selection_keyboard +1885,1414746,"train_tokenizer.py",7517,0," ",python,content +1886,1414748,"train_tokenizer.py",7518,0,"",python,selection_keyboard +1887,1415205,"train_tokenizer.py",7517,0,"",python,selection_command +1888,1415515,"TERMINAL",0,0,"130",,terminal_output +1889,1416582,"TERMINAL",0,0,"21",,terminal_output +1890,1416644,"TERMINAL",0,0,"sh scripts_horeka/overfit_sample_tiny/tester.sh ",,terminal_output 
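Reading the interrupt sequence above: the first Ctrl-C lands inside jax.block_until_ready(loss) at train_tokenizer.py line 232, which blocks until in-flight device work finishes; the next two Ctrl-Cs abort the wandb and JAX atexit teardown handlers mid-flight. The coordination-service UNAVAILABLE error and the "Aborted (core dumped)" line are follow-on effects of killing the distributed JAX client during teardown rather than a separate training failure, and, as the message itself suggests, rerunning with python -u keeps output unbuffered so nothing is lost before a crash. After commenting out the statement at offset 7516 of train_tokenizer.py, the same tester.sh command is relaunched below.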
+1891,1417628,"TERMINAL",0,0,"32",,terminal_output +1892,1418677,"TERMINAL",0,0,"43",,terminal_output +1893,1419691,"TERMINAL",0,0,"54",,terminal_output +1894,1420030,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +1895,1420168,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=1721172\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0507\r\nSLURM_JOB_START_TIME=1751029211\r\nSLURM_STEP_NODELIST=hkn0507\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751032811\r\nSLURM_PMI2_SRUN_PORT=35623\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3298895\r\nSLURM_PTY_PORT=42613\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=35\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e10.hkn0507\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_MEM_PER_NODE=51200\r\nSLURM_PTY_WIN_COL=73\r\nSLURM_NODELIST=hkn0507\r\nSLURM_SRUN_COMM_PORT=37257\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3298895\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0507\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=37257\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0507\r\n",,terminal_output +1896,1420747,"TERMINAL",0,0,"65",,terminal_output +1897,1421805,"TERMINAL",0,0,"76",,terminal_output +1898,1422121,"TERMINAL",0,0,"2025-06-27 15:10:48.027102: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751029848.040603 1726031 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751029848.045284 1726031 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1751029848.058408 1726031 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751029848.058427 1726031 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751029848.058429 1726031 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751029848.058431 1726031 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +1899,1422839,"TERMINAL",0,0,"87",,terminal_output +1900,1423882,"TERMINAL",0,0,"98",,terminal_output +1901,1424413,"TERMINAL",0,0,"W0000 00:00:1751029850.351825 1726031 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +1902,1424817,"TERMINAL",0,0,"Running on 1 devices.\r\n",,terminal_output +1903,1424946,"TERMINAL",0,0,"509",,terminal_output +1904,1425488,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +1905,1425964,"TERMINAL",0,0,"140",,terminal_output +1906,1426099,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250627_151051-6atjgzuw\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run tokenizer-tiny-overfit-0000\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/6atjgzuw\r\n",,terminal_output +1907,1427001,"TERMINAL",0,0,"21",,terminal_output +1908,1427458,"TERMINAL",0,0,"2025-06-27 15:10:53.392400: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1909,1428036,"TERMINAL",0,0,"32",,terminal_output +1910,1429106,"TERMINAL",0,0,"44",,terminal_output +1911,1430163,"TERMINAL",0,0,"65",,terminal_output +1912,1431196,"TERMINAL",0,0,"76",,terminal_output +1913,1432220,"TERMINAL",0,0,"87",,terminal_output +1914,1433286,"TERMINAL",0,0,"98",,terminal_output +1915,1434343,"TERMINAL",0,0,"1:009",,terminal_output +1916,1435380,"TERMINAL",0,0,"150",,terminal_output +1917,1436418,"TERMINAL",0,0,"21",,terminal_output +1918,1437471,"TERMINAL",0,0,"32",,terminal_output +1919,1438540,"TERMINAL",0,0,"43",,terminal_output +1920,1439578,"TERMINAL",0,0,"54",,terminal_output +1921,1440621,"TERMINAL",0,0,"65",,terminal_output +1922,1440809,"TERMINAL",0,0,"2025-06-27 15:11:06.720145: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1923,1441653,"TERMINAL",0,0,"76",,terminal_output +1924,1442707,"TERMINAL",0,0,"87",,terminal_output +1925,1442789,"TERMINAL",0,0,"2025-06-27 15:11:08.729022: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1926,1443752,"TERMINAL",0,0,"98",,terminal_output +1927,1444804,"TERMINAL",0,0,"109",,terminal_output +1928,1445951,"TERMINAL",0,0,"11:00",,terminal_output +1929,1446774,"TERMINAL",0,0,"Starting training from step 0...\r\nbatch shape: (1, 16, 90, 160, 3)\r\n",,terminal_output +1930,1446986,"TERMINAL",0,0,"21",,terminal_output +1931,1448000,"TERMINAL",0,0,"32",,terminal_output +1932,1449050,"TERMINAL",0,0,"43",,terminal_output +1933,1450106,"TERMINAL",0,0,"54",,terminal_output +1934,1451227,"TERMINAL",0,0,"66",,terminal_output +1935,1452121,"TERMINAL",0,0,"87",,terminal_output +1936,1453156,"TERMINAL",0,0,"98",,terminal_output +1937,1454207,"TERMINAL",0,0,"209",,terminal_output +1938,1455376,"TERMINAL",0,0,"110",,terminal_output +1939,1456732,"TERMINAL",0,0,"21",,terminal_output +1940,1457617,"TERMINAL",0,0,"32",,terminal_output +1941,1458380,"TERMINAL",0,0,"43",,terminal_output +1942,1458721,"TERMINAL",0,0,"2025-06-27 15:11:24.657087: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-27 15:11:24.657673: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-27 15:11:24.657697: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-27 15:11:24.657808: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-27 15:11:24.659490: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1943,1459429,"TERMINAL",0,0,"54",,terminal_output +1944,1460503,"TERMINAL",0,0,"65",,terminal_output +1945,1461542,"TERMINAL",0,0,"76",,terminal_output +1946,1462573,"TERMINAL",0,0,"87",,terminal_output +1947,1463644,"TERMINAL",0,0,"98",,terminal_output +1948,1464761,"TERMINAL",0,0,"309",,terminal_output +1949,1465714,"TERMINAL",0,0,"120",,terminal_output +1950,1466803,"TERMINAL",0,0,"21",,terminal_output +1951,1467841,"TERMINAL",0,0,"32",,terminal_output +1952,1468847,"TERMINAL",0,0,"43",,terminal_output +1953,1470071,"TERMINAL",0,0,"54",,terminal_output +1954,1471027,"TERMINAL",0,0,"65",,terminal_output +1955,1472001,"TERMINAL",0,0,"76",,terminal_output +1956,1473054,"TERMINAL",0,0,"87",,terminal_output +1957,1474121,"TERMINAL",0,0,"99",,terminal_output +1958,1475138,"TERMINAL",0,0,"4130",,terminal_output +1959,1476178,"TERMINAL",0,0,"21",,terminal_output +1960,1477232,"TERMINAL",0,0,"32",,terminal_output +1961,1478270,"TERMINAL",0,0,"43",,terminal_output +1962,1479322,"TERMINAL",0,0,"54",,terminal_output +1963,1480371,"TERMINAL",0,0,"65",,terminal_output +1964,1481418,"TERMINAL",0,0,"76",,terminal_output +1965,1482482,"TERMINAL",0,0,"87",,terminal_output +1966,1483717,"TERMINAL",0,0,"98",,terminal_output +1967,1484765,"TERMINAL",0,0,"509",,terminal_output +1968,1485816,"TERMINAL",0,0,"140",,terminal_output +1969,1486867,"TERMINAL",0,0,"21",,terminal_output +1970,1487900,"TERMINAL",0,0,"32",,terminal_output +1971,1488965,"TERMINAL",0,0,"43",,terminal_output +1972,1490007,"TERMINAL",0,0,"54",,terminal_output +1973,1491034,"TERMINAL",0,0,"65",,terminal_output +1974,1492079,"TERMINAL",0,0,"77",,terminal_output +1975,1493147,"TERMINAL",0,0,"98",,terminal_output +1976,1494166,"TERMINAL",0,0,"2:009",,terminal_output +1977,1495225,"TERMINAL",0,0,"150",,terminal_output +1978,1496259,"TERMINAL",0,0,"21",,terminal_output +1979,1497307,"TERMINAL",0,0,"32",,terminal_output +1980,1498360,"TERMINAL",0,0,"43",,terminal_output +1981,1499409,"TERMINAL",0,0,"54",,terminal_output +1982,1500475,"TERMINAL",0,0,"65",,terminal_output +1983,1501547,"TERMINAL",0,0,"76",,terminal_output +1984,1502582,"TERMINAL",0,0,"87",,terminal_output +1985,1503640,"TERMINAL",0,0,"98",,terminal_output +1986,1504678,"TERMINAL",0,0,"109",,terminal_output +1987,1505147,"TERMINAL",0,0,"Step 0, loss: 0.27132558822631836, step time: 57991.09196662903ms\r\n",,terminal_output +1988,1505328,"TERMINAL",0,0,"Step 1, loss: 0.2122431993484497, step time: 27.8780460357666ms\r\n",,terminal_output +1989,1505497,"TERMINAL",0,0,"Step 2, loss: 0.1855737268924713, step time: 25.726318359375ms\r\n",,terminal_output +1990,1505716,"TERMINAL",0,0,"Step 3, loss: 0.17755062878131866, step time: 25.07925033569336ms\r\n",,terminal_output +1991,1505717,"TERMINAL",0,0,"12:00",,terminal_output +1992,1505918,"TERMINAL",0,0,"Step 4, loss: 0.1778501570224762, step time: 25.27785301208496ms\r\n",,terminal_output +1993,1506059,"TERMINAL",0,0,"Step 5, loss: 0.1761177033185959, step time: 25.107860565185547ms\r\n",,terminal_output +1994,1506206,"TERMINAL",0,0,"Step 6, loss: 0.1710580289363861, step time: 23.405075073242188ms\r\n",,terminal_output +1995,1506386,"TERMINAL",0,0,"Step 7, loss: 0.1691397875547409, step time: 24.682044982910156ms\r\n",,terminal_output +1996,1506652,"TERMINAL",0,0,"Step 8, loss: 0.1638549566268921, step time: 25.056123733520508ms\r\n",,terminal_output 
+1997,1506772,"TERMINAL",0,0,"21",,terminal_output +1998,1506802,"TERMINAL",0,0,"Step 9, loss: 0.1570517122745514, step time: 28.68056297302246ms\r\n",,terminal_output +1999,1506958,"TERMINAL",0,0,"Step 10, loss: 0.14786937832832336, step time: 24.375200271606445ms\r\n",,terminal_output +2000,1507088,"TERMINAL",0,0,"Step 11, loss: 0.14304016530513763, step time: 25.97665786743164ms\r\n",,terminal_output +2001,1507267,"TERMINAL",0,0,"Step 12, loss: 0.13767606019973755, step time: 24.98173713684082ms\r\n",,terminal_output +2002,1507443,"TERMINAL",0,0,"Step 13, loss: 0.13274313509464264, step time: 27.913570404052734ms\r\n",,terminal_output +2003,1507616,"TERMINAL",0,0,"Step 14, loss: 0.13003244996070862, step time: 29.102087020874023ms\r\n",,terminal_output +2004,1507782,"TERMINAL",0,0,"32",,terminal_output +2005,1507794,"TERMINAL",0,0,"Step 15, loss: 0.12706784904003143, step time: 23.3609676361084ms\r\n",,terminal_output +2006,1507971,"TERMINAL",0,0,"Step 16, loss: 0.12329636514186859, step time: 22.699356079101562ms\r\n",,terminal_output +2007,1508147,"TERMINAL",0,0,"Step 17, loss: 0.11889143288135529, step time: 23.842811584472656ms\r\n",,terminal_output +2008,1508322,"TERMINAL",0,0,"Step 18, loss: 0.11478517949581146, step time: 20.96867561340332ms\r\n",,terminal_output +2009,1508501,"TERMINAL",0,0,"Step 19, loss: 0.11081941425800323, step time: 20.25890350341797ms\r\n",,terminal_output +2010,1508690,"TERMINAL",0,0,"Step 20, loss: 0.10775110870599747, step time: 20.08819580078125ms\r\n",,terminal_output +2011,1508827,"TERMINAL",0,0,"43",,terminal_output +2012,1508853,"TERMINAL",0,0,"Step 21, loss: 0.10501230508089066, step time: 21.18968963623047ms\r\n",,terminal_output +2013,1509028,"TERMINAL",0,0,"Step 22, loss: 0.10211650282144547, step time: 20.095348358154297ms\r\n",,terminal_output +2014,1509214,"TERMINAL",0,0,"Step 23, loss: 0.09963268041610718, step time: 20.909786224365234ms\r\n",,terminal_output +2015,1509381,"TERMINAL",0,0,"Step 24, loss: 0.09808941185474396, step time: 19.710063934326172ms\r\n",,terminal_output +2016,1509555,"TERMINAL",0,0,"Step 25, loss: 0.09499428421258926, step time: 19.811391830444336ms\r\n",,terminal_output +2017,1509730,"TERMINAL",0,0,"Step 26, loss: 0.09223476052284241, step time: 19.750595092773438ms\r\n",,terminal_output +2018,1509869,"TERMINAL",0,0,"54",,terminal_output +2019,1509903,"TERMINAL",0,0,"Step 27, loss: 0.09092212468385696, step time: 21.255016326904297ms\r\n",,terminal_output +2020,1510081,"TERMINAL",0,0,"Step 28, loss: 0.09024617075920105, step time: 19.626617431640625ms\r\n",,terminal_output +2021,1510261,"TERMINAL",0,0,"Step 29, loss: 0.08941002190113068, step time: 21.010398864746094ms\r\n",,terminal_output +2022,1510431,"TERMINAL",0,0,"Step 30, loss: 0.08678887784481049, step time: 19.726037979125977ms\r\n",,terminal_output +2023,1510642,"TERMINAL",0,0,"Step 31, loss: 0.08452095836400986, step time: 19.88673210144043ms\r\n",,terminal_output +2024,1510790,"TERMINAL",0,0,"Step 32, loss: 0.08306258171796799, step time: 19.881010055541992ms\r\n",,terminal_output +2025,1510929,"TERMINAL",0,0,"65",,terminal_output +2026,1510965,"TERMINAL",0,0,"Step 33, loss: 0.08194216340780258, step time: 26.58987045288086ms\r\n",,terminal_output +2027,1511136,"TERMINAL",0,0,"Step 34, loss: 0.08037874847650528, step time: 19.863367080688477ms\r\n",,terminal_output +2028,1511318,"TERMINAL",0,0,"Step 35, loss: 0.07896984368562698, step time: 20.465373992919922ms\r\n",,terminal_output +2029,1511512,"TERMINAL",0,0,"Step 36, loss: 0.07819032669067383, step 
time: 19.56319808959961ms\r\n",,terminal_output +2030,1511688,"TERMINAL",0,0,"Step 37, loss: 0.07714777439832687, step time: 19.711971282958984ms\r\n",,terminal_output +2031,1511841,"TERMINAL",0,0,"Step 38, loss: 0.07691826671361923, step time: 19.849538803100586ms\r\n",,terminal_output +2032,1511975,"TERMINAL",0,0,"76",,terminal_output +2033,1512013,"TERMINAL",0,0,"Step 39, loss: 0.0762789249420166, step time: 20.622968673706055ms\r\n",,terminal_output +2034,1512190,"TERMINAL",0,0,"Step 40, loss: 0.07600050419569016, step time: 19.408464431762695ms\r\n",,terminal_output +2035,1512366,"TERMINAL",0,0,"Step 41, loss: 0.07498683780431747, step time: 20.648479461669922ms\r\n",,terminal_output +2036,1512548,"TERMINAL",0,0,"Step 42, loss: 0.07523719221353531, step time: 19.67477798461914ms\r\n",,terminal_output +2037,1512713,"TERMINAL",0,0,"Step 43, loss: 0.07697263360023499, step time: 19.551515579223633ms\r\n",,terminal_output +2038,1512889,"TERMINAL",0,0,"Step 44, loss: 0.07849132269620895, step time: 19.725799560546875ms\r\n",,terminal_output +2039,1513027,"TERMINAL",0,0,"87",,terminal_output +2040,1513071,"TERMINAL",0,0,"Step 45, loss: 0.0800718143582344, step time: 20.55644989013672ms\r\n",,terminal_output +2041,1513243,"TERMINAL",0,0,"Step 46, loss: 0.08573857694864273, step time: 19.722700119018555ms\r\n",,terminal_output +2042,1513417,"TERMINAL",0,0,"Step 47, loss: 0.0908166840672493, step time: 20.599365234375ms\r\n",,terminal_output +2043,1513592,"TERMINAL",0,0,"Step 48, loss: 0.09578568488359451, step time: 19.609689712524414ms\r\n",,terminal_output +2044,1513772,"TERMINAL",0,0,"Step 49, loss: 0.09983011335134506, step time: 19.562482833862305ms\r\n",,terminal_output +2045,1513947,"TERMINAL",0,0,"Step 50, loss: 0.10240868479013443, step time: 19.278764724731445ms\r\n",,terminal_output +2046,1514057,"TERMINAL",0,0,"98",,terminal_output +2047,1514147,"TERMINAL",0,0,"Step 51, loss: 0.10289336740970612, step time: 20.72453498840332ms\r\n",,terminal_output +2048,1514308,"TERMINAL",0,0,"Step 52, loss: 0.10174334794282913, step time: 24.203777313232422ms\r\n",,terminal_output +2049,1514473,"TERMINAL",0,0,"Step 53, loss: 0.09959869086742401, step time: 20.653247833251953ms\r\n",,terminal_output +2050,1514647,"TERMINAL",0,0,"Step 54, loss: 0.09633366763591766, step time: 17.097949981689453ms\r\n",,terminal_output +2051,1514864,"TERMINAL",0,0,"Step 55, loss: 0.09352824091911316, step time: 15.82956314086914ms\r\n",,terminal_output +2052,1515004,"TERMINAL",0,0,"Step 56, loss: 0.08970002084970474, step time: 15.410423278808594ms\r\n",,terminal_output +2053,1515137,"TERMINAL",0,0,"2110",,terminal_output +2054,1515169,"TERMINAL",0,0,"Step 57, loss: 0.08657428622245789, step time: 16.187667846679688ms\r\n",,terminal_output +2055,1515356,"TERMINAL",0,0,"Step 58, loss: 0.08380654454231262, step time: 15.225410461425781ms\r\n",,terminal_output +2056,1515514,"TERMINAL",0,0,"Step 59, loss: 0.08098071068525314, step time: 17.35234260559082ms\r\n",,terminal_output +2057,1515691,"TERMINAL",0,0,"Step 60, loss: 0.07887909561395645, step time: 15.457868576049805ms\r\n",,terminal_output +2058,1515867,"TERMINAL",0,0,"Step 61, loss: 0.07696036249399185, step time: 15.271902084350586ms\r\n",,terminal_output +2059,1516038,"TERMINAL",0,0,"Step 62, loss: 0.07476290315389633, step time: 15.001535415649414ms\r\n",,terminal_output +2060,1516205,"TERMINAL",0,0,"21",,terminal_output +2061,1516244,"TERMINAL",0,0,"Step 63, loss: 0.07220403105020523, step time: 15.915632247924805ms\r\n",,terminal_output 
+2062,1516394,"TERMINAL",0,0,"Step 64, loss: 0.07085993140935898, step time: 16.568660736083984ms\r\n",,terminal_output +2063,1516600,"TERMINAL",0,0,"Step 65, loss: 0.07041798532009125, step time: 16.16072654724121ms\r\n",,terminal_output +2064,1516756,"TERMINAL",0,0,"Step 66, loss: 0.06943371891975403, step time: 15.121698379516602ms\r\n",,terminal_output +2065,1516926,"TERMINAL",0,0,"Step 67, loss: 0.06839241832494736, step time: 15.12765884399414ms\r\n",,terminal_output +2066,1517081,"TERMINAL",0,0,"Step 68, loss: 0.06776509433984756, step time: 14.951944351196289ms\r\n",,terminal_output +2067,1517215,"TERMINAL",0,0,"32",,terminal_output +2068,1517254,"TERMINAL",0,0,"Step 69, loss: 0.06710164248943329, step time: 15.763282775878906ms\r\n",,terminal_output +2069,1517442,"TERMINAL",0,0,"Step 70, loss: 0.0666656568646431, step time: 14.66989517211914ms\r\n",,terminal_output +2070,1517611,"TERMINAL",0,0,"Step 71, loss: 0.06643381714820862, step time: 15.739679336547852ms\r\n",,terminal_output +2071,1517775,"TERMINAL",0,0,"Step 72, loss: 0.06574515998363495, step time: 14.82701301574707ms\r\n",,terminal_output +2072,1517972,"TERMINAL",0,0,"Step 73, loss: 0.06528597325086594, step time: 14.843463897705078ms\r\n",,terminal_output +2073,1518124,"TERMINAL",0,0,"Step 74, loss: 0.06552674621343613, step time: 15.31672477722168ms\r\n",,terminal_output +2074,1518264,"TERMINAL",0,0,"43",,terminal_output +2075,1518304,"TERMINAL",0,0,"Step 75, loss: 0.06598003953695297, step time: 15.427112579345703ms\r\n",,terminal_output +2076,1518473,"TERMINAL",0,0,"Step 76, loss: 0.06622635573148727, step time: 14.861345291137695ms\r\n",,terminal_output +2077,1518644,"TERMINAL",0,0,"Step 77, loss: 0.06617008149623871, step time: 15.723228454589844ms\r\n",,terminal_output +2078,1518818,"TERMINAL",0,0,"Step 78, loss: 0.0664023607969284, step time: 15.506982803344727ms\r\n",,terminal_output +2079,1519000,"TERMINAL",0,0,"Step 79, loss: 0.06637489795684814, step time: 14.768838882446289ms\r\n",,terminal_output +2080,1519174,"TERMINAL",0,0,"Step 80, loss: 0.06621888279914856, step time: 14.690160751342773ms\r\n",,terminal_output +2081,1519310,"TERMINAL",0,0,"54",,terminal_output +2082,1519369,"TERMINAL",0,0,"Step 81, loss: 0.06592927128076553, step time: 15.550613403320312ms\r\n",,terminal_output +2083,1519547,"TERMINAL",0,0,"Step 82, loss: 0.06505300104618073, step time: 14.795541763305664ms\r\n",,terminal_output +2084,1519701,"TERMINAL",0,0,"Step 83, loss: 0.06420338153839111, step time: 15.482664108276367ms\r\n",,terminal_output +2085,1519873,"TERMINAL",0,0,"Step 84, loss: 0.06326809525489807, step time: 14.930963516235352ms\r\n",,terminal_output +2086,1520041,"TERMINAL",0,0,"Step 85, loss: 0.06240592151880264, step time: 14.669418334960938ms\r\n",,terminal_output +2087,1520214,"TERMINAL",0,0,"Step 86, loss: 0.06148574501276016, step time: 14.798402786254883ms\r\n",,terminal_output +2088,1520373,"TERMINAL",0,0,"65",,terminal_output +2089,1520385,"TERMINAL",0,0,"Step 87, loss: 0.060493048280477524, step time: 15.590190887451172ms\r\n",,terminal_output +2090,1520570,"TERMINAL",0,0,"Step 88, loss: 0.05977379530668259, step time: 15.08474349975586ms\r\n",,terminal_output +2091,1520731,"TERMINAL",0,0,"Step 89, loss: 0.05930326133966446, step time: 15.66004753112793ms\r\n",,terminal_output +2092,1520903,"TERMINAL",0,0,"Step 90, loss: 0.05883357673883438, step time: 14.492511749267578ms\r\n",,terminal_output +2093,1521080,"TERMINAL",0,0,"Step 91, loss: 0.058342307806015015, step time: 
14.943122863769531ms\r\n",,terminal_output +2094,1521249,"TERMINAL",0,0,"Step 92, loss: 0.05828448385000229, step time: 14.591455459594727ms\r\n",,terminal_output +2095,1521387,"TERMINAL",0,0,"76",,terminal_output +2096,1521426,"TERMINAL",0,0,"Step 93, loss: 0.05798940733075142, step time: 15.645265579223633ms\r\n",,terminal_output +2097,1521642,"TERMINAL",0,0,"Step 94, loss: 0.057853180915117264, step time: 20.11847496032715ms\r\n",,terminal_output +2098,1521777,"TERMINAL",0,0,"Step 95, loss: 0.05778525024652481, step time: 20.583629608154297ms\r\n",,terminal_output +2099,1521957,"TERMINAL",0,0,"Step 96, loss: 0.057797566056251526, step time: 19.35410499572754ms\r\n",,terminal_output +2100,1522132,"TERMINAL",0,0,"Step 97, loss: 0.05710943788290024, step time: 19.65808868408203ms\r\n",,terminal_output +2101,1522306,"TERMINAL",0,0,"Step 98, loss: 0.057192955166101456, step time: 19.742965698242188ms\r\n",,terminal_output +2102,1522561,"TERMINAL",0,0,"87",,terminal_output +2103,1522561,"TERMINAL",0,0,"Step 99, loss: 0.057309191673994064, step time: 20.35689353942871ms\r\n",,terminal_output +2104,1523379,"TERMINAL",0,0,"Step 100, loss: 0.057245783507823944, step time: 28.415918350219727ms\r\n",,terminal_output +2105,1523551,"TERMINAL",0,0,"98",,terminal_output +2106,1523586,"TERMINAL",0,0,"Step 101, loss: 0.05726677551865578, step time: 24.48296546936035ms\r\n",,terminal_output +2107,1523730,"TERMINAL",0,0,"Step 102, loss: 0.05745009705424309, step time: 25.498628616333008ms\r\n",,terminal_output +2108,1523908,"TERMINAL",0,0,"Step 103, loss: 0.057342659682035446, step time: 24.51038360595703ms\r\n",,terminal_output +2109,1524094,"TERMINAL",0,0,"Step 104, loss: 0.0574355386197567, step time: 30.658721923828125ms\r\n",,terminal_output +2110,1524273,"TERMINAL",0,0,"Step 105, loss: 0.05769295245409012, step time: 27.311325073242188ms\r\n",,terminal_output +2111,1524448,"TERMINAL",0,0,"Step 106, loss: 0.05732066556811333, step time: 28.501272201538086ms\r\n",,terminal_output +2112,1524527,"TERMINAL",0,0,"309",,terminal_output +2113,1524658,"TERMINAL",0,0,"Step 107, loss: 0.057175204157829285, step time: 28.898000717163086ms\r\n",,terminal_output +2114,1524803,"TERMINAL",0,0,"Step 108, loss: 0.05712767317891121, step time: 26.47876739501953ms\r\n",,terminal_output +2115,1524977,"TERMINAL",0,0,"Step 109, loss: 0.057058367878198624, step time: 23.822307586669922ms\r\n",,terminal_output +2116,1525187,"TERMINAL",0,0,"Step 110, loss: 0.05692405253648758, step time: 20.287036895751953ms\r\n",,terminal_output +2117,1525336,"TERMINAL",0,0,"Step 111, loss: 0.05703963339328766, step time: 24.51801300048828ms\r\n",,terminal_output +2118,1525515,"TERMINAL",0,0,"Step 112, loss: 0.057107388973236084, step time: 24.685382843017578ms\r\n",,terminal_output +2119,1525606,"TERMINAL",0,0,"120",,terminal_output +2120,1525708,"TERMINAL",0,0,"Step 113, loss: 0.057029157876968384, step time: 24.400949478149414ms\r\n",,terminal_output +2121,1525866,"TERMINAL",0,0,"Step 114, loss: 0.05696145445108414, step time: 22.758007049560547ms\r\n",,terminal_output +2122,1526044,"TERMINAL",0,0,"Step 115, loss: 0.05700189992785454, step time: 21.532535552978516ms\r\n",,terminal_output +2123,1526218,"TERMINAL",0,0,"Step 116, loss: 0.056854940950870514, step time: 20.30158042907715ms\r\n",,terminal_output +2124,1526397,"TERMINAL",0,0,"Step 117, loss: 0.05648580938577652, step time: 24.469375610351562ms\r\n",,terminal_output +2125,1526623,"TERMINAL",0,0,"Step 118, loss: 0.05606388673186302, step time: 
23.987293243408203ms\r\n",,terminal_output +2126,1526623,"TERMINAL",0,0,"21",,terminal_output +2127,1526751,"TERMINAL",0,0,"Step 119, loss: 0.05551157519221306, step time: 24.263381958007812ms\r\n",,terminal_output +2128,1526931,"TERMINAL",0,0,"Step 120, loss: 0.0546814389526844, step time: 22.557973861694336ms\r\n",,terminal_output +2129,1527103,"TERMINAL",0,0,"Step 121, loss: 0.0543450191617012, step time: 22.066593170166016ms\r\n",,terminal_output +2130,1527281,"TERMINAL",0,0,"Step 122, loss: 0.05376313626766205, step time: 20.450830459594727ms\r\n",,terminal_output +2131,1527455,"TERMINAL",0,0,"Step 123, loss: 0.053177360445261, step time: 23.941993713378906ms\r\n",,terminal_output +2132,1527671,"TERMINAL",0,0,"Step 124, loss: 0.05385174974799156, step time: 23.996591567993164ms\r\n",,terminal_output +2133,1527694,"TERMINAL",0,0,"32",,terminal_output +2134,1527840,"TERMINAL",0,0,"Step 125, loss: 0.05498424172401428, step time: 24.22475814819336ms\r\n",,terminal_output +2135,1527992,"TERMINAL",0,0,"Step 126, loss: 0.056152377277612686, step time: 22.834300994873047ms\r\n",,terminal_output +2136,1528174,"TERMINAL",0,0,"Step 127, loss: 0.055481329560279846, step time: 22.212982177734375ms\r\n",,terminal_output +2137,1528338,"TERMINAL",0,0,"Step 128, loss: 0.054322026669979095, step time: 20.25580406188965ms\r\n",,terminal_output +2138,1528521,"TERMINAL",0,0,"Step 129, loss: 0.05279437452554703, step time: 24.316072463989258ms\r\n",,terminal_output +2139,1528719,"TERMINAL",0,0,"Step 130, loss: 0.051557574421167374, step time: 22.515296936035156ms\r\n",,terminal_output +2140,1528755,"TERMINAL",0,0,"43",,terminal_output +2141,1528889,"TERMINAL",0,0,"Step 131, loss: 0.05088089406490326, step time: 20.748376846313477ms\r\n",,terminal_output +2142,1529068,"TERMINAL",0,0,"Step 132, loss: 0.050587333738803864, step time: 19.841909408569336ms\r\n",,terminal_output +2143,1529274,"TERMINAL",0,0,"Step 133, loss: 0.05017203465104103, step time: 19.657611846923828ms\r\n",,terminal_output +2144,1529418,"TERMINAL",0,0,"Step 134, loss: 0.049587756395339966, step time: 20.03192901611328ms\r\n",,terminal_output +2145,1529584,"TERMINAL",0,0,"Step 135, loss: 0.04896565154194832, step time: 19.85645294189453ms\r\n",,terminal_output +2146,1529761,"TERMINAL",0,0,"54",,terminal_output +2147,1529762,"TERMINAL",0,0,"Step 136, loss: 0.04817724972963333, step time: 20.110130310058594ms\r\n",,terminal_output +2148,1529939,"TERMINAL",0,0,"Step 137, loss: 0.04777523875236511, step time: 20.48802375793457ms\r\n",,terminal_output +2149,1530103,"TERMINAL",0,0,"Step 138, loss: 0.04708292335271835, step time: 29.757261276245117ms\r\n",,terminal_output +2150,1530307,"TERMINAL",0,0,"Step 139, loss: 0.04652995616197586, step time: 23.048877716064453ms\r\n",,terminal_output +2151,1530460,"TERMINAL",0,0,"Step 140, loss: 0.0462082102894783, step time: 20.154476165771484ms\r\n",,terminal_output +2152,1530633,"TERMINAL",0,0,"Step 141, loss: 0.04591856524348259, step time: 21.32868766784668ms\r\n",,terminal_output +2153,1530789,"TERMINAL",0,0,"65",,terminal_output +2154,1530809,"TERMINAL",0,0,"Step 142, loss: 0.04579511284828186, step time: 20.335912704467773ms\r\n",,terminal_output +2155,1530986,"TERMINAL",0,0,"Step 143, loss: 0.045278675854206085, step time: 20.800113677978516ms\r\n",,terminal_output +2156,1531165,"TERMINAL",0,0,"Step 144, loss: 0.04480832815170288, step time: 19.919872283935547ms\r\n",,terminal_output +2157,1531341,"TERMINAL",0,0,"Step 145, loss: 0.04477040097117424, step time: 
19.49906349182129ms\r\n",,terminal_output +2158,1531516,"TERMINAL",0,0,"Step 146, loss: 0.04454253241419792, step time: 20.016193389892578ms\r\n",,terminal_output +2159,1531694,"TERMINAL",0,0,"Step 147, loss: 0.04441037029027939, step time: 20.282506942749023ms\r\n",,terminal_output +2160,1531842,"TERMINAL",0,0,"76",,terminal_output +2161,1531871,"TERMINAL",0,0,"Step 148, loss: 0.04404979199171066, step time: 19.956111907958984ms\r\n",,terminal_output +2162,1532044,"TERMINAL",0,0,"Step 149, loss: 0.04372889921069145, step time: 20.37334442138672ms\r\n",,terminal_output +2163,1532222,"TERMINAL",0,0,"Step 150, loss: 0.043596845120191574, step time: 19.6073055267334ms\r\n",,terminal_output +2164,1532398,"TERMINAL",0,0,"Step 151, loss: 0.043377622961997986, step time: 19.253253936767578ms\r\n",,terminal_output +2165,1532588,"TERMINAL",0,0,"Step 152, loss: 0.043282173573970795, step time: 19.91558074951172ms\r\n",,terminal_output +2166,1532764,"TERMINAL",0,0,"Step 153, loss: 0.04304316267371178, step time: 20.254135131835938ms\r\n",,terminal_output +2167,1532887,"TERMINAL",0,0,"87",,terminal_output +2168,1532924,"TERMINAL",0,0,"Step 154, loss: 0.042938314378261566, step time: 19.67024803161621ms\r\n",,terminal_output +2169,1533109,"TERMINAL",0,0,"Step 155, loss: 0.042851150035858154, step time: 19.99378204345703ms\r\n",,terminal_output +2170,1533279,"TERMINAL",0,0,"Step 156, loss: 0.04274819791316986, step time: 19.460678100585938ms\r\n",,terminal_output +2171,1533475,"TERMINAL",0,0,"Step 157, loss: 0.04280657321214676, step time: 19.023895263671875ms\r\n",,terminal_output +2172,1533660,"TERMINAL",0,0,"Step 158, loss: 0.04285239428281784, step time: 19.873380661010742ms\r\n",,terminal_output +2173,1533814,"TERMINAL",0,0,"Step 159, loss: 0.04323887452483177, step time: 20.31397819519043ms\r\n",,terminal_output +2174,1533975,"TERMINAL",0,0,"98",,terminal_output +2175,1533983,"TERMINAL",0,0,"Step 160, loss: 0.04334348812699318, step time: 20.22075653076172ms\r\n",,terminal_output +2176,1534163,"TERMINAL",0,0,"Step 161, loss: 0.04336251690983772, step time: 21.24953269958496ms\r\n",,terminal_output +2177,1534358,"TERMINAL",0,0,"Step 162, loss: 0.04358673095703125, step time: 19.455671310424805ms\r\n",,terminal_output +2178,1534514,"TERMINAL",0,0,"Step 163, loss: 0.0437278188765049, step time: 19.387483596801758ms\r\n",,terminal_output +2179,1534685,"TERMINAL",0,0,"Step 164, loss: 0.044114310294389725, step time: 19.623279571533203ms\r\n",,terminal_output +2180,1534877,"TERMINAL",0,0,"Step 165, loss: 0.04393230378627777, step time: 20.03622055053711ms\r\n",,terminal_output +2181,1535017,"TERMINAL",0,0,"409",,terminal_output +2182,1535039,"TERMINAL",0,0,"Step 166, loss: 0.04392385110259056, step time: 19.79851722717285ms\r\n",,terminal_output +2183,1535212,"TERMINAL",0,0,"Step 167, loss: 0.044087063521146774, step time: 20.21026611328125ms\r\n",,terminal_output +2184,1535409,"TERMINAL",0,0,"Step 168, loss: 0.04384639859199524, step time: 19.479036331176758ms\r\n",,terminal_output +2185,1535563,"TERMINAL",0,0,"Step 169, loss: 0.04386235401034355, step time: 19.350528717041016ms\r\n",,terminal_output +2186,1535779,"TERMINAL",0,0,"Step 170, loss: 0.04416583105921745, step time: 19.3936824798584ms\r\n",,terminal_output +2187,1535947,"TERMINAL",0,0,"Step 171, loss: 0.043759819120168686, step time: 20.327329635620117ms\r\n",,terminal_output +2188,1536072,"TERMINAL",0,0,"130",,terminal_output +2189,1536129,"TERMINAL",0,0,"Step 172, loss: 0.0438392199575901, step time: 
19.652605056762695ms\r\n",,terminal_output +2190,1536296,"TERMINAL",0,0,"Step 173, loss: 0.043486785143613815, step time: 20.38288116455078ms\r\n",,terminal_output +2191,1536446,"TERMINAL",0,0,"Step 174, loss: 0.04322483390569687, step time: 19.781827926635742ms\r\n",,terminal_output +2192,1536622,"TERMINAL",0,0,"Step 175, loss: 0.04306307062506676, step time: 19.108057022094727ms\r\n",,terminal_output +2193,1536799,"TERMINAL",0,0,"Step 176, loss: 0.04269145801663399, step time: 21.73471450805664ms\r\n",,terminal_output +2194,1536973,"TERMINAL",0,0,"Step 177, loss: 0.04226048290729523, step time: 20.6449031829834ms\r\n",,terminal_output +2195,1537143,"TERMINAL",0,0,"22",,terminal_output +2196,1537183,"TERMINAL",0,0,"Step 178, loss: 0.042141154408454895, step time: 20.023584365844727ms\r\n",,terminal_output +2197,1537327,"TERMINAL",0,0,"Step 179, loss: 0.04194315895438194, step time: 20.254135131835938ms\r\n",,terminal_output +2198,1537506,"TERMINAL",0,0,"Step 180, loss: 0.041929323226213455, step time: 25.798559188842773ms\r\n",,terminal_output +2199,1537559,"TERMINAL",0,0,"^C",,terminal_output +2200,1537714,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 234, in <module>\r\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/array.py"", line 341, in __format__\r\n return format(self._value[()], format_spec)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/profiler.py"", line 354, in wrapper\r\n return func(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/array.py"", line 641, in _value\r\n npy_value, did_copy = self._single_device_array_to_np_array_did_copy()\r\nKeyboardInterrupt\r\n",,terminal_output +2201,1537791,"TERMINAL",0,0,"^CException ignored in atexit callback: .teardown_atexit at 0x14866c5cbe20>\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/service_connection.py"", line 94, in teardown_atexit\r\n conn.teardown(hooks.exit_code)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/service_connection.py"", line 226, in teardown\r\n self._router.join()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/interface/router.py"", line 75, in join\r\n self._thread.join()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/threading.py"", line 1096, in join\r\n self._wait_for_tstate_lock()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/threading.py"", line 1116, in _wait_for_tstate_lock\r\n if lock.acquire(block, timeout):\r\nKeyboardInterrupt: \r\n",,terminal_output +2202,1538032,"TERMINAL",0,0,"^CException ignored in atexit callback: \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/api.py"", line 3168, in clean_up\r\n clear_backends()\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/api.py"", line 3158, in clear_backends\r\n pjit._infer_params_cached.cache_clear()\r\nKeyboardInterrupt: \r\n",,terminal_output +2203,1538165,"TERMINAL",0,0,"43",,terminal_output +2204,1538457,"TERMINAL",0,0,"^C",,terminal_output +2205,1538816,"TERMINAL",0,0,"\r\n]0;tum_cte0515@hkn0507:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0507 jafar]$ ",,terminal_output +2206,1538877,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0507:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0507 jafar]$ ",,terminal_output +2207,1539185,"TERMINAL",0,0,"54",,terminal_output +2208,1540276,"TERMINAL",0,0,"65",,terminal_output +2209,1541078,"train_tokenizer.py",0,0,"",python,tab +2210,1541079,"train_tokenizer.py",7577,0,"",python,selection_mouse +2211,1541303,"TERMINAL",0,0,"76",,terminal_output +2212,1541550,"train_tokenizer.py",7537,0,"",python,selection_mouse +2213,1542331,"TERMINAL",0,0,"87",,terminal_output +2214,1543387,"TERMINAL",0,0,"98",,terminal_output +2215,1544106,"scripts_horeka/overfit_sample_tiny/tester.sh",0,0,"",shellscript,tab +2216,1544451,"TERMINAL",0,0,"509",,terminal_output +2217,1544954,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab +2218,1545488,"TERMINAL",0,0,"140",,terminal_output +2219,1546534,"TERMINAL",0,0,"21",,terminal_output +2220,1547564,"TERMINAL",0,0,"32",,terminal_output +2221,1548447,"scripts_horeka/overfit_sample_tiny/tester.sh",0,0,"",shellscript,tab +2222,1548627,"TERMINAL",0,0,"43",,terminal_output +2223,1549693,"TERMINAL",0,0,"54",,terminal_output +2224,1550701,"TERMINAL",0,0,"65",,terminal_output +2225,1551755,"TERMINAL",0,0,"76",,terminal_output +2226,1551911,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",0,0,"",shellscript,tab +2227,1552795,"TERMINAL",0,0,"87",,terminal_output +2228,1552811,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1227,0,"",shellscript,selection_mouse +2229,1552813,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1226,0,"",shellscript,selection_command +2230,1552981,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1226,1,"0",shellscript,selection_mouse +2231,1552983,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1227,0,"",shellscript,selection_command +2232,1553016,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1180,47,"\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2233,1553023,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1115,112,"\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2234,1553033,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1006,221,"stant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2235,1553061,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",962,265,"age_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse 
+2236,1553068,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",936,291,"p_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2237,1553140,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",914,313,"_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2238,1553140,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",894,333,"_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2239,1553209,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",873,354,"ch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2240,1553233,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",841,386,"_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2241,1553246,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",815,412," train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2242,1553285,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",803,424,"\nsrun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2243,1553499,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",815,412," train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n 
--patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2244,1553516,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",814,413,"n train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2245,1553519,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",813,414,"on train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2246,1553527,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",812,415,"hon train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2247,1553561,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",811,416,"thon train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2248,1553566,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",810,417,"ython train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2249,1553602,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",809,418,"python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2250,1553841,"TERMINAL",0,0,"98",,terminal_output +2251,1554884,"TERMINAL",0,0,"3:009",,terminal_output +2252,1555928,"TERMINAL",0,0,"150",,terminal_output +2253,1556581,"scripts_horeka/overfit_sample_tiny/tester.sh",0,0,"",shellscript,tab +2254,1557002,"TERMINAL",0,0,"21",,terminal_output +2255,1557603,"scripts_horeka/overfit_sample_tiny/tester.sh",859,0,"",shellscript,selection_mouse 
+2256,1557771,"scripts_horeka/overfit_sample_tiny/tester.sh",812,47,"\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +2257,1557782,"scripts_horeka/overfit_sample_tiny/tester.sh",745,114,"\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +2258,1557798,"scripts_horeka/overfit_sample_tiny/tester.sh",606,253,"overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +2259,1557814,"scripts_horeka/overfit_sample_tiny/tester.sh",510,349,"terval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +2260,1557848,"scripts_horeka/overfit_sample_tiny/tester.sh",464,395,"e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +2261,1557855,"scripts_horeka/overfit_sample_tiny/tester.sh",443,416,"=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +2262,1557893,"scripts_horeka/overfit_sample_tiny/tester.sh",442,417,"e=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +2263,1557893,"scripts_horeka/overfit_sample_tiny/tester.sh",409,450,"$CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +2264,1557899,"scripts_horeka/overfit_sample_tiny/tester.sh",408,451," $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 
0.0",shellscript,selection_mouse +2265,1557937,"scripts_horeka/overfit_sample_tiny/tester.sh",379,480,"tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +2266,1557978,"scripts_horeka/overfit_sample_tiny/tester.sh",378,481,"_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +2267,1558021,"scripts_horeka/overfit_sample_tiny/tester.sh",377,482,"n_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +2268,1558060,"scripts_horeka/overfit_sample_tiny/tester.sh",376,483,"in_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +2269,1558061,"TERMINAL",0,0,"32",,terminal_output +2270,1558107,"scripts_horeka/overfit_sample_tiny/tester.sh",375,484,"ain_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +2271,1558149,"scripts_horeka/overfit_sample_tiny/tester.sh",374,485,"rain_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n --log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +2272,1558443,"scripts_horeka/overfit_sample_tiny/tester.sh",373,486,"train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=4.3e-5 \\n --max_lr=4.3e-5 \\n 
--log_image_interval=100 \\n --log \\n --name=tokenizer-tiny-overfit-$slurm_job_id \\n --tags tokenizer overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim 384 \\n --latent_dim 32 \\n --num_latents 1024 \\n --patch_size 4 \\n --num_blocks 8 \\n --num_heads 8 \\n --codebook_dropout 0.0",shellscript,selection_mouse +2273,1559113,"TERMINAL",0,0,"44",,terminal_output +2274,1559346,"scripts_horeka/overfit_sample_tiny/tester.sh",373,486,"",shellscript,content +2275,1559681,"scripts_horeka/overfit_sample_tiny/tester.sh",373,0,"python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,content +2276,1560118,"TERMINAL",0,0,"65",,terminal_output +2277,1560705,"scripts_horeka/overfit_sample_tiny/tester.sh",701,0,"",shellscript,selection_mouse +2278,1561184,"TERMINAL",0,0,"76",,terminal_output +2279,1561233,"scripts_horeka/overfit_sample_tiny/tester.sh",764,0,"",shellscript,selection_mouse +2280,1562218,"TERMINAL",0,0,"87",,terminal_output +2281,1562295,"scripts_horeka/overfit_sample_tiny/tester.sh",427,0,"",shellscript,selection_mouse +2282,1562816,"scripts_horeka/overfit_sample_tiny/tester.sh",764,0,"",shellscript,selection_mouse +2283,1563295,"TERMINAL",0,0,"98",,terminal_output +2284,1564240,"TERMINAL",0,0,"sh scripts_horeka/overfit_sample_tiny/tester.sh ",,terminal_output +2285,1564339,"TERMINAL",0,0,"109",,terminal_output +2286,1565355,"TERMINAL",0,0,"13:00",,terminal_output +2287,1566419,"TERMINAL",0,0,"21",,terminal_output +2288,1567441,"TERMINAL",0,0,"32",,terminal_output +2289,1568475,"TERMINAL",0,0,"43",,terminal_output +2290,1569518,"TERMINAL",0,0,"54",,terminal_output +2291,1569972,"scripts_horeka/overfit_sample_tiny/tester.sh",0,0,"",shellscript,tab +2292,1569973,"scripts_horeka/overfit_sample_tiny/tester.sh",791,0,"",shellscript,selection_mouse +2293,1570173,"scripts_horeka/overfit_sample_tiny/tester.sh",701,90,"\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2294,1570186,"scripts_horeka/overfit_sample_tiny/tester.sh",599,192,"r \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2295,1570202,"scripts_horeka/overfit_sample_tiny/tester.sh",478,313,"_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2296,1570231,"scripts_horeka/overfit_sample_tiny/tester.sh",365,426,"\npython python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse 
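Two details worth noticing in the tester.sh edit recorded above. First, the deletion at sequence 2274 starts at the old "train_tokenizer.py" token, after the existing "python ", while the paste at 2275 brings its own interpreter word, so the selections from sequence 2296 onward briefly read "python python train_lam.py"; presumably the intended command keeps only one "python". Second, the pasted sbatch block writes "=" between flag and value (--patch_size=16) where tester.sh previously used spaces (--patch_size 4); assuming an argparse-style CLI on the trainer's side (an assumption, not confirmed by the recording), both spellings parse identically, so the mixed styles are harmless:

import argparse

p = argparse.ArgumentParser()
p.add_argument("--patch_size", type=int)
# Both spellings seen in the recording parse to the same value shape.
assert p.parse_args(["--patch_size", "4"]).patch_size == 4
assert p.parse_args(["--patch_size=16"]).patch_size == 16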
+2297,1570243,"scripts_horeka/overfit_sample_tiny/tester.sh",322,469,"mkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\npython python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2298,1570281,"scripts_horeka/overfit_sample_tiny/tester.sh",263,528,"CHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\npython python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2299,1570282,"scripts_horeka/overfit_sample_tiny/tester.sh",262,529,"\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\npython python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2300,1570333,"scripts_horeka/overfit_sample_tiny/tester.sh",242,549,"slurm_job_id=""0000""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\npython python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2301,1570424,"scripts_horeka/overfit_sample_tiny/tester.sh",225,566,"job_name=""debug""\nslurm_job_id=""0000""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\npython python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2302,1570556,"scripts_horeka/overfit_sample_tiny/tester.sh",242,549,"slurm_job_id=""0000""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\npython python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n 
--num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2303,1570573,"TERMINAL",0,0,"65",,terminal_output +2304,1570603,"scripts_horeka/overfit_sample_tiny/tester.sh",262,529,"\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\npython python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2305,1570604,"scripts_horeka/overfit_sample_tiny/tester.sh",263,528,"CHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\npython python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2306,1570620,"scripts_horeka/overfit_sample_tiny/tester.sh",322,469,"mkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\npython python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2307,1570656,"scripts_horeka/overfit_sample_tiny/tester.sh",347,444,"\nenv | grep SLURM\n\npython python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2308,1570657,"scripts_horeka/overfit_sample_tiny/tester.sh",365,426,"\npython python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2309,1570675,"scripts_horeka/overfit_sample_tiny/tester.sh",366,425,"python python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2310,1570701,"scripts_horeka/overfit_sample_tiny/tester.sh",428,363," --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n 
--log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2311,1570702,"scripts_horeka/overfit_sample_tiny/tester.sh",469,322," --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2312,1570742,"scripts_horeka/overfit_sample_tiny/tester.sh",516,275," --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2313,1570788,"scripts_horeka/overfit_sample_tiny/tester.sh",517,274," --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2314,1570789,"scripts_horeka/overfit_sample_tiny/tester.sh",548,243,"-log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2315,1570832,"scripts_horeka/overfit_sample_tiny/tester.sh",561,230,"entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2316,1570870,"scripts_horeka/overfit_sample_tiny/tester.sh",562,229,"ntity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2317,1570871,"scripts_horeka/overfit_sample_tiny/tester.sh",589,202,"oject jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2318,1570871,"scripts_horeka/overfit_sample_tiny/tester.sh",590,201,"ject jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2319,1570872,"scripts_horeka/overfit_sample_tiny/tester.sh",591,200,"ect jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2320,1570872,"scripts_horeka/overfit_sample_tiny/tester.sh",592,199,"ct jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2321,1570914,"scripts_horeka/overfit_sample_tiny/tester.sh",566,225,"y instant-uv \\n --project jafar \\n --data_dir 
$tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2322,1570914,"scripts_horeka/overfit_sample_tiny/tester.sh",567,224," instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2323,1570953,"scripts_horeka/overfit_sample_tiny/tester.sh",554,237,"\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2324,1571101,"scripts_horeka/overfit_sample_tiny/tester.sh",554,0,"",shellscript,selection_mouse +2325,1571643,"TERMINAL",0,0,"76",,terminal_output +2326,1572635,"TERMINAL",0,0,"87",,terminal_output +2327,1573683,"TERMINAL",0,0,"98",,terminal_output +2328,1573786,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",0,0,"",shellscript,tab +2329,1574637,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1227,0,"",shellscript,selection_mouse +2330,1574638,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1226,0,"",shellscript,selection_command +2331,1574775,"TERMINAL",0,0,"209",,terminal_output +2332,1574786,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1226,1,"0",shellscript,selection_mouse +2333,1574787,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1227,0,"",shellscript,selection_command +2334,1574833,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1200,27,"\n --codebook_dropout=0.0",shellscript,selection_mouse +2335,1574875,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1109,118,"m=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2336,1574876,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1026,201,"ject jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2337,1574916,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",985,242,"log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2338,1574917,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",928,299," --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2339,1574919,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",927,300," --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse 
+2340,1574970,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",906,321," --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2341,1574970,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",905,322," --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2342,1574972,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",885,342," --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2343,1575023,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",864,363," --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2344,1575084,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",831,396," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2345,1575189,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",804,423,"srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2346,1575777,"TERMINAL",0,0,"110",,terminal_output +2347,1576136,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",804,0,"",shellscript,selection_command +2348,1576916,"TERMINAL",0,0,"21",,terminal_output +2349,1577963,"TERMINAL",0,0,"32",,terminal_output +2350,1578933,"TERMINAL",0,0,"43",,terminal_output +2351,1580007,"TERMINAL",0,0,"54",,terminal_output +2352,1581042,"TERMINAL",0,0,"65",,terminal_output +2353,1582117,"TERMINAL",0,0,"77",,terminal_output +2354,1583135,"TERMINAL",0,0,"98",,terminal_output +2355,1584209,"TERMINAL",0,0,"309",,terminal_output +2356,1585282,"TERMINAL",0,0,"120",,terminal_output +2357,1586278,"TERMINAL",0,0,"21",,terminal_output +2358,1587333,"TERMINAL",0,0,"32",,terminal_output +2359,1588414,"TERMINAL",0,0,"43",,terminal_output +2360,1589405,"TERMINAL",0,0,"54",,terminal_output 
+2361,1590456,"TERMINAL",0,0,"65",,terminal_output +2362,1591539,"TERMINAL",0,0,"76",,terminal_output +2363,1592560,"TERMINAL",0,0,"87",,terminal_output +2364,1593627,"TERMINAL",0,0,"98",,terminal_output +2365,1594647,"TERMINAL",0,0,"409",,terminal_output +2366,1595681,"TERMINAL",0,0,"130",,terminal_output +2367,1596728,"TERMINAL",0,0,"21",,terminal_output +2368,1597810,"TERMINAL",0,0,"32",,terminal_output +2369,1598864,"TERMINAL",0,0,"43",,terminal_output +2370,1599890,"TERMINAL",0,0,"54",,terminal_output +2371,1600936,"TERMINAL",0,0,"65",,terminal_output +2372,1601985,"TERMINAL",0,0,"76",,terminal_output +2373,1603030,"TERMINAL",0,0,"87",,terminal_output +2374,1604057,"TERMINAL",0,0,"98",,terminal_output +2375,1605093,"TERMINAL",0,0,"5140",,terminal_output +2376,1606144,"TERMINAL",0,0,"21",,terminal_output +2377,1607176,"TERMINAL",0,0,"32",,terminal_output +2378,1608223,"TERMINAL",0,0,"43",,terminal_output +2379,1609273,"TERMINAL",0,0,"54",,terminal_output +2380,1610315,"TERMINAL",0,0,"65",,terminal_output +2381,1611353,"TERMINAL",0,0,"76",,terminal_output +2382,1612386,"TERMINAL",0,0,"87",,terminal_output +2383,1613484,"TERMINAL",0,0,"98",,terminal_output +2384,1614477,"TERMINAL",0,0,"4:009",,terminal_output +2385,1615524,"TERMINAL",0,0,"150",,terminal_output +2386,1615970,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",991,0," --name=lam-tiny-overfit-$slurm_job_id \\n",shellscript,content +2387,1616014,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1035,0," --tags lam overfit tiny \\n",shellscript,content +2388,1616556,"TERMINAL",0,0,"21",,terminal_output +2389,1617640,"TERMINAL",0,0,"32",,terminal_output +2390,1618640,"TERMINAL",0,0,"43",,terminal_output +2391,1619671,"TERMINAL",0,0,"54",,terminal_output +2392,1620707,"TERMINAL",0,0,"65",,terminal_output +2393,1621749,"TERMINAL",0,0,"76",,terminal_output +2394,1622795,"TERMINAL",0,0,"87",,terminal_output +2395,1623822,"TERMINAL",0,0,"98",,terminal_output +2396,1624871,"TERMINAL",0,0,"109",,terminal_output +2397,1625917,"TERMINAL",0,0,"14:00",,terminal_output +2398,1626965,"TERMINAL",0,0,"21",,terminal_output +2399,1628005,"TERMINAL",0,0,"32",,terminal_output +2400,1628764,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",0,0,"",shellscript,tab +2401,1628765,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1301,0,"",shellscript,selection_mouse +2402,1628768,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1300,0,"",shellscript,selection_command +2403,1628916,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1274,26,"\n --codebook_dropout=0.",shellscript,selection_mouse +2404,1628917,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1274,27,"\n --codebook_dropout=0.0",shellscript,selection_command +2405,1628936,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1211,90,"\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2406,1628955,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1167,134,"\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2407,1628967,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1131,170,"_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n 
--codebook_dropout=0.0",shellscript,selection_mouse +2408,1628992,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1107,194,"far \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2409,1629027,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1080,221,"stant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2410,1629053,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1049,252," overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2411,1629062,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",1004,297,"m-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2412,1629089,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",990,311,"\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2413,1629091,"TERMINAL",0,0,"43",,terminal_output +2414,1629136,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",962,339,"age_interval=3 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2415,1629175,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",961,340,"mage_interval=3 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2416,1629215,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",936,365,"p_steps=125 \\n --log_image_interval=3 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2417,1629216,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",935,366,"up_steps=125 \\n --log_image_interval=3 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n 
--num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2418,1629256,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",914,387,"_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2419,1629295,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",913,388,"x_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2420,1629333,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",912,389,"ax_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2421,1629333,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",911,390,"max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2422,1629371,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",890,411,"-min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2423,1629371,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",867,434," --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2424,1629410,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",866,435," --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n 
--codebook_dropout=0.0",shellscript,selection_mouse +2425,1629448,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",832,469," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2426,1629486,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",831,470," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2427,1630102,"TERMINAL",0,0,"55",,terminal_output +2428,1631135,"TERMINAL",0,0,"76",,terminal_output +2429,1632187,"TERMINAL",0,0,"87",,terminal_output +2430,1632665,"scripts_horeka/overfit_sample_tiny/tester.sh",0,0,"",shellscript,tab +2431,1633240,"TERMINAL",0,0,"98",,terminal_output +2432,1633712,"scripts_horeka/overfit_sample_tiny/tester.sh",791,0,"",shellscript,selection_mouse +2433,1633880,"scripts_horeka/overfit_sample_tiny/tester.sh",764,27,"\n --codebook_dropout=0.0",shellscript,selection_mouse +2434,1633901,"scripts_horeka/overfit_sample_tiny/tester.sh",677,114," \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2435,1633916,"scripts_horeka/overfit_sample_tiny/tester.sh",595,196,"jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2436,1633944,"scripts_horeka/overfit_sample_tiny/tester.sh",564,227,"ity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2437,1633958,"scripts_horeka/overfit_sample_tiny/tester.sh",519,272,"-log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2438,1633991,"scripts_horeka/overfit_sample_tiny/tester.sh",516,275," --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2439,1633992,"scripts_horeka/overfit_sample_tiny/tester.sh",515,276," --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse 
+2440,1634006,"scripts_horeka/overfit_sample_tiny/tester.sh",489,302," --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2441,1634052,"scripts_horeka/overfit_sample_tiny/tester.sh",449,342," --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2442,1634099,"scripts_horeka/overfit_sample_tiny/tester.sh",428,363," --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2443,1634135,"scripts_horeka/overfit_sample_tiny/tester.sh",395,396," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2444,1634136,"scripts_horeka/overfit_sample_tiny/tester.sh",366,425,"python python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2445,1634280,"TERMINAL",0,0,"209",,terminal_output +2446,1634462,"scripts_horeka/overfit_sample_tiny/tester.sh",395,396," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2447,1634963,"scripts_horeka/overfit_sample_tiny/tester.sh",395,396,"",shellscript,content +2448,1635337,"TERMINAL",0,0,"110",,terminal_output +2449,1635371,"scripts_horeka/overfit_sample_tiny/tester.sh",395,0," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=3 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,content +2450,1636396,"TERMINAL",0,0,"21",,terminal_output +2451,1637178,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output 
+2452,1637345,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=1721172\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0507\r\nSLURM_JOB_START_TIME=1751029211\r\nSLURM_STEP_NODELIST=hkn0507\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751032811\r\nSLURM_PMI2_SRUN_PORT=35623\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3298895\r\nSLURM_PTY_PORT=42613\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=35\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e10.hkn0507\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_MEM_PER_NODE=51200\r\nSLURM_PTY_WIN_COL=73\r\nSLURM_NODELIST=hkn0507\r\nSLURM_SRUN_COMM_PORT=37257\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3298895\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0507\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=37257\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0507\r\npython: can't open file '/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/python': [Errno 2] No such file or directory\r\n]0;tum_cte0515@hkn0507:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0507 jafar]$ ",,terminal_output +2453,1637451,"TERMINAL",0,0,"32",,terminal_output +2454,1638452,"TERMINAL",0,0,"43",,terminal_output +2455,1639498,"TERMINAL",0,0,"54",,terminal_output +2456,1640542,"TERMINAL",0,0,"65",,terminal_output +2457,1641588,"TERMINAL",0,0,"76",,terminal_output +2458,1642661,"TERMINAL",0,0,"87",,terminal_output +2459,1643027,"scripts_horeka/overfit_sample_tiny/tester.sh",0,0,"",shellscript,tab +2460,1643028,"scripts_horeka/overfit_sample_tiny/tester.sh",540,0,"",shellscript,selection_mouse +2461,1643799,"scripts_horeka/overfit_sample_tiny/tester.sh",539,1,"",shellscript,content +2462,1643807,"TERMINAL",0,0,"98",,terminal_output +2463,1644094,"scripts_horeka/overfit_sample_tiny/tester.sh",539,0,"2",shellscript,content +2464,1644095,"scripts_horeka/overfit_sample_tiny/tester.sh",540,0,"",shellscript,selection_keyboard +2465,1644431,"scripts_horeka/overfit_sample_tiny/tester.sh",539,1,"",shellscript,content +2466,1644581,"scripts_horeka/overfit_sample_tiny/tester.sh",539,0,"1",shellscript,content +2467,1644581,"scripts_horeka/overfit_sample_tiny/tester.sh",540,0,"",shellscript,selection_keyboard +2468,1644669,"scripts_horeka/overfit_sample_tiny/tester.sh",540,0,"0",shellscript,content +2469,1644670,"scripts_horeka/overfit_sample_tiny/tester.sh",541,0,"",shellscript,selection_keyboard +2470,1644728,"TERMINAL",0,0,"309",,terminal_output +2471,1644793,"scripts_horeka/overfit_sample_tiny/tester.sh",541,0,"0",shellscript,content +2472,1644794,"scripts_horeka/overfit_sample_tiny/tester.sh",542,0,"",shellscript,selection_keyboard 
+2473,1645749,"TERMINAL",0,0,"120",,terminal_output +2474,1646781,"TERMINAL",0,0,"21",,terminal_output +2475,1647822,"TERMINAL",0,0,"32",,terminal_output +2476,1648872,"TERMINAL",0,0,"43",,terminal_output +2477,1649924,"TERMINAL",0,0,"54",,terminal_output +2478,1650961,"TERMINAL",0,0,"65",,terminal_output +2479,1652001,"TERMINAL",0,0,"76",,terminal_output +2480,1653043,"TERMINAL",0,0,"87",,terminal_output +2481,1654115,"TERMINAL",0,0,"99",,terminal_output +2482,1655137,"TERMINAL",0,0,"4130",,terminal_output +2483,1656183,"TERMINAL",0,0,"21",,terminal_output +2484,1657225,"TERMINAL",0,0,"32",,terminal_output +2485,1658253,"TERMINAL",0,0,"43",,terminal_output +2486,1659320,"TERMINAL",0,0,"54",,terminal_output +2487,1660349,"TERMINAL",0,0,"65",,terminal_output +2488,1661390,"TERMINAL",0,0,"76",,terminal_output +2489,1662512,"TERMINAL",0,0,"87",,terminal_output +2490,1663085,"TERMINAL",0,0,"sh scripts_horeka/overfit_sample_tiny/tester.sh ",,terminal_output +2491,1663520,"TERMINAL",0,0,"98",,terminal_output +2492,1664566,"TERMINAL",0,0,"509",,terminal_output +2493,1665620,"TERMINAL",0,0,"140",,terminal_output +2494,1666664,"TERMINAL",0,0,"21",,terminal_output +2495,1667712,"TERMINAL",0,0,"32",,terminal_output +2496,1668757,"TERMINAL",0,0,"43",,terminal_output +2497,1668881,"scripts_horeka/overfit_sample_tiny/tester.sh",0,0,"",shellscript,tab +2498,1668882,"scripts_horeka/overfit_sample_tiny/tester.sh",369,0,"",shellscript,selection_mouse +2499,1668987,"scripts_horeka/overfit_sample_tiny/tester.sh",366,6,"python",shellscript,selection_mouse +2500,1669305,"scripts_horeka/overfit_sample_tiny/tester.sh",366,6,"",shellscript,content +2501,1669873,"TERMINAL",0,0,"54",,terminal_output +2502,1670019,"scripts_horeka/overfit_sample_tiny/tester.sh",366,1,"",shellscript,content +2503,1670877,"TERMINAL",0,0,"65",,terminal_output +2504,1671364,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output 
+2505,1671497,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=1721172\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0507\r\nSLURM_JOB_START_TIME=1751029211\r\nSLURM_STEP_NODELIST=hkn0507\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751032811\r\nSLURM_PMI2_SRUN_PORT=35623\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3298895\r\nSLURM_PTY_PORT=42613\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=35\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e10.hkn0507\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_MEM_PER_NODE=51200\r\nSLURM_PTY_WIN_COL=73\r\nSLURM_NODELIST=hkn0507\r\nSLURM_SRUN_COMM_PORT=37257\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3298895\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0507\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=37257\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0507\r\n",,terminal_output +2506,1671883,"TERMINAL",0,0,"76",,terminal_output +2507,1672432,"scripts_horeka/overfit_sample_tiny/tester.sh",0,0,"",shellscript,tab +2508,1672433,"scripts_horeka/overfit_sample_tiny/tester.sh",860,0,"",shellscript,selection_mouse +2509,1672578,"scripts_horeka/overfit_sample_tiny/tester.sh",833,27,"\n --codebook_dropout=0.0",shellscript,selection_mouse +2510,1672603,"scripts_horeka/overfit_sample_tiny/tester.sh",770,90,"\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2511,1672603,"scripts_horeka/overfit_sample_tiny/tester.sh",664,196,"jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2512,1672616,"scripts_horeka/overfit_sample_tiny/tester.sh",557,303,"ame=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2513,1672642,"scripts_horeka/overfit_sample_tiny/tester.sh",483,377," --warmup_steps=125 \\n --log_image_interval=100 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2514,1672655,"scripts_horeka/overfit_sample_tiny/tester.sh",462,398," --max_lr=5e-6 \\n --warmup_steps=125 \\n 
--log_image_interval=100 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2515,1672682,"scripts_horeka/overfit_sample_tiny/tester.sh",442,418," --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=100 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2516,1672923,"TERMINAL",0,0,"87",,terminal_output +2517,1673002,"scripts_horeka/overfit_sample_tiny/tester.sh",421,439," --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=100 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2518,1673081,"scripts_horeka/overfit_sample_tiny/tester.sh",388,472," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=5e-7 \\n --max_lr=5e-6 \\n --warmup_steps=125 \\n --log_image_interval=100 \\n --log \\n --name=lam-tiny-overfit-$slurm_job_id \\n --tags lam overfit tiny \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=384 \\n --latent_dim=32 \\n --num_latents=6 \\n --patch_size=16 \\n --num_blocks=8 \\n --num_heads=8 \\n --codebook_dropout=0.0",shellscript,selection_mouse +2519,1673465,"TERMINAL",0,0,"2025-06-27 15:14:59.370995: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751030099.384454 1727872 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751030099.389092 1727872 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1751030099.402346 1727872 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751030099.402363 1727872 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751030099.402365 1727872 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751030099.402367 1727872 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +2520,1673909,"scripts_horeka/overfit_sample_tiny/tester.sh",392,0,"",shellscript,selection_command +2521,1673985,"TERMINAL",0,0,"98",,terminal_output +2522,1675041,"TERMINAL",0,0,"5:009",,terminal_output +2523,1675843,"TERMINAL",0,0,"W0000 00:00:1751030101.751528 1727872 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +2524,1676155,"TERMINAL",0,0,"150",,terminal_output +2525,1676155,"TERMINAL",0,0,"Running on 1 devices.\r\n",,terminal_output +2526,1676924,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +2527,1677129,"TERMINAL",0,0,"22",,terminal_output +2528,1677481,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250627_151502-jzc6arbu\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run lam-tiny-overfit-0000\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/jzc6arbu\r\n",,terminal_output +2529,1678151,"TERMINAL",0,0,"43",,terminal_output +2530,1679210,"TERMINAL",0,0,"54",,terminal_output +2531,1680231,"TERMINAL",0,0,"65",,terminal_output +2532,1681142,"scripts_horeka/overfit_sample_tiny/tester.sh",388,0,"",shellscript,selection_command +2533,1681271,"TERMINAL",0,0,"76",,terminal_output +2534,1682323,"TERMINAL",0,0,"87",,terminal_output +2535,1683354,"TERMINAL",0,0,"98",,terminal_output +2536,1684022,"scripts_horeka/overfit_sample_tiny/tester.sh",550,0," --log_checkpoint_interval=500 \\n",shellscript,content +2537,1684423,"TERMINAL",0,0,"109",,terminal_output +2538,1685441,"TERMINAL",0,0,"15:00",,terminal_output +2539,1686484,"TERMINAL",0,0,"21",,terminal_output +2540,1687535,"TERMINAL",0,0,"32",,terminal_output +2541,1688563,"TERMINAL",0,0,"43",,terminal_output +2542,1689658,"TERMINAL",0,0,"54",,terminal_output +2543,1690672,"TERMINAL",0,0,"65",,terminal_output +2544,1691601,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",0,0,"",shellscript,tab +2545,1691690,"TERMINAL",0,0,"76",,terminal_output +2546,1692769,"TERMINAL",0,0,"87",,terminal_output +2547,1693214,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",835,0,"",shellscript,selection_command +2548,1693833,"TERMINAL",0,0,"98",,terminal_output +2549,1693853,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",831,0,"",shellscript,selection_command +2550,1694805,"TERMINAL",0,0,"209",,terminal_output +2551,1695831,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab +2552,1695957,"TERMINAL",0,0,"110",,terminal_output +2553,1696918,"TERMINAL",0,0,"21",,terminal_output +2554,1697150,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",841,0,"",shellscript,selection_command +2555,1697995,"TERMINAL",0,0,"32",,terminal_output +2556,1698433,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",837,0,"",shellscript,selection_command 
+2557,1698982,"TERMINAL",0,0,"43",,terminal_output +2558,1700036,"TERMINAL",0,0,"54",,terminal_output +2559,1700367,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",0,0,"",shellscript,tab +2560,1700590,"TERMINAL",0,0,"Starting training from step 0...\r\nbatch shape: (12, 16, 90, 160, 3)\r\n",,terminal_output +2561,1701065,"TERMINAL",0,0,"65",,terminal_output +2562,1702192,"TERMINAL",0,0,"87",,terminal_output +2563,1702255,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab +2564,1703158,"TERMINAL",0,0,"98",,terminal_output +2565,1704218,"TERMINAL",0,0,"309",,terminal_output +2566,1705248,"TERMINAL",0,0,"120",,terminal_output +2567,1706295,"TERMINAL",0,0,"21",,terminal_output +2568,1707352,"TERMINAL",0,0,"32",,terminal_output +2569,1708393,"TERMINAL",0,0,"43",,terminal_output +2570,1709428,"TERMINAL",0,0,"54",,terminal_output +2571,1710469,"TERMINAL",0,0,"65",,terminal_output +2572,1710725,"train_lam.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n batch_size: int = 36\n vq_beta: float = 0.25\n min_lr: float = 3e-6\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n vq_reset_thresh: int = 50\n # LAM\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 6\n patch_size: int = 16\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.0\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_lam""\n tags: list = field(default_factory=lambda: [""lam""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n\n\nargs = tyro.cli(Args)\n\n\ndef lam_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n gt_future_frames = inputs[""videos""][:, 1:]\n mse = jnp.square(gt_future_frames - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = gt_future_frames.clip(0, 1).reshape(-1, *gt_future_frames.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n count_fn = jax.vmap(lambda i: (outputs[""indices""] == i).sum())\n index_counts = count_fn(jnp.arange(args.num_latents))\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n 
codebook_usage=(index_counts != 0).mean(),\n )\n return loss, (outputs[""recon""], index_counts, metrics)\n\n\n@jax.jit\ndef train_step(state, inputs, action_last_active):\n # --- Update model ---\n rng, inputs[""rng""] = jax.random.split(inputs[""rng""])\n grad_fn = jax.value_and_grad(lam_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, idx_counts, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n\n # --- Reset inactive latent actions ---\n codebook = state.params[""params""][""vq""][""codebook""]\n num_codes = len(codebook)\n active_codes = idx_counts != 0.0\n action_last_active = jnp.where(active_codes, 0, action_last_active + 1)\n p_code = active_codes / active_codes.sum()\n reset_idxs = jax.random.choice(rng, num_codes, shape=(num_codes,), p=p_code)\n do_reset = action_last_active >= args.vq_reset_thresh\n new_codebook = jnp.where(\n jnp.expand_dims(do_reset, -1), codebook[reset_idxs], codebook\n )\n state.params[""params""][""vq""][""codebook""] = new_codebook\n action_last_active = jnp.where(do_reset, 0, action_last_active)\n return state, loss, recon, action_last_active, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args\n ) \n\n # --- Initialize model ---\n lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n # Track when each action was last sampled\n action_last_active = jnp.zeros(args.num_latents)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n rng, _rng = jax.random.split(rng)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = lam.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=lam.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n action_last_active = jax.device_put(action_last_active, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n 
PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer__\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n # for videos in dataloader:\n # npy_path = ""overfit_dir/single_sample_corner.npy""\n npy_path = ""overfit_dir/single_batch_12_elems.npy""\n videos = np.load(npy_path)\n print(""batch shape: "", videos.shape)\n while(True):\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng)\n start_time = time.time()\n train_state, loss, recon, action_last_active, metrics = train_step(\n train_state, inputs, action_last_active\n )\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n ""step_time_ms"": elapsed_time,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0][1:]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""lam_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +2573,1711548,"TERMINAL",0,0,"76",,terminal_output +2574,1712630,"TERMINAL",0,0,"87",,terminal_output +2575,1713667,"TERMINAL",0,0,"98",,terminal_output +2576,1714714,"TERMINAL",0,0,"409",,terminal_output +2577,1714968,"train_lam.py",7523,0,"",python,selection_mouse +2578,1715577,"train_lam.py",7431,0,"",python,selection_mouse +2579,1715731,"TERMINAL",0,0,"130",,terminal_output +2580,1715984,"TERMINAL",0,0,"2025-06-27 15:15:41.916160: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-27 15:15:41.917462: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-27 15:15:41.917484: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-27 15:15:41.918104: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2581,1716726,"train_lam.py",7406,0,"",python,selection_command +2582,1716751,"TERMINAL",0,0,"21",,terminal_output +2583,1717531,"train_lam.py",7406,0,"#",python,content +2584,1717532,"train_lam.py",7407,0,"",python,selection_keyboard +2585,1717571,"train_lam.py",7407,0," ",python,content +2586,1717572,"train_lam.py",7408,0,"",python,selection_keyboard +2587,1717830,"TERMINAL",0,0,"32",,terminal_output +2588,1717972,"train_lam.py",7407,0,"",python,selection_command +2589,1718069,"train_lam.py",7347,0,"",python,selection_command +2590,1718635,"train_lam.py",7346,0,"",python,selection_command +2591,1718892,"train_lam.py",7346,1,"",python,content +2592,1718894,"TERMINAL",0,0,"43",,terminal_output +2593,1719052,"train_lam.py",7346,1,"",python,content +2594,1719908,"TERMINAL",0,0,"54",,terminal_output +2595,1720942,"TERMINAL",0,0,"65",,terminal_output +2596,1721984,"TERMINAL",0,0,"76",,terminal_output +2597,1723022,"TERMINAL",0,0,"87",,terminal_output +2598,1724062,"TERMINAL",0,0,"99",,terminal_output +2599,1724285,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",0,0,"",shellscript,tab +2600,1725152,"TERMINAL",0,0,"5140",,terminal_output +2601,1726157,"TERMINAL",0,0,"21",,terminal_output +2602,1727093,"scripts_horeka/overfit_sample_tiny/tester.sh",0,0,"",shellscript,tab +2603,1727233,"TERMINAL",0,0,"32",,terminal_output +2604,1728244,"TERMINAL",0,0,"43",,terminal_output +2605,1729010,"TERMINAL",0,0,"^C2025-06-27 15:15:54.868517: F external/xla/xla/service/gpu/autotuning/gemm_fusion_autotuner.cc:1136] Non-OK-status: executable.status()\r\nStatus: INTERNAL: ptxas exited with non-zero error code 2, output: - Failure occured when compiling fusion gemm_fusion_dot.109 with config '{block_m:64,block_n:64,block_k:32,split_k:1,num_stages:4,num_warps:4,num_ctas:1}'\r\nFused HLO computation:\r\n%gemm_fusion_dot.109_computation (parameter_0.111: f32[12,15,60,384], parameter_1.111: f32[384], parameter_2.36: f32[384,8,48]) -> f32[180,60,8,48] {\r\n %parameter_0.111 = f32[12,15,60,384]{3,2,1,0} parameter(0)\r\n %parameter_1.111 = f32[384]{0} parameter(1)\r\n %broadcast.4226 = f32[12,15,60,384]{3,2,1,0} broadcast(%parameter_1.111), dimensions={3}, metadata={op_name=""jit(train_step)/jit(main)/jvp(LatentActionModel)/decoder/STBlock_0/LayerNorm_0/add"" source_file=""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/normalization.py"" source_line=218}\r\n %add.1529 = f32[12,15,60,384]{3,2,1,0} add(%parameter_0.111, %broadcast.4226), metadata={op_name=""jit(train_step)/jit(main)/jvp(LatentActionModel)/decoder/STBlock_0/LayerNorm_0/add"" 
source_file=""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/normalization.py"" source_line=218}\r\n %bitcast.13180 = f32[10800,384]{1,0} bitcast(%add.1529), metadata={op_name=""jit(train_step)/jit(main)/jvp(LatentActionModel)/decoder/STBlock_0/LayerNorm_0/add"" source_file=""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/normalization.py"" source_line=218}\r\n %parameter_2.36 = f32[384,8,48]{2,1,0} parameter(2)\r\n %bitcast.13181 = f32[384,384]{1,0} bitcast(%parameter_2.36), metadata={op_name=""state.params[\'params\'][\'decoder\'][\'STBlock_0\'][\'MultiHeadAttention_0\'][\'query\'][\'kernel\']""}\r\n %dot.1324 = f32[10800,384]{1,0} dot(%bitcast.13180, %bitcast.13181), lhs_contracting_dims={1}, rhs_contracting_dims={0}, metadata={op_name=""jit(train_step)/jit(main)/jvp(LatentActionModel)/decoder/STBlock_0/MultiHeadAttention_0/query/dot_general"" source_file=""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"" source_line=199}\r\n %bitcast.13182 = f32[12,15,60,8,48]{4,3,2,1,0} bitcast(%dot.1324), metadata={op_name=""jit(train_step)/jit(main)/jvp(LatentActionModel)/decoder/STBlock_0/MultiHeadAttention_0/query/dot_general"" source_file=""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"" source_line=199}\r\n ROOT %bitcast.13183 = f32[180,60,8,48]{3,2,1,0} bitcast(%bitcast.13182), metadata={op_name=""jit(train_step)/jit(main)/jvp(LatentActionModel)/decoder/STBlock_0/MultiHeadAttention_0/query/dot_general"" source_file=""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"" source_line=199}\r\n}\r\n",,terminal_output +2606,1729280,"TERMINAL",0,0,"54",,terminal_output +2607,1729459,"TERMINAL",0,0,"scripts_horeka/overfit_sample_tiny/tester.sh: line 37: 1727872 Aborted (core dumped) python train_lam.py --ckpt_dir $CHECKPOINT_DIR --batch_size=1 --min_lr=5e-7 --max_lr=5e-6 --warmup_steps=125 --log_image_interval=100 --log --name=lam-tiny-overfit-$slurm_job_id --tags lam overfit tiny --entity instant-uv --project jafar --data_dir $tf_records_dir --model_dim=384 --latent_dim=32 --num_latents=6 --patch_size=16 --num_blocks=8 --num_heads=8 --codebook_dropout=0.0\r\nscripts_horeka/overfit_sample_tiny/tester.sh: line 38: --codebook_dropout=0.0: command not found\r\n]0;tum_cte0515@hkn0507:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0507 jafar]$ ",,terminal_output +2608,1729736,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0507:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0507 jafar]$ ",,terminal_output +2609,1730110,"TERMINAL",0,0,"sh scripts_horeka/overfit_sample_tiny/tester.sh ",,terminal_output +2610,1730319,"TERMINAL",0,0,"65",,terminal_output +2611,1730404,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output 
+2612,1730545,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=1721172\r\nSLURM_JOB_GPUS=1\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0507\r\nSLURM_JOB_START_TIME=1751029211\r\nSLURM_STEP_NODELIST=hkn0507\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751032811\r\nSLURM_PMI2_SRUN_PORT=35623\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3298895\r\nSLURM_PTY_PORT=42613\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=35\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e10.hkn0507\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_MEM_PER_NODE=51200\r\nSLURM_PTY_WIN_COL=73\r\nSLURM_NODELIST=hkn0507\r\nSLURM_SRUN_COMM_PORT=37257\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3298895\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0507\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=37257\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0507\r\n",,terminal_output +2613,1731370,"TERMINAL",0,0,"76",,terminal_output +2614,1732402,"TERMINAL",0,0,"87",,terminal_output +2615,1732547,"TERMINAL",0,0,"2025-06-27 15:15:58.445759: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751030158.458568 1728856 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751030158.462850 1728856 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1751030158.474975 1728856 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751030158.474993 1728856 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751030158.474995 1728856 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751030158.474997 1728856 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +2616,1733451,"TERMINAL",0,0,"98",,terminal_output +2617,1734496,"TERMINAL",0,0,"6:009",,terminal_output +2618,1734793,"TERMINAL",0,0,"W0000 00:00:1751030160.738834 1728856 gpu_device.cc:2341] Cannot dlopen some GPU libraries. 
Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +2619,1735108,"TERMINAL",0,0,"Running on 1 devices.\r\n",,terminal_output +2620,1735544,"TERMINAL",0,0,"150",,terminal_output +2621,1735861,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +2622,1736569,"TERMINAL",0,0,"21",,terminal_output +2623,1736707,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250627_151601-r1da3dp3\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run lam-tiny-overfit-0000\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/r1da3dp3\r\n",,terminal_output +2624,1737623,"TERMINAL",0,0,"32",,terminal_output +2625,1738678,"TERMINAL",0,0,"43",,terminal_output +2626,1739744,"TERMINAL",0,0,"54",,terminal_output +2627,1740785,"TERMINAL",0,0,"65",,terminal_output +2628,1741826,"TERMINAL",0,0,"76",,terminal_output +2629,1742877,"TERMINAL",0,0,"87",,terminal_output +2630,1743916,"TERMINAL",0,0,"98",,terminal_output +2631,1744964,"TERMINAL",0,0,"109",,terminal_output +2632,1746054,"TERMINAL",0,0,"16:00",,terminal_output +2633,1747279,"TERMINAL",0,0,"22",,terminal_output +2634,1748317,"TERMINAL",0,0,"43",,terminal_output +2635,1749369,"TERMINAL",0,0,"54",,terminal_output +2636,1750419,"TERMINAL",0,0,"65",,terminal_output +2637,1751479,"TERMINAL",0,0,"76",,terminal_output +2638,1752544,"TERMINAL",0,0,"87",,terminal_output +2639,1753602,"TERMINAL",0,0,"98",,terminal_output +2640,1754640,"TERMINAL",0,0,"209",,terminal_output +2641,1755704,"TERMINAL",0,0,"110",,terminal_output +2642,1756727,"TERMINAL",0,0,"21",,terminal_output +2643,1757768,"TERMINAL",0,0,"32",,terminal_output +2644,1758810,"TERMINAL",0,0,"43",,terminal_output +2645,1759446,"TERMINAL",0,0,"Starting training from step 0...\r\nbatch shape: (1, 16, 90, 160, 3)\r\n",,terminal_output +2646,1759860,"TERMINAL",0,0,"54",,terminal_output +2647,1760901,"TERMINAL",0,0,"65",,terminal_output +2648,1761957,"TERMINAL",0,0,"76",,terminal_output +2649,1762996,"TERMINAL",0,0,"87",,terminal_output +2650,1764035,"TERMINAL",0,0,"98",,terminal_output +2651,1765120,"TERMINAL",0,0,"3020",,terminal_output +2652,1766131,"TERMINAL",0,0,"21",,terminal_output +2653,1767171,"TERMINAL",0,0,"32",,terminal_output +2654,1768203,"TERMINAL",0,0,"43",,terminal_output +2655,1769248,"TERMINAL",0,0,"54",,terminal_output +2656,1770301,"TERMINAL",0,0,"65",,terminal_output +2657,1770823,"TERMINAL",0,0,"2025-06-27 15:16:36.765834: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-27 15:16:36.765890: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2658,1771348,"TERMINAL",0,0,"76",,terminal_output +2659,1772391,"TERMINAL",0,0,"87",,terminal_output +2660,1773440,"TERMINAL",0,0,"98",,terminal_output +2661,1774506,"TERMINAL",0,0,"409",,terminal_output +2662,1775553,"TERMINAL",0,0,"130",,terminal_output +2663,1776609,"TERMINAL",0,0,"21",,terminal_output +2664,1777644,"TERMINAL",0,0,"32",,terminal_output +2665,1778752,"TERMINAL",0,0,"43",,terminal_output +2666,1779746,"TERMINAL",0,0,"54",,terminal_output +2667,1780787,"TERMINAL",0,0,"65",,terminal_output +2668,1781862,"TERMINAL",0,0,"76",,terminal_output +2669,1782868,"TERMINAL",0,0,"87",,terminal_output +2670,1783918,"TERMINAL",0,0,"98",,terminal_output +2671,1784961,"TERMINAL",0,0,"509",,terminal_output +2672,1786004,"TERMINAL",0,0,"140",,terminal_output +2673,1787052,"TERMINAL",0,0,"21",,terminal_output +2674,1788103,"TERMINAL",0,0,"33",,terminal_output +2675,1789156,"TERMINAL",0,0,"54",,terminal_output +2676,1790210,"TERMINAL",0,0,"65",,terminal_output +2677,1791250,"TERMINAL",0,0,"76",,terminal_output +2678,1792353,"TERMINAL",0,0,"87",,terminal_output +2679,1793711,"TERMINAL",0,0,"98",,terminal_output +2680,1794802,"TERMINAL",0,0,"7:009",,terminal_output +2681,1795759,"TERMINAL",0,0,"150",,terminal_output +2682,1796804,"TERMINAL",0,0,"21",,terminal_output +2683,1797853,"TERMINAL",0,0,"32",,terminal_output +2684,1798897,"TERMINAL",0,0,"43",,terminal_output +2685,1799939,"TERMINAL",0,0,"54",,terminal_output +2686,1800993,"TERMINAL",0,0,"65",,terminal_output +2687,1802053,"TERMINAL",0,0,"76",,terminal_output +2688,1803089,"TERMINAL",0,0,"88",,terminal_output +2689,1804138,"TERMINAL",0,0,"109",,terminal_output +2690,1805182,"TERMINAL",0,0,"17:00",,terminal_output +2691,1806236,"TERMINAL",0,0,"21",,terminal_output +2692,1807288,"TERMINAL",0,0,"32",,terminal_output +2693,1808307,"TERMINAL",0,0,"43",,terminal_output +2694,1809378,"TERMINAL",0,0,"54",,terminal_output +2695,1810403,"TERMINAL",0,0,"65",,terminal_output +2696,1811444,"TERMINAL",0,0,"76",,terminal_output +2697,1812501,"TERMINAL",0,0,"87",,terminal_output +2698,1813551,"TERMINAL",0,0,"98",,terminal_output +2699,1814598,"TERMINAL",0,0,"209",,terminal_output +2700,1815644,"TERMINAL",0,0,"110",,terminal_output +2701,1816711,"TERMINAL",0,0,"21",,terminal_output +2702,1817759,"TERMINAL",0,0,"32",,terminal_output +2703,1817805,"TERMINAL",0,0,"Step 0, loss: 0.304625540971756, step time: 57866.43195152283ms\r\nStep 1, loss: 0.3040958046913147, step time: 24.658918380737305ms\r\nStep 2, loss: 0.30308663845062256, step time: 19.010066986083984ms\r\nStep 3, loss: 0.30244067311286926, step time: 18.449068069458008ms\r\nStep 4, loss: 0.3017611801624298, step time: 17.46344566345215ms\r\nStep 5, loss: 0.3005269467830658, step time: 15.354633331298828ms\r\nStep 6, loss: 0.2992149293422699, step time: 16.361474990844727ms\r\nStep 7, loss: 0.298322468996048, step time: 15.922784805297852ms\r\nStep 8, loss: 0.2973535358905792, step time: 17.154216766357422ms\r\nStep 9, loss: 0.2963504493236542, step time: 17.682552337646484ms\r\nStep 10, loss: 0.2952902615070343, step time: 16.932249069213867ms\r\nStep 11, loss: 0.2941837012767792, step time: 15.525102615356445ms\r\nStep 12, loss: 0.29302772879600525, step time: 18.045902252197266ms\r\nStep 13, loss: 0.29181596636772156, step time: 17.444849014282227ms\r\nStep 14, loss: 0.29056334495544434, step time: 17.01974868774414ms\r\nStep 15, loss: 
0.28926852345466614, step time: 15.057086944580078ms\r\nStep 16, loss: 0.2879336476325989, step time: 15.59758186340332ms\r\nStep 17, loss: 0.2865558862686157, step time: 16.21532440185547ms\r\nStep 18, loss: 0.28515180945396423, step time: 15.978574752807617ms\r\nStep 19, loss: 0.2837095260620117, step time: 15.158891677856445ms\r\n",,terminal_output +2704,1817927,"TERMINAL",0,0,"Step 20, loss: 0.2822326719760895, step time: 15.34271240234375ms\r\nStep 21, loss: 0.2807152569293976, step time: 14.994382858276367ms\r\nStep 22, loss: 0.2791770398616791, step time: 14.994621276855469ms\r\nStep 23, loss: 0.27761316299438477, step time: 14.983892440795898ms\r\nStep 24, loss: 0.27603939175605774, step time: 15.363216400146484ms\r\n",,terminal_output +2705,1818247,"TERMINAL",0,0,"Step 25, loss: 0.27443727850914, step time: 14.83774185180664ms\r\nStep 26, loss: 0.2728257477283478, step time: 15.461921691894531ms\r\nStep 27, loss: 0.2712024748325348, step time: 14.890193939208984ms\r\nStep 28, loss: 0.2695786952972412, step time: 14.727354049682617ms\r\nStep 29, loss: 0.2679296135902405, step time: 14.92929458618164ms\r\nStep 30, loss: 0.2663016617298126, step time: 14.922857284545898ms\r\nStep 31, loss: 0.264666885137558, step time: 14.681100845336914ms\r\nStep 32, loss: 0.26301929354667664, step time: 15.252113342285156ms\r\nStep 33, loss: 0.26137953996658325, step time: 14.73093032836914ms\r\nStep 34, loss: 0.2597470283508301, step time: 14.768123626708984ms\r\nStep 35, loss: 0.2581256330013275, step time: 14.856815338134766ms\r\nStep 36, loss: 0.2565075159072876, step time: 15.483856201171875ms\r\nStep 37, loss: 0.25490111112594604, step time: 14.948129653930664ms\r\nStep 38, loss: 0.25330471992492676, step time: 15.427350997924805ms\r\nStep 39, loss: 0.2517126500606537, step time: 14.858484268188477ms\r\n",,terminal_output +2706,1818457,"TERMINAL",0,0,"Step 40, loss: 0.2501316964626312, step time: 14.504194259643555ms\r\nStep 41, loss: 0.24856439232826233, step time: 14.83011245727539ms\r\nStep 42, loss: 0.24702543020248413, step time: 16.90053939819336ms\r\nStep 43, loss: 0.24548782408237457, step time: 14.803647994995117ms\r\nStep 44, loss: 0.24397775530815125, step time: 15.18559455871582ms\r\nStep 45, loss: 0.24247974157333374, step time: 14.719009399414062ms\r\nStep 46, loss: 0.24099858105182648, step time: 14.742851257324219ms\r\nStep 47, loss: 0.2395462691783905, step time: 14.951944351196289ms\r\nStep 48, loss: 0.23811054229736328, step time: 15.209436416625977ms\r\n",,terminal_output +2707,1818614,"TERMINAL",0,0,"Step 49, loss: 0.23668988049030304, step time: 14.72926139831543ms\r\nStep 50, loss: 0.2353028804063797, step time: 15.120506286621094ms\r\nStep 51, loss: 0.23392197489738464, step time: 14.725685119628906ms\r\nStep 52, loss: 0.23255394399166107, step time: 14.766454696655273ms\r\nStep 53, loss: 0.2312024086713791, step time: 14.98723030090332ms\r\nStep 54, loss: 0.22984127700328827, step time: 15.120506286621094ms\r\n",,terminal_output +2708,1818696,"TERMINAL",0,0,"Step 55, loss: 0.22847296297550201, step time: 14.870643615722656ms\r\nStep 56, loss: 0.2270941585302353, step time: 15.457868576049805ms\r\nStep 57, loss: 0.2256830334663391, step time: 14.805078506469727ms\r\nStep 58, loss: 0.22424159944057465, step time: 14.894247055053711ms\r\nStep 59, loss: 0.22277279198169708, step time: 14.905691146850586ms\r\n",,terminal_output +2709,1818847,"TERMINAL",0,0,"43",,terminal_output +2710,1819111,"TERMINAL",0,0,"Step 60, loss: 0.22127486765384674, step time: 
15.052556991577148ms\r\nStep 61, loss: 0.21974417567253113, step time: 14.89567756652832ms\r\nStep 62, loss: 0.21818026900291443, step time: 15.244483947753906ms\r\nStep 63, loss: 0.21659588813781738, step time: 15.19155502319336ms\r\nStep 64, loss: 0.21498353779315948, step time: 14.899730682373047ms\r\nStep 65, loss: 0.2133595049381256, step time: 14.870643615722656ms\r\nStep 66, loss: 0.21172067523002625, step time: 16.257047653198242ms\r\nStep 67, loss: 0.2100593000650406, step time: 14.870166778564453ms\r\nStep 68, loss: 0.20839883387088776, step time: 15.09237289428711ms\r\nStep 69, loss: 0.20673704147338867, step time: 14.837026596069336ms\r\nStep 70, loss: 0.2050764113664627, step time: 14.827489852905273ms\r\nStep 71, loss: 0.20341795682907104, step time: 15.213966369628906ms\r\nStep 72, loss: 0.20177999138832092, step time: 15.368223190307617ms\r\nStep 73, loss: 0.20015396177768707, step time: 14.893054962158203ms\r\nStep 74, loss: 0.19855549931526184, step time: 15.249252319335938ms\r\nStep 75, loss: 0.1969837248325348, step time: 14.604806900024414ms\r\nStep 76, loss: 0.19544541835784912, step time: 14.633655548095703ms\r\nStep 77, loss: 0.1939474642276764, step time: 14.786005020141602ms\r\n",,terminal_output +2711,1819227,"TERMINAL",0,0,"Step 78, loss: 0.19248205423355103, step time: 15.17939567565918ms\r\nStep 79, loss: 0.19105970859527588, step time: 14.842510223388672ms\r\nStep 80, loss: 0.18967023491859436, step time: 14.967918395996094ms\r\nStep 81, loss: 0.1883212774991989, step time: 14.731407165527344ms\r\nStep 82, loss: 0.1870051473379135, step time: 14.520406723022461ms\r\nStep 83, loss: 0.18571682274341583, step time: 14.737606048583984ms\r\n",,terminal_output +2712,1819408,"TERMINAL",0,0,"Step 84, loss: 0.18446601927280426, step time: 15.120744705200195ms\r\nStep 85, loss: 0.1832393854856491, step time: 14.807939529418945ms\r\nStep 86, loss: 0.1820446401834488, step time: 15.209436416625977ms\r\nStep 87, loss: 0.18086840212345123, step time: 14.774560928344727ms\r\nStep 88, loss: 0.17971326410770416, step time: 14.713048934936523ms\r\nStep 89, loss: 0.17858442664146423, step time: 14.992952346801758ms\r\nStep 90, loss: 0.1774749606847763, step time: 15.052556991577148ms\r\nStep 91, loss: 0.17639951407909393, step time: 14.728546142578125ms\r\n",,terminal_output +2713,1819501,"TERMINAL",0,0,"Step 92, loss: 0.17535372078418732, step time: 15.056371688842773ms\r\nStep 93, loss: 0.17433302104473114, step time: 15.096187591552734ms\r\nStep 94, loss: 0.17332953214645386, step time: 14.782190322875977ms\r\nStep 95, loss: 0.17234407365322113, step time: 14.847517013549805ms\r\n",,terminal_output +2714,1819614,"TERMINAL",0,0,"Step 96, loss: 0.1713724583387375, step time: 15.372276306152344ms\r\nStep 97, loss: 0.17039452493190765, step time: 14.864921569824219ms\r\nStep 98, loss: 0.16941355168819427, step time: 15.305042266845703ms\r\nStep 99, loss: 0.16842485964298248, step time: 14.884471893310547ms\r\n",,terminal_output +2715,1819823,"TERMINAL",0,0,"54",,terminal_output +2716,1820493,"TERMINAL",0,0,"Step 100, loss: 0.16742372512817383, step time: 24.51324462890625ms\r\nStep 101, loss: 0.16640883684158325, step time: 18.778085708618164ms\r\n",,terminal_output +2717,1820769,"TERMINAL",0,0,"Step 102, loss: 0.16537657380104065, step time: 19.405126571655273ms\r\nStep 103, loss: 0.16431841254234314, step time: 17.548561096191406ms\r\nStep 104, loss: 0.16324754059314728, step time: 18.11695098876953ms\r\nStep 105, loss: 0.16214656829833984, step time: 
14.727592468261719ms\r\nStep 106, loss: 0.16102716326713562, step time: 15.014886856079102ms\r\nStep 107, loss: 0.1598922610282898, step time: 14.490365982055664ms\r\nStep 108, loss: 0.1587497442960739, step time: 15.60354232788086ms\r\nStep 109, loss: 0.15759825706481934, step time: 14.521598815917969ms\r\nStep 110, loss: 0.15645451843738556, step time: 15.752553939819336ms\r\nStep 111, loss: 0.1553155928850174, step time: 14.561176300048828ms\r\nStep 112, loss: 0.1541677862405777, step time: 14.669418334960938ms\r\nStep 113, loss: 0.15303397178649902, step time: 14.32347297668457ms\r\n",,terminal_output +2718,1820933,"TERMINAL",0,0,"65",,terminal_output +2719,1821198,"TERMINAL",0,0,"Step 114, loss: 0.15188796818256378, step time: 15.32292366027832ms\r\nStep 115, loss: 0.15073740482330322, step time: 14.310359954833984ms\r\nStep 116, loss: 0.1495860517024994, step time: 15.251874923706055ms\r\nStep 117, loss: 0.14842957258224487, step time: 14.2669677734375ms\r\nStep 118, loss: 0.14726519584655762, step time: 14.653682708740234ms\r\nStep 119, loss: 0.1460844874382019, step time: 14.416933059692383ms\r\nStep 120, loss: 0.1448974460363388, step time: 15.033721923828125ms\r\nStep 121, loss: 0.14368568360805511, step time: 14.377355575561523ms\r\nStep 122, loss: 0.14245156943798065, step time: 15.301942825317383ms\r\nStep 123, loss: 0.14119315147399902, step time: 14.472007751464844ms\r\nStep 124, loss: 0.1399172991514206, step time: 14.54782485961914ms\r\nStep 125, loss: 0.13862425088882446, step time: 14.320135116577148ms\r\nStep 126, loss: 0.13732098042964935, step time: 15.242338180541992ms\r\nStep 127, loss: 0.1360234022140503, step time: 14.559507369995117ms\r\nStep 128, loss: 0.13473553955554962, step time: 15.230655670166016ms\r\nStep 129, loss: 0.1334712952375412, step time: 14.485836029052734ms\r\nStep 130, loss: 0.13223950564861298, step time: 14.52183723449707ms\r\nStep 131, loss: 0.13104987144470215, step time: 14.495611190795898ms\r\nStep 132, loss: 0.12991023063659668, step time: 15.084981918334961ms\r\nStep 133, loss: 0.1288333535194397, step time: 14.523983001708984ms\r\n",,terminal_output +2720,1821286,"TERMINAL",0,0,"Step 134, loss: 0.12780998647212982, step time: 26.257991790771484ms\r\nStep 135, loss: 0.12683983147144318, step time: 16.817092895507812ms\r\nStep 136, loss: 0.12591597437858582, step time: 14.86659049987793ms\r\n",,terminal_output +2721,1821452,"TERMINAL",0,0,"Step 137, loss: 0.12502773106098175, step time: 14.511585235595703ms\r\nStep 138, loss: 0.12417373806238174, step time: 15.67387580871582ms\r\nStep 139, loss: 0.12334442883729935, step time: 14.621257781982422ms\r\nStep 140, loss: 0.12252020090818405, step time: 15.338897705078125ms\r\nStep 141, loss: 0.12171290814876556, step time: 14.549016952514648ms\r\nStep 142, loss: 0.12090212106704712, step time: 14.715194702148438ms\r\nStep 143, loss: 0.12009697407484055, step time: 14.368057250976562ms\r\nStep 144, loss: 0.11930695176124573, step time: 15.137910842895508ms\r\n",,terminal_output +2722,1821543,"TERMINAL",0,0,"Step 145, loss: 0.11852563172578812, step time: 14.704465866088867ms\r\nStep 146, loss: 0.11775336414575577, step time: 15.377283096313477ms\r\nStep 147, loss: 0.1169883981347084, step time: 14.493703842163086ms\r\nStep 148, loss: 0.11622729897499084, step time: 14.659643173217773ms\r\n",,terminal_output +2723,1821630,"TERMINAL",0,0,"Step 149, loss: 0.11546392738819122, step time: 14.53852653503418ms\r\nStep 150, loss: 0.11468526721000671, step time: 15.152931213378906ms\r\nStep 151, loss: 
0.1138937920331955, step time: 14.648914337158203ms\r\nStep 152, loss: 0.11308746039867401, step time: 15.357017517089844ms\r\n",,terminal_output +2724,1821880,"TERMINAL",0,0,"Step 153, loss: 0.11225895583629608, step time: 14.529705047607422ms\r\nStep 154, loss: 0.11141347140073776, step time: 14.846563339233398ms\r\nStep 155, loss: 0.11056648194789886, step time: 14.493465423583984ms\r\nStep 156, loss: 0.10972218960523605, step time: 15.305757522583008ms\r\nStep 157, loss: 0.10888800770044327, step time: 14.565229415893555ms\r\nStep 158, loss: 0.10806896537542343, step time: 15.282154083251953ms\r\nStep 159, loss: 0.1072494238615036, step time: 14.446020126342773ms\r\nStep 160, loss: 0.106437548995018, step time: 14.617204666137695ms\r\nStep 161, loss: 0.10564491152763367, step time: 17.934799194335938ms\r\nStep 162, loss: 0.10487166792154312, step time: 17.716169357299805ms\r\nStep 163, loss: 0.10412520915269852, step time: 14.68968391418457ms\r\n",,terminal_output +2725,1821962,"TERMINAL",0,0,"76",,terminal_output +2726,1821965,"TERMINAL",0,0,"Step 164, loss: 0.10340244323015213, step time: 15.57612419128418ms\r\nStep 165, loss: 0.10268262028694153, step time: 14.652252197265625ms\r\nStep 166, loss: 0.1019783690571785, step time: 14.728784561157227ms\r\nStep 167, loss: 0.10129663348197937, step time: 14.662742614746094ms\r\n",,terminal_output +2727,1822167,"TERMINAL",0,0,"Step 168, loss: 0.1006385087966919, step time: 15.296697616577148ms\r\nStep 169, loss: 0.10000693053007126, step time: 14.655351638793945ms\r\nStep 170, loss: 0.09939225018024445, step time: 15.20991325378418ms\r\nStep 171, loss: 0.09878616780042648, step time: 14.629364013671875ms\r\nStep 172, loss: 0.09818416088819504, step time: 14.723777770996094ms\r\nStep 173, loss: 0.09758914262056351, step time: 14.594793319702148ms\r\nStep 174, loss: 0.09698668122291565, step time: 15.269994735717773ms\r\nStep 175, loss: 0.0963706374168396, step time: 14.469385147094727ms\r\n",,terminal_output +2728,1822253,"TERMINAL",0,0,"Step 176, loss: 0.0957496389746666, step time: 15.47551155090332ms\r\nStep 177, loss: 0.09512831270694733, step time: 14.668464660644531ms\r\nStep 178, loss: 0.09450118988752365, step time: 14.81318473815918ms\r\nStep 179, loss: 0.09385775774717331, step time: 14.552831649780273ms\r\nStep 180, loss: 0.09320498257875443, step time: 15.032052993774414ms\r\n",,terminal_output +2729,1822383,"TERMINAL",0,0,"Step 181, loss: 0.09255474805831909, step time: 14.401435852050781ms\r\nStep 182, loss: 0.09190457314252853, step time: 15.132427215576172ms\r\nStep 183, loss: 0.09124764055013657, step time: 14.336824417114258ms\r\nStep 184, loss: 0.09058674424886703, step time: 14.562845230102539ms\r\nStep 185, loss: 0.08992475271224976, step time: 15.37179946899414ms\r\nStep 186, loss: 0.08926796913146973, step time: 15.29550552368164ms\r\n",,terminal_output +2730,1822627,"TERMINAL",0,0,"Step 187, loss: 0.08862143754959106, step time: 17.55213737487793ms\r\nStep 188, loss: 0.08798456192016602, step time: 15.608072280883789ms\r\nStep 189, loss: 0.08735531568527222, step time: 14.616250991821289ms\r\nStep 190, loss: 0.0867428183555603, step time: 14.580965042114258ms\r\nStep 191, loss: 0.08615193516016006, step time: 14.564275741577148ms\r\nStep 192, loss: 0.08557227998971939, step time: 15.176534652709961ms\r\nStep 193, loss: 0.08499827980995178, step time: 14.600992202758789ms\r\nStep 194, loss: 0.08444629609584808, step time: 15.600919723510742ms\r\nStep 195, loss: 0.08392199873924255, step time: 
14.50490951538086ms\r\nStep 196, loss: 0.08341525495052338, step time: 14.723062515258789ms\r\nStep 197, loss: 0.08290896564722061, step time: 14.57977294921875ms\r\n",,terminal_output +2731,1822701,"TERMINAL",0,0,"Step 198, loss: 0.08242785930633545, step time: 15.26498794555664ms\r\nStep 199, loss: 0.08197083324193954, step time: 14.571428298950195ms\r\n",,terminal_output +2732,1822868,"TERMINAL",0,0,"Step 200, loss: 0.08151824027299881, step time: 25.07925033569336ms\r\nStep 201, loss: 0.08105430752038956, step time: 19.193172454833984ms\r\nStep 202, loss: 0.08063013106584549, step time: 16.050100326538086ms\r\n",,terminal_output +2733,1823023,"TERMINAL",0,0,"87",,terminal_output +2734,1823241,"TERMINAL",0,0,"Step 203, loss: 0.08017849177122116, step time: 15.033483505249023ms\r\nStep 204, loss: 0.07974500209093094, step time: 18.951892852783203ms\r\nStep 205, loss: 0.0793263241648674, step time: 17.481565475463867ms\r\nStep 206, loss: 0.07889285683631897, step time: 15.887737274169922ms\r\nStep 207, loss: 0.07846441864967346, step time: 14.831304550170898ms\r\nStep 208, loss: 0.07802984118461609, step time: 14.813661575317383ms\r\nStep 209, loss: 0.07760480791330338, step time: 14.558076858520508ms\r\nStep 210, loss: 0.07717610895633698, step time: 15.271425247192383ms\r\nStep 211, loss: 0.07677090167999268, step time: 14.647483825683594ms\r\nStep 212, loss: 0.0763351321220398, step time: 15.404939651489258ms\r\nStep 213, loss: 0.07594169676303864, step time: 14.612913131713867ms\r\nStep 214, loss: 0.07553335279226303, step time: 14.821529388427734ms\r\nStep 215, loss: 0.07513994723558426, step time: 14.871597290039062ms\r\nStep 216, loss: 0.0747586190700531, step time: 15.714645385742188ms\r\nStep 217, loss: 0.07436733692884445, step time: 14.774084091186523ms\r\nStep 218, loss: 0.07401066273450851, step time: 15.522480010986328ms\r\n",,terminal_output +2735,1823372,"TERMINAL",0,0,"Step 219, loss: 0.07363943010568619, step time: 14.91093635559082ms\r\nStep 220, loss: 0.07328793406486511, step time: 14.72616195678711ms\r\nStep 221, loss: 0.0729466900229454, step time: 14.703035354614258ms\r\nStep 222, loss: 0.07260105013847351, step time: 15.407085418701172ms\r\nStep 223, loss: 0.07227737456560135, step time: 14.764785766601562ms\r\nStep 224, loss: 0.07194779813289642, step time: 15.48004150390625ms\r\n",,terminal_output +2736,1823981,"TERMINAL",0,0,"Step 225, loss: 0.0716102197766304, step time: 14.636754989624023ms\r\nStep 226, loss: 0.07129697501659393, step time: 15.05589485168457ms\r\nStep 227, loss: 0.07097893953323364, step time: 14.88041877746582ms\r\nStep 228, loss: 0.07069230824708939, step time: 15.42210578918457ms\r\nStep 229, loss: 0.07038278877735138, step time: 14.735221862792969ms\r\nStep 230, loss: 0.07011078298091888, step time: 15.379667282104492ms\r\nStep 231, loss: 0.06980815529823303, step time: 14.750003814697266ms\r\nStep 232, loss: 0.06952559947967529, step time: 16.252517700195312ms\r\nStep 233, loss: 0.06926330178976059, step time: 14.687776565551758ms\r\nStep 234, loss: 0.0689668133854866, step time: 15.28787612915039ms\r\nStep 235, loss: 0.0687023401260376, step time: 14.50347900390625ms\r\nStep 236, loss: 0.06842497736215591, step time: 15.309572219848633ms\r\nStep 237, loss: 0.06813904643058777, step time: 14.535665512084961ms\r\nStep 238, loss: 0.06786537915468216, step time: 14.62554931640625ms\r\nStep 239, loss: 0.06759868562221527, step time: 14.551639556884766ms\r\nStep 240, loss: 0.06731639802455902, step time: 14.984846115112305ms\r\nStep 241, 
loss: 0.06704827398061752, step time: 14.599084854125977ms\r\nStep 242, loss: 0.06677684932947159, step time: 15.210151672363281ms\r\nStep 243, loss: 0.06650599837303162, step time: 14.479637145996094ms\r\nStep 244, loss: 0.06625619530677795, step time: 14.535188674926758ms\r\nStep 245, loss: 0.06598655134439468, step time: 14.536619186401367ms\r\nStep 246, loss: 0.06574896723031998, step time: 16.756772994995117ms\r\nStep 247, loss: 0.06548270583152771, step time: 17.55523681640625ms\r\nStep 248, loss: 0.0652594044804573, step time: 17.87877082824707ms\r\nStep 249, loss: 0.06500789523124695, step time: 16.356468200683594ms\r\nStep 250, loss: 0.06476293504238129, step time: 16.422510147094727ms\r\n",,terminal_output +2737,1824037,"TERMINAL",0,0,"98",,terminal_output +2738,1824159,"TERMINAL",0,0,"Step 251, loss: 0.0645442083477974, step time: 15.658378601074219ms\r\nStep 252, loss: 0.06429929286241531, step time: 16.034364700317383ms\r\nStep 253, loss: 0.06407838314771652, step time: 15.271663665771484ms\r\nStep 254, loss: 0.06384888291358948, step time: 15.80667495727539ms\r\nStep 255, loss: 0.06361676752567291, step time: 14.894247055053711ms\r\nStep 256, loss: 0.06340458244085312, step time: 15.254497528076172ms\r\nStep 257, loss: 0.06318632513284683, step time: 17.208576202392578ms\r\nStep 258, loss: 0.06296400725841522, step time: 17.75527000427246ms\r\nStep 259, loss: 0.06274822354316711, step time: 16.79229736328125ms\r\n",,terminal_output +2739,1824316,"TERMINAL",0,0,"Step 260, loss: 0.06253710389137268, step time: 18.25237274169922ms\r\nStep 261, loss: 0.062327656894922256, step time: 17.290353775024414ms\r\nStep 262, loss: 0.062120597809553146, step time: 17.187118530273438ms\r\nStep 263, loss: 0.061922989785671234, step time: 17.11273193359375ms\r\nStep 264, loss: 0.06171225756406784, step time: 18.192291259765625ms\r\nStep 265, loss: 0.06151239946484566, step time: 17.25316047668457ms\r\nStep 266, loss: 0.061311010271310806, step time: 18.125534057617188ms\r\n",,terminal_output +2740,1824440,"TERMINAL",0,0,"Step 267, loss: 0.06111517176032066, step time: 17.370939254760742ms\r\nStep 268, loss: 0.0609070360660553, step time: 17.597198486328125ms\r\nStep 269, loss: 0.0607282854616642, step time: 17.389535903930664ms\r\nStep 270, loss: 0.060516223311424255, step time: 18.04208755493164ms\r\nStep 271, loss: 0.06032330542802811, step time: 17.76909828186035ms\r\n",,terminal_output +2741,1824958,"TERMINAL",0,0,"Step 272, loss: 0.060123831033706665, step time: 310.7185363769531ms\r\nStep 273, loss: 0.05993209406733513, step time: 20.067453384399414ms\r\nStep 274, loss: 0.059752315282821655, step time: 17.2274112701416ms\r\nStep 275, loss: 0.05954265221953392, step time: 15.40231704711914ms\r\nStep 276, loss: 0.05935540422797203, step time: 16.005277633666992ms\r\nStep 277, loss: 0.05917457491159439, step time: 15.228509902954102ms\r\nStep 278, loss: 0.05897144600749016, step time: 15.891313552856445ms\r\nStep 279, loss: 0.05878596380352974, step time: 14.921426773071289ms\r\nStep 280, loss: 0.05860640108585358, step time: 14.773130416870117ms\r\nStep 281, loss: 0.058408260345458984, step time: 15.018939971923828ms\r\n",,terminal_output +2742,1825077,"TERMINAL",0,0,"309",,terminal_output +2743,1825368,"TERMINAL",0,0,"Step 282, loss: 0.058227695524692535, step time: 15.671253204345703ms\r\nStep 283, loss: 0.0580485463142395, step time: 14.959573745727539ms\r\nStep 284, loss: 0.057873666286468506, step time: 15.579938888549805ms\r\nStep 285, loss: 0.057676590979099274, step time: 
14.675140380859375ms\r\nStep 286, loss: 0.057495858520269394, step time: 14.74761962890625ms\r\nStep 287, loss: 0.05731789022684097, step time: 14.422416687011719ms\r\nStep 288, loss: 0.05715030059218407, step time: 16.103029251098633ms\r\nStep 289, loss: 0.05696549266576767, step time: 14.826297760009766ms\r\nStep 290, loss: 0.056793469935655594, step time: 15.477418899536133ms\r\nStep 291, loss: 0.056620486080646515, step time: 14.864683151245117ms\r\nStep 292, loss: 0.05645710229873657, step time: 15.084266662597656ms\r\nStep 293, loss: 0.05627600476145744, step time: 14.89114761352539ms\r\nStep 294, loss: 0.056117087602615356, step time: 15.707731246948242ms\r\nStep 295, loss: 0.055939167737960815, step time: 14.939308166503906ms\r\nStep 296, loss: 0.05576464906334877, step time: 15.87677001953125ms\r\nStep 297, loss: 0.05559298023581505, step time: 14.886140823364258ms\r\nStep 298, loss: 0.05542769283056259, step time: 14.986753463745117ms\r\nStep 299, loss: 0.0552678182721138, step time: 14.769315719604492ms\r\n",,terminal_output +2744,1825638,"TERMINAL",0,0,"Step 300, loss: 0.05509178712964058, step time: 21.943092346191406ms\r\nStep 301, loss: 0.05491688847541809, step time: 18.48149299621582ms\r\nStep 302, loss: 0.054749056696891785, step time: 17.075061798095703ms\r\nStep 303, loss: 0.054581377655267715, step time: 16.99542999267578ms\r\nStep 304, loss: 0.05441867187619209, step time: 15.079736709594727ms\r\nStep 305, loss: 0.05424065887928009, step time: 14.800548553466797ms\r\nStep 306, loss: 0.05411436781287193, step time: 15.566349029541016ms\r\n",,terminal_output +2745,1825761,"TERMINAL",0,0,"Step 307, loss: 0.053933676332235336, step time: 14.643192291259766ms\r\nStep 308, loss: 0.05377134308218956, step time: 15.570402145385742ms\r\nStep 309, loss: 0.05361742898821831, step time: 14.791011810302734ms\r\nStep 310, loss: 0.05344615876674652, step time: 14.941930770874023ms\r\nStep 311, loss: 0.053290486335754395, step time: 14.643192291259766ms\r\n",,terminal_output +2746,1825931,"TERMINAL",0,0,"Step 312, loss: 0.053141120821237564, step time: 15.334129333496094ms\r\nStep 313, loss: 0.05297868698835373, step time: 14.592409133911133ms\r\nStep 314, loss: 0.05282750353217125, step time: 15.374898910522461ms\r\nStep 315, loss: 0.05267007276415825, step time: 14.336347579956055ms\r\nStep 316, loss: 0.05251329392194748, step time: 15.027999877929688ms\r\nStep 317, loss: 0.05235614627599716, step time: 14.685869216918945ms\r\nStep 318, loss: 0.0521971620619297, step time: 15.351533889770508ms\r\nStep 319, loss: 0.05204544961452484, step time: 14.703989028930664ms\r\n",,terminal_output +2747,1826108,"TERMINAL",0,0,"121",,terminal_output +2748,1826151,"TERMINAL",0,0,"Step 320, loss: 0.05189427360892296, step time: 15.488624572753906ms\r\nStep 321, loss: 0.05172489583492279, step time: 14.77956771850586ms\r\nStep 322, loss: 0.051581453531980515, step time: 14.810323715209961ms\r\nStep 323, loss: 0.05142321065068245, step time: 14.54305648803711ms\r\nStep 324, loss: 0.0512666329741478, step time: 15.29836654663086ms\r\nStep 325, loss: 0.05112284794449806, step time: 14.548540115356445ms\r\nStep 326, loss: 0.050962284207344055, step time: 15.51198959350586ms\r\nStep 327, loss: 0.050810351967811584, step time: 14.55378532409668ms\r\nStep 328, loss: 0.05065859109163284, step time: 14.88637924194336ms\r\nStep 329, loss: 0.05050339549779892, step time: 14.887332916259766ms\r\n",,terminal_output +2749,1826291,"TERMINAL",0,0,"Step 330, loss: 0.05034905672073364, step time: 
15.158653259277344ms\r\nStep 331, loss: 0.0501900352537632, step time: 14.583349227905273ms\r\nStep 332, loss: 0.05005808174610138, step time: 15.449762344360352ms\r\nStep 333, loss: 0.04988996684551239, step time: 14.621734619140625ms\r\nStep 334, loss: 0.04975801706314087, step time: 14.653444290161133ms\r\nStep 335, loss: 0.04959884285926819, step time: 14.50490951538086ms\r\n",,terminal_output +2750,1826504,"TERMINAL",0,0,"Step 336, loss: 0.04944945499300957, step time: 15.410423278808594ms\r\nStep 337, loss: 0.049311742186546326, step time: 14.600992202758789ms\r\nStep 338, loss: 0.049164824187755585, step time: 15.502452850341797ms\r\nStep 339, loss: 0.04901587963104248, step time: 14.458656311035156ms\r\nStep 340, loss: 0.04886757954955101, step time: 14.279365539550781ms\r\nStep 341, loss: 0.04872959107160568, step time: 14.392852783203125ms\r\nStep 342, loss: 0.048577096313238144, step time: 15.065908432006836ms\r\nStep 343, loss: 0.048438530415296555, step time: 14.536619186401367ms\r\nStep 344, loss: 0.04829784482717514, step time: 15.336036682128906ms\r\nStep 345, loss: 0.04815376549959183, step time: 14.313459396362305ms\r\n",,terminal_output +2751,1826683,"TERMINAL",0,0,"Step 346, loss: 0.04799240455031395, step time: 14.573097229003906ms\r\nStep 347, loss: 0.04785452038049698, step time: 14.382123947143555ms\r\nStep 348, loss: 0.04771886393427849, step time: 15.471696853637695ms\r\nStep 349, loss: 0.04756438359618187, step time: 14.747858047485352ms\r\nStep 350, loss: 0.04742643982172012, step time: 15.2130126953125ms\r\nStep 351, loss: 0.04727621376514435, step time: 14.569282531738281ms\r\nStep 352, loss: 0.04713345691561699, step time: 14.692306518554688ms\r\nStep 353, loss: 0.046992022544145584, step time: 14.404773712158203ms\r\n",,terminal_output +2752,1826870,"TERMINAL",0,0,"Step 354, loss: 0.046858854591846466, step time: 17.044544219970703ms\r\nStep 355, loss: 0.04670434445142746, step time: 14.725446701049805ms\r\nStep 356, loss: 0.046570200473070145, step time: 15.462398529052734ms\r\nStep 357, loss: 0.04642631858587265, step time: 14.791488647460938ms\r\nStep 358, loss: 0.04630787670612335, step time: 26.107311248779297ms\r\nStep 359, loss: 0.04614880681037903, step time: 15.713214874267578ms\r\nStep 360, loss: 0.046025246381759644, step time: 15.88296890258789ms\r\nStep 361, loss: 0.0459081269800663, step time: 14.799356460571289ms\r\n",,terminal_output +2753,1827118,"TERMINAL",0,0,"Step 362, loss: 0.04574566334486008, step time: 15.673398971557617ms\r\nStep 363, loss: 0.0456223338842392, step time: 14.461040496826172ms\r\nStep 364, loss: 0.0454985611140728, step time: 14.83464241027832ms\r\nStep 365, loss: 0.0453406386077404, step time: 14.557838439941406ms\r\nStep 366, loss: 0.045210372656583786, step time: 15.381097793579102ms\r\nStep 367, loss: 0.04509182646870613, step time: 14.57357406616211ms\r\nStep 368, loss: 0.04494044557213783, step time: 15.291690826416016ms\r\nStep 369, loss: 0.04480402544140816, step time: 14.514446258544922ms\r\nStep 370, loss: 0.04468081519007683, step time: 14.339685440063477ms\r\nStep 371, loss: 0.044539324939250946, step time: 14.435052871704102ms\r\nStep 372, loss: 0.04440223053097725, step time: 15.215396881103516ms\r\n",,terminal_output +2754,1827196,"TERMINAL",0,0,"32",,terminal_output +2755,1827293,"TERMINAL",0,0,"Step 373, loss: 0.044271182268857956, step time: 14.49894905090332ms\r\nStep 374, loss: 0.04413921386003494, step time: 15.357732772827148ms\r\nStep 375, loss: 0.04399741441011429, step time: 
14.459848403930664ms\r\nStep 376, loss: 0.043864525854587555, step time: 14.713525772094727ms\r\nStep 377, loss: 0.04373336210846901, step time: 14.42408561706543ms\r\nStep 378, loss: 0.04359850287437439, step time: 15.093564987182617ms\r\nStep 379, loss: 0.043464452028274536, step time: 14.394044876098633ms\r\nStep 380, loss: 0.04334007576107979, step time: 15.432119369506836ms\r\n",,terminal_output +2756,1827375,"TERMINAL",0,0,"Step 381, loss: 0.04319867491722107, step time: 14.724969863891602ms\r\nStep 382, loss: 0.0430779904127121, step time: 14.853477478027344ms\r\nStep 383, loss: 0.042940251529216766, step time: 14.626741409301758ms\r\nStep 384, loss: 0.04282127320766449, step time: 15.594244003295898ms\r\n",,terminal_output +2757,1827822,"TERMINAL",0,0,"Step 385, loss: 0.042684540152549744, step time: 19.570350646972656ms\r\nStep 386, loss: 0.042570460587739944, step time: 16.735076904296875ms\r\nStep 387, loss: 0.04243835061788559, step time: 15.107870101928711ms\r\nStep 388, loss: 0.04231490194797516, step time: 15.104532241821289ms\r\nStep 389, loss: 0.042190711945295334, step time: 14.757156372070312ms\r\nStep 390, loss: 0.04206245765089989, step time: 15.310049057006836ms\r\nStep 391, loss: 0.041940659284591675, step time: 14.961004257202148ms\r\nStep 392, loss: 0.04182455688714981, step time: 15.566110610961914ms\r\nStep 393, loss: 0.04169619455933571, step time: 14.756917953491211ms\r\nStep 394, loss: 0.041570309549570084, step time: 14.786243438720703ms\r\nStep 395, loss: 0.041443921625614166, step time: 14.524698257446289ms\r\nStep 396, loss: 0.041328199207782745, step time: 15.395641326904297ms\r\nStep 397, loss: 0.041199833154678345, step time: 14.695167541503906ms\r\nStep 398, loss: 0.04107361659407616, step time: 15.690803527832031ms\r\nStep 399, loss: 0.040950801223516464, step time: 15.224933624267578ms\r\n",,terminal_output +2758,1827885,"TERMINAL",0,0,"Step 400, loss: 0.04084661602973938, step time: 21.592378616333008ms\r\n",,terminal_output +2759,1827972,"TERMINAL",0,0,"Step 401, loss: 0.0407092310488224, step time: 17.19522476196289ms\r\nStep 402, loss: 0.04058854281902313, step time: 21.133899688720703ms\r\nStep 403, loss: 0.04048268124461174, step time: 15.572786331176758ms\r\n",,terminal_output +2760,1828072,"TERMINAL",0,0,"Step 404, loss: 0.04035705327987671, step time: 16.133785247802734ms\r\nStep 405, loss: 0.04024110734462738, step time: 14.952421188354492ms\r\nStep 406, loss: 0.040125034749507904, step time: 15.10930061340332ms\r\nStep 407, loss: 0.04000481963157654, step time: 14.963150024414062ms\r\n",,terminal_output +2761,1828163,"TERMINAL",0,0,"Step 408, loss: 0.03989247977733612, step time: 15.7928466796875ms\r\nStep 409, loss: 0.03977774456143379, step time: 14.869213104248047ms\r\nStep 410, loss: 0.03965938836336136, step time: 15.47694206237793ms\r\nStep 411, loss: 0.03954024985432625, step time: 14.723777770996094ms\r\nStep 412, loss: 0.03943559527397156, step time: 14.865875244140625ms\r\n",,terminal_output +2762,1828246,"TERMINAL",0,0,"43",,terminal_output +2763,1828466,"TERMINAL",0,0,"Step 413, loss: 0.03931690752506256, step time: 14.732599258422852ms\r\nStep 414, loss: 0.03920217975974083, step time: 15.4266357421875ms\r\nStep 415, loss: 0.039080169051885605, step time: 14.563322067260742ms\r\nStep 416, loss: 0.038971491158008575, step time: 15.762567520141602ms\r\nStep 417, loss: 0.03884986788034439, step time: 14.683246612548828ms\r\nStep 418, loss: 0.03874753415584564, step time: 14.873266220092773ms\r\nStep 419, loss: 
0.03863527998328209, step time: 14.683723449707031ms\r\nStep 420, loss: 0.03851598873734474, step time: 15.231609344482422ms\r\nStep 421, loss: 0.03841600567102432, step time: 14.787673950195312ms\r\nStep 422, loss: 0.03830425441265106, step time: 15.604972839355469ms\r\nStep 423, loss: 0.038201648741960526, step time: 14.525890350341797ms\r\nStep 424, loss: 0.03809196129441261, step time: 14.684438705444336ms\r\nStep 425, loss: 0.03798145428299904, step time: 14.552116394042969ms\r\n",,terminal_output +2764,1828603,"TERMINAL",0,0,"Step 426, loss: 0.03787994384765625, step time: 15.343189239501953ms\r\nStep 427, loss: 0.0377717986702919, step time: 14.716148376464844ms\r\nStep 428, loss: 0.03767409920692444, step time: 15.68460464477539ms\r\nStep 429, loss: 0.0375639870762825, step time: 14.804601669311523ms\r\nStep 430, loss: 0.0374525785446167, step time: 14.835596084594727ms\r\nStep 431, loss: 0.037354372441768646, step time: 14.910221099853516ms\r\n",,terminal_output +2765,1828771,"TERMINAL",0,0,"Step 432, loss: 0.037250399589538574, step time: 15.702486038208008ms\r\nStep 433, loss: 0.037147533148527145, step time: 14.818906784057617ms\r\nStep 434, loss: 0.03703729063272476, step time: 15.63262939453125ms\r\nStep 435, loss: 0.03693998232483864, step time: 14.628887176513672ms\r\nStep 436, loss: 0.03684254363179207, step time: 15.090227127075195ms\r\nStep 437, loss: 0.036739178001880646, step time: 14.667272567749023ms\r\nStep 438, loss: 0.03663460537791252, step time: 15.477180480957031ms\r\nStep 439, loss: 0.03653516620397568, step time: 14.758110046386719ms\r\n",,terminal_output +2766,1828825,"TERMINAL",0,0,"Step 440, loss: 0.036437809467315674, step time: 15.585899353027344ms\r\nStep 441, loss: 0.03634442016482353, step time: 14.783620834350586ms\r\n",,terminal_output +2767,1828905,"TERMINAL",0,0,"Step 442, loss: 0.03623717278242111, step time: 15.018939971923828ms\r\nStep 443, loss: 0.03614958003163338, step time: 14.780998229980469ms\r\nStep 444, loss: 0.036048222333192825, step time: 15.471458435058594ms\r\nStep 445, loss: 0.03594765439629555, step time: 14.490127563476562ms\r\n",,terminal_output +2768,1829246,"TERMINAL",0,0,"54",,terminal_output +2769,1829378,"TERMINAL",0,0,"Step 446, loss: 0.035864025354385376, step time: 15.676736831665039ms\r\nStep 447, loss: 0.035758230835199356, step time: 14.995336532592773ms\r\nStep 448, loss: 0.03566578030586243, step time: 14.900445938110352ms\r\nStep 449, loss: 0.03557426109910011, step time: 14.522314071655273ms\r\nStep 450, loss: 0.03547542914748192, step time: 15.250682830810547ms\r\nStep 451, loss: 0.03538646921515465, step time: 14.86063003540039ms\r\nStep 452, loss: 0.035303663462400436, step time: 15.497446060180664ms\r\nStep 453, loss: 0.035198599100112915, step time: 14.512777328491211ms\r\nStep 454, loss: 0.0351102277636528, step time: 14.723539352416992ms\r\nStep 455, loss: 0.035022422671318054, step time: 14.279603958129883ms\r\nStep 456, loss: 0.03493376821279526, step time: 15.142440795898438ms\r\nStep 457, loss: 0.034836333245038986, step time: 14.501094818115234ms\r\nStep 458, loss: 0.034755222499370575, step time: 17.868518829345703ms\r\nStep 459, loss: 0.03465690836310387, step time: 14.682292938232422ms\r\nStep 460, loss: 0.03458121046423912, step time: 14.803647994995117ms\r\nStep 461, loss: 0.034495674073696136, step time: 14.636039733886719ms\r\nStep 462, loss: 0.034405846148729324, step time: 15.341520309448242ms\r\nStep 463, loss: 0.034313056617975235, step time: 14.588356018066406ms\r\nStep 464, loss: 
0.03423354774713516, step time: 15.429973602294922ms\r\nStep 465, loss: 0.034149352461099625, step time: 14.533519744873047ms\r\nStep 466, loss: 0.03406291455030441, step time: 14.972448348999023ms\r\n",,terminal_output +2770,1829433,"TERMINAL",0,0,"Step 467, loss: 0.033980097621679306, step time: 14.713287353515625ms\r\nStep 468, loss: 0.03389302268624306, step time: 15.590190887451172ms\r\nStep 469, loss: 0.03381432220339775, step time: 14.754772186279297ms\r\n",,terminal_output +2771,1829651,"TERMINAL",0,0,"Step 470, loss: 0.03373045474290848, step time: 15.430688858032227ms\r\nStep 471, loss: 0.033647507429122925, step time: 14.711618423461914ms\r\nStep 472, loss: 0.033574823290109634, step time: 14.921188354492188ms\r\nStep 473, loss: 0.033491119742393494, step time: 14.610052108764648ms\r\nStep 474, loss: 0.03341370448470116, step time: 15.487194061279297ms\r\nStep 475, loss: 0.033332083374261856, step time: 14.416933059692383ms\r\nStep 476, loss: 0.03325434401631355, step time: 15.413522720336914ms\r\nStep 477, loss: 0.033171527087688446, step time: 14.508962631225586ms\r\nStep 478, loss: 0.03310016542673111, step time: 14.635086059570312ms\r\nStep 479, loss: 0.03301452845335007, step time: 14.448404312133789ms\r\n",,terminal_output +2772,1829880,"TERMINAL",0,0,"Step 480, loss: 0.032943371683359146, step time: 15.265464782714844ms\r\nStep 481, loss: 0.03287220373749733, step time: 14.706134796142578ms\r\nStep 482, loss: 0.03278546780347824, step time: 15.674591064453125ms\r\nStep 483, loss: 0.03271225467324257, step time: 14.595508575439453ms\r\nStep 484, loss: 0.0326424166560173, step time: 14.950990676879883ms\r\nStep 485, loss: 0.03256593644618988, step time: 14.358997344970703ms\r\nStep 486, loss: 0.0324866957962513, step time: 15.403032302856445ms\r\nStep 487, loss: 0.03241315111517906, step time: 14.704465866088867ms\r\nStep 488, loss: 0.03233987092971802, step time: 19.114017486572266ms\r\nStep 489, loss: 0.03227201849222183, step time: 14.824151992797852ms\r\n",,terminal_output +2773,1829975,"TERMINAL",0,0,"Step 490, loss: 0.032197993248701096, step time: 14.911413192749023ms\r\nStep 491, loss: 0.03212234005331993, step time: 14.766216278076172ms\r\nStep 492, loss: 0.032053008675575256, step time: 15.635013580322266ms\r\nStep 493, loss: 0.03199745714664459, step time: 14.915227890014648ms\r\n",,terminal_output +2774,1830105,"TERMINAL",0,0,"Step 494, loss: 0.03191502392292023, step time: 15.958786010742188ms\r\nStep 495, loss: 0.031842902302742004, step time: 14.489889144897461ms\r\nStep 496, loss: 0.031776316463947296, step time: 14.907360076904297ms\r\nStep 497, loss: 0.03170435503125191, step time: 14.83154296875ms\r\nStep 498, loss: 0.03163660317659378, step time: 15.589475631713867ms\r\nStep 499, loss: 0.03156886622309685, step time: 14.716625213623047ms\r\n",,terminal_output +2775,1830277,"TERMINAL",0,0,"65",,terminal_output +2776,1831325,"TERMINAL",0,0,"76",,terminal_output +2777,1832202,"TERMINAL",0,0,"Step 500, loss: 0.03149690479040146, step time: 20.807743072509766ms\r\nStep 501, loss: 0.031431447714567184, step time: 23.747682571411133ms\r\n",,terminal_output +2778,1832325,"TERMINAL",0,0,"Step 502, loss: 0.03135818988084793, step time: 20.16425132751465ms\r\nStep 503, loss: 0.03129955753684044, step time: 18.670320510864258ms\r\nStep 504, loss: 0.031225301325321198, step time: 19.707202911376953ms\r\nStep 505, loss: 0.031157827004790306, step time: 18.14723014831543ms\r\n",,terminal_output +2779,1832410,"TERMINAL",0,0,"87",,terminal_output 
+2780,1832412,"TERMINAL",0,0,"Step 506, loss: 0.031096599996089935, step time: 15.956640243530273ms\r\nStep 507, loss: 0.031027715653181076, step time: 15.656471252441406ms\r\nStep 508, loss: 0.030964281409978867, step time: 15.651464462280273ms\r\nStep 509, loss: 0.03089892864227295, step time: 15.045404434204102ms\r\n",,terminal_output +2781,1832507,"TERMINAL",0,0,"Step 510, loss: 0.03082994371652603, step time: 15.578269958496094ms\r\nStep 511, loss: 0.030768854543566704, step time: 15.281200408935547ms\r\nStep 512, loss: 0.03071354515850544, step time: 15.25115966796875ms\r\nStep 513, loss: 0.03066265769302845, step time: 14.811992645263672ms\r\n",,terminal_output +2782,1832684,"TERMINAL",0,0,"Step 514, loss: 0.030585575848817825, step time: 15.110254287719727ms\r\nStep 515, loss: 0.030522959306836128, step time: 14.586210250854492ms\r\nStep 516, loss: 0.03047153726220131, step time: 15.314102172851562ms\r\nStep 517, loss: 0.030403805896639824, step time: 14.800071716308594ms\r\nStep 518, loss: 0.030344968661665916, step time: 15.223026275634766ms\r\nStep 519, loss: 0.030283357948064804, step time: 14.953374862670898ms\r\nStep 520, loss: 0.030220774933695793, step time: 15.02680778503418ms\r\nStep 521, loss: 0.030163660645484924, step time: 14.7705078125ms\r\n",,terminal_output +2783,1832868,"TERMINAL",0,0,"Step 522, loss: 0.03012058511376381, step time: 15.09237289428711ms\r\nStep 523, loss: 0.03005046397447586, step time: 15.006780624389648ms\r\nStep 524, loss: 0.029993463307619095, step time: 15.528440475463867ms\r\nStep 525, loss: 0.02993602864444256, step time: 15.004873275756836ms\r\nStep 526, loss: 0.02987089194357395, step time: 15.343189239501953ms\r\nStep 527, loss: 0.029820958152413368, step time: 14.860391616821289ms\r\nStep 528, loss: 0.029758770018815994, step time: 15.192031860351562ms\r\nStep 529, loss: 0.029704531654715538, step time: 14.681339263916016ms\r\n",,terminal_output +2784,1833102,"TERMINAL",0,0,"Step 530, loss: 0.029644673690199852, step time: 19.686222076416016ms\r\nStep 531, loss: 0.029599711298942566, step time: 21.681785583496094ms\r\nStep 532, loss: 0.02954094484448433, step time: 15.991687774658203ms\r\nStep 533, loss: 0.029487647116184235, step time: 15.39158821105957ms\r\nStep 534, loss: 0.029433825984597206, step time: 15.565872192382812ms\r\nStep 535, loss: 0.02938585914671421, step time: 14.759540557861328ms\r\nStep 536, loss: 0.029333721846342087, step time: 14.79029655456543ms\r\nStep 537, loss: 0.029270773753523827, step time: 14.922857284545898ms\r\nStep 538, loss: 0.02922496758401394, step time: 15.36417007446289ms\r\nStep 539, loss: 0.029173076152801514, step time: 14.827966690063477ms\r\n",,terminal_output +2785,1833287,"TERMINAL",0,0,"Step 540, loss: 0.02911575324833393, step time: 18.688201904296875ms\r\nStep 541, loss: 0.029067886993288994, step time: 15.027284622192383ms\r\nStep 542, loss: 0.029017427936196327, step time: 14.58120346069336ms\r\nStep 543, loss: 0.028968021273612976, step time: 14.735698699951172ms\r\nStep 544, loss: 0.028911905363202095, step time: 15.168190002441406ms\r\nStep 545, loss: 0.028867535293102264, step time: 14.690637588500977ms\r\nStep 546, loss: 0.028815215453505516, step time: 22.22752571105957ms\r\nStep 547, loss: 0.028766561299562454, step time: 15.779733657836914ms\r\n",,terminal_output +2786,1833443,"TERMINAL",0,0,"98",,terminal_output +2787,1833637,"TERMINAL",0,0,"Step 548, loss: 0.028717193752527237, step time: 14.817237854003906ms\r\nStep 549, loss: 0.028672005981206894, step time: 
14.956474304199219ms\r\nStep 550, loss: 0.02862308919429779, step time: 15.343427658081055ms\r\nStep 551, loss: 0.028578748926520348, step time: 14.64390754699707ms\r\nStep 552, loss: 0.02853366918861866, step time: 15.053033828735352ms\r\nStep 553, loss: 0.02847878821194172, step time: 14.635562896728516ms\r\nStep 554, loss: 0.028427766636013985, step time: 14.489412307739258ms\r\nStep 555, loss: 0.028386346995830536, step time: 14.683961868286133ms\r\nStep 556, loss: 0.028340840712189674, step time: 15.11383056640625ms\r\nStep 557, loss: 0.02828953042626381, step time: 14.582633972167969ms\r\nStep 558, loss: 0.02824779972434044, step time: 16.88361167907715ms\r\nStep 559, loss: 0.02819850668311119, step time: 14.601469039916992ms\r\nStep 560, loss: 0.02815905772149563, step time: 14.473915100097656ms\r\nStep 561, loss: 0.028112094849348068, step time: 14.962196350097656ms\r\nStep 562, loss: 0.02806100994348526, step time: 15.315771102905273ms\r\n",,terminal_output +2788,1833953,"TERMINAL",0,0,"Step 563, loss: 0.02802511304616928, step time: 14.890193939208984ms\r\nStep 564, loss: 0.027977855876088142, step time: 15.16103744506836ms\r\nStep 565, loss: 0.027934158220887184, step time: 14.709949493408203ms\r\nStep 566, loss: 0.027889080345630646, step time: 17.047882080078125ms\r\nStep 567, loss: 0.02783948741853237, step time: 14.991521835327148ms\r\nStep 568, loss: 0.027806667611002922, step time: 15.220165252685547ms\r\nStep 569, loss: 0.027762362733483315, step time: 14.804363250732422ms\r\nStep 570, loss: 0.02772408351302147, step time: 15.310525894165039ms\r\nStep 571, loss: 0.02768165059387684, step time: 14.887809753417969ms\r\nStep 572, loss: 0.027633460238575935, step time: 14.757633209228516ms\r\nStep 573, loss: 0.027590973302721977, step time: 14.822006225585938ms\r\nStep 574, loss: 0.02756212092936039, step time: 15.312671661376953ms\r\nStep 575, loss: 0.027518613263964653, step time: 14.770984649658203ms\r\nStep 576, loss: 0.027468962594866753, step time: 15.292882919311523ms\r\n",,terminal_output +2789,1834260,"TERMINAL",0,0,"Step 577, loss: 0.027435436844825745, step time: 18.341541290283203ms\r\nStep 578, loss: 0.027397016063332558, step time: 14.910459518432617ms\r\nStep 579, loss: 0.027355430647730827, step time: 14.957189559936523ms\r\nStep 580, loss: 0.027313224971294403, step time: 15.328168869018555ms\r\nStep 581, loss: 0.027276866137981415, step time: 15.029430389404297ms\r\nStep 582, loss: 0.027240557596087456, step time: 15.374183654785156ms\r\nStep 583, loss: 0.027196379378437996, step time: 14.911651611328125ms\r\nStep 584, loss: 0.027165008708834648, step time: 14.858245849609375ms\r\nStep 585, loss: 0.02712993696331978, step time: 14.81318473815918ms\r\nStep 586, loss: 0.027093205600976944, step time: 15.561342239379883ms\r\nStep 587, loss: 0.027063576504588127, step time: 14.995813369750977ms\r\nStep 588, loss: 0.027022799476981163, step time: 15.339136123657227ms\r\nStep 589, loss: 0.026980159804224968, step time: 14.838457107543945ms\r\nStep 590, loss: 0.02694324031472206, step time: 14.59360122680664ms\r\n",,terminal_output +2790,1834479,"TERMINAL",0,0,"Step 591, loss: 0.026915090158581734, step time: 14.731884002685547ms\r\nStep 592, loss: 0.026874609291553497, step time: 15.234708786010742ms\r\nStep 593, loss: 0.026833513751626015, step time: 14.699697494506836ms\r\nStep 594, loss: 0.026804374530911446, step time: 15.088558197021484ms\r\nStep 595, loss: 0.026769844815135002, step time: 14.609098434448242ms\r\nStep 596, loss: 0.026735490188002586, step 
time: 14.625072479248047ms\r\nStep 597, loss: 0.026699064299464226, step time: 14.805078506469727ms\r\nStep 598, loss: 0.026668202131986618, step time: 15.21921157836914ms\r\nStep 599, loss: 0.0266361553221941, step time: 14.739513397216797ms\r\n",,terminal_output +2791,1834480,"TERMINAL",0,0,"409",,terminal_output +2792,1834708,"TERMINAL",0,0,"Step 600, loss: 0.02659911848604679, step time: 25.89726448059082ms\r\nStep 601, loss: 0.026563379913568497, step time: 19.026517868041992ms\r\nStep 602, loss: 0.02653290145099163, step time: 15.536069869995117ms\r\n",,terminal_output +2793,1835000,"TERMINAL",0,0,"Step 603, loss: 0.026495881378650665, step time: 306.9338798522949ms\r\nStep 604, loss: 0.026468690484762192, step time: 23.814678192138672ms\r\n",,terminal_output +2794,1835548,"TERMINAL",0,0,"130",,terminal_output +2795,1835683,"TERMINAL",0,0,"Step 605, loss: 0.02644549496471882, step time: 19.290685653686523ms\r\nStep 606, loss: 0.026405462995171547, step time: 16.155242919921875ms\r\nStep 607, loss: 0.02637217380106449, step time: 16.040563583374023ms\r\nStep 608, loss: 0.02634248696267605, step time: 15.110969543457031ms\r\nStep 609, loss: 0.02631063014268875, step time: 15.136480331420898ms\r\nStep 610, loss: 0.02627747878432274, step time: 15.109062194824219ms\r\nStep 611, loss: 0.026248587295413017, step time: 14.897346496582031ms\r\nStep 612, loss: 0.026209546253085136, step time: 15.30599594116211ms\r\nStep 613, loss: 0.026193419471383095, step time: 14.882802963256836ms\r\nStep 614, loss: 0.02616789937019348, step time: 25.5126953125ms\r\nStep 615, loss: 0.02613803558051586, step time: 16.35885238647461ms\r\nStep 616, loss: 0.02609967440366745, step time: 16.05844497680664ms\r\nStep 617, loss: 0.026069195941090584, step time: 15.090703964233398ms\r\nStep 618, loss: 0.026042617857456207, step time: 15.468120574951172ms\r\nStep 619, loss: 0.026014013215899467, step time: 14.59813117980957ms\r\nStep 620, loss: 0.025982432067394257, step time: 14.495611190795898ms\r\nStep 621, loss: 0.025949103757739067, step time: 14.838695526123047ms\r\nStep 622, loss: 0.025925980880856514, step time: 17.517805099487305ms\r\nStep 623, loss: 0.025901781395077705, step time: 15.949010848999023ms\r\nStep 624, loss: 0.025869295001029968, step time: 15.656709671020508ms\r\nStep 625, loss: 0.025843510404229164, step time: 15.278816223144531ms\r\nStep 626, loss: 0.025824010372161865, step time: 15.38991928100586ms\r\nStep 627, loss: 0.025795934721827507, step time: 15.145301818847656ms\r\nStep 628, loss: 0.02576940320432186, step time: 15.575647354125977ms\r\nStep 629, loss: 0.0257283803075552, step time: 19.77086067199707ms\r\nStep 630, loss: 0.0257074236869812, step time: 15.451908111572266ms\r\nStep 631, loss: 0.02567880041897297, step time: 14.936447143554688ms\r\nStep 632, loss: 0.025655489414930344, step time: 14.863252639770508ms\r\nStep 633, loss: 0.025625072419643402, step time: 14.96267318725586ms\r\nStep 634, loss: 0.025594687089323997, step time: 15.522480010986328ms\r\n",,terminal_output +2796,1836108,"TERMINAL",0,0,"Step 635, loss: 0.025568567216396332, step time: 14.723539352416992ms\r\nStep 636, loss: 0.0255509652197361, step time: 15.198230743408203ms\r\nStep 637, loss: 0.02552793174982071, step time: 14.508962631225586ms\r\nStep 638, loss: 0.02550797536969185, step time: 14.730691909790039ms\r\nStep 639, loss: 0.025484511628746986, step time: 14.78266716003418ms\r\nStep 640, loss: 0.025454554706811905, step time: 15.06805419921875ms\r\nStep 641, loss: 0.02542177028954029, step time: 
14.650344848632812ms\r\nStep 642, loss: 0.025398220866918564, step time: 15.118837356567383ms\r\nStep 643, loss: 0.025375423952937126, step time: 15.561342239379883ms\r\nStep 644, loss: 0.0253462977707386, step time: 14.701128005981445ms\r\nStep 645, loss: 0.0253184475004673, step time: 14.650821685791016ms\r\nStep 646, loss: 0.02530195750296116, step time: 15.251398086547852ms\r\nStep 647, loss: 0.02527744136750698, step time: 14.704227447509766ms\r\nStep 648, loss: 0.025250406935811043, step time: 15.058755874633789ms\r\nStep 649, loss: 0.025226108729839325, step time: 14.496326446533203ms\r\nStep 650, loss: 0.025202592834830284, step time: 14.444112777709961ms\r\nStep 651, loss: 0.025182539597153664, step time: 15.38228988647461ms\r\nStep 652, loss: 0.02516559511423111, step time: 15.206098556518555ms\r\nStep 653, loss: 0.025137485936284065, step time: 14.620304107666016ms\r\n",,terminal_output +2797,1836200,"TERMINAL",0,0,"Step 654, loss: 0.025108765810728073, step time: 15.128135681152344ms\r\nStep 655, loss: 0.025095218792557716, step time: 14.486074447631836ms\r\nStep 656, loss: 0.02507607638835907, step time: 14.611244201660156ms\r\nStep 657, loss: 0.025055531412363052, step time: 14.834165573120117ms\r\n",,terminal_output +2798,1836387,"TERMINAL",0,0,"Step 658, loss: 0.025035133585333824, step time: 15.240192413330078ms\r\nStep 659, loss: 0.02501675672829151, step time: 14.671802520751953ms\r\nStep 660, loss: 0.024987610056996346, step time: 17.697572708129883ms\r\nStep 661, loss: 0.024972638115286827, step time: 14.696359634399414ms\r\nStep 662, loss: 0.024949686601758003, step time: 14.77670669555664ms\r\nStep 663, loss: 0.02492685802280903, step time: 14.865398406982422ms\r\nStep 664, loss: 0.024898070842027664, step time: 15.274763107299805ms\r\nStep 665, loss: 0.02489134669303894, step time: 14.551639556884766ms\r\nStep 666, loss: 0.024870671331882477, step time: 15.08474349975586ms\r\n",,terminal_output +2799,1836599,"TERMINAL",0,0,"21",,terminal_output +2800,1836796,"TERMINAL",0,0,"Step 667, loss: 0.024846013635396957, step time: 14.54472541809082ms\r\nStep 668, loss: 0.02481805719435215, step time: 14.65606689453125ms\r\nStep 669, loss: 0.024793501943349838, step time: 14.689922332763672ms\r\nStep 670, loss: 0.0247796718031168, step time: 15.091657638549805ms\r\nStep 671, loss: 0.02476269192993641, step time: 15.919208526611328ms\r\nStep 672, loss: 0.024731304496526718, step time: 16.567707061767578ms\r\nStep 673, loss: 0.024715879932045937, step time: 15.239477157592773ms\r\nStep 674, loss: 0.024693671613931656, step time: 15.92707633972168ms\r\nStep 675, loss: 0.024669229984283447, step time: 14.893531799316406ms\r\nStep 676, loss: 0.024650434032082558, step time: 15.408039093017578ms\r\nStep 677, loss: 0.024628952145576477, step time: 14.836788177490234ms\r\nStep 678, loss: 0.02460264228284359, step time: 15.388965606689453ms\r\nStep 679, loss: 0.02459184266626835, step time: 15.665054321289062ms\r\nStep 680, loss: 0.024568697437644005, step time: 15.305042266845703ms\r\nStep 681, loss: 0.024556441232562065, step time: 15.002965927124023ms\r\nStep 682, loss: 0.024537857621908188, step time: 15.375614166259766ms\r\nStep 683, loss: 0.024521172046661377, step time: 14.899253845214844ms\r\nStep 684, loss: 0.02448578178882599, step time: 15.146970748901367ms\r\n",,terminal_output +2801,1837025,"TERMINAL",0,0,"Step 685, loss: 0.02446703426539898, step time: 14.906167984008789ms\r\nStep 686, loss: 0.02445380762219429, step time: 14.976024627685547ms\r\nStep 687, loss: 
0.02442941628396511, step time: 24.426937103271484ms\r\nStep 688, loss: 0.024411121383309364, step time: 16.04008674621582ms\r\nStep 689, loss: 0.02438395842909813, step time: 15.075206756591797ms\r\nStep 690, loss: 0.024363411590456963, step time: 15.436887741088867ms\r\nStep 691, loss: 0.02435321733355522, step time: 14.879703521728516ms\r\nStep 692, loss: 0.02432761713862419, step time: 14.62697982788086ms\r\nStep 693, loss: 0.024310937151312828, step time: 14.848709106445312ms\r\n",,terminal_output +2802,1837164,"TERMINAL",0,0,"Step 694, loss: 0.02428789809346199, step time: 15.115022659301758ms\r\nStep 695, loss: 0.024275299161672592, step time: 16.445159912109375ms\r\nStep 696, loss: 0.024261843413114548, step time: 19.409656524658203ms\r\nStep 697, loss: 0.02424730733036995, step time: 14.947652816772461ms\r\nStep 698, loss: 0.024224285036325455, step time: 14.75381851196289ms\r\nStep 699, loss: 0.0242046806961298, step time: 14.885663986206055ms\r\n",,terminal_output +2803,1837345,"TERMINAL",0,0,"Step 700, loss: 0.02419092319905758, step time: 25.960206985473633ms\r\nStep 701, loss: 0.024189772084355354, step time: 19.366979598999023ms\r\nStep 702, loss: 0.02417149394750595, step time: 18.9969539642334ms\r\n",,terminal_output +2804,1837661,"TERMINAL",0,0,"32",,terminal_output +2805,1837883,"TERMINAL",0,0,"Step 703, loss: 0.024151617661118507, step time: 17.499446868896484ms\r\nStep 704, loss: 0.0241183303296566, step time: 15.125036239624023ms\r\nStep 705, loss: 0.024108735844492912, step time: 15.198945999145508ms\r\nStep 706, loss: 0.024099918082356453, step time: 15.635490417480469ms\r\nStep 707, loss: 0.02407102845609188, step time: 15.035867691040039ms\r\nStep 708, loss: 0.024048885330557823, step time: 15.671253204345703ms\r\nStep 709, loss: 0.02403675764799118, step time: 15.072107315063477ms\r\nStep 710, loss: 0.02402113750576973, step time: 15.532732009887695ms\r\nStep 711, loss: 0.024006307125091553, step time: 15.155315399169922ms\r\nStep 712, loss: 0.023988449946045876, step time: 15.62190055847168ms\r\nStep 713, loss: 0.023971188813447952, step time: 14.919519424438477ms\r\nStep 714, loss: 0.023956241086125374, step time: 16.263723373413086ms\r\nStep 715, loss: 0.023943252861499786, step time: 15.072345733642578ms\r\nStep 716, loss: 0.023926882073283195, step time: 14.93382453918457ms\r\nStep 717, loss: 0.02390940487384796, step time: 15.018701553344727ms\r\nStep 718, loss: 0.02389669604599476, step time: 16.41058921813965ms\r\nStep 719, loss: 0.023880839347839355, step time: 14.895439147949219ms\r\nStep 720, loss: 0.023869192227721214, step time: 16.000986099243164ms\r\nStep 721, loss: 0.023872533813118935, step time: 15.352964401245117ms\r\nStep 722, loss: 0.023865699768066406, step time: 14.837503433227539ms\r\nStep 723, loss: 0.023831216618418694, step time: 14.906167984008789ms\r\nStep 724, loss: 0.0238134004175663, step time: 15.329360961914062ms\r\nStep 725, loss: 0.023810895159840584, step time: 14.77956771850586ms\r\nStep 726, loss: 0.02377772331237793, step time: 15.238523483276367ms\r\n",,terminal_output +2806,1837988,"TERMINAL",0,0,"Step 727, loss: 0.02376372367143631, step time: 14.87421989440918ms\r\nStep 728, loss: 0.023760054260492325, step time: 14.964103698730469ms\r\nStep 729, loss: 0.02372412383556366, step time: 15.030145645141602ms\r\nStep 730, loss: 0.023724624887108803, step time: 15.217304229736328ms\r\n",,terminal_output +2807,1838128,"TERMINAL",0,0,"Step 731, loss: 0.023703204467892647, step time: 14.896392822265625ms\r\nStep 732, loss: 
0.023681635037064552, step time: 15.245676040649414ms\r\nStep 733, loss: 0.02367774024605751, step time: 14.707565307617188ms\r\nStep 734, loss: 0.02365870773792267, step time: 14.644861221313477ms\r\nStep 735, loss: 0.02364082634449005, step time: 14.92166519165039ms\r\nStep 736, loss: 0.023636506870388985, step time: 15.424013137817383ms\r\nStep 737, loss: 0.023620381951332092, step time: 14.812707901000977ms\r\n",,terminal_output +2808,1838300,"TERMINAL",0,0,"Step 738, loss: 0.023601561784744263, step time: 15.259742736816406ms\r\nStep 739, loss: 0.023591414093971252, step time: 14.724969863891602ms\r\nStep 740, loss: 0.023578954860568047, step time: 14.557123184204102ms\r\nStep 741, loss: 0.02356891520321369, step time: 14.723062515258789ms\r\nStep 742, loss: 0.023559292778372765, step time: 15.250444412231445ms\r\nStep 743, loss: 0.02355981059372425, step time: 15.709400177001953ms\r\nStep 744, loss: 0.02354344353079796, step time: 16.46733283996582ms\r\nStep 745, loss: 0.0235225148499012, step time: 14.653682708740234ms\r\n",,terminal_output +2809,1838387,"TERMINAL",0,0,"Step 746, loss: 0.02350527234375477, step time: 14.699697494506836ms\r\nStep 747, loss: 0.023492055013775826, step time: 14.73855972290039ms\r\nStep 748, loss: 0.02348387986421585, step time: 15.336751937866211ms\r\nStep 749, loss: 0.023458421230316162, step time: 14.687776565551758ms\r\n",,terminal_output +2810,1838528,"TERMINAL",0,0,"Step 750, loss: 0.023454824462532997, step time: 15.04206657409668ms\r\nStep 751, loss: 0.023439116775989532, step time: 14.789581298828125ms\r\nStep 752, loss: 0.023424429818987846, step time: 24.863243103027344ms\r\nStep 753, loss: 0.023407287895679474, step time: 15.27261734008789ms\r\nStep 754, loss: 0.023399842903017998, step time: 15.799999237060547ms\r\n",,terminal_output +2811,1838591,"TERMINAL",0,0,"Step 755, loss: 0.023387057706713676, step time: 14.836311340332031ms\r\nStep 756, loss: 0.02337660640478134, step time: 15.067815780639648ms\r\n",,terminal_output +2812,1838680,"TERMINAL",0,0,"Step 757, loss: 0.023365909233689308, step time: 14.636039733886719ms\r\nStep 758, loss: 0.023370608687400818, step time: 14.537811279296875ms\r\nStep 759, loss: 0.023366250097751617, step time: 14.728307723999023ms\r\nStep 760, loss: 0.023342983797192574, step time: 14.976263046264648ms\r\nStep 761, loss: 0.023312190547585487, step time: 17.857789993286133ms\r\n",,terminal_output +2813,1838680,"TERMINAL",0,0,"43",,terminal_output +2814,1838807,"TERMINAL",0,0,"Step 762, loss: 0.023316117003560066, step time: 19.002676010131836ms\r\nStep 763, loss: 0.02329804003238678, step time: 14.567375183105469ms\r\nStep 764, loss: 0.023281095549464226, step time: 14.48965072631836ms\r\nStep 765, loss: 0.023266131058335304, step time: 14.508247375488281ms\r\nStep 766, loss: 0.023260442540049553, step time: 15.144824981689453ms\r\nStep 767, loss: 0.023236973211169243, step time: 14.918327331542969ms\r\n",,terminal_output +2815,1838864,"TERMINAL",0,0,"Step 768, loss: 0.02323063835501671, step time: 14.946937561035156ms\r\nStep 769, loss: 0.02322164736688137, step time: 14.645576477050781ms\r\n",,terminal_output +2816,1839343,"TERMINAL",0,0,"Step 770, loss: 0.02320816181600094, step time: 14.983654022216797ms\r\nStep 771, loss: 0.023195091634988785, step time: 15.531063079833984ms\r\nStep 772, loss: 0.023182325065135956, step time: 15.954256057739258ms\r\nStep 773, loss: 0.023174989968538284, step time: 14.889955520629883ms\r\nStep 774, loss: 0.023167256265878677, step time: 15.341520309448242ms\r\nStep 775, 
loss: 0.023148713633418083, step time: 14.568567276000977ms\r\nStep 776, loss: 0.02314125932753086, step time: 14.725923538208008ms\r\nStep 777, loss: 0.023127585649490356, step time: 14.920473098754883ms\r\nStep 778, loss: 0.023118773475289345, step time: 15.221595764160156ms\r\nStep 779, loss: 0.023111701011657715, step time: 14.719486236572266ms\r\nStep 780, loss: 0.023111918941140175, step time: 14.848947525024414ms\r\nStep 781, loss: 0.023109178990125656, step time: 14.549016952514648ms\r\nStep 782, loss: 0.023119596764445305, step time: 14.452934265136719ms\r\nStep 783, loss: 0.02308334968984127, step time: 14.526844024658203ms\r\nStep 784, loss: 0.023061471059918404, step time: 15.114545822143555ms\r\nStep 785, loss: 0.023065105080604553, step time: 14.610528945922852ms\r\nStep 786, loss: 0.023037880659103394, step time: 15.18702507019043ms\r\nStep 787, loss: 0.02302921563386917, step time: 14.727592468261719ms\r\nStep 788, loss: 0.02301769331097603, step time: 14.626502990722656ms\r\nStep 789, loss: 0.023002412170171738, step time: 15.655279159545898ms\r\nStep 790, loss: 0.02299531362950802, step time: 15.159130096435547ms\r\n",,terminal_output +2817,1839449,"TERMINAL",0,0,"Step 791, loss: 0.022974103689193726, step time: 14.783143997192383ms\r\nStep 792, loss: 0.02297927252948284, step time: 15.27094841003418ms\r\nStep 793, loss: 0.022957677021622658, step time: 14.716148376464844ms\r\nStep 794, loss: 0.022945893928408623, step time: 14.702081680297852ms\r\nStep 795, loss: 0.022938022390007973, step time: 14.799356460571289ms\r\n",,terminal_output +2818,1839506,"TERMINAL",0,0,"Step 796, loss: 0.02292793244123459, step time: 15.262126922607422ms\r\nStep 797, loss: 0.022920431569218636, step time: 14.701128005981445ms\r\n",,terminal_output +2819,1839571,"TERMINAL",0,0,"Step 798, loss: 0.02290530316531658, step time: 16.46280288696289ms\r\nStep 799, loss: 0.022903449833393097, step time: 14.904499053955078ms\r\n",,terminal_output +2820,1839685,"TERMINAL",0,0,"Step 800, loss: 0.02288905344903469, step time: 21.161317825317383ms\r\n",,terminal_output +2821,1839765,"TERMINAL",0,0,"54",,terminal_output +2822,1840008,"TERMINAL",0,0,"Step 801, loss: 0.02288573607802391, step time: 16.767024993896484ms\r\nStep 802, loss: 0.022892383858561516, step time: 16.608476638793945ms\r\nStep 803, loss: 0.02287645824253559, step time: 15.146017074584961ms\r\nStep 804, loss: 0.0228594858199358, step time: 15.339374542236328ms\r\nStep 805, loss: 0.022839078679680824, step time: 14.90330696105957ms\r\nStep 806, loss: 0.022835982963442802, step time: 14.795541763305664ms\r\nStep 807, loss: 0.022836439311504364, step time: 15.043973922729492ms\r\nStep 808, loss: 0.022811241447925568, step time: 15.480995178222656ms\r\nStep 809, loss: 0.022800223901867867, step time: 15.141487121582031ms\r\nStep 810, loss: 0.02279227040708065, step time: 15.645027160644531ms\r\nStep 811, loss: 0.022783303633332253, step time: 14.82391357421875ms\r\nStep 812, loss: 0.02278439886868, step time: 14.646530151367188ms\r\nStep 813, loss: 0.022770561277866364, step time: 15.443563461303711ms\r\nStep 814, loss: 0.022761931642889977, step time: 15.538215637207031ms\r\nStep 815, loss: 0.02275129407644272, step time: 14.673471450805664ms\r\n",,terminal_output +2823,1840173,"TERMINAL",0,0,"Step 816, loss: 0.02274291217327118, step time: 15.192747116088867ms\r\nStep 817, loss: 0.022739991545677185, step time: 27.971506118774414ms\r\nStep 818, loss: 0.022732118144631386, step time: 15.697240829467773ms\r\nStep 819, loss: 
0.022736797109246254, step time: 15.020608901977539ms\r\nStep 820, loss: 0.022735483944416046, step time: 15.819549560546875ms\r\nStep 821, loss: 0.02272668294608593, step time: 15.093803405761719ms\r\n",,terminal_output +2824,1840313,"TERMINAL",0,0,"Step 822, loss: 0.022702911868691444, step time: 15.382766723632812ms\r\nStep 823, loss: 0.022694511339068413, step time: 14.723062515258789ms\r\nStep 824, loss: 0.022693537175655365, step time: 14.61172103881836ms\r\nStep 825, loss: 0.0226789191365242, step time: 14.673948287963867ms\r\nStep 826, loss: 0.022663436830043793, step time: 15.198230743408203ms\r\nStep 827, loss: 0.022668611258268356, step time: 16.715526580810547ms\r\n",,terminal_output +2825,1840651,"TERMINAL",0,0,"Step 828, loss: 0.02265334129333496, step time: 16.102313995361328ms\r\nStep 829, loss: 0.022644994780421257, step time: 14.707803726196289ms\r\nStep 830, loss: 0.022640401497483253, step time: 14.678001403808594ms\r\nStep 831, loss: 0.022630766034126282, step time: 14.926910400390625ms\r\nStep 832, loss: 0.022626053541898727, step time: 15.526533126831055ms\r\nStep 833, loss: 0.022614534944295883, step time: 14.846086502075195ms\r\nStep 834, loss: 0.022607646882534027, step time: 15.365362167358398ms\r\nStep 835, loss: 0.022609489038586617, step time: 14.983892440795898ms\r\nStep 836, loss: 0.02260790951550007, step time: 18.665075302124023ms\r\nStep 837, loss: 0.022605139762163162, step time: 16.92938804626465ms\r\nStep 838, loss: 0.02260088361799717, step time: 15.943050384521484ms\r\nStep 839, loss: 0.02258913591504097, step time: 14.988183975219727ms\r\nStep 840, loss: 0.022571107372641563, step time: 15.265464782714844ms\r\nStep 841, loss: 0.02256481908261776, step time: 14.780521392822266ms\r\nStep 842, loss: 0.022564377635717392, step time: 14.782905578613281ms\r\n",,terminal_output +2826,1840799,"TERMINAL",0,0,"65",,terminal_output +2827,1841019,"TERMINAL",0,0,"Step 843, loss: 0.02255425602197647, step time: 14.963626861572266ms\r\nStep 844, loss: 0.022538624703884125, step time: 15.386343002319336ms\r\nStep 845, loss: 0.02252591773867607, step time: 15.04063606262207ms\r\nStep 846, loss: 0.022528333589434624, step time: 15.351057052612305ms\r\nStep 847, loss: 0.022518755868077278, step time: 14.795780181884766ms\r\nStep 848, loss: 0.022515635937452316, step time: 14.73379135131836ms\r\nStep 849, loss: 0.022496484220027924, step time: 14.936685562133789ms\r\nStep 850, loss: 0.02248552069067955, step time: 15.122413635253906ms\r\nStep 851, loss: 0.022483203560113907, step time: 14.839410781860352ms\r\nStep 852, loss: 0.022474732249975204, step time: 15.255451202392578ms\r\nStep 853, loss: 0.022481443360447884, step time: 14.826297760009766ms\r\nStep 854, loss: 0.022473830729722977, step time: 14.748573303222656ms\r\nStep 855, loss: 0.022473765537142754, step time: 15.368938446044922ms\r\nStep 856, loss: 0.022450143471360207, step time: 15.531063079833984ms\r\nStep 857, loss: 0.022447410970926285, step time: 14.85753059387207ms\r\nStep 858, loss: 0.022441908717155457, step time: 15.299320220947266ms\r\n",,terminal_output +2828,1841114,"TERMINAL",0,0,"Step 859, loss: 0.022442804649472237, step time: 14.81318473815918ms\r\nStep 860, loss: 0.022439369931817055, step time: 14.65153694152832ms\r\nStep 861, loss: 0.022423209622502327, step time: 15.099525451660156ms\r\nStep 862, loss: 0.022409597411751747, step time: 15.414953231811523ms\r\n",,terminal_output +2829,1841174,"TERMINAL",0,0,"Step 863, loss: 0.022410674020648003, step time: 14.899253845214844ms\r\nStep 
864, loss: 0.02240428887307644, step time: 15.475034713745117ms\r\n",,terminal_output +2830,1841638,"TERMINAL",0,0,"Step 865, loss: 0.022404087707400322, step time: 15.268564224243164ms\r\nStep 866, loss: 0.02239411137998104, step time: 14.943599700927734ms\r\nStep 867, loss: 0.02239091880619526, step time: 15.013694763183594ms\r\nStep 868, loss: 0.02238314040005207, step time: 15.38538932800293ms\r\nStep 869, loss: 0.02237575687468052, step time: 14.81938362121582ms\r\nStep 870, loss: 0.02236654981970787, step time: 14.974594116210938ms\r\nStep 871, loss: 0.022361576557159424, step time: 14.735698699951172ms\r\nStep 872, loss: 0.022354895249009132, step time: 14.582395553588867ms\r\nStep 873, loss: 0.022346079349517822, step time: 14.806509017944336ms\r\nStep 874, loss: 0.022353259846568108, step time: 15.08474349975586ms\r\nStep 875, loss: 0.02235334925353527, step time: 14.719963073730469ms\r\nStep 876, loss: 0.022366920486092567, step time: 15.19632339477539ms\r\nStep 877, loss: 0.02235107496380806, step time: 14.909744262695312ms\r\nStep 878, loss: 0.022332550957798958, step time: 14.822721481323242ms\r\nStep 879, loss: 0.022328374907374382, step time: 15.027999877929688ms\r\nStep 880, loss: 0.02232125587761402, step time: 15.190601348876953ms\r\nStep 881, loss: 0.022317351773381233, step time: 14.920234680175781ms\r\nStep 882, loss: 0.022293243557214737, step time: 15.211343765258789ms\r\nStep 883, loss: 0.022290386259555817, step time: 14.745712280273438ms\r\nStep 884, loss: 0.022285060957074165, step time: 14.717817306518555ms\r\nStep 885, loss: 0.022286657243967056, step time: 15.818357467651367ms\r\nStep 886, loss: 0.02226250059902668, step time: 15.613794326782227ms\r\n",,terminal_output +2831,1841841,"TERMINAL",0,0,"76",,terminal_output +2832,1841933,"TERMINAL",0,0,"Step 887, loss: 0.022254033014178276, step time: 14.861583709716797ms\r\nStep 888, loss: 0.022250613197684288, step time: 15.28310775756836ms\r\nStep 889, loss: 0.02225257083773613, step time: 14.621496200561523ms\r\nStep 890, loss: 0.022241000086069107, step time: 14.365434646606445ms\r\nStep 891, loss: 0.02223776839673519, step time: 14.671087265014648ms\r\nStep 892, loss: 0.02223183773458004, step time: 15.03753662109375ms\r\nStep 893, loss: 0.022221291437745094, step time: 14.611482620239258ms\r\nStep 894, loss: 0.022216100245714188, step time: 15.098094940185547ms\r\nStep 895, loss: 0.022212132811546326, step time: 14.428853988647461ms\r\nStep 896, loss: 0.02220507152378559, step time: 15.228509902954102ms\r\nStep 897, loss: 0.022189658135175705, step time: 14.802217483520508ms\r\nStep 898, loss: 0.022185711190104485, step time: 15.115499496459961ms\r\nStep 899, loss: 0.02219330705702305, step time: 14.784812927246094ms\r\n",,terminal_output +2833,1842114,"TERMINAL",0,0,"Step 900, loss: 0.02219344489276409, step time: 21.826982498168945ms\r\nStep 901, loss: 0.022203834727406502, step time: 16.901254653930664ms\r\nStep 902, loss: 0.022183500230312347, step time: 15.51198959350586ms\r\n",,terminal_output +2834,1842300,"TERMINAL",0,0,"Step 903, loss: 0.022162068635225296, step time: 15.091180801391602ms\r\nStep 904, loss: 0.02215547300875187, step time: 16.03984832763672ms\r\nStep 905, loss: 0.02215939201414585, step time: 14.886140823364258ms\r\nStep 906, loss: 0.02215021289885044, step time: 15.354156494140625ms\r\nStep 907, loss: 0.022125020623207092, step time: 14.814376831054688ms\r\nStep 908, loss: 0.02212425135076046, step time: 14.978647232055664ms\r\nStep 909, loss: 0.022128473967313766, step time: 
15.146017074584961ms\r\nStep 910, loss: 0.02211662568151951, step time: 15.233755111694336ms\r\n",,terminal_output +2835,1842395,"TERMINAL",0,0,"Step 911, loss: 0.022101547569036484, step time: 14.84227180480957ms\r\nStep 912, loss: 0.022097617387771606, step time: 15.306949615478516ms\r\nStep 913, loss: 0.022100981324911118, step time: 15.051126480102539ms\r\nStep 914, loss: 0.02210337296128273, step time: 14.792919158935547ms\r\n",,terminal_output +2836,1842484,"TERMINAL",0,0,"Step 915, loss: 0.022081531584262848, step time: 14.874935150146484ms\r\nStep 916, loss: 0.022067617624998093, step time: 15.434503555297852ms\r\nStep 917, loss: 0.02207697182893753, step time: 14.884471893310547ms\r\nStep 918, loss: 0.022081762552261353, step time: 22.51148223876953ms\r\n",,terminal_output +2837,1842584,"TERMINAL",0,0,"Step 919, loss: 0.022074805572628975, step time: 18.849611282348633ms\r\nStep 920, loss: 0.022053852677345276, step time: 14.963626861572266ms\r\nStep 921, loss: 0.02205035462975502, step time: 15.039205551147461ms\r\nStep 922, loss: 0.022046146914362907, step time: 15.884160995483398ms\r\n",,terminal_output +2838,1842874,"TERMINAL",0,0,"87",,terminal_output +2839,1842934,"TERMINAL",0,0,"Step 923, loss: 0.022052664309740067, step time: 15.036821365356445ms\r\nStep 924, loss: 0.022052733227610588, step time: 15.213966369628906ms\r\nStep 925, loss: 0.022034382447600365, step time: 14.65463638305664ms\r\nStep 926, loss: 0.02202269621193409, step time: 14.692544937133789ms\r\nStep 927, loss: 0.022017810493707657, step time: 14.878034591674805ms\r\nStep 928, loss: 0.022023748606443405, step time: 15.318632125854492ms\r\nStep 929, loss: 0.022025765851140022, step time: 14.827728271484375ms\r\nStep 930, loss: 0.022023068740963936, step time: 19.636154174804688ms\r\nStep 931, loss: 0.022026220336556435, step time: 15.041112899780273ms\r\nStep 932, loss: 0.02201366424560547, step time: 14.745950698852539ms\r\nStep 933, loss: 0.02200182154774666, step time: 14.865875244140625ms\r\nStep 934, loss: 0.0219978429377079, step time: 15.369653701782227ms\r\nStep 935, loss: 0.021993404254317284, step time: 14.510393142700195ms\r\nStep 936, loss: 0.022000813856720924, step time: 15.308141708374023ms\r\nStep 937, loss: 0.02198462001979351, step time: 14.768838882446289ms\r\nStep 938, loss: 0.021978920325636864, step time: 14.61482048034668ms\r\n",,terminal_output +2840,1843025,"TERMINAL",0,0,"Step 939, loss: 0.021969623863697052, step time: 14.861106872558594ms\r\nStep 940, loss: 0.02196458913385868, step time: 18.9058780670166ms\r\nStep 941, loss: 0.021971317008137703, step time: 15.011787414550781ms\r\nStep 942, loss: 0.02196926809847355, step time: 15.3350830078125ms\r\n",,terminal_output +2841,1843123,"TERMINAL",0,0,"Step 943, loss: 0.021979035809636116, step time: 14.867544174194336ms\r\nStep 944, loss: 0.021960275247693062, step time: 14.766216278076172ms\r\nStep 945, loss: 0.021951356902718544, step time: 14.74905014038086ms\r\nStep 946, loss: 0.021938631311058998, step time: 15.239477157592773ms\r\n",,terminal_output +2842,1843303,"TERMINAL",0,0,"Step 947, loss: 0.02193968929350376, step time: 14.731168746948242ms\r\nStep 948, loss: 0.02194545790553093, step time: 15.246152877807617ms\r\nStep 949, loss: 0.021946746855974197, step time: 14.823675155639648ms\r\nStep 950, loss: 0.02194736897945404, step time: 15.850067138671875ms\r\nStep 951, loss: 0.02193157933652401, step time: 15.372991561889648ms\r\nStep 952, loss: 0.021917158737778664, step time: 15.596151351928711ms\r\nStep 953, loss: 
0.021920273080468178, step time: 14.956235885620117ms\r\n",,terminal_output +2843,1843446,"TERMINAL",0,0,"Step 954, loss: 0.02191842719912529, step time: 15.320301055908203ms\r\nStep 955, loss: 0.02192460373044014, step time: 14.706850051879883ms\r\nStep 956, loss: 0.021908557042479515, step time: 14.735937118530273ms\r\nStep 957, loss: 0.02189754508435726, step time: 14.952659606933594ms\r\nStep 958, loss: 0.02189711667597294, step time: 15.377283096313477ms\r\nStep 959, loss: 0.021890318021178246, step time: 14.78886604309082ms\r\nStep 960, loss: 0.02189331129193306, step time: 15.25425910949707ms\r\n",,terminal_output +2844,1843511,"TERMINAL",0,0,"Step 961, loss: 0.02189229242503643, step time: 15.872716903686523ms\r\n",,terminal_output +2845,1843598,"TERMINAL",0,0,"Step 962, loss: 0.02188885398209095, step time: 14.719963073730469ms\r\nStep 963, loss: 0.021869132295250893, step time: 14.874458312988281ms\r\n",,terminal_output +2846,1843720,"TERMINAL",0,0,"Step 964, loss: 0.021860159933567047, step time: 15.248775482177734ms\r\nStep 965, loss: 0.021859314292669296, step time: 14.491558074951172ms\r\nStep 966, loss: 0.021861378103494644, step time: 15.043497085571289ms\r\nStep 967, loss: 0.021878207102417946, step time: 14.480829238891602ms\r\nStep 968, loss: 0.021882545202970505, step time: 14.345645904541016ms\r\nStep 969, loss: 0.021868202835321426, step time: 14.703750610351562ms\r\n",,terminal_output +2847,1843940,"TERMINAL",0,0,"98",,terminal_output +2848,1844103,"TERMINAL",0,0,"Step 970, loss: 0.021846557036042213, step time: 334.8267078399658ms\r\nStep 971, loss: 0.021839940920472145, step time: 20.095109939575195ms\r\nStep 972, loss: 0.021848060190677643, step time: 17.42076873779297ms\r\nStep 973, loss: 0.0218534953892231, step time: 15.645742416381836ms\r\nStep 974, loss: 0.02183069847524166, step time: 15.165328979492188ms\r\n",,terminal_output +2849,1844158,"TERMINAL",0,0,"Step 975, loss: 0.021813876926898956, step time: 14.904499053955078ms\r\nStep 976, loss: 0.021814575418829918, step time: 15.70272445678711ms\r\nStep 977, loss: 0.021827466785907745, step time: 15.008687973022461ms\r\nStep 978, loss: 0.0218174010515213, step time: 16.025066375732422ms\r\n",,terminal_output +2850,1844215,"TERMINAL",0,0,"Step 979, loss: 0.021804075688123703, step time: 15.03753662109375ms\r\nStep 980, loss: 0.021784480661153793, step time: 14.743804931640625ms\r\n",,terminal_output +2851,1844271,"TERMINAL",0,0,"Step 981, loss: 0.021788010373711586, step time: 14.858007431030273ms\r\nStep 982, loss: 0.0217980295419693, step time: 15.743732452392578ms\r\nStep 983, loss: 0.021789874881505966, step time: 15.035390853881836ms\r\n",,terminal_output +2852,1844347,"TERMINAL",0,0,"Step 984, loss: 0.021784715354442596, step time: 15.30766487121582ms\r\nStep 985, loss: 0.02177463285624981, step time: 14.60409164428711ms\r\n",,terminal_output +2853,1844539,"TERMINAL",0,0,"Step 986, loss: 0.021766869351267815, step time: 14.64700698852539ms\r\nStep 987, loss: 0.021766699850559235, step time: 14.54019546508789ms\r\nStep 988, loss: 0.02176026627421379, step time: 15.190601348876953ms\r\nStep 989, loss: 0.02176572009921074, step time: 15.831232070922852ms\r\nStep 990, loss: 0.021776271983981133, step time: 15.556097030639648ms\r\nStep 991, loss: 0.021775834262371063, step time: 14.651060104370117ms\r\nStep 992, loss: 0.02174953930079937, step time: 14.41335678100586ms\r\nStep 993, loss: 0.021734336391091347, step time: 14.479637145996094ms\r\nStep 994, loss: 0.02173462323844433, step time: 
15.379905700683594ms\r\nStep 995, loss: 0.021737506613135338, step time: 14.63937759399414ms\r\n",,terminal_output +2854,1844604,"TERMINAL",0,0,"Step 996, loss: 0.021740077063441277, step time: 15.148639678955078ms\r\nStep 997, loss: 0.02172192558646202, step time: 14.699697494506836ms\r\n",,terminal_output +2855,1844661,"TERMINAL",0,0,"Step 998, loss: 0.021711407229304314, step time: 15.2435302734375ms\r\nStep 999, loss: 0.02170371823012829, step time: 14.796257019042969ms\r\n",,terminal_output +2856,1844963,"TERMINAL",0,0,"509",,terminal_output +2857,1846001,"TERMINAL",0,0,"140",,terminal_output +2858,1846597,"TERMINAL",0,0,"Step 1000, loss: 0.021708019077777863, step time: 27.588844299316406ms\r\nStep 1001, loss: 0.021717172116041183, step time: 22.724390029907227ms\r\nStep 1002, loss: 0.021713558584451675, step time: 19.3789005279541ms\r\n",,terminal_output +2859,1847097,"TERMINAL",0,0,"21",,terminal_output +2860,1847121,"TERMINAL",0,0,"Step 1003, loss: 0.02169901691377163, step time: 16.134023666381836ms\r\nStep 1004, loss: 0.021682199090719223, step time: 15.613555908203125ms\r\nStep 1005, loss: 0.02167621999979019, step time: 19.264936447143555ms\r\nStep 1006, loss: 0.021689074113965034, step time: 18.03135871887207ms\r\nStep 1007, loss: 0.021674687042832375, step time: 16.144752502441406ms\r\nStep 1008, loss: 0.021679511293768883, step time: 15.359640121459961ms\r\nStep 1009, loss: 0.021676059812307358, step time: 15.288114547729492ms\r\nStep 1010, loss: 0.021672870963811874, step time: 15.2435302734375ms\r\nStep 1011, loss: 0.02165689691901207, step time: 15.685319900512695ms\r\nStep 1012, loss: 0.021638255566358566, step time: 14.852046966552734ms\r\nStep 1013, loss: 0.02164001762866974, step time: 15.781164169311523ms\r\nStep 1014, loss: 0.021640773862600327, step time: 14.986753463745117ms\r\nStep 1015, loss: 0.021646855399012566, step time: 14.963388442993164ms\r\nStep 1016, loss: 0.02165159396827221, step time: 15.063047409057617ms\r\nStep 1017, loss: 0.021631818264722824, step time: 15.682697296142578ms\r\nStep 1018, loss: 0.02161385864019394, step time: 14.813661575317383ms\r\nStep 1019, loss: 0.021600279957056046, step time: 15.424728393554688ms\r\nStep 1020, loss: 0.021611323580145836, step time: 14.858007431030273ms\r\nStep 1021, loss: 0.021608222275972366, step time: 15.09404182434082ms\r\nStep 1022, loss: 0.02161247842013836, step time: 14.8773193359375ms\r\n",,terminal_output +2861,1847704,"TERMINAL",0,0,"Step 1023, loss: 0.021593891084194183, step time: 15.595436096191406ms\r\nStep 1024, loss: 0.021581824868917465, step time: 15.078544616699219ms\r\nStep 1025, loss: 0.021566566079854965, step time: 15.575647354125977ms\r\nStep 1026, loss: 0.021570177748799324, step time: 15.319585800170898ms\r\nStep 1027, loss: 0.02157110534608364, step time: 14.906644821166992ms\r\nStep 1028, loss: 0.021565577015280724, step time: 15.054464340209961ms\r\nStep 1029, loss: 0.021561183035373688, step time: 15.666007995605469ms\r\nStep 1030, loss: 0.021542755886912346, step time: 14.939069747924805ms\r\nStep 1031, loss: 0.021535731852054596, step time: 15.67387580871582ms\r\nStep 1032, loss: 0.021531011909246445, step time: 15.249967575073242ms\r\nStep 1033, loss: 0.021537045016884804, step time: 15.027999877929688ms\r\nStep 1034, loss: 0.021549448370933533, step time: 14.766931533813477ms\r\nStep 1035, loss: 0.021554425358772278, step time: 15.4876708984375ms\r\nStep 1036, loss: 0.021534021943807602, step time: 15.199899673461914ms\r\nStep 1037, loss: 0.021493781358003616, step time: 
19.71745491027832ms\r\nStep 1038, loss: 0.021510230377316475, step time: 15.952587127685547ms\r\nStep 1039, loss: 0.021516356617212296, step time: 15.132904052734375ms\r\nStep 1040, loss: 0.021487664431333542, step time: 14.798164367675781ms\r\nStep 1041, loss: 0.021469125524163246, step time: 15.831947326660156ms\r\nStep 1042, loss: 0.021480659022927284, step time: 15.149831771850586ms\r\nStep 1043, loss: 0.021472107619047165, step time: 15.679121017456055ms\r\nStep 1044, loss: 0.021457171067595482, step time: 15.334844589233398ms\r\nStep 1045, loss: 0.021436845883727074, step time: 15.089750289916992ms\r\nStep 1046, loss: 0.021441016346216202, step time: 15.086889266967773ms\r\nStep 1047, loss: 0.021440964192152023, step time: 15.563488006591797ms\r\nStep 1048, loss: 0.02143562212586403, step time: 14.955282211303711ms\r\nStep 1049, loss: 0.02142125368118286, step time: 15.458106994628906ms\r\n",,terminal_output +2862,1847847,"TERMINAL",0,0,"Step 1050, loss: 0.021405227482318878, step time: 14.820337295532227ms\r\nStep 1051, loss: 0.02139957621693611, step time: 15.00558853149414ms\r\nStep 1052, loss: 0.021399224177002907, step time: 16.41368865966797ms\r\nStep 1053, loss: 0.02140067145228386, step time: 16.20650291442871ms\r\nStep 1054, loss: 0.02140207216143608, step time: 15.803098678588867ms\r\nStep 1055, loss: 0.02139958366751671, step time: 15.582799911499023ms\r\n",,terminal_output +2863,1848058,"TERMINAL",0,0,"Step 1056, loss: 0.021387914195656776, step time: 15.532970428466797ms\r\nStep 1057, loss: 0.021372171118855476, step time: 14.945507049560547ms\r\nStep 1058, loss: 0.021351292729377747, step time: 14.859914779663086ms\r\nStep 1059, loss: 0.02134779281914234, step time: 15.619277954101562ms\r\nStep 1060, loss: 0.021356604993343353, step time: 15.055418014526367ms\r\nStep 1061, loss: 0.021361729130148888, step time: 15.761852264404297ms\r\nStep 1062, loss: 0.021358951926231384, step time: 15.254497528076172ms\r\nStep 1063, loss: 0.021331610158085823, step time: 15.017986297607422ms\r\nStep 1064, loss: 0.021305201575160027, step time: 14.87421989440918ms\r\n",,terminal_output +2864,1848111,"TERMINAL",0,0,"Step 1065, loss: 0.02130337990820408, step time: 15.695810317993164ms\r\n",,terminal_output +2865,1848111,"TERMINAL",0,0,"32",,terminal_output +2866,1848182,"TERMINAL",0,0,"Step 1066, loss: 0.021318132057785988, step time: 15.116453170776367ms\r\nStep 1067, loss: 0.021320082247257233, step time: 18.40496063232422ms\r\n",,terminal_output +2867,1848300,"TERMINAL",0,0,"Step 1068, loss: 0.021299239248037338, step time: 15.434503555297852ms\r\nStep 1069, loss: 0.021277928724884987, step time: 15.16103744506836ms\r\nStep 1070, loss: 0.02126828208565712, step time: 14.665365219116211ms\r\nStep 1071, loss: 0.021266290917992592, step time: 15.569925308227539ms\r\nStep 1072, loss: 0.021278804168105125, step time: 15.011310577392578ms\r\nStep 1073, loss: 0.021267522126436234, step time: 15.424728393554688ms\r\nStep 1074, loss: 0.021253352984786034, step time: 15.122175216674805ms\r\nStep 1075, loss: 0.021233974024653435, step time: 14.928817749023438ms\r\n",,terminal_output +2868,1848651,"TERMINAL",0,0,"Step 1076, loss: 0.021220769733190536, step time: 15.025854110717773ms\r\nStep 1077, loss: 0.021215282380580902, step time: 15.479564666748047ms\r\nStep 1078, loss: 0.021219193935394287, step time: 14.951229095458984ms\r\nStep 1079, loss: 0.021214179694652557, step time: 15.301704406738281ms\r\nStep 1080, loss: 0.02120663970708847, step time: 14.82701301574707ms\r\nStep 1081, loss: 
0.021193252876400948, step time: 15.01154899597168ms\r\nStep 1082, loss: 0.0211652759462595, step time: 14.859914779663086ms\r\nStep 1083, loss: 0.021154088899493217, step time: 15.506505966186523ms\r\nStep 1084, loss: 0.021152066066861153, step time: 17.46392250061035ms\r\nStep 1085, loss: 0.02116098254919052, step time: 15.884876251220703ms\r\nStep 1086, loss: 0.02116360329091549, step time: 15.378952026367188ms\r\nStep 1087, loss: 0.021148009225726128, step time: 14.885187149047852ms\r\nStep 1088, loss: 0.021121257916092873, step time: 14.929533004760742ms\r\nStep 1089, loss: 0.021101003512740135, step time: 15.656471252441406ms\r\nStep 1090, loss: 0.02110118791460991, step time: 14.781951904296875ms\r\nStep 1091, loss: 0.02111205644905567, step time: 15.531063079833984ms\r\n",,terminal_output +2869,1848844,"TERMINAL",0,0,"Step 1092, loss: 0.02110414206981659, step time: 15.322208404541016ms\r\nStep 1093, loss: 0.021072328090667725, step time: 15.085697174072266ms\r\nStep 1094, loss: 0.021054329350590706, step time: 14.8468017578125ms\r\nStep 1095, loss: 0.02105465903878212, step time: 15.364646911621094ms\r\nStep 1096, loss: 0.021055499091744423, step time: 14.766931533813477ms\r\nStep 1097, loss: 0.021046871319413185, step time: 15.276432037353516ms\r\nStep 1098, loss: 0.02103576622903347, step time: 15.030145645141602ms\r\nStep 1099, loss: 0.02101113647222519, step time: 15.052318572998047ms\r\n",,terminal_output +2870,1849112,"TERMINAL",0,0,"Step 1100, loss: 0.020987721160054207, step time: 20.974397659301758ms\r\nStep 1101, loss: 0.02099030278623104, step time: 19.327640533447266ms\r\nStep 1102, loss: 0.020995447412133217, step time: 16.497135162353516ms\r\nStep 1103, loss: 0.021002009510993958, step time: 17.575740814208984ms\r\nStep 1104, loss: 0.02096392773091793, step time: 15.576601028442383ms\r\nStep 1105, loss: 0.020935161039233208, step time: 15.012264251708984ms\r\nStep 1106, loss: 0.020928645506501198, step time: 15.07115364074707ms\r\n",,terminal_output +2871,1849113,"TERMINAL",0,0,"44",,terminal_output +2872,1849429,"TERMINAL",0,0,"Step 1107, loss: 0.020932676270604134, step time: 15.491724014282227ms\r\nStep 1108, loss: 0.02093106135725975, step time: 14.940023422241211ms\r\nStep 1109, loss: 0.02091367170214653, step time: 15.779733657836914ms\r\nStep 1110, loss: 0.020890500396490097, step time: 14.921188354492188ms\r\nStep 1111, loss: 0.02086924947798252, step time: 15.074491500854492ms\r\nStep 1112, loss: 0.02086043171584606, step time: 14.97960090637207ms\r\nStep 1113, loss: 0.020853374153375626, step time: 15.716552734375ms\r\nStep 1114, loss: 0.020857805386185646, step time: 15.021324157714844ms\r\nStep 1115, loss: 0.02088249661028385, step time: 15.474081039428711ms\r\nStep 1116, loss: 0.020876983180642128, step time: 15.181541442871094ms\r\nStep 1117, loss: 0.020836863666772842, step time: 14.984846115112305ms\r\nStep 1118, loss: 0.020805858075618744, step time: 14.882802963256836ms\r\nStep 1119, loss: 0.020818084478378296, step time: 15.580892562866211ms\r\n",,terminal_output +2873,1849610,"TERMINAL",0,0,"Step 1120, loss: 0.020820708945393562, step time: 17.362117767333984ms\r\nStep 1121, loss: 0.0207842830568552, step time: 15.696287155151367ms\r\nStep 1122, loss: 0.020764503628015518, step time: 15.250444412231445ms\r\nStep 1123, loss: 0.02078583464026451, step time: 14.986753463745117ms\r\nStep 1124, loss: 0.02076266147196293, step time: 15.087366104125977ms\r\nStep 1125, loss: 0.02073628269135952, step time: 15.636205673217773ms\r\nStep 1126, loss: 
0.020731013268232346, step time: 15.022039413452148ms\r\nStep 1127, loss: 0.020731080323457718, step time: 15.607118606567383ms\r\nStep 1128, loss: 0.0207231342792511, step time: 15.160322189331055ms\r\n",,terminal_output +2874,1849915,"TERMINAL",0,0,"Step 1129, loss: 0.020691825076937675, step time: 14.812231063842773ms\r\nStep 1130, loss: 0.020679842680692673, step time: 14.608621597290039ms\r\nStep 1131, loss: 0.020679298788309097, step time: 16.19720458984375ms\r\nStep 1132, loss: 0.020678240805864334, step time: 14.846086502075195ms\r\nStep 1133, loss: 0.02065996825695038, step time: 15.416383743286133ms\r\nStep 1134, loss: 0.02064022794365883, step time: 15.160083770751953ms\r\nStep 1135, loss: 0.020621296018362045, step time: 14.848470687866211ms\r\nStep 1136, loss: 0.02060822956264019, step time: 14.841556549072266ms\r\nStep 1137, loss: 0.02059502713382244, step time: 15.688896179199219ms\r\nStep 1138, loss: 0.020602645352482796, step time: 15.154838562011719ms\r\nStep 1139, loss: 0.020621774718165398, step time: 15.47098159790039ms\r\nStep 1140, loss: 0.020617282018065453, step time: 14.964580535888672ms\r\nStep 1141, loss: 0.02055894024670124, step time: 14.9993896484375ms\r\n",,terminal_output +2875,1849967,"TERMINAL",0,0,"Step 1142, loss: 0.02052539959549904, step time: 14.983654022216797ms\r\nStep 1143, loss: 0.02055027335882187, step time: 15.499591827392578ms\r\n",,terminal_output +2876,1850018,"TERMINAL",0,0,"Step 1144, loss: 0.02053864672780037, step time: 15.25568962097168ms\r\nStep 1145, loss: 0.02048785611987114, step time: 15.469551086425781ms\r\nStep 1146, loss: 0.020476598292589188, step time: 15.238523483276367ms\r\n",,terminal_output +2877,1850165,"TERMINAL",0,0,"Step 1147, loss: 0.020483996719121933, step time: 15.039205551147461ms\r\nStep 1148, loss: 0.020473215728998184, step time: 14.764070510864258ms\r\nStep 1149, loss: 0.02042386122047901, step time: 15.369176864624023ms\r\nStep 1150, loss: 0.0204165019094944, step time: 16.64590835571289ms\r\nStep 1151, loss: 0.020421959459781647, step time: 15.478849411010742ms\r\nStep 1152, loss: 0.020405752584338188, step time: 15.030145645141602ms\r\n",,terminal_output +2878,1850166,"TERMINAL",0,0,"65",,terminal_output +2879,1850228,"TERMINAL",0,0,"Step 1153, loss: 0.020365238189697266, step time: 14.845609664916992ms\r\nStep 1154, loss: 0.02035452052950859, step time: 14.787435531616211ms\r\n",,terminal_output +2880,1850353,"TERMINAL",0,0,"Step 1155, loss: 0.02035531960427761, step time: 15.402078628540039ms\r\nStep 1156, loss: 0.020356230437755585, step time: 14.998912811279297ms\r\nStep 1157, loss: 0.020322639495134354, step time: 15.507936477661133ms\r\nStep 1158, loss: 0.020298320800065994, step time: 15.041589736938477ms\r\nStep 1159, loss: 0.02027355507016182, step time: 14.898300170898438ms\r\nStep 1160, loss: 0.020276561379432678, step time: 14.689445495605469ms\r\n",,terminal_output +2881,1850421,"TERMINAL",0,0,"Step 1161, loss: 0.020287200808525085, step time: 15.529870986938477ms\r\nStep 1162, loss: 0.020269211381673813, step time: 14.939546585083008ms\r\n",,terminal_output +2882,1850484,"TERMINAL",0,0,"^CTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_lam.py"", line 242, in <module>\r\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/array.py"", line 341, in __format__\r\n return format(self._value[()], 
format_spec)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/profiler.py"", line 354, in wrapper\r\n return func(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/array.py"", line 641, in _value\r\n npy_value, did_copy = self._single_device_array_to_np_array_did_copy()\r\nKeyboardInterrupt\r\n",,terminal_output +2883,1850603,"TERMINAL",0,0,"^CException ignored in atexit callback: <function _start_and_connect_service.<locals>.teardown_atexit at 0x150e4c173be0>\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/service_connection.py"", line 94, in teardown_atexit\r\n conn.teardown(hooks.exit_code)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/service_connection.py"", line 226, in teardown\r\n self._router.join()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/interface/router.py"", line 75, in join\r\n self._thread.join()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/threading.py"", line 1096, in join\r\n self._wait_for_tstate_lock()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/threading.py"", line 1116, in _wait_for_tstate_lock\r\n if lock.acquire(block, timeout):\r\nKeyboardInterrupt: \r\n",,terminal_output +2884,1850916,"TERMINAL",0,0,"^CException ignored in: <function WeakKeyDictionary.__init__.<locals>.remove at 0x150ea886e5f0>\r\nTraceback (most recent call last):\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/weakref.py"", line 370, in remove\r\n def remove(k, selfref=ref(self)):\r\nKeyboardInterrupt: \r\n",,terminal_output +2885,1851221,"TERMINAL",0,0,"76",,terminal_output +2886,1851687,"TERMINAL",0,0,"^C",,terminal_output +2887,1851833,"TERMINAL",0,0,"^C",,terminal_output +2888,1852082,"TERMINAL",0,0,"\r\n]0;tum_cte0515@hkn0507:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0507 jafar]$ ",,terminal_output +2889,1852293,"TERMINAL",0,0,"87",,terminal_output +2890,1853312,"TERMINAL",0,0,"98",,terminal_output +2891,1854327,"TERMINAL",0,0,"8:009",,terminal_output +2892,1855381,"TERMINAL",0,0,"150",,terminal_output +2893,1856490,"TERMINAL",0,0,"21",,terminal_output +2894,1857532,"TERMINAL",0,0,"32",,terminal_output +2895,1858601,"TERMINAL",0,0,"43",,terminal_output +2896,1859560,"TERMINAL",0,0,"54",,terminal_output +2897,1860747,"TERMINAL",0,0,"65",,terminal_output +2898,1861803,"TERMINAL",0,0,"76",,terminal_output +2899,1861897,"TERMINAL",0,0,"[?25lcd[?25h",,terminal_output +2900,1861997,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +2901,1862108,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +2902,1862533,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +2903,1862577,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +2904,1862789,"TERMINAL",0,0,"87",,terminal_output +2905,1862797,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0507:~/Projects[?2004h(jafar) [tum_cte0515@hkn0507 Projects]$ ",,terminal_output +2906,1863833,"TERMINAL",0,0,"98",,terminal_output +2907,1864481,"TERMINAL",0,0,"\r(reverse-i-search)`': ",,terminal_output +2908,1864722,"TERMINAL",0,0,"[?25l[49@s': sh scripts_horeka/overfit_sample_tiny/tester.sh[?25h",,terminal_output
+2909,1864836,"TERMINAL",0,0,"\r[1@h': sh scripts_horeka/overfit_sample_tiny/tester.sh",,terminal_output +2910,1864836,"TERMINAL",0,0,"109",,terminal_output +2911,1865827,"TERMINAL",0,0,"18:00",,terminal_output +2912,1866896,"TERMINAL",0,0,"21",,terminal_output +2913,1867958,"TERMINAL",0,0,"32",,terminal_output +2914,1868981,"TERMINAL",0,0,"43",,terminal_output +2915,1869121,"TERMINAL",0,0,"[?25ls\r ': sh scripts_horeka/overfit_sample_tiny/tester.sh  [?25h",,terminal_output +2916,1870068,"TERMINAL",0,0,"54",,terminal_output +2917,1870378,"TERMINAL",0,0,"\rj': sh jafar/scripts_horeka/sync_runner.sh jafar jafar_jobs/ ",,terminal_output +2918,1871066,"TERMINAL",0,0,"65",,terminal_output +2919,1872124,"TERMINAL",0,0,"87",,terminal_output +2920,1873167,"TERMINAL",0,0,"98",,terminal_output +2921,1873684,"TERMINAL",0,0,"[?25l\rjafar) [tum_cte0515@hkn0507 Projects]$ sh jafar/scripts_horeka/sync_runne[14@r.sh jafar jaf\r\r\n[?2004l\r[?25hsending incremental file list\r\n",,terminal_output +2922,1874226,"TERMINAL",0,0,"209",,terminal_output +2923,1875283,"TERMINAL",0,0,"train_lam.py\r\ntrain_tokenizer.py\r\n",,terminal_output +2924,1875288,"TERMINAL",0,0,"110",,terminal_output +2925,1875592,"TERMINAL",0,0,"scripts_horeka/\r\nscripts_horeka/modelsize_scaling/dynamics/A_train_dyn_1.5M.sbatch\r\nscripts_horeka/modelsize_scaling/dynamics/C_train_dyn_6M.sbatch\r\nscripts_horeka/modelsize_scaling/dynamics/D_train_dyn_12M.sbatch\r\nscripts_horeka/modelsize_scaling/dynamics/E_train_dyn_18M.sbatch\r\nscripts_horeka/overfit_sample_tiny/\r\nscripts_horeka/overfit_sample_tiny/tester.sh\r\nscripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample.sbatch\r\nscripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch\r\nscripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch\r\n",,terminal_output +2926,1875730,"TERMINAL",0,0,"\r\nsent 40,437 bytes received 263 bytes 16,280.00 bytes/sec\r\ntotal size is 64,337,228 speedup is 1,580.77\r\n]0;tum_cte0515@hkn0507:~/Projects[?2004h(jafar) [tum_cte0515@hkn0507 Projects]$ ",,terminal_output +2927,1876332,"TERMINAL",0,0,"21",,terminal_output +2928,1877359,"TERMINAL",0,0,"32",,terminal_output +2929,1878400,"TERMINAL",0,0,"43",,terminal_output +2930,1879451,"TERMINAL",0,0,"54",,terminal_output +2931,1880501,"TERMINAL",0,0,"65",,terminal_output +2932,1881589,"TERMINAL",0,0,"76",,terminal_output +2933,1882669,"TERMINAL",0,0,"87",,terminal_output +2934,1883820,"TERMINAL",0,0,"98",,terminal_output +2935,1884704,"TERMINAL",0,0,"309",,terminal_output +2936,1885740,"TERMINAL",0,0,"120",,terminal_output +2937,1886591,"TERMINAL",0,0,"[?25lcd[?25h",,terminal_output +2938,1886705,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +2939,1886784,"TERMINAL",0,0,"21",,terminal_output +2940,1886835,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +2941,1887156,"TERMINAL",0,0,"[?25lj[?25h",,terminal_output +2942,1887304,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +2943,1887495,"TERMINAL",0,0,"far",,terminal_output +2944,1887840,"TERMINAL",0,0,"32",,terminal_output +2945,1888495,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +2946,1888731,"TERMINAL",0,0,"[?25lj[?25h",,terminal_output +2947,1888835,"TERMINAL",0,0,"obs/",,terminal_output +2948,1888855,"TERMINAL",0,0,"43",,terminal_output +2949,1889248,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0507:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0507 jafar_jobs]$ ",,terminal_output +2950,1889910,"TERMINAL",0,0,"54",,terminal_output +2951,1890945,"TERMINAL",0,0,"65",,terminal_output 
+2952,1892055,"TERMINAL",0,0,"76",,terminal_output +2953,1893037,"TERMINAL",0,0,"87",,terminal_output +2954,1894076,"TERMINAL",0,0,"99",,terminal_output +2955,1895183,"TERMINAL",0,0,"4130",,terminal_output +2956,1896238,"TERMINAL",0,0,"21",,terminal_output +2957,1897224,"TERMINAL",0,0,"32",,terminal_output +2958,1898267,"TERMINAL",0,0,"43",,terminal_output +2959,1899307,"TERMINAL",0,0,"54",,terminal_output +2960,1900353,"TERMINAL",0,0,"65",,terminal_output +2961,1901392,"TERMINAL",0,0,"76",,terminal_output +2962,1902481,"TERMINAL",0,0,"87",,terminal_output +2963,1903490,"TERMINAL",0,0,"98",,terminal_output +2964,1904559,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +2965,1904560,"TERMINAL",0,0,"509",,terminal_output +2966,1904649,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +2967,1904785,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +2968,1904836,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +2969,1904966,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +2970,1905055,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +2971,1905201,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +2972,1905636,"TERMINAL",0,0,"140",,terminal_output +2973,1905636,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +2974,1905724,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +2975,1905842,"TERMINAL",0,0,"ripts_",,terminal_output +2976,1906665,"TERMINAL",0,0,"21",,terminal_output +2977,1906732,"TERMINAL",0,0,"",,terminal_output +2978,1907279,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +2979,1907379,"TERMINAL",0,0,"oreka/",,terminal_output +2980,1907619,"TERMINAL",0,0,"",,terminal_output +2981,1907732,"TERMINAL",0,0,"32",,terminal_output +2982,1908071,"TERMINAL",0,0,"\r\nbatchsize_scaling/\r\nget_lrs.py\r\nlearning_rates.md\r\nmodelsize_scaling/\r\noverfit_batch/\r\noverfit_sample/\r\noverfit_sample_tiny/\r\npreprocess_dataset.sbatch\r\nsync_runner.sh\r\ntrain_dynamics.sh\r\ntrain_lam_overfit_batch.sbatch\r\ntrain_lam_overfit_sample.sbatch\r\ntrain_lam.sh\r\ntrain_tokenizer_coinrun.sbatch\r\ntrain_tokenizer_overfit_batch.sbatch\r\ntrain_tokenizer_overfit_sample.sbatch\r\ntrain_tokenizer.sh\r\n(jafar) [tum_cte0515@hkn0507 jafar_jobs]$ sbatch scripts_horeka/",,terminal_output +2983,1908737,"TERMINAL",0,0,"43",,terminal_output +2984,1909796,"TERMINAL",0,0,"54",,terminal_output +2985,1910832,"TERMINAL",0,0,"65",,terminal_output +2986,1911871,"TERMINAL",0,0,"76",,terminal_output +2987,1912467,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +2988,1912616,"TERMINAL",0,0,"verfit_",,terminal_output +2989,1913000,"TERMINAL",0,0,"87",,terminal_output +2990,1913369,"TERMINAL",0,0,"[?25ls \r[?25h",,terminal_output +2991,1913502,"TERMINAL",0,0,"ample",,terminal_output +2992,1913984,"TERMINAL",0,0,"98",,terminal_output +2993,1914776,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +2994,1915024,"TERMINAL",0,0,"9:009",,terminal_output +2995,1915156,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +2996,1915229,"TERMINAL",0,0,"iny/",,terminal_output +2997,1916095,"TERMINAL",0,0,"151",,terminal_output +2998,1916954,"TERMINAL",0,0,"t",,terminal_output +2999,1917153,"TERMINAL",0,0,"32",,terminal_output +3000,1917154,"TERMINAL",0,0,"",,terminal_output +3001,1917591,"TERMINAL",0,0,"\r\ntester.sh\r\ntrain_dynamics_overfit_sample.sbatch\r\ntrain_lam_overfit_sample.sbatch\r\ntrain_tokenizer_overfit_sample.sbatch\r\n(jafar) [tum_cte0515@hkn0507 jafar_jobs]$ sbatch scripts_horeka/overfit_sample_tiny/t",,terminal_output +3002,1918170,"TERMINAL",0,0,"43",,terminal_output +3003,1919220,"TERMINAL",0,0,"54",,terminal_output 
+3004,1919424,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +3005,1919621,"TERMINAL",0,0,"ain_",,terminal_output +3006,1920269,"TERMINAL",0,0,"65",,terminal_output +3007,1921207,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +3008,1921388,"TERMINAL",0,0,"76",,terminal_output +3009,1922020,"TERMINAL",0,0,"[?25lo[?25h[?25lk[?25h",,terminal_output +3010,1922077,"TERMINAL",0,0,"enizer_overfit_sample.sbatch",,terminal_output +3011,1922351,"TERMINAL",0,0,"87",,terminal_output +3012,1923180,"TERMINAL",0,0,"\r\n[?2004l\rSubmitted batch job 3299016\r\n]0;tum_cte0515@hkn0507:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0507 jafar_jobs]$ ",,terminal_output +3013,1923402,"TERMINAL",0,0,"99016train_toPD 0:00(Priority)3298895 accelerat interact tum_cte0 R18:58\t 1 hkn0507",,terminal_output +3014,1923631,"TERMINAL",0,0,"sbatch scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",,terminal_output +3015,1924438,"TERMINAL",0,0,"109",,terminal_output +3016,1925551,"TERMINAL",0,0,"19:00",,terminal_output +3017,1926621,"TERMINAL",0,0,"21",,terminal_output +3018,1927220,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +3019,1927270,"TERMINAL",0,0,"am_overfit_sample.sbatch",,terminal_output +3020,1927566,"TERMINAL",0,0,"32",,terminal_output +3021,1928571,"TERMINAL",0,0,"\r\n[?2004l\rSubmitted batch job 3299017\r\n]0;tum_cte0515@hkn0507:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0507 jafar_jobs]$ ",,terminal_output +3022,1928608,"TERMINAL",0,0,"\r43299017 accelerat train_to tum_cte0 PD\t0:00\t 1 (Priority)3",,terminal_output +3023,1929659,"TERMINAL",0,0,"54",,terminal_output +3024,1930333,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3025,1930517,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +3026,1930650,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +3027,1930745,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +3028,1930758,"TERMINAL",0,0,"65",,terminal_output +3029,1930889,"TERMINAL",0,0,"[?25lc[?25h[?25lh[?25h",,terminal_output +3030,1931002,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +3031,1931776,"TERMINAL",0,0,"76",,terminal_output +3032,1932845,"TERMINAL",0,0,"87",,terminal_output +3033,1933861,"TERMINAL",0,0,"98",,terminal_output +3034,1934415,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",0,0,"",shellscript,tab +3035,1934916,"TERMINAL",0,0,"209",,terminal_output +3036,1935953,"TERMINAL",0,0,"110",,terminal_output +3037,1936632,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab +3038,1937066,"TERMINAL",0,0,"21",,terminal_output +3039,1938034,"TERMINAL",0,0,"32",,terminal_output +3040,1939079,"TERMINAL",0,0,"44",,terminal_output +3041,1939808,"TERMINAL",0,0,"",,terminal_output +3042,1939949,"TERMINAL",0,0,"\r\nframe-knoms.png read_tf_record.py\r\nframe.png requirements-franz.txt\r\ngenerate_dataset.py requirements.txt\r\ngeneration_1750863858.4915645.gif sample.py\r\ngenie.py scripts_cremers/\r\n.gitignore scripts_horeka/\r\nLICENSE slurm/\r\nlogs/ train_dynamics.py\r\nmodels/ train_lam.py\r\noverfit_dir/ train_tokenizer.py\r\n.pre-commit-config.yaml utils/\r\n__pycache__/ wandb/\r\nREADME.md \r\n(jafar) [tum_cte0515@hkn0507 jafar_jobs]$ sbatch ",,terminal_output +3043,1940135,"TERMINAL",0,0,"65",,terminal_output +3044,1941124,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +3045,1941170,"TERMINAL",0,0,"76",,terminal_output +3046,1941259,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +3047,1941681,"TERMINAL",0,0,"dels/",,terminal_output +3048,1942153,"TERMINAL",0,0,"",,terminal_output 
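Rows 3012 and 3021 show `sbatch` accepting `train_tokenizer_overfit_sample.sbatch` and `train_lam_overfit_sample.sbatch` as jobs 3299016 and 3299017. The directives inside those files are not part of the recording; a minimal sketch of their likely shape, with the partition name inferred from the truncated "accelerat" column in the squeue listing and everything else illustrative:

    #!/bin/bash
    #SBATCH --job-name=train_tokenizer   # appears truncated as "train_to" in squeue
    #SBATCH --partition=accelerated      # inferred from the "accelerat" column
    #SBATCH --nodes=1                    # matches NODES=1 in the queue listing
    #SBATCH --time=01:00:00              # assumption: walltime is not recorded

    python train_tokenizer.py           # script name taken from the synced file list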
+3049,1942225,"TERMINAL",0,0,"87",,terminal_output +3050,1943427,"TERMINAL",0,0,"98",,terminal_output +3051,1943893,"TERMINAL",0,0,"[?25ls[?25h[?25lc[?25h",,terminal_output +3052,1944182,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +3053,1944346,"TERMINAL",0,0,"309",,terminal_output +3054,1944816,"TERMINAL",0,0,"ripts_",,terminal_output +3055,1945273,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +3056,1945356,"TERMINAL",0,0,"oreka/",,terminal_output +3057,1945395,"TERMINAL",0,0,"120",,terminal_output +3058,1945835,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +3059,1946033,"TERMINAL",0,0,"odelsize_scaling/",,terminal_output +3060,1946424,"TERMINAL",0,0,"21",,terminal_output +3061,1946827,"TERMINAL",0,0,"dynamics/",,terminal_output +3062,1946874,"TERMINAL",0,0,"",,terminal_output +3063,1947480,"TERMINAL",0,0,"32",,terminal_output +3064,1947611,"TERMINAL",0,0,"\r\n1_train_dyn_36M.sbatch B_train_dyn_3.5.sbatch\r\n2_train_dyn_110M.sbatch C_train_dyn_6M.sbatch\r\n3_train_dyn_180M.sbatch D_train_dyn_12M.sbatch\r\n4_train_dyn_270M.sbatch E_train_dyn_18M.sbatch\r\n5_train_dyn_500M.sbatch model_sizes.md\r\nA_train_dyn_1.5M.sbatch train_dyn.sh\r\n(jafar) [tum_cte0515@hkn0507 jafar_jobs]$ sbatch scripts_horeka/modelsize_scaling/dynamics/",,terminal_output +3065,1947785,"TERMINAL",0,0,"\r\n1_train_dyn_36M.sbatch B_train_dyn_3.5.sbatch\r\n2_train_dyn_110M.sbatch C_train_dyn_6M.sbatch\r\n3_train_dyn_180M.sbatch D_train_dyn_12M.sbatch\r\n4_train_dyn_270M.sbatch E_train_dyn_18M.sbatch\r\n5_train_dyn_500M.sbatch model_sizes.md\r\nA_train_dyn_1.5M.sbatch train_dyn.sh\r\n(jafar) [tum_cte0515@hkn0507 jafar_jobs]$ sbatch scripts_horeka/modelsize_scaling/dynamics/",,terminal_output +3066,1948515,"TERMINAL",0,0,"43",,terminal_output +3067,1949158,"TERMINAL",0,0,"[?25lA[?25h",,terminal_output +3068,1949580,"TERMINAL",0,0,"54",,terminal_output +3069,1950659,"TERMINAL",0,0,"65",,terminal_output +3070,1950804,"TERMINAL",0,0,"_train_dyn_1.5M.sbatch",,terminal_output +3071,1951717,"TERMINAL",0,0,"76",,terminal_output +3072,1952731,"TERMINAL",0,0,"87",,terminal_output +3073,1953430,"TERMINAL",0,0,"h",,terminal_output +3074,1953808,"TERMINAL",0,0,"98",,terminal_output +3075,1954352,"TERMINAL",0,0,"\r\n[?2004l\rSubmitted batch job 3299036\r\n]0;tum_cte0515@hkn0507:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0507 jafar_jobs]$ ",,terminal_output +3076,1954788,"TERMINAL",0,0,"sbatch scripts_horeka/modelsize_scaling/dynamics/A_train_dyn_1.5M.sbatch",,terminal_output +3077,1954843,"TERMINAL",0,0,"\r403299036 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)9",,terminal_output +3078,1955885,"TERMINAL",0,0,"130",,terminal_output +3079,1956917,"TERMINAL",0,0,"21",,terminal_output +3080,1957346,"TERMINAL",0,0,"[?25lC[?25h",,terminal_output +3081,1957487,"TERMINAL",0,0,"_train_dyn_6M.sbatch",,terminal_output +3082,1958011,"TERMINAL",0,0,"32",,terminal_output +3083,1958606,"TERMINAL",0,0,"\r\n[?2004l\rSubmitted batch job 3299037\r\n]0;tum_cte0515@hkn0507:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0507 jafar_jobs]$ ",,terminal_output +3084,1958947,"TERMINAL",0,0,"sbatch scripts_horeka/modelsize_scaling/dynamics/C_train_dyn_6M.sbatch",,terminal_output +3085,1959075,"TERMINAL",0,0,"\r43299037 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3",,terminal_output +3086,1960092,"TERMINAL",0,0,"54",,terminal_output +3087,1961105,"TERMINAL",0,0,"66",,terminal_output +3088,1961249,"TERMINAL",0,0,"[?25lD[?25h",,terminal_output +3089,1961443,"TERMINAL",0,0,"_train_dyn_12M.sbatch",,terminal_output 
+3090,1962134,"TERMINAL",0,0,"\r\n[?2004l\rSubmitted batch job 3299038\r\n]0;tum_cte0515@hkn0507:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0507 jafar_jobs]$ ",,terminal_output +3091,1962178,"TERMINAL",0,0,"\r83299038 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)7",,terminal_output +3092,1962378,"TERMINAL",0,0,"sbatch scripts_horeka/modelsize_scaling/dynamics/D_train_dyn_12M.sbatch",,terminal_output +3093,1963242,"TERMINAL",0,0,"98",,terminal_output +3094,1964237,"TERMINAL",0,0,"509",,terminal_output +3095,1964455,"TERMINAL",0,0,"[?25lE[?25h",,terminal_output +3096,1964722,"TERMINAL",0,0,"_train_dyn_18M.sbatch",,terminal_output +3097,1965270,"TERMINAL",0,0,"\r\n[?2004l\rSubmitted batch job 3299039\r\n]0;tum_cte0515@hkn0507:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0507 jafar_jobs]$ ",,terminal_output +3098,1965293,"TERMINAL",0,0,"\r13299039 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)40",,terminal_output +3099,1965786,"TERMINAL",0,0,"sbatch scripts_horeka/modelsize_scaling/dynamics/E_train_dyn_18M.sbatch",,terminal_output +3100,1966341,"TERMINAL",0,0,"21",,terminal_output +3101,1967419,"TERMINAL",0,0,"32",,terminal_output +3102,1967745,"TERMINAL",0,0,"\r\n\r",,terminal_output +3103,1967849,"TERMINAL",0,0,"",,terminal_output +3104,1968031,"TERMINAL",0,0,"",,terminal_output +3105,1968174,"TERMINAL",0,0,"",,terminal_output +3106,1968312,"TERMINAL",0,0,"",,terminal_output +3107,1968465,"TERMINAL",0,0,"43",,terminal_output +3108,1969494,"TERMINAL",0,0,"54",,terminal_output +3109,1970572,"TERMINAL",0,0,"65",,terminal_output +3110,1971599,"TERMINAL",0,0,"76",,terminal_output +3111,1972499,"train_tokenizer.py",0,0,"",python,tab +3112,1972740,"TERMINAL",0,0,"87",,terminal_output +3113,1973687,"TERMINAL",0,0,"98",,terminal_output +3114,1974809,"TERMINAL",0,0,"20:009",,terminal_output +3115,1975792,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab +3116,1975858,"TERMINAL",0,0,"150",,terminal_output +3117,1976290,"scripts_horeka/overfit_sample_tiny/tester.sh",0,0,"",shellscript,tab +3118,1976860,"TERMINAL",0,0,"21",,terminal_output +3119,1977964,"TERMINAL",0,0,"32",,terminal_output +3120,1978993,"TERMINAL",0,0,"43",,terminal_output +3121,1979986,"TERMINAL",0,0,"54",,terminal_output +3122,1979995,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0507 jafar_jobs]$ ",,terminal_output +3123,1981126,"TERMINAL",0,0,"65",,terminal_output +3124,1982119,"TERMINAL",0,0,"76",,terminal_output +3125,1983119,"TERMINAL",0,0,"88",,terminal_output +3126,1984213,"TERMINAL",0,0,"109",,terminal_output +3127,1985226,"TERMINAL",0,0,"120:00",,terminal_output +3128,1986253,"TERMINAL",0,0,"21",,terminal_output +3129,1987340,"TERMINAL",0,0,"32",,terminal_output +3130,1988377,"TERMINAL",0,0,"4891to3dy3",,terminal_output +3131,1989390,"TERMINAL",0,0,"54",,terminal_output +3132,1990459,"TERMINAL",0,0,"65",,terminal_output +3133,1991557,"TERMINAL",0,0,"76",,terminal_output +3134,1992550,"TERMINAL",0,0,"87",,terminal_output +3135,1993599,"TERMINAL",0,0,"98",,terminal_output +3136,1994835,"TERMINAL",0,0,"209",,terminal_output +3137,1995681,"TERMINAL",0,0,"110",,terminal_output +3138,1996769,"TERMINAL",0,0,"21",,terminal_output +3139,1997861,"TERMINAL",0,0,"32",,terminal_output +3140,1998867,"TERMINAL",0,0,"43",,terminal_output +3141,1999933,"TERMINAL",0,0,"54",,terminal_output +3142,2001074,"TERMINAL",0,0,"65",,terminal_output +3143,2002003,"TERMINAL",0,0,"76",,terminal_output +3144,2003001,"TERMINAL",0,0,"87",,terminal_output 
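Rows 3075-3098 submit the model-size sweep one `sbatch` at a time: A (1.5M) as 3299036, C (6M) as 3299037, D (12M) as 3299038, and E (18M) as 3299039. The same sweep can be scripted; the paths are taken from the session, the loop itself is a sketch:

    for f in scripts_horeka/modelsize_scaling/dynamics/{A_train_dyn_1.5M,C_train_dyn_6M,D_train_dyn_12M,E_train_dyn_18M}.sbatch; do
        sbatch "$f"
    done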
+3145,2004108,"TERMINAL",0,0,"98",,terminal_output +3146,2005101,"TERMINAL",0,0,"3020",,terminal_output +3147,2006135,"TERMINAL",0,0,"21",,terminal_output +3148,2007307,"TERMINAL",0,0,"32",,terminal_output +3149,2008377,"TERMINAL",0,0,"43",,terminal_output +3150,2009268,"TERMINAL",0,0,"54",,terminal_output +3151,2010317,"TERMINAL",0,0,"65",,terminal_output +3152,2011449,"TERMINAL",0,0,"76",,terminal_output +3153,2012477,"TERMINAL",0,0,"87",,terminal_output +3154,2013532,"TERMINAL",0,0,"98",,terminal_output +3155,2014527,"TERMINAL",0,0,"409",,terminal_output +3156,2015545,"TERMINAL",0,0,"130",,terminal_output +3157,2016601,"TERMINAL",0,0,"21",,terminal_output +3158,2017644,"TERMINAL",0,0,"32",,terminal_output +3159,2018673,"TERMINAL",0,0,"43",,terminal_output +3160,2019709,"TERMINAL",0,0,"54",,terminal_output +3161,2020764,"TERMINAL",0,0,"65",,terminal_output +3162,2021807,"TERMINAL",0,0,"76",,terminal_output +3163,2022842,"TERMINAL",0,0,"87",,terminal_output +3164,2023934,"TERMINAL",0,0,"98",,terminal_output +3165,2025024,"TERMINAL",0,0,"509",,terminal_output +3166,2026127,"TERMINAL",0,0,"140",,terminal_output +3167,2027172,"TERMINAL",0,0,"21",,terminal_output +3168,2028158,"TERMINAL",0,0,"32",,terminal_output +3169,2029111,"TERMINAL",0,0,"44",,terminal_output +3170,2030398,"TERMINAL",0,0,"65",,terminal_output +3171,2031228,"TERMINAL",0,0,"76",,terminal_output +3172,2032329,"TERMINAL",0,0,"87",,terminal_output +3173,2033407,"TERMINAL",0,0,"98",,terminal_output +3174,2034624,"TERMINAL",0,0,"1:009",,terminal_output +3175,2035516,"TERMINAL",0,0,"150",,terminal_output +3176,2036423,"TERMINAL",0,0,"21",,terminal_output +3177,2037404,"TERMINAL",0,0,"32",,terminal_output +3178,2038447,"TERMINAL",0,0,"43",,terminal_output +3179,2039477,"TERMINAL",0,0,"54",,terminal_output +3180,2040521,"TERMINAL",0,0,"65",,terminal_output +3181,2041585,"TERMINAL",0,0,"76",,terminal_output +3182,2042618,"TERMINAL",0,0,"87",,terminal_output +3183,2043667,"TERMINAL",0,0,"98",,terminal_output +3184,2044720,"TERMINAL",0,0,"109",,terminal_output +3185,2045741,"TERMINAL",0,0,"11:00",,terminal_output +3186,2046804,"TERMINAL",0,0,"21",,terminal_output +3187,2047844,"TERMINAL",0,0,"32",,terminal_output +3188,2048884,"TERMINAL",0,0,"43",,terminal_output +3189,2049952,"TERMINAL",0,0,"54",,terminal_output +3190,2050979,"TERMINAL",0,0,"65",,terminal_output +3191,2052023,"TERMINAL",0,0,"76",,terminal_output +3192,2053078,"TERMINAL",0,0,"88",,terminal_output +3193,2054150,"TERMINAL",0,0,"209",,terminal_output +3194,2055166,"TERMINAL",0,0,"110",,terminal_output +3195,2056243,"TERMINAL",0,0,"21",,terminal_output +3196,2057285,"TERMINAL",0,0,"32",,terminal_output +3197,2058284,"TERMINAL",0,0,"43",,terminal_output +3198,2059334,"TERMINAL",0,0,"54",,terminal_output +3199,2060386,"TERMINAL",0,0,"65",,terminal_output +3200,2061432,"TERMINAL",0,0,"76",,terminal_output +3201,2062451,"TERMINAL",0,0,"[?25lqu[?25h",,terminal_output +3202,2062506,"TERMINAL",0,0,"87",,terminal_output +3203,2062529,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +3204,2062641,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3205,2062823,"TERMINAL",0,0,"[?25lu[?25h[?25le[?25h",,terminal_output +3206,2062965,"TERMINAL",0,0,"[?25l[?2004l\r[?25h[?1049h(B[?7hEvery 1.0s: squeue --mehkn0507.localdomain: Fri Jun 27 15:21:28 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3299038 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3299039 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3299017 accelerat train_to tum_cte0 PD\t0:00\t 1 (Priority)3299036 
accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3299037 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3299016 accelerat train_to tum_cte0 PD\t0:00\t 1 (Priority)3298895 accelerat interact tum_cte0 R21:17\t 1 hkn0507",,terminal_output
+[rows 3207-3477 collapsed: per-second tick updates ("98", "309", "120", ...) from the live watch display; the queue state shown above did not change in this span]
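The alternate-screen block opened at row 3206 ("Every 1.0s: squeue --me") is `squeue` running under `watch`; the "Every 1.0s" header matches a one-second interval:

    watch -n 1 squeue --me    # redraw the queue listing every second; press q to quit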
+3478,2204150,"TERMINAL",0,0,"98",,terminal_output +3479,2205255,"TERMINAL",0,0,"509",,terminal_output +3480,2205697,"TERMINAL",0,0,"509",,terminal_output +3481,2206138,"TERMINAL",0,0,"140",,terminal_output +3482,2206214,"TERMINAL",0,0,"140",,terminal_output +3483,2207029,"TERMINAL",0,0,"21",,terminal_output +3484,2207469,"TERMINAL",0,0,"22",,terminal_output +3485,2208506,"TERMINAL",0,0,"32",,terminal_output +3486,2208604,"TERMINAL",0,0,"43",,terminal_output +3487,2208948,"TERMINAL",0,0,"43",,terminal_output +3488,2209154,"TERMINAL",0,0,"54",,terminal_output +3489,2210202,"TERMINAL",0,0,"54",,terminal_output +3490,2210448,"TERMINAL",0,0,"65",,terminal_output +3491,2211461,"TERMINAL",0,0,"65",,terminal_output +3492,2211465,"TERMINAL",0,0,"76",,terminal_output +3493,2212173,"TERMINAL",0,0,"77",,terminal_output +3494,2212324,"TERMINAL",0,0,"87",,terminal_output +3495,2213681,"TERMINAL",0,0,"98",,terminal_output +3496,2213688,"TERMINAL",0,0,"98",,terminal_output +3497,2214197,"TERMINAL",0,0,"4:009",,terminal_output +3498,2214264,"TERMINAL",0,0,"4:009",,terminal_output +3499,2215894,"TERMINAL",0,0,"150",,terminal_output +3500,2215900,"TERMINAL",0,0,"150",,terminal_output +3501,2216476,"TERMINAL",0,0,"21",,terminal_output +3502,2216521,"TERMINAL",0,0,"21",,terminal_output +3503,2216989,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0507:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0507 jafar_jobs]$ ",,terminal_output +3504,2217538,"TERMINAL",0,0,"[?25ludl[?25h",,terminal_output +3505,2217722,"TERMINAL",0,0,"32",,terminal_output +3506,2217848,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3507,2218061,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +3508,2218270,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3509,2218605,"TERMINAL",0,0,"[?25ludl[?25h",,terminal_output +3510,2218657,"TERMINAL",0,0,"43",,terminal_output +3511,2218712,"TERMINAL",0,0,"",,terminal_output +3512,2218803,"TERMINAL",0,0,"",,terminal_output +3513,2219077,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +3514,2219174,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3515,2219546,"TERMINAL",0,0,"[?25lle[?25h",,terminal_output +3516,2219701,"TERMINAL",0,0,"54",,terminal_output +3517,2219885,"TERMINAL",0,0,"\r\n[?2004l\rPartition dev_cpuonly : 9 nodes idle\r\nPartition cpuonly : 45 nodes idle\r\nPartition dev_accelerated : 0 nodes idle\r\nPartition accelerated : 9 nodes idle\r\nPartition dev_accelerated-h100 : 0 nodes idle\r\nPartition accelerated-h100 : 1 nodes idle\r\nPartition large : 8 nodes idle\r\n]0;tum_cte0515@hkn0507:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0507 jafar_jobs]$ ",,terminal_output +3518,2220694,"TERMINAL",0,0,"65",,terminal_output +3519,2221896,"TERMINAL",0,0,"76",,terminal_output +3520,2223082,"TERMINAL",0,0,"87",,terminal_output +3521,2223688,"TERMINAL",0,0,"98",,terminal_output +3522,2225156,"TERMINAL",0,0,"109",,terminal_output +3523,2226048,"TERMINAL",0,0,"14:00",,terminal_output +3524,2226884,"TERMINAL",0,0,"21",,terminal_output +3525,2228450,"TERMINAL",0,0,"32",,terminal_output +3526,2229596,"TERMINAL",0,0,"43",,terminal_output +3527,2230301,"TERMINAL",0,0,"54",,terminal_output +3528,2231148,"TERMINAL",0,0,"65",,terminal_output +3529,2232304,"TERMINAL",0,0,"76",,terminal_output +3530,2233198,"TERMINAL",0,0,"87",,terminal_output +3531,2234128,"TERMINAL",0,0,"99",,terminal_output +3532,2235533,"TERMINAL",0,0,"2110",,terminal_output +3533,2236201,"TERMINAL",0,0,"21",,terminal_output +3534,2237340,"TERMINAL",0,0,"32",,terminal_output +3535,2238460,"TERMINAL",0,0,"43",,terminal_output 
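The per-partition summary at row 3517 ("Partition dev_cpuonly : 9 nodes idle", ...) comes from a short site-specific helper (the keystroke stream around rows 3504-3515 spells roughly "idle"); its source is not in the recording. An equivalent summary can be derived from `sinfo`, as a sketch:

    for p in dev_cpuonly cpuonly dev_accelerated accelerated \
             dev_accelerated-h100 accelerated-h100 large; do
        n=$(sinfo -h -p "$p" -t idle -o '%D')   # -t idle: count only idle nodes
        echo "Partition $p : ${n:-0} nodes idle"
    done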
+3536,2239711,"TERMINAL",0,0,"54",,terminal_output +3537,2240475,"TERMINAL",0,0,"65",,terminal_output +3538,2241875,"TERMINAL",0,0,"76",,terminal_output +3539,2242520,"TERMINAL",0,0,"87",,terminal_output +3540,2243975,"TERMINAL",0,0,"98",,terminal_output +3541,2245168,"TERMINAL",0,0,"309",,terminal_output +3542,2246136,"TERMINAL",0,0,"120",,terminal_output +3543,2246758,"TERMINAL",0,0,"21",,terminal_output +3544,2247889,"TERMINAL",0,0,"32",,terminal_output +3545,2248875,"TERMINAL",0,0,"43",,terminal_output +3546,2249878,"TERMINAL",0,0,"54",,terminal_output +3547,2250885,"TERMINAL",0,0,"65",,terminal_output +3548,2251965,"TERMINAL",0,0,"76",,terminal_output +3549,2253114,"TERMINAL",0,0,"87",,terminal_output +3550,2254022,"TERMINAL",0,0,"98",,terminal_output +3551,2255253,"TERMINAL",0,0,"409",,terminal_output +3552,2256380,"TERMINAL",0,0,"131",,terminal_output +3553,2257455,"TERMINAL",0,0,"32",,terminal_output +3554,2258596,"TERMINAL",0,0,"43",,terminal_output +3555,2259473,"TERMINAL",0,0,"54",,terminal_output +3556,2260535,"TERMINAL",0,0,"65",,terminal_output +3557,2261439,"TERMINAL",0,0,"76",,terminal_output +3558,2262734,"TERMINAL",0,0,"87",,terminal_output +3559,2263513,"TERMINAL",0,0,"98",,terminal_output +3560,2264806,"TERMINAL",0,0,"509",,terminal_output +3561,2265926,"TERMINAL",0,0,"140",,terminal_output +3562,2266583,"TERMINAL",0,0,"21",,terminal_output +3563,2268077,"TERMINAL",0,0,"32",,terminal_output +3564,2268984,"TERMINAL",0,0,"43",,terminal_output +3565,2269845,"TERMINAL",0,0,"54",,terminal_output +3566,2271014,"TERMINAL",0,0,"65",,terminal_output +3567,2271821,"TERMINAL",0,0,"76",,terminal_output +3568,2273295,"TERMINAL",0,0,"87",,terminal_output +3569,2273928,"TERMINAL",0,0,"98",,terminal_output +3570,2275292,"TERMINAL",0,0,"5:009",,terminal_output +3571,2276200,"TERMINAL",0,0,"150",,terminal_output +3572,2277223,"TERMINAL",0,0,"22",,terminal_output +3573,2278490,"TERMINAL",0,0,"c",,terminal_output +3574,2278513,"TERMINAL",0,0,"43",,terminal_output +3575,2278625,"TERMINAL",0,0,"an",,terminal_output +3576,2279197,"TERMINAL",0,0,"54",,terminal_output +3577,2280692,"TERMINAL",0,0,"",,terminal_output +3578,2280693,"TERMINAL",0,0,"65",,terminal_output +3579,2281388,"TERMINAL",0,0,"76",,terminal_output +3580,2282308,"TERMINAL",0,0,"",,terminal_output +3581,2282649,"TERMINAL",0,0,"87",,terminal_output +3582,2283573,"TERMINAL",0,0,"98",,terminal_output +3583,2284428,"TERMINAL",0,0,"109",,terminal_output +3584,2285815,"TERMINAL",0,0,"15:00",,terminal_output +3585,2286720,"TERMINAL",0,0,"21",,terminal_output +3586,2287598,"TERMINAL",0,0,"32",,terminal_output +3587,2288950,"TERMINAL",0,0,"43",,terminal_output +3588,2290063,"TERMINAL",0,0,"54",,terminal_output +3589,2290752,"TERMINAL",0,0,"65",,terminal_output +3590,2292122,"TERMINAL",0,0,"76",,terminal_output +3591,2292339,"TERMINAL",0,0,"[?2004l\r\r\nexit\r\nsalloc: Relinquishing job allocation 3298895\r\nsalloc: Job allocation 3298895 has been revoked.\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar",,terminal_output +3592,2293052,"TERMINAL",0,0,"\r83298895 accelerat interact tum_cte0 CG25:07\t 1 hkn0507",,terminal_output +3593,2294235,"TERMINAL",0,0,"9",,terminal_output +3594,2294962,"TERMINAL",0,0,"20",,terminal_output +3595,2296408,"TERMINAL",0,0,"1",,terminal_output +3596,2297131,"TERMINAL",0,0,"2",,terminal_output +3597,2298085,"TERMINAL",0,0,"3",,terminal_output +3598,2299424,"TERMINAL",0,0,"5",,terminal_output +3599,2300206,"TERMINAL",0,0,"6",,terminal_output 
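Row 3591 records the interactive session ending: typing `exit` makes `salloc` relinquish allocation 3298895, and row 3592 shows that job draining through the CG (completing) state. The original allocation request is not in the recording; a typical one looks like:

    salloc -p accelerated -N 1 -t 01:00:00   # partition and walltime are assumptions
    # ... work in the shell on the allocated node, then
    exit                                     # prints "salloc: Relinquishing job allocation ..."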
+[rows 3600-3669 collapsed: seconds-counter ticks from the watch display; row 3646 ("5 R1hkn0507") records a queued job flipping to state R on node hkn0507]
+3670,2374596,"TERMINAL",0,0,"406",,terminal_output +3671,2375647,"TERMINAL",0,0,"17",,terminal_output +3672,2376706,"TERMINAL",0,0,"28",,terminal_output +3673,2377743,"TERMINAL",0,0,"39",,terminal_output +3674,2378788,"TERMINAL",0,0,"430",,terminal_output +3675,2379896,"TERMINAL",0,0,"51",,terminal_output +3676,2380879,"TERMINAL",0,0,"62",,terminal_output +3677,2381937,"TERMINAL",0,0,"73",,terminal_output +3678,2383091,"TERMINAL",0,0,"84",,terminal_output +3679,2384032,"TERMINAL",0,0,"95",,terminal_output +3680,2385097,"TERMINAL",0,0,"507",,terminal_output +3681,2386155,"TERMINAL",0,0,"28",,terminal_output +3682,2387169,"TERMINAL",0,0,"39",,terminal_output +3683,2388217,"TERMINAL",0,0,"440",,terminal_output +3684,2389273,"TERMINAL",0,0,"51",,terminal_output +3685,2390316,"TERMINAL",0,0,"62",,terminal_output +3686,2391365,"TERMINAL",0,0,"73",,terminal_output +3687,2392407,"TERMINAL",0,0,"84",,terminal_output +3688,2393473,"TERMINAL",0,0,"95",,terminal_output +3689,2394559,"TERMINAL",0,0,"7:006",,terminal_output +3690,2395579,"TERMINAL",0,0,"17",,terminal_output +3691,2396627,"TERMINAL",0,0,"28",,terminal_output +3692,2397674,"TERMINAL",0,0,"39",,terminal_output +3693,2398735,"TERMINAL",0,0,"450",,terminal_output +3694,2399793,"TERMINAL",0,0,"51",,terminal_output +3695,2400815,"TERMINAL",0,0,"62",,terminal_output +3696,2401854,"TERMINAL",0,0,"73",,terminal_output +3697,2402898,"TERMINAL",0,0,"84",,terminal_output +3698,2403948,"TERMINAL",0,0,"95",,terminal_output +3699,2405017,"TERMINAL",0,0,"106",,terminal_output +3700,2406042,"TERMINAL",0,0,"17",,terminal_output +3701,2407104,"TERMINAL",0,0,"29",,terminal_output +3702,2408138,"TERMINAL",0,0,"41:00",,terminal_output +3703,2409196,"TERMINAL",0,0,"51",,terminal_output +3704,2410289,"TERMINAL",0,0,"62",,terminal_output +3705,2411324,"TERMINAL",0,0,"73",,terminal_output +3706,2412434,"TERMINAL",0,0,"84",,terminal_output +3707,2413383,"TERMINAL",0,0,"95",,terminal_output +3708,2414433,"TERMINAL",0,0,"206",,terminal_output +3709,2415482,"TERMINAL",0,0,"17",,terminal_output +3710,2416567,"TERMINAL",0,0,"28",,terminal_output +3711,2417638,"TERMINAL",0,0,"39",,terminal_output +3712,2418647,"TERMINAL",0,0,"410",,terminal_output +3713,2419691,"TERMINAL",0,0,"51",,terminal_output +3714,2420783,"TERMINAL",0,0,"62",,terminal_output +3715,2421914,"TERMINAL",0,0,"73",,terminal_output +3716,2422881,"TERMINAL",0,0,"84",,terminal_output +3717,2423908,"TERMINAL",0,0,"95",,terminal_output +3718,2424909,"TERMINAL",0,0,"306",,terminal_output +3719,2425971,"TERMINAL",0,0,"17",,terminal_output +3720,2427027,"TERMINAL",0,0,"28",,terminal_output +3721,2428092,"TERMINAL",0,0,"39",,terminal_output +3722,2429213,"TERMINAL",0,0,"421",,terminal_output +3723,2430163,"TERMINAL",0,0,"62",,terminal_output +3724,2431199,"TERMINAL",0,0,"73",,terminal_output +3725,2432407,"TERMINAL",0,0,"84",,terminal_output +3726,2433304,"TERMINAL",0,0,"95",,terminal_output +3727,2434341,"TERMINAL",0,0,"406",,terminal_output +3728,2435450,"TERMINAL",0,0,"17",,terminal_output +3729,2436425,"TERMINAL",0,0,"28",,terminal_output +3730,2437484,"TERMINAL",0,0,"39",,terminal_output +3731,2437682,"TERMINAL",0,0,"squeue --me",,terminal_command +3732,2437695,"TERMINAL",0,0,"]633;E;2025-06-27 15:27:43 squeue --me;77164738-3379-4db1-b4b8-ccd452227802]633;C JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON)\r\n 3299038 accelerat train_dy tum_cte0 PD 0:00 1 (Priority)\r\n 3299039 accelerat train_dy tum_cte0 PD 0:00 1 (Priority)\r\n 3299017 accelerat train_to tum_cte0 PD 0:00 1 (Priority)\r\n 3299036 
accelerat train_dy tum_cte0 PD 0:00 1 (Priority)\r\n 3299037 accelerat train_dy tum_cte0 PD 0:00 1 (Priority)\r\n 3299016 accelerat train_to tum_cte0 R 1:29 1 hkn0507\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +3733,2438573,"TERMINAL",0,0,"430",,terminal_output +3734,2438698,"TERMINAL",0,0,"squeue --me",,terminal_command +3735,2438766,"TERMINAL",0,0,"]633;E;2025-06-27 15:27:44 squeue --me;77164738-3379-4db1-b4b8-ccd452227802]633;C JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON)\r\n 3299038 accelerat train_dy tum_cte0 PD 0:00 1 (Priority)\r\n 3299039 accelerat train_dy tum_cte0 PD 0:00 1 (Priority)\r\n 3299017 accelerat train_to tum_cte0 PD 0:00 1 (Priority)\r\n 3299036 accelerat train_dy tum_cte0 PD 0:00 1 (Priority)\r\n 3299037 accelerat train_dy tum_cte0 PD 0:00 1 (Priority)\r\n 3299016 accelerat train_to tum_cte0 R 1:30 1 hkn0507\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +3736,2439344,"TERMINAL",0,0,"squeue --me",,terminal_command +3737,2439437,"TERMINAL",0,0,"]633;E;2025-06-27 15:27:45 squeue --me;77164738-3379-4db1-b4b8-ccd452227802]633;C JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON)\r\n 3299038 accelerat train_dy tum_cte0 PD 0:00 1 (Priority)\r\n 3299039 accelerat train_dy tum_cte0 PD 0:00 1 (Priority)\r\n 3299017 accelerat train_to tum_cte0 PD 0:00 1 (Priority)\r\n 3299036 accelerat train_dy tum_cte0 PD 0:00 1 (Priority)\r\n 3299037 accelerat train_dy tum_cte0 PD 0:00 1 (Priority)\r\n 3299016 accelerat train_to tum_cte0 R 1:31 1 hkn0507\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +3738,2439681,"TERMINAL",0,0,"51",,terminal_output +3739,2440030,"TERMINAL",0,0,"squeue --me",,terminal_command +3740,2440089,"TERMINAL",0,0,"]633;E;2025-06-27 15:27:45 squeue --me;77164738-3379-4db1-b4b8-ccd452227802]633;C JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON)\r\n 3299038 accelerat train_dy tum_cte0 PD 0:00 1 (Priority)\r\n 3299039 accelerat train_dy tum_cte0 PD 0:00 1 (Priority)\r\n 3299017 accelerat train_to tum_cte0 PD 0:00 1 (Priority)\r\n 3299036 accelerat train_dy tum_cte0 PD 0:00 1 (Priority)\r\n 3299037 accelerat train_dy tum_cte0 PD 0:00 1 (Priority)\r\n 3299016 accelerat train_to tum_cte0 R 1:32 1 hkn0507\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +3741,2440620,"TERMINAL",0,0,"62",,terminal_output +3742,2441663,"TERMINAL",0,0,"73",,terminal_output +3743,2442707,"TERMINAL",0,0,"84",,terminal_output +3744,2443761,"TERMINAL",0,0,"95",,terminal_output +3745,2444815,"TERMINAL",0,0,"506",,terminal_output +3746,2445880,"TERMINAL",0,0,"17",,terminal_output +3747,2446908,"TERMINAL",0,0,"28",,terminal_output +3748,2447945,"TERMINAL",0,0,"39",,terminal_output +3749,2449010,"TERMINAL",0,0,"440",,terminal_output +3750,2450024,"TERMINAL",0,0,"51",,terminal_output +3751,2451081,"TERMINAL",0,0,"63",,terminal_output +3752,2452127,"TERMINAL",0,0,"84",,terminal_output +3753,2453165,"TERMINAL",0,0,"95",,terminal_output +3754,2454211,"TERMINAL",0,0,"8:006",,terminal_output +3755,2455259,"TERMINAL",0,0,"17",,terminal_output +3756,2456289,"TERMINAL",0,0,"28",,terminal_output +3757,2457339,"TERMINAL",0,0,"39",,terminal_output +3758,2458366,"TERMINAL",0,0,"450",,terminal_output +3759,2459402,"TERMINAL",0,0,"51",,terminal_output +3760,2460441,"TERMINAL",0,0,"62",,terminal_output +3761,2461486,"TERMINAL",0,0,"73",,terminal_output +3762,2462535,"TERMINAL",0,0,"84",,terminal_output +3763,2463578,"TERMINAL",0,0,"95",,terminal_output +3764,2464628,"TERMINAL",0,0,"106",,terminal_output 
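Rows 3731-3740 re-run `squeue --me` by hand four times within a few seconds (the outputs differ only in the running job's elapsed time). `squeue` can repeat itself instead:

    squeue --me -i 2    # --iterate: reprint the listing every 2 seconds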
+[rows 3765-3971 collapsed: seconds-counter ticks from the watch display; no queue changes recorded in this span]
+3972,2682663,"TERMINAL",0,0,"84",,terminal_output +3973,2683706,"TERMINAL",0,0,"95",,terminal_output +3974,2684749,"TERMINAL",0,0,"506",,terminal_output +3975,2685794,"TERMINAL",0,0,"17",,terminal_output +3976,2686841,"TERMINAL",0,0,"28",,terminal_output +3977,2687887,"TERMINAL",0,0,"39",,terminal_output +3978,2688931,"TERMINAL",0,0,"440",,terminal_output +3979,2689976,"TERMINAL",0,0,"51",,terminal_output +3980,2691034,"TERMINAL",0,0,"62",,terminal_output +3981,2692092,"TERMINAL",0,0,"74",,terminal_output +3982,2693150,"TERMINAL",0,0,"95",,terminal_output +3983,2694172,"TERMINAL",0,0,"2:006",,terminal_output +3984,2695225,"TERMINAL",0,0,"17",,terminal_output +3985,2696268,"TERMINAL",0,0,"28",,terminal_output +3986,2697316,"TERMINAL",0,0,"39",,terminal_output +3987,2698351,"TERMINAL",0,0,"450",,terminal_output +3988,2699400,"TERMINAL",0,0,"51",,terminal_output +3989,2700448,"TERMINAL",0,0,"62",,terminal_output +3990,2701496,"TERMINAL",0,0,"73",,terminal_output +3991,2702541,"TERMINAL",0,0,"84",,terminal_output +3992,2703521,"TERMINAL",0,0,"watch",,terminal_focus +3993,2703598,"TERMINAL",0,0,"95",,terminal_output +3994,2704676,"TERMINAL",0,0,"106",,terminal_output +3995,2705683,"TERMINAL",0,0,"17",,terminal_output +3996,2706738,"TERMINAL",0,0,"28",,terminal_output +3997,2707788,"TERMINAL",0,0,"39",,terminal_output +3998,2708828,"TERMINAL",0,0,"46:00",,terminal_output +3999,2709911,"TERMINAL",0,0,"51",,terminal_output +4000,2710916,"TERMINAL",0,0,"62",,terminal_output +4001,2711968,"TERMINAL",0,0,"73",,terminal_output +4002,2713009,"TERMINAL",0,0,"84",,terminal_output +4003,2714262,"TERMINAL",0,0,"95",,terminal_output +4004,2715089,"TERMINAL",0,0,"207",,terminal_output +4005,2716139,"TERMINAL",0,0,"28",,terminal_output +4006,2716448,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab +4007,2717200,"TERMINAL",0,0,"39",,terminal_output +4008,2718233,"TERMINAL",0,0,"410",,terminal_output +4009,2719271,"TERMINAL",0,0,"51",,terminal_output +4010,2720311,"TERMINAL",0,0,"62",,terminal_output +4011,2721357,"TERMINAL",0,0,"73",,terminal_output +4012,2722418,"TERMINAL",0,0,"84",,terminal_output +4013,2722686,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",0,0,"",shellscript,tab +4014,2723450,"TERMINAL",0,0,"95",,terminal_output +4015,2724492,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",343,0,"",shellscript,selection_mouse +4016,2724539,"TERMINAL",0,0,"306",,terminal_output +4017,2725554,"TERMINAL",0,0,"17",,terminal_output +4018,2725606,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",342,1,"",shellscript,content +4019,2725749,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",341,1,"",shellscript,content +4020,2725879,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",340,1,"",shellscript,content +4021,2726003,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",339,1,"",shellscript,content +4022,2726402,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",338,1,"",shellscript,content +4023,2726405,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",337,1,"",shellscript,content +4024,2726518,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",336,1,"",shellscript,content +4025,2726628,"TERMINAL",0,0,"28",,terminal_output +4026,2726678,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",335,1,"",shellscript,content 
+4027,2726848,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",334,1,"",shellscript,content +4028,2727635,"TERMINAL",0,0,"39",,terminal_output +4029,2727943,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",334,0,"l",shellscript,content +4030,2727943,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",335,0,"",shellscript,selection_keyboard +4031,2728030,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",335,0,"a",shellscript,content +4032,2728031,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",336,0,"",shellscript,selection_keyboard +4033,2728112,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",336,0,"m",shellscript,content +4034,2728113,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",337,0,"",shellscript,selection_keyboard +4035,2728682,"TERMINAL",0,0,"420",,terminal_output +4036,2729727,"TERMINAL",0,0,"51",,terminal_output +4037,2730119,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",83,0,"",shellscript,selection_mouse +4038,2730770,"TERMINAL",0,0,"62",,terminal_output +4039,2730893,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",82,1,"",shellscript,content +4040,2731268,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",82,0,"1",shellscript,content +4041,2731268,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",83,0,"",shellscript,selection_keyboard +4042,2731809,"TERMINAL",0,0,"73",,terminal_output +4043,2732769,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",0,0,"",shellscript,tab +4044,2732845,"TERMINAL",0,0,"84",,terminal_output +4045,2733924,"TERMINAL",0,0,"95",,terminal_output +4046,2734355,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",83,0,"",shellscript,selection_mouse +4047,2734698,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",82,1,"",shellscript,content +4048,2734837,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",82,0,"1",shellscript,content +4049,2734838,"scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",83,0,"",shellscript,selection_keyboard +4050,2734946,"TERMINAL",0,0,"406",,terminal_output +4051,2736056,"TERMINAL",0,0,"17",,terminal_output +4052,2737050,"TERMINAL",0,0,"28",,terminal_output +4053,2738095,"TERMINAL",0,0,"330",,terminal_output +4054,2738829,"TERMINAL",0,0,"bash",,terminal_focus +4055,2739177,"TERMINAL",0,0,"51",,terminal_output +4056,2740204,"TERMINAL",0,0,"62",,terminal_output +4057,2741134,"TERMINAL",0,0,"scancel 3299016",,terminal_command +4058,2741166,"TERMINAL",0,0,"]633;E;2025-06-27 15:32:47 scancel 3299016;77164738-3379-4db1-b4b8-ccd452227802]633;C",,terminal_output +4059,2741276,"TERMINAL",0,0,"M73299016 accelerat train_to tum_cte0 CG\t6:33\t 1 hkn0507",,terminal_output +4060,2742291,"TERMINAL",0,0,"8",,terminal_output +4061,2743332,"TERMINAL",0,0,"9",,terminal_output +4062,2743621,"TERMINAL",0,0,"watch",,terminal_focus +4063,2744410,"TERMINAL",0,0,"50",,terminal_output +4064,2745487,"TERMINAL",0,0,"1",,terminal_output +4065,2746216,"TERMINAL",0,0,"bash",,terminal_focus +4066,2746478,"TERMINAL",0,0,"2",,terminal_output +4067,2747557,"TERMINAL",0,0,"3",,terminal_output +4068,2748473,"TERMINAL",0,0,"scancel 3299017",,terminal_command +4069,2748610,"TERMINAL",0,0,"\r4",,terminal_output +4070,2749631,"TERMINAL",0,0,"5",,terminal_output +4071,2750230,"TERMINAL",0,0,"cd ..",,terminal_command 
+4072,2750689,"TERMINAL",0,0,"6",,terminal_output +4073,2751740,"TERMINAL",0,0,"7",,terminal_output +4074,2752763,"TERMINAL",0,0,"8",,terminal_output +4075,2753815,"TERMINAL",0,0,"9",,terminal_output +4076,2754896,"TERMINAL",0,0,"3:00",,terminal_output +4077,2755999,"TERMINAL",0,0,"1",,terminal_output +4078,2756947,"TERMINAL",0,0,"2",,terminal_output +4079,2758322,"TERMINAL",0,0,"\r3",,terminal_output +4080,2759341,"TERMINAL",0,0,"5",,terminal_output +4081,2760419,"TERMINAL",0,0,"6",,terminal_output +4082,2761421,"TERMINAL",0,0,"7",,terminal_output +4083,2761619,"TERMINAL",0,0,"scancel -u mahajanm",,terminal_command +4084,2761663,"TERMINAL",0,0,"]633;E;2025-06-27 15:33:07 scancel -u mahajanm;77164738-3379-4db1-b4b8-ccd452227802]633;Cscancel: error: Invalid user name: mahajanm\r\n]0;tum_cte0515@hkn1993:~/Projects]633;D;1",,terminal_output +4085,2762479,"TERMINAL",0,0,"8",,terminal_output +4086,2763521,"TERMINAL",0,0,"9",,terminal_output +4087,2764599,"TERMINAL",0,0,"10",,terminal_output +4088,2765666,"TERMINAL",0,0,"1",,terminal_output +4089,2766070,"TERMINAL",0,0,"scancel --me",,terminal_command +4090,2766139,"TERMINAL",0,0,"]633;E;2025-06-27 15:33:12 scancel --me;77164738-3379-4db1-b4b8-ccd452227802]633;C]0;tum_cte0515@hkn1993:~/Projects]633;D;0",,terminal_output +4091,2766666,"TERMINAL",0,0,"\r2",,terminal_output +4092,2767703,"TERMINAL",0,0,"3",,terminal_output +4093,2768748,"TERMINAL",0,0,"4",,terminal_output +4094,2769812,"TERMINAL",0,0,"5",,terminal_output +4095,2770277,"TERMINAL",0,0,"sh jafar/scripts_horeka/sync_runner.sh jafar jafar_jobs/ ",,terminal_command +4096,2770347,"TERMINAL",0,0,"]633;E;2025-06-27 15:33:16 sh jafar/scripts_horeka/sync_runner.sh jafar jafar_jobs/ ;77164738-3379-4db1-b4b8-ccd452227802]633;Csending incremental file list\r\n",,terminal_output +4097,2770925,"TERMINAL",0,0,"6",,terminal_output +4098,2771764,"TERMINAL",0,0,"scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch\r\nscripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch\r\n",,terminal_output +4099,2771824,"TERMINAL",0,0,"\r\nsent 10,513 bytes received 101 bytes 7,076.00 bytes/sec\r\ntotal size is 64,337,222 speedup is 6,061.54\r\n]0;tum_cte0515@hkn1993:~/Projects]633;D;0",,terminal_output +4100,2771936,"TERMINAL",0,0,"7",,terminal_output +4101,2772926,"TERMINAL",0,0,"8",,terminal_output +4102,2773973,"TERMINAL",0,0,"9",,terminal_output +4103,2775039,"TERMINAL",0,0,"20",,terminal_output +4104,2776060,"TERMINAL",0,0,"1",,terminal_output +4105,2776433,"TERMINAL",0,0,"cd jafar_jobs/",,terminal_command +4106,2776464,"TERMINAL",0,0,"]633;E;2025-06-27 15:33:22 cd jafar_jobs/;77164738-3379-4db1-b4b8-ccd452227802]633;C]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D;0",,terminal_output +4107,2777105,"TERMINAL",0,0,"2",,terminal_output +4108,2778301,"TERMINAL",0,0,"4",,terminal_output +4109,2779210,"TERMINAL",0,0,"5",,terminal_output +4110,2780245,"TERMINAL",0,0,"6",,terminal_output +4111,2781050,"TERMINAL",0,0,"sbatch scripts_horeka/modelsize_scaling/dynamics/E_train_dyn_18M.sbatch",,terminal_command +4112,2781192,"TERMINAL",0,0,"]633;E;2025-06-27 15:33:26 sbatch scripts_horeka/modelsize_scaling/dynamics/E_train_dyn_18M.sbatch;77164738-3379-4db1-b4b8-ccd452227802]633;CSubmitted batch job 3299062\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D;0",,terminal_output +4113,2781322,"TERMINAL",0,0,"73299062 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output +4114,2782372,"TERMINAL",0,0,"8",,terminal_output +4115,2783431,"TERMINAL",0,0,"9",,terminal_output 
+4116,2784420,"TERMINAL",0,0,"30",,terminal_output +4117,2784740,"TERMINAL",0,0,"sbatch scripts_horeka/modelsize_scaling/dynamics/A_train_dyn_1.5M.sbatch",,terminal_command +4118,2784783,"TERMINAL",0,0,"]633;E;2025-06-27 15:33:30 sbatch scripts_horeka/modelsize_scaling/dynamics/A_train_dyn_1.5M.sbatch;77164738-3379-4db1-b4b8-ccd452227802]633;CSubmitted batch job 3299063\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D;0",,terminal_output +4119,2785635,"TERMINAL",0,0,"\r13299063 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output +4120,2786579,"TERMINAL",0,0,"2",,terminal_output +4121,2787561,"TERMINAL",0,0,"3",,terminal_output +4122,2788613,"TERMINAL",0,0,"4",,terminal_output +4123,2789703,"TERMINAL",0,0,"5",,terminal_output +4124,2790917,"TERMINAL",0,0,"6",,terminal_output +4125,2791955,"TERMINAL",0,0,"7",,terminal_output +4126,2792813,"TERMINAL",0,0,"8",,terminal_output +4127,2793858,"TERMINAL",0,0,"9",,terminal_output +4128,2794952,"TERMINAL",0,0,"40",,terminal_output +4129,2796113,"TERMINAL",0,0,"1",,terminal_output +4130,2796184,"TERMINAL",0,0,"sbatch scripts_horeka/modelsize_scaling/dynamics/C_train_dyn_6M.sbatch",,terminal_command +4131,2796189,"TERMINAL",0,0,"]633;E;2025-06-27 15:33:41 sbatch scripts_horeka/modelsize_scaling/dynamics/C_train_dyn_6M.sbatch;77164738-3379-4db1-b4b8-ccd452227802]633;CSubmitted batch job 3299065\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D;0",,terminal_output +4132,2797001,"TERMINAL",0,0,"\r23299065 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output +4133,2798073,"TERMINAL",0,0,"3",,terminal_output +4134,2799076,"TERMINAL",0,0,"4",,terminal_output +4135,2800121,"TERMINAL",0,0,"6",,terminal_output +4136,2801321,"TERMINAL",0,0,"7",,terminal_output +4137,2802055,"TERMINAL",0,0,"sbatch scripts_horeka/modelsize_scaling/dynamics/D_train_dyn_12M.sbatch",,terminal_command +4138,2802093,"TERMINAL",0,0,"]633;E;2025-06-27 15:33:47 sbatch scripts_horeka/modelsize_scaling/dynamics/D_train_dyn_12M.sbatch;77164738-3379-4db1-b4b8-ccd452227802]633;CSubmitted batch job 3299066\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D;0",,terminal_output +4139,2802382,"TERMINAL",0,0,"\r83299066 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output +4140,2803442,"TERMINAL",0,0,"9",,terminal_output +4141,2804488,"TERMINAL",0,0,"50",,terminal_output +4142,2805516,"TERMINAL",0,0,"1",,terminal_output +4143,2806544,"TERMINAL",0,0,"2",,terminal_output +4144,2807620,"TERMINAL",0,0,"3",,terminal_output +4145,2808636,"TERMINAL",0,0,"4",,terminal_output +4146,2809680,"TERMINAL",0,0,"5",,terminal_output +4147,2810729,"TERMINAL",0,0,"6",,terminal_output +4148,2811785,"TERMINAL",0,0,"7",,terminal_output +4149,2812830,"TERMINAL",0,0,"8",,terminal_output +4150,2813864,"TERMINAL",0,0,"9",,terminal_output +4151,2814032,"TERMINAL",0,0,"sbatch scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",,terminal_command +4152,2814115,"TERMINAL",0,0,"]633;E;2025-06-27 15:34:00 sbatch scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch;77164738-3379-4db1-b4b8-ccd452227802]633;CSubmitted batch job 3299068\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D;0",,terminal_output +4153,2814935,"TERMINAL",0,0,"\r4:003299068 accelerat train_to tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output +4154,2816015,"TERMINAL",0,0,"1",,terminal_output +4155,2816999,"TERMINAL",0,0,"2",,terminal_output +4156,2818073,"TERMINAL",0,0,"3",,terminal_output +4157,2818183,"TERMINAL",0,0,"sbatch 
scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",,terminal_command +4158,2818223,"TERMINAL",0,0,"]633;E;2025-06-27 15:34:04 sbatch scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch;77164738-3379-4db1-b4b8-ccd452227802]633;CSubmitted batch job 3299069\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D;0",,terminal_output +4159,2819088,"TERMINAL",0,0,"\r43299069 accelerat train_la tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output +4160,2820136,"TERMINAL",0,0,"6",,terminal_output +4161,2820577,"TERMINAL",0,0,"cd ..",,terminal_command +4162,2821233,"TERMINAL",0,0,"7",,terminal_output +4163,2821312,"TERMINAL",0,0,"ls",,terminal_command +4164,2821353,"TERMINAL",0,0,"]633;E;2025-06-27 15:34:07 ls;77164738-3379-4db1-b4b8-ccd452227802]633;Cjafar jafar_jobs\r\n]0;tum_cte0515@hkn1993:~/Projects]633;D;0",,terminal_output +4165,2822228,"TERMINAL",0,0,"8",,terminal_output +4166,2823217,"TERMINAL",0,0,"cd jafar",,terminal_command +4167,2823280,"TERMINAL",0,0,"9",,terminal_output +4168,2823766,"TERMINAL",0,0,"ls",,terminal_command +4169,2824418,"TERMINAL",0,0,"10",,terminal_output +4170,2825371,"TERMINAL",0,0,"1",,terminal_output +4171,2826417,"TERMINAL",0,0,"2",,terminal_output +4172,2827469,"TERMINAL",0,0,"3",,terminal_output +4173,2828516,"TERMINAL",0,0,"4",,terminal_output +4174,2829552,"TERMINAL",0,0,"5",,terminal_output +4175,2830598,"TERMINAL",0,0,"65dy8to2 R1hkn05053 R1hkn0507",,terminal_output +4176,2831649,"TERMINAL",0,0,"722",,terminal_output +4177,2832688,"TERMINAL",0,0,"833",,terminal_output +4178,2833736,"TERMINAL",0,0,"944",,terminal_output +4179,2834785,"TERMINAL",0,0,"2055",,terminal_output +4180,2835822,"TERMINAL",0,0,"166",,terminal_output +4181,2836871,"TERMINAL",0,0,"277",,terminal_output +4182,2837910,"TERMINAL",0,0,"388",,terminal_output +4183,2838964,"TERMINAL",0,0,"499",,terminal_output +4184,2840023,"TERMINAL",0,0,"51010",,terminal_output +4185,2841052,"TERMINAL",0,0,"611",,terminal_output +4186,2842088,"TERMINAL",0,0,"733",,terminal_output +4187,2843146,"TERMINAL",0,0,"944",,terminal_output +4188,2844182,"TERMINAL",0,0,"3055",,terminal_output +4189,2845228,"TERMINAL",0,0,"166",,terminal_output +4190,2846273,"TERMINAL",0,0,"277",,terminal_output +4191,2847312,"TERMINAL",0,0,"388",,terminal_output +4192,2848362,"TERMINAL",0,0,"499",,terminal_output +4193,2849408,"TERMINAL",0,0,"52020",,terminal_output +4194,2850446,"TERMINAL",0,0,"611",,terminal_output +4195,2851495,"TERMINAL",0,0,"722",,terminal_output +4196,2852542,"TERMINAL",0,0,"833",,terminal_output +4197,2853586,"TERMINAL",0,0,"944",,terminal_output +4198,2854644,"TERMINAL",0,0,"4055",,terminal_output +4199,2855684,"TERMINAL",0,0,"166",,terminal_output +4200,2856734,"TERMINAL",0,0,"277",,terminal_output +4201,2857785,"TERMINAL",0,0,"388",,terminal_output +4202,2858835,"TERMINAL",0,0,"499",,terminal_output +4203,2859912,"TERMINAL",0,0,"53030",,terminal_output +4204,2860916,"TERMINAL",0,0,"611",,terminal_output +4205,2861940,"TERMINAL",0,0,"722",,terminal_output +4206,2862979,"TERMINAL",0,0,"833",,terminal_output +4207,2864085,"TERMINAL",0,0,"944",,terminal_output +4208,2865088,"TERMINAL",0,0,"5066",,terminal_output +4209,2866135,"TERMINAL",0,0,"277",,terminal_output +4210,2867214,"TERMINAL",0,0,"388",,terminal_output +4211,2868243,"TERMINAL",0,0,"499",,terminal_output +4212,2869294,"TERMINAL",0,0,"54040",,terminal_output +4213,2870347,"TERMINAL",0,0,"611",,terminal_output +4214,2871388,"TERMINAL",0,0,"722",,terminal_output +4215,2872435,"TERMINAL",0,0,"833",,terminal_output 
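[editor's note] The command sequence recorded above (scancel on individual job IDs, a failed `scancel -u mahajanm`, the working `scancel --me`, a `sync_runner.sh` rsync into `jafar_jobs/`, then six `sbatch` submissions) is the session's cancel-sync-resubmit loop. Below is a minimal Python sketch of that loop, assuming it runs from the synced `jafar_jobs` checkout; the `run()` helper and the loop structure are illustrative additions, not part of the recording — only the script paths and commands come from the log.

```python
# Hypothetical replay of the SLURM workflow recorded in sequences 4057-4168.
# Assumes the current working directory is the synced jafar_jobs checkout.
import subprocess

def run(cmd: list[str]) -> str:
    """Run a command, echo its stdout like the terminal log, and return it."""
    out = subprocess.run(cmd, capture_output=True, text=True, check=True).stdout
    print(out, end="")
    return out

run(["scancel", "--me"])  # cancel all of the current user's jobs ("-u <name>" failed in the log)
run(["sh", "jafar/scripts_horeka/sync_runner.sh", "jafar", "jafar_jobs/"])  # rsync the working tree

scripts = [
    "scripts_horeka/modelsize_scaling/dynamics/E_train_dyn_18M.sbatch",
    "scripts_horeka/modelsize_scaling/dynamics/A_train_dyn_1.5M.sbatch",
    "scripts_horeka/modelsize_scaling/dynamics/C_train_dyn_6M.sbatch",
    "scripts_horeka/modelsize_scaling/dynamics/D_train_dyn_12M.sbatch",
    "scripts_horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch",
    "scripts_horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch",
]
for script in scripts:
    run(["sbatch", script])  # prints e.g. "Submitted batch job 3299062"
```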
[... sequences 4216–5017: roughly fourteen minutes of watch-squeue timer ticks omitted; the only substantive refreshes (sequences 4509 and 4795) re-display job 3299069 ("train_la", tum_cte0) still pending with reason (Priority) ...]
+5018,3713939,"TERMINAL",0,0,"944",,terminal_output +5019,3714986,"TERMINAL",0,0,"9:0055",,terminal_output +5020,3716039,"TERMINAL",0,0,"166",,terminal_output +5021,3717070,"TERMINAL",0,0,"288",,terminal_output +5022,3718117,"TERMINAL",0,0,"499",,terminal_output +5023,3719204,"TERMINAL",0,0,"55050",,terminal_output +5024,3720225,"TERMINAL",0,0,"611",,terminal_output +5025,3721309,"TERMINAL",0,0,"722",,terminal_output +5026,3722336,"TERMINAL",0,0,"833",,terminal_output +5027,3723379,"TERMINAL",0,0,"944",,terminal_output +5028,3724435,"TERMINAL",0,0,"1055",,terminal_output +5029,3725482,"TERMINAL",0,0,"166",,terminal_output diff --git a/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-3553d16e-f1c9-4e9c-9425-6b663caf1f311753957765078-2025_07_31-12.30.02.749/source.csv b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-3553d16e-f1c9-4e9c-9425-6b663caf1f311753957765078-2025_07_31-12.30.02.749/source.csv new file mode 100644 index 0000000000000000000000000000000000000000..9770f8ed369e2ed902ff36f5d03c77bed40e39b4 --- /dev/null +++ b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-3553d16e-f1c9-4e9c-9425-6b663caf1f311753957765078-2025_07_31-12.30.02.749/source.csv @@ -0,0 +1,4659 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +2,734,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"12:30:02 PM [info] Activating crowd-code\n12:30:02 PM [info] Recording started\n12:30:02 PM [info] Initializing git provider using file system watchers...\n12:30:02 PM [info] Git repository found\n12:30:02 PM [info] Git provider initialized successfully\n12:30:02 PM [info] Initial git state: [object Object]\n",Log,tab +3,2591,"TERMINAL",0,0,"bash",,terminal_focus +4,2961,"TERMINAL",0,0,"bash",,terminal_focus +5,7941,"TERMINAL",0,0,"scancel 3386713",,terminal_command +6,7977,"TERMINAL",0,0,"]633;E;2025-07-31 12:30:10 scancel 3386713;adbf53fe-397b-40d3-9339-94ea79afad56]633;C]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar",,terminal_output +7,9096,"TERMINAL",0,0,"queue",,terminal_command +8,9158,"TERMINAL",0,0,"]633;E;2025-07-31 12:30:11 queue;adbf53fe-397b-40d3-9339-94ea79afad56]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1991.localdomain: Thu Jul 31 12:30:11 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3386713 accelerat interact tum_cte0 CG 1:57:49\t 1 hkn06083386718 accelerat train_to tum_cte0 PD\t0:00\t 2 (Priority)3386719 accelerat train_to tum_cte0 PD\t0:00\t 2 (Priority)3386722 accelerat train_dy tum_cte0 PD\t0:00\t 8 (Priority)",,terminal_output +9,10209,"TERMINAL",0,0,"2\t",,terminal_output +10,11276,"TERMINAL",0,0,"3\t",,terminal_output +11,12337,"TERMINAL",0,0,"4\t",,terminal_output +12,13433,"TERMINAL",0,0,"5\t",,terminal_output +13,13564,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +14,98242,"TERMINAL",0,0,"salloc --time=05:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5",,terminal_command +15,98313,"TERMINAL",0,0,"]633;E;2025-07-31 12:31:40 salloc --time=05:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5;adbf53fe-397b-40d3-9339-94ea79afad56]633;Csalloc: Pending job allocation 3387190\r\nsalloc: job 3387190 queued and waiting for resources\r\n",,terminal_output +16,157005,"TERMINAL",0,0,"salloc: job 3387190 has been allocated resources\r\nsalloc: Granted job allocation 3387190\r\nsalloc: 
Waiting for resource configuration\r\n",,terminal_output +17,184056,"TERMINAL",0,0,"salloc: Nodes hkn0602 are ready for job\r\n",,terminal_output +18,184747,"TERMINAL",0,0,"]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h[tum_cte0515@hkn0602 jafar]$ ",,terminal_output +19,195373,"TERMINAL",0,0,"s",,terminal_output +20,195546,"TERMINAL",0,0,"[?25lo[?25h[?25lu[?25h",,terminal_output +21,195644,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +22,196054,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +23,196128,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +24,196230,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +25,196392,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +26,196518,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +27,196614,"TERMINAL",0,0,"env/",,terminal_output +28,196946,"TERMINAL",0,0,"",,terminal_output +29,197642,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +30,197729,"TERMINAL",0,0,"in/",,terminal_output +31,197991,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +32,198049,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +33,198214,"TERMINAL",0,0,"tivate",,terminal_output +34,198494,"TERMINAL",0,0,"[?25l[?2004l\r[?25h]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +35,201418,"TERMINAL",0,0,"q",,terminal_output +36,201548,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +37,201644,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +38,201748,"TERMINAL",0,0,"[?25lu[?25h[?25le[?25h",,terminal_output +39,201917,"TERMINAL",0,0,"[?25l[?2004l\r[?25h[?1049h(B[?7hEvery 1.0s: squeue --mehkn0602.localdomain: Thu Jul 31 12:33:24 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3386718 accelerat train_to tum_cte0 PD\t0:00\t 2 (Priority)3386719 accelerat train_to tum_cte0 PD\t0:00\t 2 (Priority)3386722 accelerat train_dy tum_cte0 PD\t0:00\t 8 (Priority)3387190 accelerat interact tum_cte0 R\t0:45\t 1 hkn0602",,terminal_output +40,203071,"TERMINAL",0,0,"56",,terminal_output +41,203190,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +42,252785,"TERMINAL",0,0,"i",,terminal_output +43,252915,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +44,253012,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +45,253256,"TERMINAL",0,0,"[?25li[?25h[?25ln[?25h",,terminal_output +46,253328,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +47,253501,"TERMINAL",0,0,"[?25l[?2004l\r[?25h[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn0602.localdomain: Thu Jul 31 12:34:16 2025Partition dev_cpuonly: 12 nodes idle\rPartition cpuonly: 68 nodes idle\rPartition dev_accelerated:\t 2 nodes idle\rPartition accelerated: 97 nodes idle\rPartition dev_accelerated-h100 :\t 1 nodes idle\rPartition accelerated-h100:\t 3 nodes idle\rPartition large:\t 6 nodes idle",,terminal_output +48,254479,"TERMINAL",0,0,"7\t",,terminal_output +49,254895,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +50,276024,"TERMINAL",0,0,"q",,terminal_output +51,276184,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +52,276424,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +53,276494,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +54,276617,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +55,276745,"TERMINAL",0,0,"[?25l[?2004l\r[?25h[?1049h(B[?7hEvery 1.0s: squeue --mehkn0602.localdomain: Thu Jul 31 12:34:39 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3386718 accelerat train_to tum_cte0 PD\t0:00\t 2 (Priority)3386719 accelerat train_to tum_cte0 
PD\t0:00\t 2 (Priority)3386722 accelerat train_dy tum_cte0 PD\t0:00\t 8 (Priority)3387190 accelerat interact tum_cte0 R\t2:00\t 1 hkn0602",,terminal_output +56,278561,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +57,284472,"TERMINAL",0,0,"c",,terminal_output +58,284808,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +59,284905,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +60,285117,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +61,285253,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +62,285386,"TERMINAL",0,0,"[?25l[?2004l\r]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ [?25h",,terminal_output +63,286689,"genie.py",0,0,"from typing import Dict, Any\n\nimport optax\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nfrom flax.training.train_state import TrainState\nimport orbax.checkpoint as ocp\n\nfrom models.dynamics import DynamicsMaskGIT\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\nimport grain\n\n\nclass Genie(nn.Module):\n """"""Genie model""""""\n\n # --- Tokenizer ---\n in_dim: int\n tokenizer_dim: int\n tokenizer_ffn_dim: int\n latent_patch_dim: int\n num_patch_latents: int\n patch_size: int\n tokenizer_num_blocks: int\n tokenizer_num_heads: int\n # --- LAM ---\n lam_dim: int\n lam_ffn_dim: int\n latent_action_dim: int\n num_latent_actions: int\n lam_patch_size: int\n lam_num_blocks: int\n lam_num_heads: int\n lam_co_train: bool\n # --- Dynamics ---\n dyna_dim: int\n dyna_ffn_dim: int\n dyna_num_blocks: int\n dyna_num_heads: int\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n dropout: float = 0.0\n mask_limit: float = 0.0\n\n def setup(self):\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n ffn_dim=self.tokenizer_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n ffn_dim=self.lam_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\n lam_outputs = self.lam.vq_encode(batch[""videos""], training=False)\n latent_actions = jax.lax.cond(\n self.lam_co_train,\n lambda: lam_outputs[""z_q""],\n lambda: jax.lax.stop_gradient(lam_outputs[""z_q""]),\n )\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(tokenizer_outputs[""indices""]),\n latent_actions=latent_actions,\n )\n outputs[""mask_rng""] = 
batch[""mask_rng""]\n dyna_outputs = self.dynamics(outputs, training)\n outputs.update(dyna_outputs)\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )\n outputs[""lam_indices""] = lam_outputs[""indices""]\n return outputs\n\n @nn.compact\n def sample(\n self,\n batch: Dict[str, Any],\n seq_len: int,\n steps: int = 25,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> Any:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by\n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: patches per frame\n S: sequence length\n A: action space\n D: model latent dimension\n """"""\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""] # (B, T, N)\n B, T, N = token_idxs.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs.dtype)\n token_idxs = jnp.concatenate([token_idxs, pad], axis=1) # (B, S, N)\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n\n def generation_step_fn(carry, step_t):\n rng, current_token_idxs = carry\n rng, step_rng = jax.random.split(rng)\n\n # Mask current and future frames (i.e., t >= step_t)\n mask = jnp.arange(seq_len) >= step_t # (S,)\n mask = jnp.broadcast_to(mask[None, :, None], (B, seq_len, N)) # (B, S, N)\n mask = mask.astype(bool)\n masked_token_idxs = current_token_idxs * ~mask\n\n # --- Initialize and run MaskGIT loop ---\n init_carry_maskgit = (\n step_rng,\n masked_token_idxs,\n mask,\n action_tokens,\n )\n final_carry_maskgit, _ = loop_fn(init_carry_maskgit, jnp.arange(steps))\n updated_token_idxs = final_carry_maskgit[1]\n new_carry = (rng, updated_token_idxs)\n return new_carry, None\n\n # --- Run the autoregressive generation using scan ---\n initial_carry = (batch[""rng""], token_idxs)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry, _ = jax.lax.scan(\n generation_step_fn, initial_carry, timesteps_to_scan\n )\n final_token_idxs = final_carry[1]\n\n # --- Decode all tokens at once at the end ---\n final_frames = self.tokenizer.decode(\n final_token_idxs,\n video_hw=batch[""videos""].shape[2:4],\n )\n return final_frames\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def 
__call__(self, carry, x):\n rng, token_idxs, mask, action_tokens = carry\n step = x\n N = token_idxs.shape[2]\n\n # --- Construct + encode video ---\n vid_embed = self.dynamics.patch_embed(token_idxs) # (B, S, N, D)\n mask_token = self.dynamics.mask_token # (1, 1, 1, D,)\n mask_expanded = mask[..., None] # (B, S, N, 1)\n vid_embed = jnp.where(mask_expanded, mask_token, vid_embed)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed) / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jax.random.categorical(_rng, final_logits)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n token_idxs = jnp.where(mask, sampled_token_idxs, token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, token_idxs, new_mask, action_tokens)\n return new_carry, None\n\n\ndef restore_genie_components(\n train_state: TrainState,\n sharding: jax.sharding.NamedSharding,\n inputs: Dict[str, jax.Array],\n rng: jax.Array,\n args,\n):\n """"""Restore pre-trained Genie components""""""\n rng, _rng = jax.random.split(rng)\n\n # dummy values since we only use tx to initialize the dummy train states\n dummy_tx = optax.adamw(\n learning_rate=optax.constant_schedule(args.max_lr),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n )\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n tokenizer_checkpoint_manager = ocp.CheckpointManager(\n directory=args.tokenizer_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n ffn_dim=args.tokenizer_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n num_blocks=args.tokenizer_num_blocks,\n num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n tokenizer_init_params = dummy_tokenizer.init(_rng, inputs)\n dummy_tokenizer_train_state = TrainState.create(\n apply_fn=dummy_tokenizer.apply, params=tokenizer_init_params, tx=dummy_tx\n )\n abstract_sharded_tokenizer_state = _create_abstract_sharded_pytree(\n dummy_tokenizer_train_state, sharding\n )\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n step=tokenizer_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n 
model_state=ocp.args.StandardRestore(abstract_sharded_tokenizer_state),\n ),\n )[""model_state""]\n restored_tokenizer_params = restored_tokenizer.params[""params""]\n train_state.params[""params""][""tokenizer""].update(restored_tokenizer_params)\n tokenizer_checkpoint_manager.close()\n\n if args.lam_checkpoint:\n lam_checkpoint_manager = ocp.CheckpointManager(\n directory=args.lam_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n ffn_dim=args.lam_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n lam_init_params = dummy_lam.init(_rng, inputs)\n dummy_lam_train_state = TrainState.create(\n apply_fn=dummy_lam.apply, params=lam_init_params, tx=dummy_tx\n )\n abstract_sharded_lam_state = _create_abstract_sharded_pytree(\n dummy_lam_train_state, sharding\n )\n restored_lam = lam_checkpoint_manager.restore(\n step=lam_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_sharded_lam_state),\n ),\n )[""model_state""]\n restored_lam_params = restored_lam.params[""params""]\n # Genie does not initialize all LAM modules, thus we omit those extra modules during restoration\n # (f.srambical) FIXME: Currently, this is a small HBM memory crunch since the LAM's decoder is loaded into HBM and immediately dicarded.\n # A workaround would be to restore to host memory first, and only move the weights to HBM after pruning the decoder\n restored_lam_params = {\n k: v\n for k, v in restored_lam_params.items()\n if k in train_state.params[""params""][""lam""]\n }\n train_state.params[""params""][""lam""].update(restored_lam_params)\n lam_checkpoint_manager.close()\n\n return train_state\n\n\ndef _create_abstract_sharded_pytree(pytree_template, sharding_spec):\n """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, ""shape"") and hasattr(leaf_template, ""dtype""):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)\n",python,tab +64,287814,"genie.py",6198,0,"",python,selection_mouse +65,288457,"genie.py",6197,0,"",python,selection_mouse +66,288458,"genie.py",6196,0,"",python,selection_command +67,293658,"genie.py",6329,0,"",python,selection_mouse +68,294179,"genie.py",6370,0,"",python,selection_mouse +69,294190,"genie.py",6369,0,"",python,selection_command +70,294851,"genie.py",6415,0,"",python,selection_mouse +71,294857,"genie.py",6414,0,"",python,selection_command +72,295732,"genie.py",6391,0,"",python,selection_mouse +73,297310,"genie.py",6466,0,"",python,selection_mouse +74,297442,"genie.py",6461,18,"init_carry_maskgit",python,selection_mouse +75,298409,"genie.py",6488,0,"",python,selection_mouse +76,299280,"genie.py",6485,6,"arange",python,selection_mouse +77,300321,"genie.py",6483,0,"",python,selection_mouse +78,300481,"genie.py",6481,3,"jnp",python,selection_mouse +79,301212,"genie.py",6555,0,"",python,selection_mouse +80,301217,"genie.py",6554,0,"",python,selection_command 
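The `genie.py` payload recorded above implements MaskGIT-style iterative decoding in `MaskGITStep`: a cosine schedule decides how many tokens are committed after each refinement step, and the lowest-confidence sampled tokens are re-masked for the next iteration. A minimal sketch of just that schedule, assuming a single frame of `num_tokens` confidences; the function names are mine, not from the recorded file:

```python
import jax.numpy as jnp


def maskgit_schedule(step: int, steps: int, num_tokens: int):
    """Cosine schedule from the recorded MaskGITStep: fraction of tokens
    still masked after `step`, and how many are kept (committed)."""
    unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (steps * 2))
    num_unmasked = jnp.round(num_tokens * (1.0 - unmasked_ratio)).astype(int)
    return unmasked_ratio, num_unmasked


def remask_lowest_confidence(probs: jnp.ndarray, num_unmasked: int) -> jnp.ndarray:
    """Keep the `num_unmasked` highest-confidence tokens unmasked and
    re-mask the rest, mirroring the argsort/scatter in the recorded code.

    probs: (N,) confidence of each sampled token id.
    Returns a boolean mask of shape (N,), True = still masked.
    """
    order = jnp.argsort(probs, descending=True)  # best tokens first
    still_masked = jnp.arange(probs.shape[0]) > num_unmasked
    return jnp.zeros(probs.shape[0], dtype=bool).at[order].set(still_masked)
```

Early steps commit only a few tokens (the ratio starts near `cos(pi/50) ~ 1`), while the final step drives `unmasked_ratio` to zero so every token is committed; the recorded code also anneals the sampling temperature by the same `1 - unmasked_ratio` factor.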
+81,301878,"genie.py",6488,0,"",python,selection_mouse +82,302047,"genie.py",6485,6,"arange",python,selection_mouse +83,304150,"genie.py",6457,0,"",python,selection_mouse +84,318124,"TERMINAL",0,0,"f",,terminal_output +85,318425,"TERMINAL",0,0,"[?25lq[?25h",,terminal_output +86,318584,"TERMINAL",0,0,"[?25lu[?25h[?25le[?25h",,terminal_output +87,318668,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +88,318732,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +89,318925,"TERMINAL",0,0,"[?25l[?2004l\r[?25h[?1049h(B[?7hEvery 1.0s: squeue -o ""%.10i %.16P %.30j %.8u %.8T %.10M %.9l %.6D %R""hkn0602.localdomain: Thu Jul 31 12:35:21 2025JOBIDPARTITIONNAME USER STATE\t TIME TIME_LIMI NODES NODELIST(REASON)3386718\tacceleratedtrain_tokenizer_1e-4 tum_cte0 PENDING\t 0:00 2-00:00:00\t2 (Priority)3386719\tacceleratedtrain_tokenizer_1e-4 tum_cte0 PENDING\t 0:00 2-00:00:00\t2 (Priority)3386722\taccelerated train_dynamics_causal_8_node tum_cte0 PENDING\t 0:00 2-00:00:00\t8 (Priority)3387190\tacceleratedinteractive tum_cte0 RUNNING\t 2:42 5:00:001 hkn0602",,terminal_output +90,319912,"TERMINAL",0,0,"23",,terminal_output +91,321040,"TERMINAL",0,0,"34",,terminal_output +92,321924,"TERMINAL",0,0,"45",,terminal_output +93,322986,"TERMINAL",0,0,"56",,terminal_output +94,323955,"TERMINAL",0,0,"67",,terminal_output +95,325046,"TERMINAL",0,0,"78",,terminal_output +96,325991,"TERMINAL",0,0,"89",,terminal_output +97,327020,"TERMINAL",0,0,"950",,terminal_output +98,328002,"TERMINAL",0,0,"301",,terminal_output +99,329012,"TERMINAL",0,0,"12",,terminal_output +100,330051,"TERMINAL",0,0,"23",,terminal_output +101,331081,"TERMINAL",0,0,"34",,terminal_output +102,332053,"TERMINAL",0,0,"45",,terminal_output +103,333126,"TERMINAL",0,0,"56",,terminal_output +104,334285,"TERMINAL",0,0,"67",,terminal_output +105,335093,"TERMINAL",0,0,"78",,terminal_output +106,336194,"TERMINAL",0,0,"89",,terminal_output +107,337159,"TERMINAL",0,0,"93:00",,terminal_output +108,338138,"TERMINAL",0,0,"401",,terminal_output +109,339145,"TERMINAL",0,0,"12",,terminal_output +110,340185,"TERMINAL",0,0,"23",,terminal_output +111,341313,"TERMINAL",0,0,"34",,terminal_output +112,342235,"TERMINAL",0,0,"45",,terminal_output +113,343369,"TERMINAL",0,0,"56",,terminal_output +114,344207,"TERMINAL",0,0,"67",,terminal_output +115,345408,"TERMINAL",0,0,"78",,terminal_output +116,346331,"TERMINAL",0,0,"89",,terminal_output +117,347289,"TERMINAL",0,0,"910",,terminal_output +118,348310,"TERMINAL",0,0,"501",,terminal_output +119,349230,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID 
| grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=384 \\n --init_lr=0 \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +120,349294,"TERMINAL",0,0,"12",,terminal_output +121,350323,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",480,0,"",shellscript,selection_mouse +122,350324,"TERMINAL",0,0,"23",,terminal_output +123,350437,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",479,7,"requeue",shellscript,selection_mouse +124,350597,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",469,18,"#SBATCH --requeue\n",shellscript,selection_mouse +125,351355,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",499,0,"",shellscript,selection_mouse +126,351475,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",497,6,"signal",shellscript,selection_mouse +127,351489,"TERMINAL",0,0,"34",,terminal_output +128,352376,"TERMINAL",0,0,"45",,terminal_output +129,352451,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",555,0,"",shellscript,selection_mouse +130,353152,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",538,0,"",shellscript,selection_mouse +131,353363,"TERMINAL",0,0,"56",,terminal_output +132,354340,"TERMINAL",0,0,"67",,terminal_output +133,355476,"TERMINAL",0,0,"79",,terminal_output +134,356371,"TERMINAL",0,0,"920",,terminal_output +135,357393,"TERMINAL",0,0,"6:001",,terminal_output +136,358400,"TERMINAL",0,0,"12",,terminal_output +137,359418,"TERMINAL",0,0,"23",,terminal_output +138,360423,"TERMINAL",0,0,"34",,terminal_output +139,361587,"TERMINAL",0,0,"45",,terminal_output +140,362450,"TERMINAL",0,0,"56",,terminal_output +141,363462,"TERMINAL",0,0,"67",,terminal_output +142,364475,"TERMINAL",0,0,"78",,terminal_output +143,365488,"TERMINAL",0,0,"89",,terminal_output +144,366504,"TERMINAL",0,0,"930",,terminal_output +145,367632,"TERMINAL",0,0,"101",,terminal_output +146,368528,"TERMINAL",0,0,"12",,terminal_output +147,369576,"TERMINAL",0,0,"23",,terminal_output +148,370562,"TERMINAL",0,0,"34",,terminal_output +149,371628,"TERMINAL",0,0,"45",,terminal_output +150,372647,"TERMINAL",0,0,"56",,terminal_output 
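The sbatch script recorded above requeues itself before the wall-clock limit: `#SBATCH --signal=b:usr1@300` asks Slurm to deliver SIGUSR1 five minutes before timeout, and the `trap requeue_job sigusr1` handler calls `scontrol requeue`. A hedged sketch of the same pattern from inside a Python training process instead of the batch shell; `save_checkpoint` is a hypothetical hook, not from the recording:

```python
import os
import signal
import subprocess
import sys


def _on_sigusr1(signum, frame):
    """Mirror of the recorded requeue_job trap: checkpoint, requeue, exit."""
    # save_checkpoint()  # hypothetical: persist training state first
    job_id = os.environ.get("SLURM_JOB_ID")
    if job_id:
        subprocess.run(["scontrol", "requeue", job_id], check=False)
    sys.exit(0)


signal.signal(signal.SIGUSR1, _on_sigusr1)
```

Note the recorded script runs `srun ... &` and `wait $child_pid` rather than a foreground `srun`; that detail matters because bash only runs traps between foreground commands, so backgrounding the step lets the SIGUSR1 handler fire promptly.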
+151,373604,"TERMINAL",0,0,"67",,terminal_output +152,374699,"TERMINAL",0,0,"78",,terminal_output +153,375682,"TERMINAL",0,0,"89",,terminal_output +154,376642,"TERMINAL",0,0,"940",,terminal_output +155,377667,"TERMINAL",0,0,"201",,terminal_output +156,378693,"TERMINAL",0,0,"12",,terminal_output +157,379718,"TERMINAL",0,0,"23",,terminal_output +158,380741,"TERMINAL",0,0,"34",,terminal_output +159,381697,"TERMINAL",0,0,"45",,terminal_output +160,382789,"TERMINAL",0,0,"56",,terminal_output +161,383824,"TERMINAL",0,0,"67",,terminal_output +162,384733,"TERMINAL",0,0,"78",,terminal_output +163,385757,"TERMINAL",0,0,"89",,terminal_output +164,386781,"TERMINAL",0,0,"950",,terminal_output +165,387773,"TERMINAL",0,0,"301",,terminal_output +166,388784,"TERMINAL",0,0,"12",,terminal_output +167,389851,"TERMINAL",0,0,"23",,terminal_output +168,390807,"TERMINAL",0,0,"34",,terminal_output +169,391898,"TERMINAL",0,0,"45",,terminal_output +170,392842,"TERMINAL",0,0,"56",,terminal_output +171,393950,"TERMINAL",0,0,"67",,terminal_output +172,394975,"TERMINAL",0,0,"78",,terminal_output +173,395928,"TERMINAL",0,0,"89",,terminal_output +174,396895,"TERMINAL",0,0,"94:00",,terminal_output +175,398049,"TERMINAL",0,0,"401",,terminal_output +176,399076,"TERMINAL",0,0,"12",,terminal_output +177,400103,"TERMINAL",0,0,"23",,terminal_output +178,400946,"TERMINAL",0,0,"34",,terminal_output +179,402144,"TERMINAL",0,0,"45",,terminal_output +180,403166,"TERMINAL",0,0,"56",,terminal_output +181,404040,"TERMINAL",0,0,"67",,terminal_output +182,405012,"TERMINAL",0,0,"78",,terminal_output +183,406006,"TERMINAL",0,0,"89",,terminal_output +184,407060,"TERMINAL",0,0,"910",,terminal_output +185,408331,"TERMINAL",0,0,"501",,terminal_output +186,409110,"TERMINAL",0,0,"12",,terminal_output +187,410060,"TERMINAL",0,0,"23",,terminal_output +188,411065,"TERMINAL",0,0,"34",,terminal_output +189,412079,"TERMINAL",0,0,"45",,terminal_output +190,413199,"TERMINAL",0,0,"56",,terminal_output +191,414124,"TERMINAL",0,0,"67",,terminal_output +192,415155,"TERMINAL",0,0,"78",,terminal_output +193,416172,"TERMINAL",0,0,"89",,terminal_output +194,417138,"TERMINAL",0,0,"920",,terminal_output +195,418156,"TERMINAL",0,0,"7:001",,terminal_output +196,419279,"TERMINAL",0,0,"12",,terminal_output +197,420369,"TERMINAL",0,0,"23",,terminal_output +198,421191,"TERMINAL",0,0,"34",,terminal_output +199,422229,"TERMINAL",0,0,"45",,terminal_output +200,423239,"TERMINAL",0,0,"56",,terminal_output +201,424260,"TERMINAL",0,0,"67",,terminal_output +202,425285,"TERMINAL",0,0,"78",,terminal_output +203,426254,"TERMINAL",0,0,"89",,terminal_output +204,427266,"TERMINAL",0,0,"930",,terminal_output +205,428359,"TERMINAL",0,0,"101",,terminal_output +206,429323,"TERMINAL",0,0,"12",,terminal_output +207,430413,"TERMINAL",0,0,"23",,terminal_output +208,431428,"TERMINAL",0,0,"34",,terminal_output +209,432351,"TERMINAL",0,0,"45",,terminal_output +210,433374,"TERMINAL",0,0,"56",,terminal_output +211,434364,"TERMINAL",0,0,"68",,terminal_output +212,435370,"TERMINAL",0,0,"89",,terminal_output +213,436382,"TERMINAL",0,0,"940",,terminal_output +214,437389,"TERMINAL",0,0,"201",,terminal_output +215,438420,"TERMINAL",0,0,"12",,terminal_output +216,439419,"TERMINAL",0,0,"23",,terminal_output +217,440428,"TERMINAL",0,0,"34",,terminal_output +218,441444,"TERMINAL",0,0,"45",,terminal_output +219,442463,"TERMINAL",0,0,"56",,terminal_output +220,443478,"TERMINAL",0,0,"67",,terminal_output +221,444483,"TERMINAL",0,0,"78",,terminal_output +222,445492,"TERMINAL",0,0,"89",,terminal_output 
+223,446507,"TERMINAL",0,0,"950",,terminal_output +224,447520,"TERMINAL",0,0,"301",,terminal_output +225,448538,"TERMINAL",0,0,"12",,terminal_output +226,449559,"TERMINAL",0,0,"23",,terminal_output +227,450580,"TERMINAL",0,0,"34",,terminal_output +228,451570,"TERMINAL",0,0,"45",,terminal_output +229,452581,"TERMINAL",0,0,"56",,terminal_output +230,453339,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",845,0,"",shellscript,selection_mouse +231,453645,"TERMINAL",0,0,"67",,terminal_output +232,454087,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",656,0,"",shellscript,selection_mouse +233,454608,"TERMINAL",0,0,"78",,terminal_output +234,454741,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",842,0,"",shellscript,selection_mouse +235,454783,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",841,0,"",shellscript,selection_command +236,455610,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",844,0,"",shellscript,selection_mouse +237,455647,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",843,0,"",shellscript,selection_command +238,455692,"TERMINAL",0,0,"89",,terminal_output +239,456719,"TERMINAL",0,0,"95:00",,terminal_output +240,457741,"TERMINAL",0,0,"401",,terminal_output +241,458666,"TERMINAL",0,0,"12",,terminal_output +242,459714,"TERMINAL",0,0,"23",,terminal_output +243,460711,"TERMINAL",0,0,"34",,terminal_output +244,461697,"TERMINAL",0,0,"45",,terminal_output +245,462764,"TERMINAL",0,0,"56",,terminal_output +246,463179,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +247,474273,"sample.py",0,0,"from dataclasses import dataclass\nimport time\nimport os\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nfrom flax.training.train_state import TrainState\nimport numpy as np\nimport orbax.checkpoint as ocp\nimport optax\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\nrng = jax.random.PRNGKey(args.seed)\n\n# --- Load Genie checkpoint ---\ngenie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n 
tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\n\ndef _sampling_wrapper(module, batch):\n return module.sample(\n batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax\n )\n\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n sampling_fn = jax.jit(nn.apply(_sampling_wrapper, genie))\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=vid, latent_actions=action_batch, rng=_rng)\n generated_vid = sampling_fn(params, batch)\n return generated_vid\n\n\n# --- Get video + latent actions ---\narray_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n]\ndataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n num_workers=0,\n prefetch_buffer_size=1,\n seed=args.seed,\n)\nvideo_batch = next(iter(dataloader))\nvideo_batch = jnp.array(video_batch)\nprint(video_batch.dtype)\nvideo_batch = video_batch.astype(args.dtype) # / 255.0\nprint(video_batch.dtype)\nvideo_batch = video_batch / 255.0\nprint(video_batch.dtype)\n# Get latent actions for all videos in the batch\nbatch = dict(videos=video_batch)\naction_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\naction_batch = action_batch.reshape(video_batch.shape[0], args.seq_len - 1, 1)\n\n# --- Sample + evaluate video ---\nprint(""Autoregressively sampling..."")\nvid = _autoreg_sample(rng, video_batch, action_batch)\nprint(""Sampling done. 
Calculating ssim and saving video."")\ngt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\nrecon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\nssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\nprint(f""SSIM: {ssim}"")\n\n# --- Construct video ---\ntrue_videos = (video_batch * 255).astype(np.uint8)\npred_videos = (vid * 255).astype(np.uint8)\nvideo_comparison = np.zeros((2, *vid.shape), dtype=np.uint8)\nvideo_comparison[0] = true_videos[:, : args.seq_len]\nvideo_comparison[1] = pred_videos\nframes = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n# --- Save video ---\nimgs = [Image.fromarray(img) for img in frames]\n# Write actions on each frame, on each row (i.e., for each video in the batch, on the GT row)\nfor t, img in enumerate(imgs[1:]):\n d = ImageDraw.Draw(img)\n for row in range(action_batch.shape[0]):\n action = action_batch[row, t, 0]\n y_offset = row * video_batch.shape[2] + 2\n d.text((2, y_offset), f""{action}"", fill=255)\nimgs[0].save(\n f""generation_{time.time()}.gif"",\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n)\n",python,tab +248,476917,"genie.py",0,0,"",python,tab +249,478690,"genie.py",5618,0,"",python,selection_mouse +250,481496,"genie.py",6539,0,"",python,selection_mouse +251,481640,"genie.py",6533,19,"final_carry_maskgit",python,selection_mouse +252,506112,"genie.py",6539,0,"",python,selection_mouse +253,506491,"genie.py",6458,0,"",python,selection_mouse +254,506671,"genie.py",6453,7,"loop_fn",python,selection_mouse +255,507300,"genie.py",6543,0,"",python,selection_mouse +256,507455,"genie.py",6533,19,"final_carry_maskgit",python,selection_mouse +257,508183,"genie.py",6415,0,"",python,selection_mouse +258,508224,"genie.py",6414,0,"",python,selection_command +259,508759,"genie.py",6458,0,"",python,selection_mouse +260,508916,"genie.py",6453,7,"loop_fn",python,selection_mouse +261,509556,"genie.py",6543,0,"",python,selection_mouse +262,509715,"genie.py",6533,19,"final_carry_maskgit",python,selection_mouse +263,510583,"genie.py",6459,0,"",python,selection_mouse +264,510750,"genie.py",6453,7,"loop_fn",python,selection_mouse +265,857839,"TERMINAL",0,0,"fqueue",,terminal_output +266,858045,"TERMINAL",0,0,"clear",,terminal_output +267,858275,"TERMINAL",0,0,"queue",,terminal_output +268,858599,"TERMINAL",0,0,"idling",,terminal_output +269,858905,"TERMINAL",0,0,"queue",,terminal_output +270,859164,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_output +271,859381,"TERMINAL",0,0,"git push --set-upstream origin fix/spatiotemporal-pe-once-in-STTransformer",,terminal_output +272,859708,"TERMINAL",0,0,"\r",,terminal_output +273,860131,"TERMINAL",0,0,"checkout fix/spatiotemporal-pe-once-in-STTransformer",,terminal_output +274,860435,"TERMINAL",0,0,"main",,terminal_output +275,860822,"TERMINAL",0,0,"diff",,terminal_output +276,861111,"TERMINAL",0,0,"checkout fix/spatiotemporal-pe-once-in-STTransformer",,terminal_output +277,861322,"TERMINAL",0,0,"\rbranch",,terminal_output +278,861453,"TERMINAL",0,0,"queue",,terminal_output +279,861591,"TERMINAL",0,0,"salloc --time=05:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5",,terminal_output +280,862553,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +281,864719,"TERMINAL",0,0,"[?25l# [?25h",,terminal_output +282,864817,"TERMINAL",0,0,"[?25l [?25h",,terminal_output 
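The tail of the recorded `sample.py` assembles a side-by-side comparison video: ground truth stacked next to predictions, batch elements stacked vertically, written out as an animated GIF. A minimal self-contained sketch of that layout, using the exact `einops` rearrange pattern from the recording; the wrapper function and its defaults are mine:

```python
import numpy as np
import einops
from PIL import Image


def save_comparison_gif(true_videos, pred_videos, path="comparison.gif"):
    """true_videos, pred_videos: uint8 arrays of shape (B, T, H, W, C).

    Lays frames out as in the recorded sample.py: for each timestep t,
    batch rows are stacked along height and the two sources (GT, pred)
    side by side along width.
    """
    video = np.stack([true_videos, pred_videos])  # (2, B, T, H, W, C)
    frames = einops.rearrange(video, "n b t h w c -> t (b h) (n w) c")
    imgs = [Image.fromarray(f) for f in frames]
    imgs[0].save(path, save_all=True, append_images=imgs[1:], duration=250, loop=0)
```

The recorded script additionally draws the latent action id onto each ground-truth row with `ImageDraw` before saving, so the chosen action is visible per frame.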
+283,865416,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +284,865729,"TERMINAL",0,0,"[?25l\r/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\n[?2004l\r]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ [?25h",,terminal_output +285,866420,"TERMINAL",0,0,"bash",,terminal_focus +286,867474,"TERMINAL",0,0,"cd /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_command +287,867522,"TERMINAL",0,0,"]633;E;2025-07-31 12:44:30 cd /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838;b0f4cde3-0eba-4fa4-b58c-a6b835c83057]633;C]0;tum_cte0515@hkn1991:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838]633;D;0",,terminal_output +288,867933,"TERMINAL",0,0,"ls",,terminal_command +289,867991,"TERMINAL",0,0,"]633;E;2025-07-31 12:44:30 ls;b0f4cde3-0eba-4fa4-b58c-a6b835c83057]633;C020000 040000 060000 080000 100000 117000 118000 119000\r\n]0;tum_cte0515@hkn1991:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838]633;D;0",,terminal_output +290,869122,"TERMINAL",0,0,"srun",,terminal_focus +291,875155,"TERMINAL",0,0,"\r(reverse-i-search)`': ",,terminal_output +292,875596,"TERMINAL",0,0,"s': # /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +293,875738,"TERMINAL",0,0,"[?25ls\ra': salloc --time=05:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5\r[?25h",,terminal_output +294,875901,"TERMINAL",0,0,"[?25lsm': git checkout new-arch-sampling[?25h",,terminal_output +295,876159,"TERMINAL",0,0,"[?25las\rp': git checkout new-arch-sampling[?25h",,terminal_output +296,876213,"TERMINAL",0,0,"\rl': git checkout new-arch-sampling",,terminal_output +297,876557,"TERMINAL",0,0,"[?25lasi': git checkout new-arch-sampling[?25hn': git checkout new-arch-sampling",,terminal_output +298,876669,"TERMINAL",0,0,"[?25lsg': git checkout new-arch-sampling[?25h",,terminal_output +299,877332,"TERMINAL",0,0,"[?25ls_': sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/causal/train_dynamics_causal_8_node/3373408[?25h",,terminal_output +300,877808,"TERMINAL",0,0,"[?25ls\rd': sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/causal/train_dynamics_causal_8_node/3373408[?25h",,terminal_output +301,877916,"TERMINAL",0,0,"[?25ls\re': sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/causal/train_dynamics_causal_8_node/3373408[?25h",,terminal_output +302,878218,"TERMINAL",0,0,"[?25ls\rv': sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/causal/train_dynamics_causal_8_node/3373408[?25h",,terminal_output +303,878485,"TERMINAL",0,0,"\rjafar) [tum_cte0515@hkn0602 jafar]$ sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/causal/train_dynamics_causal_8_node/3373408",,terminal_output 
+304,880332,"TERMINAL",0,0,"\r\n\r",,terminal_output +305,880840,"TERMINAL",0,0,"\r\n\r",,terminal_output +306,881664,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +307,883084,"TERMINAL",0,0,"[?25l/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --seq_len=32 \\r\n --seed=69 \\r\n --batch_size=4 \\r\n --maskgit_steps=2 \\r\n --start_frame=16 \\r\n --data_dir $array_records_dir\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n[?25h",,terminal_output +308,883285,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +309,883418,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output 
+310,886016,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n# source .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\ndynamics_ckpt_dir=$1\necho $dynamics_ckpt_dir\n\nenv | grep SLURM\n\nsrun python sample.py \\n --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=32 \\n --seed=69 \\n --batch_size=4 \\n --maskgit_steps=2 \\n --start_frame=16 \\n --data_dir $array_records_dir\n\n# srun python sample.py \\n # --checkpoint $dynamics_ckpt_dir \\n # --start_frame=0 \\n # --batch_size=12 \\n # --seq_len=2 \\n # --data_dir $array_records_dir\n",shellscript,tab +311,888427,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",774,0,"",shellscript,selection_mouse +312,889519,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",773,0,"",shellscript,selection_command +313,890204,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",772,1,"",shellscript,content +314,890344,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",771,1,"",shellscript,content +315,893068,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",771,0,"3",shellscript,content +316,893069,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",772,0,"",shellscript,selection_keyboard +317,893096,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",772,0,"2",shellscript,content +318,893097,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",773,0,"",shellscript,selection_keyboard +319,893896,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",772,0,"",shellscript,selection_command +320,894674,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",746,0,"",shellscript,selection_command +321,895092,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",720,0,"",shellscript,selection_command +322,895633,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",693,0,"",shellscript,selection_command +323,896017,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",720,0,"",shellscript,selection_command +324,896401,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",746,0,"",shellscript,selection_command +325,896685,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",772,0,"",shellscript,selection_command +326,896937,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",773,0,"",shellscript,selection_command +327,897456,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",772,1,"",shellscript,content +328,897580,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",771,1,"",shellscript,content +329,898765,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",771,0,"1",shellscript,content +330,898766,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",772,0,"",shellscript,selection_keyboard +331,899192,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",772,0,"6",shellscript,content +332,899193,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",773,0,"",shellscript,selection_keyboard +333,899585,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",772,0,"",shellscript,selection_command +334,899715,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",790,0,"",shellscript,selection_command 
+335,899812,"TERMINAL",0,0,"2025-07-31 12:45:02.464282: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +336,900147,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",789,0,"",shellscript,selection_command +337,901182,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",805,0,"",shellscript,selection_command +338,901411,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",806,0,"",shellscript,selection_command +339,901549,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",807,0,"",shellscript,selection_command +340,901695,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",808,0,"",shellscript,selection_command +341,901834,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",809,0,"",shellscript,selection_command +342,902300,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",790,0,"",shellscript,selection_command +343,902744,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",789,0,"",shellscript,selection_command +344,903545,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",788,1,"",shellscript,content +345,903683,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",787,1,"",shellscript,content +346,903904,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",787,0,"4",shellscript,content +347,903904,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",788,0,"",shellscript,selection_keyboard +348,904054,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",788,0,"2",shellscript,content +349,904054,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",789,0,"",shellscript,selection_keyboard +350,904285,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",788,0,"",shellscript,selection_command +351,904473,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",804,0,"",shellscript,selection_command +352,904609,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",805,0,"",shellscript,selection_command +353,904966,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",812,0,"",shellscript,selection_command +354,905622,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",811,0,"",shellscript,selection_command +355,906296,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",810,0,"",shellscript,selection_command +356,906806,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",831,0,"",shellscript,selection_command +357,907188,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",832,0,"",shellscript,selection_command +358,907368,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",833,0,"",shellscript,selection_command +359,907759,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",834,0,"",shellscript,selection_command +360,907906,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",833,1,"",shellscript,content +361,908036,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",833,0,"1",shellscript,content +362,908037,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",834,0,"",shellscript,selection_keyboard +363,908370,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",858,0,"",shellscript,selection_command +364,908859,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",834,0,"",shellscript,selection_command +365,909988,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",833,1,"",shellscript,content +366,910088,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",833,0,"2",shellscript,content +367,910089,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",834,0,"",shellscript,selection_keyboard 
+368,910276,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",834,0,"5",shellscript,content +369,910277,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",835,0,"",shellscript,selection_keyboard +370,910585,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",834,0,"",shellscript,selection_command +371,910783,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",859,0,"",shellscript,selection_command +372,911068,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",858,0,"",shellscript,selection_command +373,912072,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",857,1,"",shellscript,content +374,912190,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",856,1,"",shellscript,content +375,912364,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",856,0,"0",shellscript,content +376,912365,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",857,0,"",shellscript,selection_keyboard +377,912540,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",856,0,"",shellscript,selection_command +378,912860,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",831,0,"",shellscript,selection_command +379,912997,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",810,0,"",shellscript,selection_command +380,913138,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",790,0,"",shellscript,selection_command +381,913278,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",774,0,"",shellscript,selection_command +382,913444,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",749,0,"",shellscript,selection_command +383,913572,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",723,0,"",shellscript,selection_command +384,913739,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",696,0,"",shellscript,selection_command +385,914068,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",674,0,"",shellscript,selection_command +386,916382,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",656,21," --dyna_dim=1024 \",shellscript,selection_command +387,916634,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",656,48," --dyna_dim=1024 \\n --dyna_num_blocks=16 \",shellscript,selection_command +388,916772,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",656,74," --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \",shellscript,selection_command +389,917354,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",656,75,"",shellscript,content +390,917363,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",660,0,"",shellscript,selection_command +391,917574,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",686,0,"",shellscript,selection_command +392,917742,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",705,0,"",shellscript,selection_command +393,917897,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",721,0,"",shellscript,selection_command +394,918027,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",742,0,"",shellscript,selection_command +395,918171,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",767,0,"",shellscript,selection_command +396,918333,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",789,0,"",shellscript,selection_command +397,918734,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",818,0,"\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \",shellscript,content +398,918744,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",823,0,"",shellscript,selection_command +399,919892,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",819,21," --dyna_dim=1024 \",shellscript,selection_command +400,920122,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",819,48," --dyna_dim=1024 \\n --dyna_num_blocks=16 
\",shellscript,selection_command +401,920249,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",819,74," --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \",shellscript,selection_command +402,920442,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",823,0,"",shellscript,selection_command +403,921149,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",872,0,"#",shellscript,content +404,921149,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",845,0,"#",shellscript,content +405,921149,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",823,0,"#",shellscript,content +406,921150,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",824,0,"",shellscript,selection_keyboard +407,921194,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",875,0," ",shellscript,content +408,921194,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",847,0," ",shellscript,content +409,921195,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",824,0," ",shellscript,content +410,921195,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",825,0,"",shellscript,selection_keyboard +411,921561,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",824,0,"",shellscript,selection_command +412,924342,"TERMINAL",0,0,"^Csrun: interrupt (one more within 1 sec to abort)\r\nsrun: StepId=3387190.0 task 0: running\r\n",,terminal_output +413,924601,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3387190.0\r\nsrun: forcing job termination\r\n2025-07-31 12:45:27.198192: F external/xla/xla/service/gpu/autotuning/gemm_fusion_autotuner.cc:1136] Non-OK-status: executable.status()\r\nStatus: INTERNAL: ptxas exited with non-zero error code 2, output: - Failure occured when compiling fusion gemm_fusion_dot with config '{block_m:128,block_n:128,block_k:32,split_k:8,num_stages:4,num_warps:8,num_ctas:1}'\r\nFused HLO computation:\r\n%gemm_fusion_dot_computation (parameter_0: bf16[4,32,61,2048], parameter_1: bf16[2048,512]) -> bf16[4,32,61,512] {\r\n %parameter_0 = bf16[4,32,61,2048]{3,2,1,0} parameter(0)\r\n %bitcast.2 = bf16[7808,2048]{1,0} bitcast(%parameter_0), metadata={op_name=""args[0]""}\r\n %parameter_1 = bf16[2048,512]{1,0} parameter(1)\r\n %dot.1 = bf16[7808,512]{1,0} dot(%bitcast.2, %parameter_1), lhs_contracting_dims={1}, rhs_contracting_dims={0}, metadata={op_name=""jit(dot_general)/jit(main)/dot_general"" source_file=""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"" source_line=287}\r\n ROOT %bitcast.3 = bf16[4,32,61,512]{3,2,1,0} bitcast(%dot.1), metadata={op_name=""jit(dot_general)/jit(main)/dot_general"" source_file=""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/linear.py"" source_line=287}\r\n}\r\nsrun: Job step aborted: Waiting up to 32 seconds for job step to finish.\r\nslurmstepd: error: *** STEP 3387190.0 ON hkn0602 CANCELLED AT 2025-07-31T12:45:27 ***\r\n",,terminal_output +414,924691,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3387190.0\r\nsrun: job abort in progress\r\n",,terminal_output +415,924958,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3387190.0\r\nslurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh: line 36: =16: command not found\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +416,925292,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output 
+417,925490,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --dyna_ffn_dim=4096 \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output +418,925623,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +419,925759,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +420,928993,"TERMINAL",0,0,"2025-07-31 12:45:31.481728: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +421,941410,"TERMINAL",0,0,"2025-07-31 12:45:43.998700: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +422,954082,"TERMINAL",0,0,"2025-07-31 12:45:56.681425: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +423,962052,"TERMINAL",0,0,"2025-07-31 12:46:04.692573: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +424,965454,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 119000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/119000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/100000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/117000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 118000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/118000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/020000/metrics/metrics not found.\r\n",,terminal_output +425,965810,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 124, in \r\n restored = checkpoint_manager.restore(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\r\n restored = self._checkpointer.restore(restore_directory, args=args)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\r\n return super().restore(directory, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\r\n restored = self._restore(directory, args=ckpt_args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\r\n return self._handler.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\r\n restored[item_name] = handler.restore(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/standard_checkpoint_handler.py"", line 246, in restore\r\n return self._impl.restore(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\r\n return self._handler_impl.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\r\n raise ValueError(\r\nValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'params': {'params': {'dynamics': {'dynamics': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(4096, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(4096, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(4096, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(4096, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(4096, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_5': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), 
memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(4096, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_10': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_11': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 
'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_8': Diff(lhs=None, rhs={'Dense_0': {'bias': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_9': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}, 'lam': {'encoder': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': 
{'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 
'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}, 'tokenizer': {'decoder': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, 
sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', 
skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}, 'encoder': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', 
skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}),
'STBlock_5', 'STBlock_6', 'STBlock_7': each Diff(lhs=None, rhs={'Dense_0': {'bias': (64,), 'kernel': (64, 512)}, 'LayerNorm_0'/'LayerNorm_1'/'LayerNorm_2': {'bias': (64,), 'scale': (64,)}, 'MultiHeadAttention_0'/'MultiHeadAttention_1': {'key'/'query'/'value': {'bias': (1, 64), 'kernel': (64, 8, 64)}, 'out': {'bias': (64,), 'kernel': (1, 64, 512)}}}; every leaf is a ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False) with the write_shape given)}}}},
'opt_state': [{'mu': {'params': {
  'dynamics': {'dynamics': {'STBlock_0' through 'STBlock_5': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(4096, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_10', 'STBlock_11', 'STBlock_6' through 'STBlock_9': each Diff(lhs=None, rhs={the same STBlock ValueMetadataEntry tree as above})}},
  'lam': {'encoder': {'STBlock_0' through 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct((512,), float32, same sharding), 'kernel': ShapeDtypeStruct((2048, 512), float32, same sharding)}, rhs=None)}, 'STBlock_4' through 'STBlock_7': each Diff(lhs=None, rhs={the same STBlock ValueMetadataEntry tree})}},
  'tokenizer': {'decoder': {'STBlock_0' through 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct((512,), float32, same sharding), 'kernel': ShapeDtypeStruct((2048, 512), float32, same sharding)}, rhs=None)}, 'STBlock_4' through 'STBlock_7': each Diff(lhs=None, rhs={the same STBlock ValueMetadataEntry tree})}, 'encoder': {'STBlock_0' through 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct((512,), float32, same sharding), 'kernel': ShapeDtypeStruct((2048, 512), float32, same sharding)}, rhs=None)}, 'STBlock_4' through 'STBlock_7': each Diff(lhs=None, rhs={the same STBlock ValueMetadataEntry tree})}}}},
 'nu': {'params': {'dynamics': {'dynamics': {'STBlock_0' through 'STBlock_5': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct((512,), float32, same sharding), 'kernel': ShapeDtypeStruct((4096, 512), float32, same sharding)}, rhs=None)}, 'STBlock_10': Diff(lhs=None, rhs={'Dense_0': {'bias': (64,), 'kernel': (64, 512)}, 'LayerNorm_0': {'bias': ValueMetadataEnt",,terminal_output
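The two TERMINAL records above and below are one continuous structure-mismatch dump from an Orbax checkpoint restore. Each Diff(lhs=..., rhs=...) marks a leaf where the in-memory restore target and the on-disk checkpoint disagree: lhs=ShapeDtypeStruct(...), rhs=None means the leaf exists only in the restore target, while lhs=None, rhs=ValueMetadataEntry(...) means it exists only in the checkpoint. The pattern is the same in every module: the target's 'dynamics' has six STBlocks, each with a 'Dense_1' of kernel shape (4096, 512) ((2048, 512) in 'lam' and 'tokenizer'), while the checkpoint holds twelve (respectively eight) STBlocks carrying 'Dense_0' layers instead, so the checkpoint predates the current model config and the restore fails before any array is read. Below is a minimal diagnostic sketch, not the run's actual code: it assumes orbax-checkpoint's documented StandardCheckpointer API, and CKPT_DIR and build_abstract_state() are hypothetical stand-ins for this run's checkpoint directory and train-state constructor.

import jax
import orbax.checkpoint as ocp

CKPT_DIR = "/path/to/run/checkpoints/0"  # hypothetical: the step directory being restored

ckptr = ocp.StandardCheckpointer()

# Read the on-disk tree structure (shapes and dtypes) without deserializing any arrays.
saved_meta = ckptr.metadata(CKPT_DIR)

# Abstract pytree the restore call targets (hypothetical helper, e.g. built with
# jax.eval_shape over the train-state init function).
abstract_state = build_abstract_state()

def leaf_paths(tree):
    # String key paths of every leaf, e.g. "['params']['dynamics']['STBlock_0']['Dense_1']['kernel']".
    leaves, _ = jax.tree_util.tree_flatten_with_path(tree)
    return {jax.tree_util.keystr(path) for path, _ in leaves}

print("only in checkpoint:", sorted(leaf_paths(saved_meta) - leaf_paths(abstract_state))[:10])
print("only in restore target:", sorted(leaf_paths(abstract_state) - leaf_paths(saved_meta))[:10])

# The restore itself only succeeds once the two structures agree:
# state = ckptr.restore(CKPT_DIR, args=ocp.args.StandardRestore(abstract_state))

Under these assumptions, the two set differences print the same 'STBlock_6'..'STBlock_11' and 'Dense_1' paths that the Diff entries in the dump single out, before any restore is attempted.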
+426,965913,"TERMINAL",0,0,"ry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, and the rest of the same STBlock ValueMetadataEntry tree as above}), 'STBlock_11', 'STBlock_6', 'STBlock_7': each Diff(lhs=None, rhs={the same STBlock ValueMetadataEntry tree}), 'STBlock_8': Diff(lhs=None, rhs={'Dense_0': {'bias': (64,), 'kernel': (64, 512)}, 'LayerNorm_0'/'LayerNorm_1'/'LayerNorm_2': {'bias': (64,), 'scale': (64,)}, 'MultiHeadAttention_0': {'key': {'bias': (1, 64), 'kernel': (64, 8, 64)}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array',
skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_9': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}, 'lam': {'encoder': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 
'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}, 'tokenizer': {'decoder': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 
'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', 
skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}, 'encoder': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, 
sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', 
skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}}}}, None, None]}\r\n",,terminal_output +427,966774,"TERMINAL",0,0,"srun: error: hkn0602: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +428,1269337,"sample.py",0,0,"",python,tab +429,1271928,"utils/nn.py",0,0,"import math\nfrom typing import Tuple\n\nfrom flax import linen as nn\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\nclass PositionalEncoding(nn.Module):\n """"""https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/JAX/tutorial6/Transformers_and_MHAttention.html""""""\n\n d_model: int # Hidden dimensionality of the input.\n max_len: int = 5000 # Maximum length of a sequence to expect.\n\n def setup(self):\n # Create matrix of [SeqLen, HiddenDim] representing the positional encoding for max_len inputs\n self.pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n self.pe = self.pe.at[:, 0::2].set(jnp.sin(position * div_term))\n self.pe = self.pe.at[:, 1::2].set(jnp.cos(position * div_term))\n\n def __call__(self, x):\n x = x + self.pe[: x.shape[2]]\n return x\n\n\nclass STBlock(nn.Module):\n dim: int\n ffn_dim: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n @nn.remat\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=False\n ),\n )(z)\n x = x + z\n\n # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n causal_mask = jnp.tri(z.shape[-2])\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n # FIXME (f.srambical): check whether we should still pass the mask if we set is_causal=True\n )(z, mask=causal_mask)\n x = x + z\n x = x.swapaxes(1, 2)\n\n # --- Feedforward ---\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n z = nn.Dense(\n self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.gelu(z)\n z = nn.Dense(\n self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n x = x + z\n\n return x\n\n\nclass STTransformer(nn.Module):\n model_dim: int\n ffn_dim: int\n out_dim: int\n num_blocks: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n x = nn.Sequential(\n [\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n ]\n )(x)\n 
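# --- Editor's note (illustrative sketch, not part of the recorded source) ---
# The STTransformer recorded above factorizes attention over a (B, T, N, E)
# stream: spatial attention mixes the N patch tokens within each frame, and
# temporal attention mixes the T frames per patch position under a causal
# mask. A minimal shape check, assuming the STBlock defined in this file:
import jax
import jax.numpy as jnp

block = STBlock(
    dim=512, ffn_dim=2048, num_heads=8, dropout=0.0,
    param_dtype=jnp.float32, dtype=jnp.bfloat16,
    use_flash_attention=False,  # plain dot-product attention for the check
)
x = jnp.zeros((1, 16, 90, 512))               # (batch, time, patches, dim)
params = block.init(jax.random.PRNGKey(0), x)
y = block.apply(params, x)                     # residual block preserves shape
assert y.shape == x.shape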
for _ in range(self.num_blocks):\n x = STBlock(\n dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )(x)\n x = nn.Dense(\n self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n return x # (B, T, E)\n\n\ndef normalize(x):\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nn.Module):\n latent_dim: int\n num_latents: int\n dropout: float\n\n def setup(self):\n self.codebook = normalize(\n self.param(\n ""codebook"",\n nn.initializers.lecun_uniform(),\n (self.num_latents, self.latent_dim),\n )\n )\n self.drop = nn.Dropout(self.dropout, deterministic=False)\n\n def __call__(\n self, x: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x = normalize(x)\n codebook = normalize(self.codebook)\n distance = -jnp.matmul(x, codebook.T)\n if training:\n dropout_key = self.make_rng(""dropout"")\n distance = self.drop(distance, rng=dropout_key)\n\n # --- Get indices and embeddings ---\n indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]\n\n # --- Straight through estimator ---\n z_q = x + jax.lax.stop_gradient(z - x)\n return z_q, z, x, indices\n\n def get_codes(self, indices: jax.Array):\n return self.codebook[indices]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool):\n """"""\n Create an attention function that uses flash attention if enabled.\n\n Flax MultiHeadAttention provides tensors with shape (batch..., length, num_heads, head_dim)\n jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim).\n\n We need to reshape to ensure compatibility. cuDNN's flash attention additionally\n requires a sequence length that is a multiple of 4. We pad the sequence length to the nearest\n multiple of 4 and mask accordingly.\n """"""\n\n def attention_fn(query, key, value, bias=None, mask=None, **kwargs):\n implementation = ""cudnn"" if use_flash_attention else None\n\n def _rearrange(x):\n return einops.rearrange(x, ""... l h d -> (...) 
l h d"")\n\n def _pad(x):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n\n def _fuse_masks(mask: jax.Array, attention_mask: jax.Array) -> jax.Array:\n mask_bool = mask.astype(jnp.bool_)\n expanded_mask = jnp.pad(\n mask_bool, ((0, pad_size), (0, pad_size)), constant_values=False\n )\n return jnp.logical_and(attention_mask, expanded_mask)\n\n original_shape = query.shape\n original_seq_len = query.shape[-3]\n\n # Pad to nearest multiple of 4\n target_seq_len = ((original_seq_len + 3) // 4) * 4\n pad_size = target_seq_len - original_seq_len\n\n query_4d = _pad(_rearrange(query))\n key_4d = _pad(_rearrange(key))\n value_4d = _pad(_rearrange(value))\n\n attention_mask = jnp.ones((target_seq_len, target_seq_len), dtype=jnp.bool_)\n attention_mask = attention_mask.at[original_seq_len:, :].set(False)\n attention_mask = attention_mask.at[:, original_seq_len:].set(False)\n\n mask_4d = (\n _fuse_masks(mask, attention_mask) if mask is not None else attention_mask\n )\n mask_4d = mask_4d[jnp.newaxis, jnp.newaxis, :, :] # (1, 1, seq_len, seq_len)\n\n bias_4d = _pad(_rearrange(bias)) if bias is not None else None\n\n output_4d = jax.nn.dot_product_attention(\n query=query_4d,\n key=key_4d,\n value=value_4d,\n bias=bias_4d,\n mask=mask_4d,\n implementation=implementation,\n is_causal=is_causal,\n **kwargs\n )\n return output_4d[..., :original_seq_len, :, :].reshape(original_shape)\n\n return attention_fn\n",python,tab +430,1494793,"utils/nn.py",2410,0,"",python,selection_mouse +431,1682411,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n 
tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n 
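# --- Editor's note (illustrative sketch, not part of the recorded source) ---
# dynamics_loss_fn above averages cross-entropy only over masked tokens:
# (mask * ce).sum() / mask.sum() is a masked mean, not a global mean.
import jax.numpy as jnp
import optax

logits = jnp.zeros((2, 4, 1024))                        # (batch, tokens, vocab)
labels = jnp.zeros((2, 4), dtype=jnp.int32)
mask = jnp.array([[1., 0., 1., 1.], [0., 0., 1., 0.]])  # 4 of 8 tokens masked
ce = optax.softmax_cross_entropy_with_integer_labels(logits, labels)
loss = (mask * ce).sum() / mask.sum()    # averages over the 4 masked positions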
latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=args.dtype\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the 
global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n # Restore full dynamics model\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n train_state = restore_genie_components(\n train_state, replicated_sharding, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +432,1688040,"utils/nn.py",0,0,"",python,tab +433,1691526,"utils/nn.py",2830,0,"",python,selection_mouse +434,1692087,"utils/nn.py",2775,0,"",python,selection_mouse +435,1692716,"utils/nn.py",2898,0,"",python,selection_mouse +436,1692722,"utils/nn.py",2897,0,"",python,selection_command +437,1702875,"utils/nn.py",3050,0,"",python,selection_mouse +438,1702880,"utils/nn.py",3049,0,"",python,selection_command +439,2003096,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab +440,2007432,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",672,0,"",shellscript,selection_mouse +441,2008600,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",656,26,"",shellscript,content 
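Editor's note: the utils/nn.py function recorded above pads the sequence axis so jax.nn.dot_product_attention's flash-attention path accepts arbitrary lengths, then slices the padding back off. A minimal, self-contained sketch of that pad/mask/slice pattern follows; it is not the repository's exact code, and the function name padded_attention and the seq_multiple parameter are illustrative assumptions.

    # Hedged sketch of the pad-to-multiple trick seen in utils/nn.py.
    # Inputs follow jax.nn.dot_product_attention's layout:
    # (batch, seq_len, num_heads, head_dim).
    import jax
    import jax.numpy as jnp

    def padded_attention(q, k, v, seq_multiple=4):
        seq_len = q.shape[1]
        # Round the sequence length up to the next multiple of seq_multiple.
        target = -(-seq_len // seq_multiple) * seq_multiple
        pad = target - seq_len
        _pad = lambda x: jnp.pad(x, ((0, 0), (0, pad), (0, 0), (0, 0)))
        q, k, v = _pad(q), _pad(k), _pad(v)
        # Mask out padded rows/columns so padding never receives or
        # contributes attention weight; broadcast to (1, 1, T, S).
        mask = jnp.ones((target, target), dtype=jnp.bool_)
        mask = mask.at[seq_len:, :].set(False)
        mask = mask.at[:, seq_len:].set(False)
        out = jax.nn.dot_product_attention(q, k, v, mask=mask[None, None, :, :])
        # Drop the padded positions to restore the original shape.
        return out[:, :seq_len]

The recorded version additionally fuses a caller-supplied mask and bias into the padded mask before the call, but the shape bookkeeping is the same.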
+442,2008618,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",660,0,"",shellscript,selection_command +443,2008715,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",679,0,"",shellscript,selection_command +444,2008998,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",695,0,"",shellscript,selection_command +445,2009478,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",716,0,"",shellscript,selection_command +446,2009523,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",741,0,"",shellscript,selection_command +447,2009558,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",763,0,"",shellscript,selection_command +448,2009603,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",797,0,"",shellscript,selection_command +449,2009712,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",821,0,"",shellscript,selection_command +450,2009953,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",850,0,"",shellscript,selection_command +451,2010265,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",873,0,"\n --dyna_ffn_dim=4096 \",shellscript,content +452,2010272,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",878,0,"",shellscript,selection_command +453,2011843,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",878,0,"#",shellscript,content +454,2011844,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",879,0,"",shellscript,selection_keyboard +455,2011925,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",879,0," ",shellscript,content +456,2011926,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",880,0,"",shellscript,selection_keyboard +457,2012468,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",879,0,"",shellscript,selection_command +458,2016166,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +459,2017004,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
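Editor's note: the restore failure recorded below follows the Orbax pattern already visible in the train_dynamics.py record above: an abstract pytree is built from the live TrainState and the CheckpointManager restores into it, so the model configured at sampling time must match the saved structure exactly. The session shows the larger-model flags (--dyna_dim=1024, --dyna_num_blocks=16, --dyna_num_heads=16, --dyna_ffn_dim=4096) being commented out in sampling_dev.sh, which appears to leave sample.py at its smaller defaults and triggers the structure-mismatch ValueError. A condensed sketch of the restore call, mirroring the logged code (function name restore_model_state is an assumption):

    # Hedged sketch of the Orbax restore pattern from the recorded
    # train_dynamics.py; fails with ValueError when the abstract tree
    # (current model config) differs from the on-disk checkpoint tree.
    import jax
    import orbax.checkpoint as ocp

    def restore_model_state(checkpoint_manager, train_state):
        # Shape/dtype skeleton of the live state; no data is allocated.
        abstract_state = jax.tree_util.tree_map(
            ocp.utils.to_shape_dtype_struct, train_state
        )
        restored = checkpoint_manager.restore(
            checkpoint_manager.latest_step(),
            args=ocp.args.Composite(
                model_state=ocp.args.StandardRestore(abstract_state)
            ),
        )
        return restored["model_state"]

In the Diff dump that follows, lhs entries (e.g. Dense_1 kernels of shape (2048, 512)) are what the current config expects, while rhs entries (extra STBlock_6 through STBlock_11 and so on) are what the checkpoint actually contains, confirming the config/checkpoint mismatch.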
+460,2017164,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +461,2017284,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +462,2020322,"TERMINAL",0,0,"2025-07-31 13:03:42.968965: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +463,2025033,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab +464,2033326,"TERMINAL",0,0,"2025-07-31 13:03:55.867450: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +465,2046503,"TERMINAL",0,0,"2025-07-31 13:04:09.025504: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +466,2051241,"TERMINAL",0,0,"2025-07-31 13:04:13.766617: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +467,2054284,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 119000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/119000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/100000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/117000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 118000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/118000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/020000/metrics/metrics not found.\r\n",,terminal_output +468,2054634,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 124, in \r\n restored = checkpoint_manager.restore(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\r\n restored = self._checkpointer.restore(restore_directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\r\n return super().restore(directory, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\r\n restored = self._restore(directory, args=ckpt_args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\r\n return self._handler.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\r\n restored[item_name] = handler.restore(\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/standard_checkpoint_handler.py"", line 246, in restore\r\n return self._impl.restore(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\r\n return self._handler_impl.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\r\n raise ValueError(\r\nValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'params': {'params': {'dynamics': {'dynamics': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_5': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_10': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_11': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 
'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_8': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_9': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 
64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}, 'lam': {'encoder': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 
64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}, 'tokenizer': {'decoder': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 
'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 
64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {…key/out/query/value metadata as in MultiHeadAttention_0…}}), 'STBlock_6': Diff(lhs=None, rhs={…per-block checkpoint metadata, all ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False): Dense_0 bias (64,), kernel (64, 512); LayerNorm_0–LayerNorm_2 bias/scale (64,); MultiHeadAttention_0 and MultiHeadAttention_1 each with key/query/value bias (1, 64), kernel (64, 8, 64) and out bias (64,), kernel (1, 64, 512)…}), 'STBlock_7': Diff(lhs=None, rhs={…})}, 'encoder': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1'–'STBlock_3': {'Dense_1': Diff(lhs={…as STBlock_0…}, rhs=None)}, 'STBlock_4'–'STBlock_7': Diff(lhs=None, rhs={…})}}}}, 'opt_state': [{'mu': {'params': {'dynamics': {'dynamics': {'STBlock_0'–'STBlock_5': {'Dense_1': Diff(lhs={…bias (512,), kernel (2048, 512), float32, SingleDeviceSharding(CudaDevice(id=0))…}, rhs=None)}, 'STBlock_6'–'STBlock_11': Diff(lhs=None, rhs={…})}}, 'lam': {'encoder': {'STBlock_0'–'STBlock_3': {'Dense_1': Diff(lhs={…}, rhs=None)}, 'STBlock_4'–'STBlock_7': Diff(lhs=None, rhs={…})}}, 'tokenizer': {'decoder': {'STBlock_0'–'STBlock_3': {'Dense_1': Diff(lhs={…}, rhs=None)}, 'STBlock_4'–'STBlock_7': Diff(lhs=None, rhs={…})}, 'encoder",,terminal_output
+469,2054733,"TERMINAL",0,0,"': {'STBlock_0'–'STBlock_3': {'Dense_1': Diff(lhs={…}, rhs=None)}, 'STBlock_4'–'STBlock_7': Diff(lhs=None, rhs={…})}}}}, 'nu': {'params': {'dynamics': {'dynamics': {'STBlock_0'–'STBlock_5': {'Dense_1': Diff(lhs={…}, rhs=None)}, 'STBlock_10': Diff(lhs=None, rhs={…Dense_0, LayerNorm_0–LayerNorm_2, MultiHeadAttention_0 as above…, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)),
'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_11': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', 
skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_8': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_9': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}, 'lam': {'encoder': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, 
sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 
64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}, 'tokenizer': {'decoder': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', 
skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': 
{'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}, 'encoder': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(2048, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 
'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}}}}, None, None]}\r\n",,terminal_output +470,2055539,"TERMINAL",0,0,"srun: error: hkn0602: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +471,2073097,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +472,2331344,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab +473,2376404,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",792,0,"",shellscript,selection_mouse +474,2376405,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",791,0,"",shellscript,selection_command +475,2384204,"sample.py",0,0,"",python,tab 
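The terminal_output row ending here is Orbax reporting a failed checkpoint restore as a pytree diff. Entries of the form `Diff(lhs=ShapeDtypeStruct(...), rhs=None)` are parameters the in-memory restore target expects (here `Dense_1` kernels of shape `(2048, 512)` on a CUDA device) that have no counterpart on disk, while `Diff(lhs=None, rhs=ValueMetadataEntry(...))` entries are arrays stored in the checkpoint (`Dense_0` kernels of shape `(64, 512)` plus additional `STBlock`s) that the target does not declare. The srun task then exits with code 1, and the session relaunches `sampling_dev.sh` against the `dynamics-masked_lim_0.0_3365838` checkpoint before opening `sample.py`. Below is a minimal sketch of how such a diff arises, assuming a Flax + Orbax setup like the one the dump implies; the module, dimensions, and shapes are illustrative, not the Jafar sources.

```python
# Minimal sketch, assuming Flax + Orbax: if the abstract target handed to
# Orbax is built from a model configured with a different ffn_dim than the
# one the checkpoint was written with, restore fails and prints a
# Diff(lhs=..., rhs=...) tree like the dump recorded above.
import jax
import jax.numpy as jnp
import flax.linen as nn
import orbax.checkpoint as ocp

class STBlock(nn.Module):  # illustrative stand-in, not Jafar's STBlock
    dim: int = 64
    ffn_dim: int = 2048    # the checkpoint on disk was written with 512

    @nn.compact
    def __call__(self, x):
        x = nn.Dense(self.ffn_dim)(x)  # Dense_0 kernel: (dim, ffn_dim)
        return nn.Dense(self.dim)(x)   # Dense_1 kernel: (ffn_dim, dim)

params = STBlock().init(jax.random.PRNGKey(0), jnp.ones((1, 64)))
abstract = jax.eval_shape(lambda: params)  # ShapeDtypeStruct tree: the lhs

ckptr = ocp.StandardCheckpointer()
# The shapes in `abstract` disagree with the on-disk metadata (the rhs),
# so this raises with a tree diff instead of restoring:
state = ckptr.restore(
    "/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/"
    "checkpoints_alfred/dynamics-masked_lim_0.0_3365838",
    args=ocp.args.StandardRestore(abstract),
)
```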
+476,2387868,"sample.py",843,0,"",python,selection_mouse +477,2388403,"sample.py",851,0,"",python,selection_mouse +478,2388404,"sample.py",850,0,"",python,selection_command +479,2389278,"sample.py",849,0,"",python,selection_mouse +480,2396445,"sample.py",847,4,"2048",python,selection_mouse +481,2398166,"sample.py",882,0,"",python,selection_mouse +482,2398174,"sample.py",881,0,"",python,selection_command +483,2398989,"sample.py",851,0,"",python,selection_mouse +484,2398990,"sample.py",850,0,"",python,selection_command +485,2400113,"sample.py",847,0,"",python,selection_mouse +486,2401302,"sample.py",847,0,"#",python,content +487,2401306,"sample.py",848,0,"",python,selection_keyboard +488,2401415,"sample.py",848,0," ",python,content +489,2401416,"sample.py",849,0,"",python,selection_keyboard +490,2401625,"sample.py",848,0,"",python,selection_command +491,2401822,"sample.py",847,0,"",python,selection_command +492,2402565,"sample.py",847,1,"",python,content +493,2402687,"sample.py",847,1,"",python,content +494,2403222,"sample.py",846,0,"",python,selection_command +495,2403777,"sample.py",837,0,"",python,selection_mouse +496,2403896,"sample.py",822,17,"tokenizer_ffn_dim",python,selection_mouse +497,2410957,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab +498,2412135,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",793,0,"",shellscript,selection_command +499,2412937,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",759,0,"",shellscript,selection_command +500,2413373,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",792,0,"\n ",shellscript,content +501,2414267,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",797,0,"-",shellscript,content +502,2414270,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",798,0,"",shellscript,selection_keyboard +503,2414429,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",798,0,"-",shellscript,content +504,2414430,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",799,0,"",shellscript,selection_keyboard +505,2414644,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",799,0,"t",shellscript,content +506,2414645,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",800,0,"",shellscript,selection_keyboard +507,2414722,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",800,0,"o",shellscript,content +508,2414723,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",801,0,"",shellscript,selection_keyboard +509,2414808,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",801,0,"k",shellscript,content +510,2414809,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",802,0,"",shellscript,selection_keyboard +511,2415156,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",801,1,"",shellscript,content +512,2415296,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",800,1,"",shellscript,content +513,2415394,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",799,1,"",shellscript,content +514,2415557,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",799,0,"tokenizer_ffn_dim",shellscript,content +515,2416171,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",816,0,"=",shellscript,content +516,2416172,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",817,0,"",shellscript,selection_keyboard +517,2417115,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",817,0,"5",shellscript,content +518,2417116,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",818,0,"",shellscript,selection_keyboard +519,2417200,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",818,0,"1",shellscript,content 
+520,2417200,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",819,0,"",shellscript,selection_keyboard +521,2417222,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",819,0,"2",shellscript,content +522,2417223,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",820,0,"",shellscript,selection_keyboard +523,2417867,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",820,0," ",shellscript,content +524,2417868,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",821,0,"",shellscript,selection_keyboard +525,2418149,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",821,0,"\",shellscript,content +526,2418150,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",822,0,"",shellscript,selection_keyboard +527,2418315,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",822,0,"\n ",shellscript,content +528,2419466,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",758,0,"",shellscript,selection_mouse +529,2420261,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",792,0,"",shellscript,selection_mouse +530,2421113,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",823,4,"",shellscript,content +531,2421114,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",792,0," ",shellscript,content +532,2421114,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",793,0,"",shellscript,selection_keyboard +533,2421260,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",793,0,"\",shellscript,content +534,2421261,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",794,0,"",shellscript,selection_keyboard +535,2423028,"sample.py",0,0,"",python,tab +536,2425274,"sample.py",1059,0,"",python,selection_mouse +537,2425432,"sample.py",1056,11,"lam_ffn_dim",python,selection_mouse +538,2427556,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab +539,2429226,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",824,0,"",shellscript,selection_command +540,2429493,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",824,0,"\n ",shellscript,content +541,2430423,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",829,0,"-",shellscript,content +542,2430424,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",830,0,"",shellscript,selection_keyboard +543,2430572,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",830,0,"-",shellscript,content +544,2430573,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",831,0,"",shellscript,selection_keyboard +545,2430700,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",831,0,"lam_ffn_dim",shellscript,content +546,2431814,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",842,0,"=",shellscript,content +547,2431814,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",843,0,"",shellscript,selection_keyboard +548,2432362,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",843,0,"5",shellscript,content +549,2432363,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",844,0,"",shellscript,selection_keyboard +550,2432441,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",844,0,"1",shellscript,content +551,2432442,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",845,0,"",shellscript,selection_keyboard +552,2432479,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",845,0,"2",shellscript,content +553,2432480,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",846,0,"",shellscript,selection_keyboard +554,2432951,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",846,0," ",shellscript,content +555,2432951,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",847,0,"",shellscript,selection_keyboard +556,2433438,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",847,0,"\",shellscript,content 
+557,2433438,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",848,0,"",shellscript,selection_keyboard +558,2433569,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",848,0,"\n ",shellscript,content +559,2434909,"sample.py",0,0,"",python,tab +560,2436826,"sample.py",1289,0,"",python,selection_mouse +561,2436987,"sample.py",1282,12,"dyna_ffn_dim",python,selection_mouse +562,2439186,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab +563,2440654,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",853,0,"-",shellscript,content +564,2440655,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",854,0,"",shellscript,selection_keyboard +565,2440795,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",854,0,"-",shellscript,content +566,2440796,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",855,0,"",shellscript,selection_keyboard +567,2441112,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",855,0,"dyna_ffn_dim",shellscript,content +568,2444076,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",867,0,"=",shellscript,content +569,2444077,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",868,0,"",shellscript,selection_keyboard +570,2444655,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",868,0,"5",shellscript,content +571,2444656,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",869,0,"",shellscript,selection_keyboard +572,2444742,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",869,0,"1",shellscript,content +573,2444742,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",870,0,"",shellscript,selection_keyboard +574,2444836,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",870,0,"2",shellscript,content +575,2444837,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",871,0,"",shellscript,selection_keyboard +576,2447671,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir \\r\n --tokenizer_ffn_dim=512 \\r\n --lam_ffn_dim=512 \\r\n --dyna_ffn_dim=512\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
+577,2447759,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +578,2447874,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +579,2450916,"TERMINAL",0,0,"2025-07-31 13:10:53.569631: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +580,2462148,"TERMINAL",0,0,"2025-07-31 13:11:04.774423: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +581,2473412,"TERMINAL",0,0,"2025-07-31 13:11:15.886496: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +582,2476779,"TERMINAL",0,0,"2025-07-31 13:11:19.411160: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +583,2479964,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 119000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/119000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/100000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 118000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/118000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/117000/metrics/metrics not found.\r\n",,terminal_output +584,2480169,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 124, in \r\n restored = checkpoint_manager.restore(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\r\n restored = self._checkpointer.restore(restore_directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\r\n return super().restore(directory, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\r\n restored = self._restore(directory, args=ckpt_args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\r\n return self._handler.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\r\n restored[item_name] = handler.restore(\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/standard_checkpoint_handler.py"", line 246, in restore\r\n return self._impl.restore(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\r\n return self._handler_impl.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\r\n raise ValueError(\r\nValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'params': {'params': {'dynamics': {'dynamics': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_5': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_10': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_11': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 
'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_8': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_9': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 
64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}, 'lam': {'encoder': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 
'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}, 'tokenizer': {'decoder': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 
'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
[Orbax restore diff, condensed: the remainder of this dump repeats one pattern for 'params' and for both the 'mu' and 'nu' slots of 'opt_state', across 'dynamics' -> 'dynamics' (STBlock_0 through STBlock_11), 'lam' -> 'encoder' (STBlock_0 through STBlock_7), and 'tokenizer' -> 'decoder' / 'encoder' (STBlock_0 through STBlock_7):

- Diff(lhs=..., rhs=None): in the low-numbered STBlocks (0-3 for the encoders/decoders, 0-5 for dynamics) the restore target carries a 'Dense_1' with bias ShapeDtypeStruct(shape=(512,), dtype=float32) and kernel ShapeDtypeStruct(shape=(512, 512), dtype=float32), all on SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device); the checkpoint has no matching entry.
- Diff(lhs=None, rhs=...): the high-numbered STBlocks (4-7, resp. 6-11) exist only in the checkpoint, as ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False) trees containing Dense_0 with bias (64,) and kernel (64, 512); LayerNorm_0/1/2 with bias and scale (64,); and MultiHeadAttention_0/1 whose key/query/value have bias (1, 64) and kernel (64, 8, 64), and whose out has bias (64,) and kernel (1, 64, 512).

In short, the checkpoint was written under a different STBlock configuration (more blocks, model dim 64, 8 heads of 64, FFN width 512) than the train state being restored into, and the optimizer moments mismatch in exactly the same way as the parameters.]
write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', 
skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_8': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_9': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}, 'lam': {'encoder': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, 
sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', 
skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', 
skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}, 'tokenizer': {'decoder': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)),",,terminal_output +585,2480264,"TERMINAL",0,0," 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', 
skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}, 'encoder': {'STBlock_0': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_1': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_2': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_3': {'Dense_1': Diff(lhs={'bias': ShapeDtypeStruct(shape=(512,), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device)), 'kernel': ShapeDtypeStruct(shape=(512, 512), dtype=float32, sharding=SingleDeviceSharding(device=CudaDevice(id=0), memory_kind=device))}, rhs=None)}, 'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', 
skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', 
skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}}}}, None, None]}\r\n",,terminal_output +586,2481200,"TERMINAL",0,0,"srun: error: hkn0602: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +587,2509070,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab +588,2511009,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",868,3,"512",shellscript,selection_mouse +589,2511024,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",863,8,"_dim=512",shellscript,selection_mouse +590,2511043,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",834,37,"_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +591,2511060,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",831,40,"lam_ffn_dim=512 \\n 
--dyna_ffn_dim=512",shellscript,selection_mouse +592,2511076,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",829,42,"--lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +593,2511104,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",827,44," --lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +594,2511112,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",826,45," --lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +595,2511146,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",825,46," --lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +596,2511377,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",795,76," --tokenizer_ffn_dim=512 \\n --lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +597,2547205,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",871,0,"",shellscript,selection_mouse +598,2547531,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",868,3,"512",shellscript,selection_mouse +599,2547715,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",842,29,"=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +600,2547731,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",831,40,"lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +601,2547767,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",801,70,"tokenizer_ffn_dim=512 \\n --lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +602,2547806,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",800,71,"-tokenizer_ffn_dim=512 \\n --lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +603,2547842,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",799,72,"--tokenizer_ffn_dim=512 \\n --lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +604,2547878,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",798,73," --tokenizer_ffn_dim=512 \\n --lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +605,2547921,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",797,74," --tokenizer_ffn_dim=512 \\n --lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +606,2547954,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",796,75," --tokenizer_ffn_dim=512 \\n --lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +607,2547992,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",795,76," --tokenizer_ffn_dim=512 \\n --lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +608,2549456,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",871,0,"",shellscript,selection_mouse +609,2549638,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",868,3,"512",shellscript,selection_mouse +610,2549825,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",855,16,"dyna_ffn_dim=512",shellscript,selection_mouse +611,2549858,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",831,40,"lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +612,2549934,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",830,41,"-lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +613,2549940,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",829,42,"--lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +614,2549965,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",828,43," --lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +615,2550001,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",827,44," --lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse 
+616,2550037,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",826,45," --lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +617,2550114,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",825,46," --lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +618,2550996,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",796,0,"",shellscript,selection_mouse +619,2551144,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",795,4," ",shellscript,selection_mouse +620,2551307,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",795,5," -",shellscript,selection_mouse +621,2551324,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",795,47," --tokenizer_ffn_dim=512 \\n --lam_ffn_dim",shellscript,selection_mouse +622,2551404,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",795,51," --tokenizer_ffn_dim=512 \\n --lam_ffn_dim=512",shellscript,selection_mouse +623,2551411,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",795,76," --tokenizer_ffn_dim=512 \\n --lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,selection_mouse +624,2551976,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",795,76,"",shellscript,content +625,2552535,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",794,1,"",shellscript,content +626,2553150,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",794,1,"",shellscript,content +627,2553839,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",793,1,"",shellscript,content +628,2553992,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",792,1,"",shellscript,content +629,2595614,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",1036,0,"",shellscript,selection_mouse +630,2597464,"TERMINAL",0,0,"[?25lgi[?25h",,terminal_output +631,2597479,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +632,2597585,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +633,2597675,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +634,2597801,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +635,2597889,"TERMINAL",0,0,"o",,terminal_output +636,2598006,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +637,2598925,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +638,2599509,"TERMINAL",0,0,"[?1h=\rcommit 9e110d9c377549bf5294d694b61f13d495a83c67 (HEAD -> before-nnx)\r\nAuthor: Mihir Mahajan \r\nDate: Thu Jul 31 10:54:55 2025 +0200\r\n\r\n reverted utils.py\r\n\r\ncommit a255568811dfa19e6ab33f52e1d4e14a83fcbb86\r\nAuthor: Mihir Mahajan \r\nDate: Wed Jul 30 13:06:55 2025 +0200\r\n\r\n fix pe\r\n\r\ncommit 42a76552fbb4e02b2e0c739ca2d0b4903fc7a05b\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Sun Jul 27 12:33:36 2025 +0200\r\n\r\n fix: readme typo (#115)\r\n\r\ncommit d1dfba5e33a35381435fa55aa62834aa28b3ed49\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Sun Jul 27 09:23:18 2025 +0200\r\n\r\n fix: typo (#114)\r\n\r\ncommit 110181b8d466c63bdb89a1644d3a9b6b58753faf\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Sat Jul 26 16:55:22 2025 +0200\r\n\r\n fix: typo (#112)\r\n\r\ncommit 4519a2ed772c65b29dabae72e74697b4e3a7e46f\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Sat Jul 26 16:52:49 2025 +0200\r\n\r\n feat: updated README (#111)\r\n\r\ncommit 341b66787a24b5e40d9177f172769e404673eb26\r\nMerge: b11de72 b4864e6\r\n:",,terminal_output +639,2601022,"TERMINAL",0,0,"\r/",,terminal_output +640,2602223,"TERMINAL",0,0,"[?25lcc6688ee0033ee224455881199007700bbcc77ccdd007711cc33ee9944ccbbdd993388aa11ff3333[?25h",,terminal_output +641,2603047,"TERMINAL",0,0,"\rcommit 
9e110d9c377549bf5294d694b61f13d495a83c67 (HEAD -> before-nnx)\r\nAuthor: Mihir Mahajan \r\nDate: Thu Jul 31 10:54:55 2025 +0200\r\n\r\n reverted utils.py\r\n\r\ncommit a255568811dfa19e6ab33f52e1d4e14a83fcbb86\r\nAuthor: Mihir Mahajan \r\nDate: Wed Jul 30 13:06:55 2025 +0200\r\n\r\n fix pe\r\n\r\ncommit 42a76552fbb4e02b2e0c739ca2d0b4903fc7a05b\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Sun Jul 27 12:33:36 2025 +0200\r\n\r\n fix: readme typo (#115)\r\n\r\ncommit d1dfba5e33a35381435fa55aa62834aa28b3ed49\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Sun Jul 27 09:23:18 2025 +0200\r\n\r\n fix: typo (#114)\r\n\r\ncommit 110181b8d466c63bdb89a1644d3a9b6b58753faf\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Sat Jul 26 16:55:22 2025 +0200\r\n\r\n fix: typo (#112)\r\n\r\ncommit 4519a2ed772c65b29dabae72e74697b4e3a7e46f\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Sat Jul 26 16:52:49 2025 +0200\r\n\r\n feat: updated README (#111)\r\n\r\ncommit 341b66787a24b5e40d9177f172769e404673eb26\r\nMerge: b11de72 b4864e6\r\ncommit 9e110d9c377549bf5294d694b61f13d495a83c67 (HEAD -> before-nnx)\r\nAuthor: Mihir Mahajan \r\nDate: Thu Jul 31 10:54:55 2025 +0200\r\n\r\n reverted utils.py\r\n\r\ncommit a255568811dfa19e6ab33f52e1d4e14a83fcbb86\r\nAuthor: Mihir Mahajan \r\nDate: Wed Jul 30 13:06:55 2025 +0200\r\n\r\n fix pe\r\n\r\ncommit 42a76552fbb4e02b2e0c739ca2d0b4903fc7a05b\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Sun Jul 27 12:33:36 2025 +0200\r\n\r\n fix: readme typo (#115)\r\n\r\ncommit d1dfba5e33a35381435fa55aa62834aa28b3ed49\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Sun Jul 27 09:23:18 2025 +0200\r\n\r\n fix: typo (#114)\r\n\r\ncommit 110181b8d466c63bdb89a1644d3a9b6b58753faf\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Sat Jul 26 16:55:22 2025 +0200\r\n\r\n fix: typo (#112)\r\n\r\ncommit 4519a2ed772c65b29dabae72e74697b4e3a7e46f\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Sat Jul 26 16:52:49 2025 +0200\r\n\r\n feat: updated README (#111)\r\n\r\ncommit 341b66787a24b5e40d9177f172769e404673eb26\r\nMerge: b11de72 b4864e6\r\n...skipping...\r\ncommit c68e03e245819070bc7cd071c3e94cbd938a1f33\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Sun Jul 20 16:43:11 2025 +0200\r\n\r\n feat: omit dataloader checkpoint restore in `restore_genie_components` (#96)\r\n \r\n Co-authored-by: maharajamihir \r\n\r\ncommit bff38fa8bc098d8fc67cda3d5a506357b23ba21a (origin/fix-dtype-hint-warning)\r\nAuthor: emergenz \r\nDate: Sun Jul 20 14:58:36 2025 +0200\r\n\r\n fix: infer dtype type annotation (fix warning)\r\n\r\ncommit bf51eba9bf68dcea140c17fbd96eba343fb48293\r\nAuthor: Alfred Nguyen <85162596+avocadoali@users.noreply.github.com>\r\nDate: Sat Jul 19 08:58:38 2025 +0200\r\n\r\n feature: download openai action file (#92)\r\n\r\ncommit 11f14b9a7a32a2886961179a2502487d616f9b85\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Fri Jul 18 12:08:14 2025 +0200\r\n\r\n revert: partial remat (#93)\r\n\r\ncommit cdcd8f6f7d6a6a134b3354027f39c4a45fb50268\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Thu Jul 17 16:40:31 2025 +0200\r\n\r\n feat: partial remat for higher throughput (#91)\r\n\r\ncommit 7ea4f4371689179dcae76bd5f8a30fb11cb29e9c\r\nAuthor: Franz 
Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Thu Jul 17 15:07:25 2025 +0200\r\n\r\n feat: use flash attention (#87)\r\n \r\n:",,terminal_output +642,2613157,"TERMINAL",0,0,"commit c68e03e245819070bc7cd071c3e94cbd938a1f33\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Sun Jul 20 16:43:11 2025 +0200\r\n\r\n feat: omit dataloader checkpoint restore in `restore_genie_components` (#96)\r\n \r\n Co-authored-by: maharajamihir \r\n\r\ncommit bff38fa8bc098d8fc67cda3d5a506357b23ba21a (origin/fix-dtype-hint-warning)\r\nAuthor: emergenz \r\nDate: Sun Jul 20 14:58:36 2025 +0200\r\n\r\n fix: infer dtype type annotation (fix warning)\r\n\r\ncommit bf51eba9bf68dcea140c17fbd96eba343fb48293\r\nAuthor: Alfred Nguyen <85162596+avocadoali@users.noreply.github.com>\r\nDate: Sat Jul 19 08:58:38 2025 +0200\r\n\r\n feature: download openai action file (#92)\r\n\r\ncommit 11f14b9a7a32a2886961179a2502487d616f9b85\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Fri Jul 18 12:08:14 2025 +0200\r\n\r\n revert: partial remat (#93)\r\n\r\ncommit cdcd8f6f7d6a6a134b3354027f39c4a45fb50268\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Thu Jul 17 16:40:31 2025 +0200\r\n\r\n feat: partial remat for higher throughput (#91)\r\n\r\ncommit 7ea4f4371689179dcae76bd5f8a30fb11cb29e9c\r\nAuthor: Franz Srambical <79149449+emergenz@users.noreply.github.com>\r\nDate: Thu Jul 17 15:07:25 2025 +0200\r\n\r\n feat: use flash attention (#87)\r\n \r\n:",,terminal_output +643,2614074,"TERMINAL",0,0,"\r[?1l>\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +644,2614367,"TERMINAL",0,0,"[?25lq[?25h",,terminal_output +645,2615106,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +646,2615611,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +647,2615683,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +648,2615751,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +649,2615821,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +650,2615980,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +651,2616053,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +652,2616189,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +653,2616302,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +654,2616444,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +655,2616550,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +656,2616664,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +657,2616879,"TERMINAL",0,0,"On branch before-nnx\r\nChanges not staged for commit:\r\n (use ""git add ..."" to update what will be committed)\r\n (use ""git restore ..."" to discard changes in working directory)\r\n\tmodified: sample.py\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tdiff.diff\r\n\tdiff.log\r\n\tlogs/\r\n\toverfit_dir.zip\r\n\tread_tf_record.py\r\n\trequirements-franz.txt\r\n\tscripts_cremers/\r\n\tscripts_horeka/\r\n\tslurm-3373409.out\r\n\tslurm-3373410.out\r\n\tslurm-3379613.out\r\n\tslurm-3379615.out\r\n\tslurm-3379616.out\r\n\tslurm/\r\n\tutils/logger_bak.py\r\n\tutils/nn_bak.py\r\n\tutils/visualizer.py\r\n\tweekend-job-requeuer.sh\r\n\tweekend-job-starter.sh\r\n\r\nno changes added to commit (use ""git add"" and/or ""git commit -a"")\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +658,2618100,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +659,2618238,"TERMINAL",0,0,"[?25li[?25h",,terminal_output 
+660,2618347,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +661,2618445,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +662,2619487,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +663,2619610,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +664,2619820,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +665,2619882,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +666,2620166,"TERMINAL",0,0,"[?25lc[?25h[?25lh[?25h",,terminal_output +667,2620439,"TERMINAL",0,0,"\r\n[?2004l\r[?1h=\r add-wandb-name-and-tags\r\n* before-nnx\r\n causal-st-transformer\r\n causal-transformer-dynamics-model\r\n convert-to-jax-array-in-iter\r\n correct-batched-sampling\r\n dev\r\n dont-let-tf-see-gpu\r\n feat/explicit-image-dims\r\n fix-action-padding-lam-future-information-access\r\n fix-sampling\r\n fix-transformer-forwardpass\r\n fix/spatiotemporal-pe-once-in-STTransformer\r\n grad-norm-log-and-clip\r\n grain-dataloader\r\n logging-variants\r\n lr-schedules\r\n main\r\n maskgit-different-maskprob-per-sample\r\n metrics-logging-for-dynamics-model\r\n monkey-patch\r\n new-arch-sampling\r\n preprocess_video\r\n refactor-tmp\r\n revised-dataloader\r\n runner\r\n runner-grain\r\n sample-from-different-topologies\r\n speedup-tfrecord-preprocessing\r\n tmp\r\n\r[?1l>]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +668,2621426,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +669,2621539,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +670,2621611,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +671,2621867,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +672,2621998,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +673,2622506,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +674,2622799,"TERMINAL",0,0,"[?25lc[?25h[?25lo[?25h",,terminal_output +675,2622907,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +676,2623049,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +677,2623163,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +678,2623202,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +679,2623296,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +680,2623434,"TERMINAL",0,0,"[?25l-[?25h",,terminal_output +681,2623555,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +682,2623690,"TERMINAL",0,0,"[?25lm[?25h[?25l [?25h",,terminal_output +683,2624025,"TERMINAL",0,0,"[?25l""[?25h",,terminal_output +684,2626466,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +685,2626645,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +686,2626807,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +687,2626896,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +688,2627353,"TERMINAL",0,0,"[?25lf[?25h",,terminal_output +689,2627598,"TERMINAL",0,0,"[?25ly[?25h",,terminal_output +690,2627671,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +691,2627910,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +692,2628004,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +693,2628120,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +694,2628377,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +695,2628480,"TERMINAL",0,0,"[?25ll[?25h[?25le[?25h",,terminal_output +696,2628622,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +697,2628775,"TERMINAL",0,0,"[?25lp[?25h[?25ly[?25h",,terminal_output +698,2629326,"TERMINAL",0,0,"[?25l""[?25h",,terminal_output +699,2629535,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +700,2629844,"TERMINAL",0,0,"[before-nnx 8ff672b] modify sample.py\r\n 1 file changed, 42 insertions(+), 4 deletions(-)\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output 
+701,2630388,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +702,2630492,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +703,2630560,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +704,2630628,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +705,2630820,"TERMINAL",0,0,"[?25lc[?25h[?25lh[?25h",,terminal_output +706,2630893,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +707,2631059,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +708,2631294,"TERMINAL",0,0,"[?25lk[?25h",,terminal_output +709,2631379,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +710,2631534,"TERMINAL",0,0,"[?25lu[?25h[?25lt[?25h",,terminal_output +711,2631669,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +712,2632470,"TERMINAL",0,0,"c68e03e245819070bc7cd071c3e94cbd938a1f33c68e03e245819070bc7cd071c3e94cbd938a1f33\r\n[?2004l\r",,terminal_output +713,2633157,"TERMINAL",0,0,"Note: switching to 'c68e03e245819070bc7cd071c3e94cbd938a1f33'.\r\n\r\nYou are in 'detached HEAD' state. You can look around, make experimental\r\nchanges and commit them, and you can discard any commits you make in this\r\nstate without impacting any branches by switching back to a branch.\r\n\r\nIf you want to create a new branch to retain commits you create, you may\r\ndo so (now or later) by using -c with the switch command. Example:\r\n\r\n git switch -c \r\n\r\nOr undo this operation with:\r\n\r\n git switch -\r\n\r\nTurn off this advice by setting config variable advice.detachedHead to false\r\n\r\nHEAD is now at c68e03e feat: omit dataloader checkpoint restore in `restore_genie_components` (#96)\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +714,2634279,"TERMINAL",0,0,"git checkout c68e03e245819070bc7cd071c3e94cbd938a1f33\rommit -am ""modify sample.py""",,terminal_output +715,2634856,"TERMINAL",0,0,"branch",,terminal_output +716,2635287,"TERMINAL",0,0,"status",,terminal_output +717,2635682,"TERMINAL",0,0,"log",,terminal_output +718,2636072,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +719,2638866,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
+720,2638952,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +721,2639070,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +722,2640119,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +723,2642144,"TERMINAL",0,0,"2025-07-31 13:14:04.792717: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +724,2644143,"sample.py",0,0,"",python,tab +725,2644385,"sample.py",151,5803,"import numpy as np\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\nrng = jax.random.PRNGKey(args.seed)\n\n# --- Load Genie checkpoint ---\ngenie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\nckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n\n\ndef _sampling_wrapper(module, batch):\n return module.sample(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n sampling_fn = jax.jit(nn.apply(_sampling_wrapper, genie)) \n rng, _rng = jax.random.split(rng)\n batch = dict(videos=vid, latent_actions=action_batch, rng=_rng)\n generated_vid = sampling_fn(\n params,\n batch\n )\n return generated_vid\n\n# --- Get video + latent actions ---\narray_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n]\ndataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n 
num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n)\nvideo_batch = next(iter(dataloader))\n# Get latent actions for all videos in the batch\nbatch = dict(videos=video_batch)\naction_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\naction_batch = action_batch.reshape(video_batch.shape[0], args.seq_len - 1, 1)\n\n# --- Sample + evaluate video ---\nvid = _autoreg_sample(rng, video_batch, action_batch)\ngt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\nrecon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\nssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\nprint(f""SSIM: {ssim}"")\n\n# --- Construct video ---\ntrue_videos = (video_batch * 255).astype(np.uint8)\npred_videos = (vid * 255).astype(np.uint8)\nvideo_comparison = np.zeros((2, *vid.shape), dtype=np.uint8)\nvideo_comparison[0] = true_videos[:, :args.seq_len]\nvideo_comparison[1] = pred_videos\nframes = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n# --- Save video --- \n",python,content +726,2652300,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab +727,2653686,"TERMINAL",0,0,"2025-07-31 13:14:16.333531: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +728,2663730,"utils/nn.py",0,0,"",python,tab +729,2663907,"utils/nn.py",37,8117,"from functools import partial\n\nfrom flax import linen as nn\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\nclass PositionalEncoding(nn.Module):\n """"""https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/JAX/tutorial6/Transformers_and_MHAttention.html""""""\n\n d_model: int # Hidden dimensionality of the input.\n max_len: int = 5000 # Maximum length of a sequence to expect.\n\n def setup(self):\n # Create matrix of [SeqLen, HiddenDim] representing the positional encoding for max_len inputs\n self.pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n self.pe = self.pe.at[:, 0::2].set(jnp.sin(position * div_term))\n self.pe = self.pe.at[:, 1::2].set(jnp.cos(position * div_term))\n\n def __call__(self, x):\n x = x + self.pe[: x.shape[2]]\n return x\n\n\nclass STBlock(nn.Module):\n dim: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n @nn.remat\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(self.use_flash_attention, is_causal=False),\n )(z)\n x = x + z\n\n # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n causal_mask = jnp.tri(z.shape[-2])\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n 
dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(self.use_flash_attention, is_causal=True),\n # FIXME (f.srambical): check whether we should still pass the mask if we set is_causal=True\n )(z, mask=causal_mask)\n x = x + z\n x = x.swapaxes(1, 2)\n\n # --- Feedforward ---\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n # FIXME (f.srambical): Here, the attention hidden dimension is the same as the FFN's. Usually, FFN hidden dimension is 4x model_dim\n z = nn.Dense(\n self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.gelu(z)\n x = x + z\n\n return x\n\n\nclass STTransformer(nn.Module):\n model_dim: int\n out_dim: int\n num_blocks: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n x = nn.Sequential(\n [\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.Dense(self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n ]\n )(x)\n for _ in range(self.num_blocks):\n x = STBlock(\n dim=self.model_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )(x)\n x = nn.Dense(\n self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n return x # (B, T, E)\n\n\ndef normalize(x):\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nn.Module):\n latent_dim: int\n num_latents: int\n dropout: float\n\n def setup(self):\n self.codebook = normalize(\n self.param(\n ""codebook"",\n nn.initializers.lecun_uniform(),\n (self.num_latents, self.latent_dim),\n )\n )\n self.drop = nn.Dropout(self.dropout, deterministic=False)\n\n def __call__(\n self, x: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x = normalize(x)\n codebook = normalize(self.codebook)\n distance = -jnp.matmul(x, codebook.T)\n if training:\n dropout_key = self.make_rng(""dropout"")\n distance = self.drop(distance, rng=dropout_key)\n\n # --- Get indices and embeddings ---\n indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]\n\n # --- Straight through estimator ---\n z_q = x + jax.lax.stop_gradient(z - x)\n return z_q, z, x, indices\n\n def get_codes(self, indices: jax.Array):\n return self.codebook[indices]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool):\n """"""\n Create an attention function that uses flash attention if enabled.\n\n Flax MultiHeadAttention provides tensors with shape (batch..., length, num_heads, head_dim)\n jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim).\n\n We need to reshape to ensure compatibility. cuDNN's flash attention additionally\n requires a sequence length that is a multiple of 4. We pad the sequence length to the nearest\n multiple of 4 and mask accordingly.\n """"""\n \n def attention_fn(query, key, value, bias=None, mask=None, **kwargs):\n implementation = 'cudnn' if use_flash_attention else None\n\n def _rearrange(x):\n return einops.rearrange(x, '... l h d -> (...) 
l h d')\n def _pad(x):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n def _fuse_masks(mask: jax.Array, attention_mask: jax.Array) -> jax.Array:\n mask_bool = mask.astype(jnp.bool_)\n expanded_mask = jnp.pad(mask_bool, ((0, pad_size), (0, pad_size)), constant_values=False)\n return jnp.logical_and(attention_mask, expanded_mask)\n \n original_shape = query.shape\n original_seq_len = query.shape[-3]\n \n # Pad to nearest multiple of 4\n target_seq_len = ((original_seq_len + 3) // 4) * 4\n pad_size = target_seq_len - original_seq_len\n\n query_4d = _pad(_rearrange(query))\n key_4d = _pad(_rearrange(key))\n value_4d = _pad(_rearrange(value))\n \n attention_mask = jnp.ones((target_seq_len, target_seq_len), dtype=jnp.bool_)\n attention_mask = attention_mask.at[original_seq_len:, :].set(False)\n attention_mask = attention_mask.at[:, original_seq_len:].set(False)\n\n mask_4d = _fuse_masks(mask, attention_mask) if mask is not None else attention_mask\n mask_4d = mask_4d[jnp.newaxis, jnp.newaxis, :, :] # (1, 1, seq_len, seq_len)\n \n bias_4d = _pad(_rearrange(bias)) if bias is not None else None\n \n output_4d = jax.nn.dot_product_attention(\n query=query_4d,\n key=key_4d,\n value=value_4d,\n bias=bias_4d,\n mask=mask_4d,\n implementation=implementation,\n is_causal=is_causal,\n **kwargs\n )\n return output_4d[..., :original_seq_len, :, :].reshape(original_shape)\n \n return attention_fn\n\n",python,content +730,2665357,"TERMINAL",0,0,"2025-07-31 13:14:28.008346: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +731,2669714,"TERMINAL",0,0,"2025-07-31 13:14:32.361292: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +732,2672477,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 95, in \r\n ckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\r\n restored = self._restore(directory, args=ckpt_args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\r\n return self._handler.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 803, in restore\r\n structure, use_zarr3_metadata = self._get_internal_metadata(directory)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 959, in _get_internal_metadata\r\n raise FileNotFoundError(\r\nFileNotFoundError: No structure could be identified for the checkpoint at /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838.\r\n",,terminal_output +733,2673275,"TERMINAL",0,0,"srun: error: hkn0602: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +734,2716664,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +735,2716729,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +736,2716942,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +737,2717171,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +738,2717384,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +739,2717473,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +740,2717721,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +741,2717794,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +742,2717894,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +743,2717962,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +744,2718192,"TERMINAL",0,0,"\r\n[?2004l\r[?1h=\r* (HEAD detached at c68e03e)\r\n add-wandb-name-and-tags\r\n before-nnx\r\n causal-st-transformer\r\n causal-transformer-dynamics-model\r\n convert-to-jax-array-in-iter\r\n correct-batched-sampling\r\n dev\r\n dont-let-tf-see-gpu\r\n feat/explicit-image-dims\r\n fix-action-padding-lam-future-information-access\r\n fix-sampling\r\n fix-transformer-forwardpass\r\n fix/spatiotemporal-pe-once-in-STTransformer\r\n grad-norm-log-and-clip\r\n grain-dataloader\r\n logging-variants\r\n lr-schedules\r\n main\r\n maskgit-different-maskprob-per-sample\r\n metrics-logging-for-dynamics-model\r\n monkey-patch\r\n new-arch-sampling\r\n preprocess_video\r\n refactor-tmp\r\n revised-dataloader\r\n runner\r\n runner-grain\r\n sample-from-different-topologies\r\n speedup-tfrecord-preprocessing\r\n tmp\r\n\r[?1l>]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +745,2725756,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +746,2725833,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +747,2725964,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output 
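The FileNotFoundError above ("No structure could be identified") indicates the directory is a CheckpointManager tree with per-step subdirectories (020000/, 100000/, ...), not a bare PyTree, so the session switches back to the before-nnx branch, whose sample.py restores through `ocp.CheckpointManager`. Condensed from the recorded file (the directory is a placeholder; the abstract train state is elided into comments):

```python
# CheckpointManager-based restore used on the before-nnx branch (condensed
# from the recorded sample.py).
import orbax.checkpoint as ocp

checkpoint_dir = "/path/to/checkpoint"  # placeholder
handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()
handler_registry.add(
    "model_state", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler
)
manager = ocp.CheckpointManager(
    checkpoint_dir,
    options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),
    handler_registry=handler_registry,
)
# abstract_train_state = jax.tree_util.tree_map(
#     ocp.utils.to_shape_dtype_struct, dummy_train_state)
# restored = manager.restore(
#     manager.latest_step(),
#     args=ocp.args.Composite(
#         model_state=ocp.args.StandardRestore(abstract_train_state)))
```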
+748,2726032,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +749,2726682,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +750,2727081,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +751,2727462,"TERMINAL",0,0,"[?25lw[?25h",,terminal_output +752,2727551,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +753,2727675,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +754,2727869,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +755,2727972,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +756,2728085,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +757,2728520,"TERMINAL",0,0,"[?25l-[?25h",,terminal_output +758,2728777,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +759,2728924,"TERMINAL",0,0,"Previous HEAD position was c68e03e feat: omit dataloader checkpoint restore in `restore_genie_components` (#96)\r\nSwitched to branch 'before-nnx'\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +760,2738531,"sample.py",0,0,"",python,tab +761,2738707,"sample.py",151,4471,"from flax.training.train_state import TrainState\nimport numpy as np\nimport orbax.checkpoint as ocp\nimport optax\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\nrng = jax.random.PRNGKey(args.seed)\n\n# --- Load Genie checkpoint ---\ngenie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, 
dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params\n\n\n\ndef _sampling_wrapper(module, batch):\n return module.sample(\n batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax\n )\n\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n sampling_fn = jax.jit(nn.apply(_sampling_wrapper, genie))\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=vid, latent_actions=action_batch, rng=_rng)\n generated_vid = sampling_fn(params, batch)\n return generated_vid\n\n\n# --- Get video + latent actions ---\narray_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n]\ndataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n num_workers=0,\n prefetch_buffer_size=1,\n seed=args.seed,\n)\nvideo_batch = next(iter(dataloader))\nvideo_batch = jnp.array(video_batch)\nprint(video_batch.dtype)\nvideo_batch = video_batch.astype(args.dtype) # / 255.0\nprint(video_batch.dtype)\nvideo_batch = video_batch / 255.0\nprint(video_batch.dtype)\n# Get latent actions for all videos in the batch\nbatch = dict(videos=video_batch)\naction_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\naction_batch = action_batch.reshape(video_batch.shape[0], args.seq_len - 1, 1)\n\n# --- Sample + evaluate video ---\nprint(""Autoregressively sampling..."")\nvid = _autoreg_sample(rng, video_batch, action_batch)\nprint(""Sampling done. 
Calculating ssim and saving video."")\ngt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\nrecon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\nssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\nprint(f""SSIM: {ssim}"")\n\n# --- Construct video ---\ntrue_videos = (video_batch * 255).astype(np.uint8)\npred_videos = (vid * 255).astype(np.uint8)\nvideo_comparison = np.zeros((2, *vid.shape), dtype=np.uint8)\nvideo_comparison[0] = true_videos[:, : args.seq_len]\nvideo_comparison[1] = pred_videos\nframes = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n# --- Save video ---\n",python,content +762,2742516,"utils/nn.py",0,0,"",python,tab +763,2742675,"utils/nn.py",37,7986,"\nfrom flax import linen as nn\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\nclass PositionalEncoding(nn.Module):\n """"""https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/JAX/tutorial6/Transformers_and_MHAttention.html""""""\n\n d_model: int # Hidden dimensionality of the input.\n max_len: int = 5000 # Maximum length of a sequence to expect.\n\n def setup(self):\n # Create matrix of [SeqLen, HiddenDim] representing the positional encoding for max_len inputs\n self.pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n self.pe = self.pe.at[:, 0::2].set(jnp.sin(position * div_term))\n self.pe = self.pe.at[:, 1::2].set(jnp.cos(position * div_term))\n\n def __call__(self, x):\n x = x + self.pe[: x.shape[2]]\n return x\n\n\nclass STBlock(nn.Module):\n dim: int\n ffn_dim: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n @nn.remat\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=False\n ),\n )(z)\n x = x + z\n\n # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n causal_mask = jnp.tri(z.shape[-2])\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n # FIXME (f.srambical): check whether we should still pass the mask if we set is_causal=True\n )(z, mask=causal_mask)\n x = x + z\n x = x.swapaxes(1, 2)\n\n # --- Feedforward ---\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n z = nn.Dense(\n self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.gelu(z)\n z = nn.Dense(\n self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n x = x + z\n\n return x\n\n\nclass STTransformer(nn.Module):\n model_dim: int\n ffn_dim: int\n out_dim: int\n num_blocks: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n @nn.compact\n def __call__(self, x: jax.Array) -> 
jax.Array:\n x = nn.Sequential(\n [\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n ]\n )(x)\n for _ in range(self.num_blocks):\n x = STBlock(\n dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )(x)\n x = nn.Dense(\n self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n return x # (B, T, E)\n\n\ndef normalize(x):\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nn.Module):\n latent_dim: int\n num_latents: int\n dropout: float\n\n def setup(self):\n self.codebook = normalize(\n self.param(\n ""codebook"",\n nn.initializers.lecun_uniform(),\n (self.num_latents, self.latent_dim),\n )\n )\n self.drop = nn.Dropout(self.dropout, deterministic=False)\n\n def __call__(\n self, x: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x = normalize(x)\n codebook = normalize(self.codebook)\n distance = -jnp.matmul(x, codebook.T)\n if training:\n dropout_key = self.make_rng(""dropout"")\n distance = self.drop(distance, rng=dropout_key)\n\n # --- Get indices and embeddings ---\n indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]\n\n # --- Straight through estimator ---\n z_q = x + jax.lax.stop_gradient(z - x)\n return z_q, z, x, indices\n\n def get_codes(self, indices: jax.Array):\n return self.codebook[indices]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool):\n """"""\n Create an attention function that uses flash attention if enabled.\n\n Flax MultiHeadAttention provides tensors with shape (batch..., length, num_heads, head_dim)\n jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim).\n\n We need to reshape to ensure compatibility. cuDNN's flash attention additionally\n requires a sequence length that is a multiple of 4. We pad the sequence length to the nearest\n multiple of 4 and mask accordingly.\n """"""\n\n def attention_fn(query, key, value, bias=None, mask=None, **kwargs):\n implementation = ""cudnn"" if use_flash_attention else None\n\n def _rearrange(x):\n return einops.rearrange(x, ""... l h d -> (...) 
l h d"")\n\n def _pad(x):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n\n def _fuse_masks(mask: jax.Array, attention_mask: jax.Array) -> jax.Array:\n mask_bool = mask.astype(jnp.bool_)\n expanded_mask = jnp.pad(\n mask_bool, ((0, pad_size), (0, pad_size)), constant_values=False\n )\n return jnp.logical_and(attention_mask, expanded_mask)\n\n original_shape = query.shape\n original_seq_len = query.shape[-3]\n\n # Pad to nearest multiple of 4\n target_seq_len = ((original_seq_len + 3) // 4) * 4\n pad_size = target_seq_len - original_seq_len\n\n query_4d = _pad(_rearrange(query))\n key_4d = _pad(_rearrange(key))\n value_4d = _pad(_rearrange(value))\n\n attention_mask = jnp.ones((target_seq_len, target_seq_len), dtype=jnp.bool_)\n attention_mask = attention_mask.at[original_seq_len:, :].set(False)\n attention_mask = attention_mask.at[:, original_seq_len:].set(False)\n\n mask_4d = (\n _fuse_masks(mask, attention_mask) if mask is not None else attention_mask\n )\n mask_4d = mask_4d[jnp.newaxis, jnp.newaxis, :, :] # (1, 1, seq_len, seq_len)\n\n bias_4d = _pad(_rearrange(bias)) if bias is not None else None\n\n output_4d = jax.nn.dot_product_attention(\n query=query_4d,\n key=key_4d,\n value=value_4d,\n bias=bias_4d,\n mask=mask_4d,\n implementation=implementation,\n is_causal=is_causal,\n **kwargs\n )\n return output_4d[..., :original_seq_len, :, :].reshape(original_shape)\n\n return attention_fn\n",python,content +764,2744558,"utils/nn.py",2965,0,"",python,selection_mouse +765,2744594,"utils/nn.py",2964,0,"",python,selection_command +766,2745174,"utils/nn.py",3050,0,"",python,selection_mouse +767,2745175,"utils/nn.py",3049,0,"",python,selection_command +768,2745934,"utils/nn.py",3038,12," )(z)",python,selection_command +769,2746133,"utils/nn.py",3008,42," dtype=self.dtype,\n )(z)",python,selection_command +770,2746268,"utils/nn.py",2966,84," param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)",python,selection_command +771,2746394,"utils/nn.py",2944,106," self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)",python,selection_command +772,2746621,"utils/nn.py",2922,128," z = nn.Dense(\n self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)",python,selection_command +773,2746852,"utils/nn.py",2922,128," z = nn.Dense(\n self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)",python,selection_command +774,2747899,"utils/nn.py",2930,0,"",python,selection_command +775,2748908,"utils/nn.py",3046,0,"#",python,content +776,2748909,"utils/nn.py",3020,0,"#",python,content +777,2748909,"utils/nn.py",2978,0,"#",python,content +778,2748909,"utils/nn.py",2956,0,"#",python,content +779,2748909,"utils/nn.py",2930,0,"#",python,content +780,2748910,"utils/nn.py",2931,0,"",python,selection_keyboard +781,2749040,"utils/nn.py",3051,0," ",python,content +782,2749040,"utils/nn.py",3024,0," ",python,content +783,2749040,"utils/nn.py",2981,0," ",python,content +784,2749040,"utils/nn.py",2958,0," ",python,content +785,2749040,"utils/nn.py",2931,0," ",python,content +786,2749041,"utils/nn.py",2932,0,"",python,selection_keyboard +787,2749350,"utils/nn.py",2931,0,"",python,selection_command +788,2751007,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab +789,2752018,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",890,0,"",shellscript,selection_mouse +790,2752938,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",781,0,"",shellscript,selection_mouse +791,2753823,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",792,0," 
\",shellscript,content +792,2754191,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",794,0,"\n",shellscript,content +793,2754604,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",794,0,"\n",shellscript,content +794,2754952,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",795,0," --tokenizer_ffn_dim=512 \\n --lam_ffn_dim=512 \\n --dyna_ffn_dim=512",shellscript,content +795,2756132,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",870,0,"",shellscript,selection_command +796,2757844,"TERMINAL",0,0,"git switch -",,terminal_output +797,2758555,"TERMINAL",0,0,"\r\n[?2004l\rfatal: a branch is expected, got commit 'c68e03e245819070bc7cd071c3e94cbd938a1f33'\r\nhint: If you want to detach HEAD at the commit, try again with the --detach option.\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +798,2761089,"TERMINAL",0,0,"git switch -",,terminal_output +799,2761851,"TERMINAL",0,0,"branch",,terminal_output +800,2764275,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +801,2765425,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir \\r\n --tokenizer_ffn_dim=512 \\r\n --lam_ffn_dim=512 \\r\n --dyna_ffn_dim=512\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
+802,2765514,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +803,2765625,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +804,2768637,"TERMINAL",0,0,"2025-07-31 13:16:11.238858: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +805,2779728,"TERMINAL",0,0,"2025-07-31 13:16:22.381172: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +806,2790921,"TERMINAL",0,0,"2025-07-31 13:16:33.571549: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +807,2794677,"TERMINAL",0,0,"2025-07-31 13:16:37.241269: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +808,2797738,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 119000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/119000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/117000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 118000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/118000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/100000/metrics/metrics not found.\r\n",,terminal_output +809,2797990,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 124, in \r\n restored = checkpoint_manager.restore(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\r\n restored = self._checkpointer.restore(restore_directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\r\n return super().restore(directory, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\r\n restored = self._restore(directory, args=ckpt_args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\r\n return self._handler.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\r\n restored[item_name] = handler.restore(\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/standard_checkpoint_handler.py"", line 246, in restore\r\n return self._impl.restore(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\r\n return self._handler_impl.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\r\n raise ValueError(\r\nValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'params': {'params': {'dynamics': {'dynamics': {'STBlock_10': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_11': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', 
skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', 
skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_8': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_9': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}, 'lam': {'encoder': {'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', 
skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}, 'tokenizer': {'decoder': {'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', 
skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}, 'encoder': {'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', 
skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, 
rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}}}, 'opt_state': [{'mu': {'params': {'dynamics': {'dynamics': {'STBlock_10': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 
'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_11': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(64, 512))}, [condensed checkpoint-metadata dump: the remainder of this terminal_output chunk repeats one pattern, Diff(lhs=None, rhs={...}) entries for 'STBlock_7' through 'STBlock_9' under dynamics/dynamics, 'STBlock_4' through 'STBlock_7' under lam/encoder, 'STBlock_4' through 'STBlock_7' under tokenizer/decoder and tokenizer/encoder, then the start of the 'nu' tree ('nu' -> 'params' -> 'dynamics' -> 'dynamics' -> 'STBlock_10', 'STBlock_11', 'STBlock_6'). Every STBlock carries identical leaf metadata, all ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False): Dense_0 bias (64,) and kernel (64, 512); LayerNorm_0/1/2 bias and scale (64,); MultiHeadAttention_0/1 key/query/value bias (1, 64) and kernel (64, 8, 64), out bias (64,) and kernel (1, 64, 512). The chunk cuts off mid-token:] ValueMetadataEntry(v",,terminal_output
+810,2798100,"TERMINAL",0,0,"alue_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, [condensed: this chunk resumes the dump mid-word and repeats the same Diff(lhs=None, rhs={...}) blocks, 'STBlock_6' through 'STBlock_9' completing nu/params/dynamics/dynamics, then 'STBlock_4' through 'STBlock_7' under lam/encoder and 'STBlock_4' through 'STBlock_6' under tokenizer/decoder, with the per-block leaf shapes listed above; it breaks off inside tokenizer/decoder STBlock_6 at] 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))},
'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}, 'encoder': {'STBlock_4': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_5': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_6': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 
'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}}), 'STBlock_7': Diff(lhs=None, rhs={'Dense_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 512))}, 'LayerNorm_0': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_1': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'LayerNorm_2': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'scale': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,))}, 'MultiHeadAttention_0': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}, 'MultiHeadAttention_1': {'key': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'out': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64,)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64, 512))}, 'query': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}, 'value': {'bias': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(1, 64)), 'kernel': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(64, 8, 64))}}})}}}}}, None, None]}\r\n",,terminal_output +811,2798902,"TERMINAL",0,0,"srun: error: 
hkn0602: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +812,2830240,"utils/nn.py",0,0,"",python,tab +813,2868993,"utils/nn.py",0,0,"",python,tab +814,2879683,"utils/nn.py",2813,0,"",python,selection_mouse +815,2879684,"utils/nn.py",2812,0,"",python,selection_command +816,2880225,"utils/nn.py",2969,0,"",python,selection_mouse +817,2880226,"utils/nn.py",2968,0,"",python,selection_command +818,2881184,"utils/nn.py",3055,1,"",python,content +819,2881184,"utils/nn.py",3027,1,"",python,content +820,2881184,"utils/nn.py",2983,1,"",python,content +821,2881184,"utils/nn.py",2959,1,"",python,content +822,2881184,"utils/nn.py",2931,1,"",python,content +823,2881624,"utils/nn.py",3050,1,"",python,content +824,2881625,"utils/nn.py",3023,1,"",python,content +825,2881625,"utils/nn.py",2980,1,"",python,content +826,2881625,"utils/nn.py",2957,1,"",python,content +827,2881625,"utils/nn.py",2930,1,"",python,content +828,2884870,"sample.py",0,0,"",python,tab +829,2889140,"sample.py",3728,0,"",python,selection_mouse +830,2889145,"sample.py",3727,0,"",python,selection_command +831,2889262,"sample.py",3727,1,"s",python,selection_mouse +832,2889268,"sample.py",3728,0,"",python,selection_command +833,2889283,"sample.py",3728,1,"\n",python,selection_mouse +834,2889295,"sample.py",3724,4,"rams",python,selection_mouse +835,2889316,"sample.py",3673,55,"red[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +836,2889327,"sample.py",3644,84,"\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +837,2889350,"sample.py",3552,176,"args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +838,2889366,"sample.py",3460,268,"\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +839,2889378,"sample.py",3247,481," options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +840,2889398,"sample.py",3080,648,"handler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +841,2889432,"sample.py",3003,725," ), \n)\nhandler_registry = 
ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +842,2889433,"sample.py",2955,773," 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +843,2889482,"sample.py",2911,817," optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +844,2889483,"sample.py",2891,837," tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +845,2889516,"sample.py",2872,856," params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, 
ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +846,2889517,"sample.py",2846,882," apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +847,2889555,"sample.py",2807,921,"dummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +848,2889588,"sample.py",2806,922,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +849,2889749,"sample.py",2807,921,"dummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n 
optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +850,2889760,"sample.py",2872,856," params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +851,2889782,"sample.py",2891,837," tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +852,2889794,"sample.py",2911,817," optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +853,2889820,"sample.py",2955,773," 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = 
ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +854,2889901,"sample.py",2993,735," )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +855,2889928,"sample.py",2955,773," 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +856,2889972,"sample.py",2911,817," optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +857,2889972,"sample.py",2872,856," params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n 
options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +858,2890009,"sample.py",2807,921,"dummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +859,2890010,"sample.py",2766,962,"params = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +860,2890100,"sample.py",2732,996,"rng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +861,2890100,"sample.py",2730,998,")\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, 
dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +862,2890101,"sample.py",2623,1105," videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +863,2890101,"sample.py",2602,1126,"dummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +864,2890114,"sample.py",2529,1199,"image_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = 
TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +865,2890132,"sample.py",2495,1233,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +866,2890151,"sample.py",2493,1235,")\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +867,2890164,"sample.py",2443,1285," use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = 
(args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +868,2890184,"sample.py",2421,1307," dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +869,2890201,"sample.py",2387,1341," param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n 
ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +870,2890220,"sample.py",2347,1381," dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +871,2890237,"sample.py",2305,1423," dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +872,2890256,"sample.py",2269,1459," dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, 
args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +873,2890281,"sample.py",2241,1487," dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +874,2890296,"sample.py",2226,1502," # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, 
ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +875,2890317,"sample.py",2202,1526," lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +876,2890336,"sample.py",2164,1564," lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n 
model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +877,2890354,"sample.py",2124,1604," lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +878,2890355,"sample.py",2084,1644," lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +879,2890376,"sample.py",1990,1738," latent_action_dim=args.latent_action_dim,\n 
num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +880,2890418,"sample.py",1956,1772," lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +881,2890455,"sample.py",1930,1798," lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n 
latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +882,2890456,"sample.py",1920,1808," # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +883,2890456,"sample.py",1870,1858," 
tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +884,2890494,"sample.py",1818,1910," tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n 
model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +885,2890530,"sample.py",1786,1942," patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +886,2890531,"sample.py",1740,1988," num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = 
ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +887,2890531,"sample.py",1650,2078," tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +888,2890567,"sample.py",1612,2116," tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, 
args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +889,2890568,"sample.py",1580,2148," in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +890,2890603,"sample.py",1564,2164," # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n 
patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +891,2890604,"sample.py",1549,2179,"genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n 
options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +892,2890604,"sample.py",1564,2164," # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +893,2890641,"sample.py",1612,2116," tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = 
jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +894,2890642,"sample.py",1786,1942," patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +895,2890680,"sample.py",1920,1808," # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n 
dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +896,2890681,"sample.py",2084,1644," lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +897,2890712,"sample.py",2269,1459," dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = 
jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +898,2890713,"sample.py",2421,1307," dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +899,2890713,"sample.py",2493,1235,")\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = 
restored_train_state.params",python,selection_mouse +900,2890744,"sample.py",2602,1126,"dummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +901,2890745,"sample.py",2711,1017," mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +902,2890775,"sample.py",2732,996,"rng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +903,2890810,"sample.py",2766,962,"params = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = 
ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +904,2890842,"sample.py",2806,922,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +905,2890843,"sample.py",2807,921,"dummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +906,2890953,"sample.py",2846,882," apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse 
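The selections in rows 870-916 sweep back and forth over the same checkpoint-restore block of sample.py. Distilled from the recorded text, the pattern reads roughly as below; this is a minimal sketch, assuming orbax.checkpoint (ocp), optax, and Flax's TrainState as imported in the recorded file, with `genie`, `params`, and `args` defined earlier in it. The throwaway TrainState exists only to provide a pytree skeleton: restoring against a shape/dtype-only abstraction lets Orbax fill in the checkpointed parameters without first materializing a second full copy.

import jax
import optax
import orbax.checkpoint as ocp
from flax.training.train_state import TrainState

# Throwaway TrainState: only its tree structure matters, so the schedule
# values are dummies (exactly as in the recorded sample.py).
dummy_train_state = TrainState.create(
    apply_fn=genie.apply,
    params=params,
    tx=optax.adamw(optax.warmup_cosine_decay_schedule(0, 0, 1, 2)),
)

# Register a standard handler under the item name used at save time.
handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()
handler_registry.add(
    "model_state", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler
)
checkpoint_manager = ocp.CheckpointManager(
    args.checkpoint,
    options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),
    handler_registry=handler_registry,
)

# Swap every leaf for a jax.ShapeDtypeStruct, then restore the latest step
# against that abstract pytree.
abstract_train_state = jax.tree_util.tree_map(
    ocp.utils.to_shape_dtype_struct, dummy_train_state
)
restored = checkpoint_manager.restore(
    checkpoint_manager.latest_step(),
    args=ocp.args.Composite(
        model_state=ocp.args.StandardRestore(abstract_train_state)
    ),
)
params = restored["model_state"].params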
+907,2891030,"sample.py",2807,921,"dummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +908,2891047,"sample.py",2806,922,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +909,2891066,"sample.py",2766,962,"params = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +910,2891088,"sample.py",2732,996,"rng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = 
jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +911,2891123,"sample.py",2730,998,")\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +912,2891157,"sample.py",2711,1017," mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +913,2891194,"sample.py",2623,1105," videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = 
restored_train_state.params",python,selection_mouse +914,2891230,"sample.py",2602,1126,"dummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +915,2891312,"sample.py",2529,1199,"image_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +916,2891847,"sample.py",2495,1233,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n 
model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,selection_mouse +917,2895601,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +918,2895943,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +919,2896048,"TERMINAL",0,0,"git switch -\r\n\r",,terminal_output +920,2896580,"TERMINAL",0,0,"branch",,terminal_output +921,2896880,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +922,2897503,"TERMINAL",0,0,"git checkout c68e03e245819070bc7cd071c3e94cbd938a1f33\r\n\r",,terminal_output +923,2898633,"TERMINAL",0,0,"[?25l[?2004l\r[?25h",,terminal_output +924,2898788,"TERMINAL",0,0,"Note: switching to 'c68e03e245819070bc7cd071c3e94cbd938a1f33'.\r\n\r\nYou are in 'detached HEAD' state. You can look around, make experimental\r\nchanges and commit them, and you can discard any commits you make in this\r\nstate without impacting any branches by switching back to a branch.\r\n\r\nIf you want to create a new branch to retain commits you create, you may\r\ndo so (now or later) by using -c with the switch command. Example:\r\n\r\n  git switch -c <new-branch-name>\r\n\r\nOr undo this operation with:\r\n\r\n  git switch -\r\n\r\nTurn off this advice by setting config variable advice.detachedHead to false\r\n\r\nHEAD is now at c68e03e feat: omit dataloader checkpoint restore in `restore_genie_components` (#96)\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +925,2901055,"sample.py",0,0,"",python,tab +926,2901205,"sample.py",151,5803,"import numpy as np\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n    # Experiment\n    seed: int = 0\n    seq_len: int = 16\n    image_channels: int = 3\n    image_height: int = 90\n    image_width: int = 160\n    data_dir: str = ""data/coinrun_episodes""\n    checkpoint: str = """"\n    # Sampling\n    batch_size: int = 1\n    maskgit_steps: int = 25\n    temperature: float = 1.0\n    sample_argmax: bool = True\n    start_frame: int = 0\n    # Tokenizer checkpoint\n    tokenizer_dim: int = 512\n    latent_patch_dim: int = 32\n    num_patch_latents: int = 1024\n    patch_size: int = 4\n    tokenizer_num_blocks: int = 8\n    tokenizer_num_heads: int = 8\n    # LAM checkpoint\n    lam_dim: int = 512\n    latent_action_dim: int = 32\n    num_latent_actions: int = 6\n    lam_patch_size: int = 16\n    lam_num_blocks: int = 8\n    lam_num_heads: int = 8\n    # Dynamics checkpoint\n    dyna_dim: int = 512\n    dyna_num_blocks: int = 12\n    dyna_num_heads: int = 8\n    param_dtype: jnp.dtype = jnp.float32\n    dtype: jnp.dtype = jnp.bfloat16\n    use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\nrng = jax.random.PRNGKey(args.seed)\n\n# --- Load Genie checkpoint ---\ngenie = Genie(\n    # Tokenizer\n    in_dim=args.image_channels,\n    tokenizer_dim=args.tokenizer_dim,\n    latent_patch_dim=args.latent_patch_dim,\n    num_patch_latents=args.num_patch_latents,\n    patch_size=args.patch_size,\n    tokenizer_num_blocks=args.tokenizer_num_blocks,\n    tokenizer_num_heads=args.tokenizer_num_heads,\n    # LAM\n 
lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\nckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n\n\ndef _sampling_wrapper(module, batch):\n return module.sample(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n sampling_fn = jax.jit(nn.apply(_sampling_wrapper, genie)) \n rng, _rng = jax.random.split(rng)\n batch = dict(videos=vid, latent_actions=action_batch, rng=_rng)\n generated_vid = sampling_fn(\n params,\n batch\n )\n return generated_vid\n\n# --- Get video + latent actions ---\narray_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n]\ndataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n)\nvideo_batch = next(iter(dataloader))\n# Get latent actions for all videos in the batch\nbatch = dict(videos=video_batch)\naction_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\naction_batch = action_batch.reshape(video_batch.shape[0], args.seq_len - 1, 1)\n\n# --- Sample + evaluate video ---\nvid = _autoreg_sample(rng, video_batch, action_batch)\ngt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\nrecon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\nssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\nprint(f""SSIM: {ssim}"")\n\n# --- Construct video ---\ntrue_videos = (video_batch * 255).astype(np.uint8)\npred_videos = (vid * 255).astype(np.uint8)\nvideo_comparison = np.zeros((2, *vid.shape), dtype=np.uint8)\nvideo_comparison[0] = true_videos[:, :args.seq_len]\nvideo_comparison[1] = pred_videos\nframes = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n# --- Save video --- \n",python,content +927,2903028,"sample.py",2688,0,"",python,selection_mouse +928,2903036,"sample.py",2687,0,"",python,selection_command +929,2903148,"sample.py",2687,1,")",python,selection_mouse +930,2903183,"sample.py",2688,0,"",python,selection_command +931,2903184,"sample.py",2605,83,"restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)",python,selection_mouse +932,2903184,"sample.py",2598,90,"nter().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)",python,selection_mouse +933,2903220,"sample.py",2552,136,"init(_rng, dummy_inputs)\nckpt = 
PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)",python,selection_mouse +934,2903221,"sample.py",2510,178,"ng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\nckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)",python,selection_mouse +935,2903257,"sample.py",2502,186,"\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\nckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)",python,selection_mouse +936,2903291,"sample.py",2482,206," mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\nckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)",python,selection_mouse +937,2903328,"sample.py",2394,294," videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\nckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)",python,selection_mouse +938,2903368,"sample.py",2373,315,"dummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\nckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)",python,selection_mouse +939,2903401,"sample.py",2300,388,"image_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\nckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)",python,selection_mouse +940,2903844,"sample.py",2266,422,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\nckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)",python,selection_mouse +941,2904582,"sample.py",2266,422,"",python,content +942,2905076,"sample.py",2266,0,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n 
handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,content +943,2916133,"sample.py",331,0,"",python,selection_mouse +944,2916298,"sample.py",287,44,"\nfrom utils.dataloader import get_dataloader",python,selection_mouse +945,2916338,"sample.py",263,68,"\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader",python,selection_mouse +946,2916374,"sample.py",169,162,"\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader",python,selection_mouse +947,2916374,"sample.py",102,229,"\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader",python,selection_mouse +948,2916376,"sample.py",86,245,"inops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader",python,selection_mouse +949,2916414,"sample.py",62,269,"t dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader",python,selection_mouse +950,2916415,"sample.py",48,283,"port os\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader",python,selection_mouse +951,2916450,"sample.py",35,296,"mport time\nimport os\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader",python,selection_mouse +952,2916451,"sample.py",34,297,"import time\nimport os\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader",python,selection_mouse +953,2916485,"sample.py",0,331,"from dataclasses import dataclass\nimport time\nimport os\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader",python,selection_mouse +954,2917172,"sample.py",0,331,"",python,content +955,2917468,"sample.py",0,0,"from dataclasses import dataclass\nfrom typing import Optional\nimport time\nimport os\n\nimport 
dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nfrom flax.training.train_state import TrainState\nimport grain\nimport orbax.checkpoint as ocp\nimport optax\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n",python,content +956,2920719,"sample.py",259,0,"",python,selection_mouse +957,2920875,"sample.py",254,5,"grain",python,selection_mouse +958,2920974,"sample.py",247,13,"import grain\n",python,selection_mouse +959,2921293,"sample.py",247,13,"",python,content +960,2925546,"sample.py",4481,0,"",python,selection_mouse +961,2926318,"sample.py",4480,1,"",python,content +962,2926410,"sample.py",4480,0,"0",python,content +963,2926411,"sample.py",4481,0,"",python,selection_keyboard +964,2931414,"sample.py",3077,0,"",python,selection_mouse +965,2931543,"sample.py",3075,4,"args",python,selection_mouse +966,2931700,"sample.py",3075,15,"args.checkpoint",python,selection_mouse +967,2932137,"sample.py",3087,0,"",python,selection_mouse +968,2932138,"sample.py",3080,10,"checkpoint",python,selection_mouse +969,2932382,"sample.py",3079,11,".checkpoint",python,selection_mouse +970,2932418,"sample.py",3075,15,"args.checkpoint",python,selection_mouse +971,2932882,"sample.py",3078,0,"",python,selection_mouse +972,2932882,"sample.py",3075,4,"args",python,selection_mouse +973,2933039,"sample.py",3075,15,"args.checkpoint",python,selection_mouse +974,2933454,"sample.py",3089,0,"",python,selection_mouse +975,2938229,"TERMINAL",0,0,"git checkout c68e03e245819070bc7cd071c3e94cbd938a1f33",,terminal_output +976,2938399,"TERMINAL",0,0,"\rsh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +977,2939368,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir \\r\n --tokenizer_ffn_dim=512 \\r\n --lam_ffn_dim=512 \\r\n --dyna_ffn_dim=512\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
+978,2939501,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +979,2939577,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +980,2940566,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n╭─ Unrecognized options ───────────────────────────────────────────────────────╮\r\n│ Unrecognized options: --tokenizer-ffn-dim=512 --lam-ffn-dim=512 │\r\n│ --dyna-ffn-dim=512 │\r\n│ ──────────────────────────────────────────────────────────────────────────── │\r\n│ For full helptext, run sample.py --help │\r\n╰──────────────────────────────────────────────────────────────────────────────╯\r\n",,terminal_output +981,2940626,"TERMINAL",0,0,"srun: error: hkn0602: task 0: Exited with exit code 2\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +982,2944204,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab +983,2946647,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",825,47,"",shellscript,content +984,2946900,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",816,0,"",shellscript,selection_command +985,2947194,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",795,30,"",shellscript,content +986,2949512,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +987,2950136,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir \\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
+988,2950269,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +989,2950346,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +990,2951318,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +991,2953347,"TERMINAL",0,0,"2025-07-31 13:19:15.994834: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +992,2964698,"TERMINAL",0,0,"2025-07-31 13:19:27.350819: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +993,2975905,"TERMINAL",0,0,"2025-07-31 13:19:38.557438: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +994,2980195,"TERMINAL",0,0,"2025-07-31 13:19:42.775910: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +995,2983783,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 119000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/119000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/100000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/117000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 118000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/118000/metrics/metrics not found.\r\n",,terminal_output +996,3018134,"sample.py",0,0,"",python,tab +997,3021653,"genie.py",0,0,"",python,tab +998,3021810,"genie.py",437,13952,"\n latent_patch_dim: int\n num_patch_latents: int\n patch_size: int\n tokenizer_num_blocks: int\n tokenizer_num_heads: int\n # --- LAM ---\n lam_dim: int\n latent_action_dim: int\n num_latent_actions: int\n lam_patch_size: int\n lam_num_blocks: int\n lam_num_heads: int\n lam_co_train: bool\n # --- Dynamics ---\n dyna_dim: int\n dyna_num_blocks: int\n dyna_num_heads: int\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n dropout: float = 0.0\n mask_limit: float = 0.0\n\n def setup(self):\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n 
num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n num_latents=self.num_patch_latents,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\n lam_outputs = self.lam.vq_encode(batch[""videos""], training=False)\n latent_actions = jax.lax.cond(\n self.lam_co_train,\n lambda: lam_outputs[""z_q""],\n lambda: jax.lax.stop_gradient(lam_outputs[""z_q""])\n )\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(tokenizer_outputs[""indices""]),\n latent_actions=latent_actions,\n )\n outputs[""mask_rng""] = batch[""mask_rng""]\n dyna_outputs = self.dynamics(outputs, training)\n outputs.update(dyna_outputs)\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )\n outputs[""lam_indices""] = lam_outputs[""indices""]\n return outputs\n\n @nn.compact\n def sample(\n self,\n batch: Dict[str, Any],\n seq_len: int,\n steps: int = 25,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> Any:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by \n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size \n T: number of input (conditioning) frames \n N: patches per frame \n S: sequence length \n A: action space \n D: model latent dimension\n """"""\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""] # (B, T, N)\n B, T, N = token_idxs.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs.dtype)\n token_idxs = jnp.concatenate([token_idxs, pad], axis=1) # (B, S, N)\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n \n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n\n def 
generation_step_fn(carry, step_t):\n rng, current_token_idxs = carry\n rng, step_rng = jax.random.split(rng)\n\n # Mask current and future frames (i.e., t >= step_t)\n mask = jnp.arange(seq_len) >= step_t # (S,)\n mask = jnp.broadcast_to(mask[None, :, None], (B, seq_len, N)) # (B, S, N)\n mask = mask.astype(bool)\n masked_token_idxs = current_token_idxs * ~mask\n\n # --- Initialize and run MaskGIT loop ---\n init_carry_maskgit = (\n step_rng,\n masked_token_idxs,\n mask,\n action_tokens,\n )\n final_carry_maskgit, _ = loop_fn(init_carry_maskgit, jnp.arange(steps))\n updated_token_idxs = final_carry_maskgit[1]\n new_carry = (rng, updated_token_idxs)\n return new_carry, None\n\n # --- Run the autoregressive generation using scan ---\n initial_carry = (batch[""rng""], token_idxs)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry, _ = jax.lax.scan(\n generation_step_fn,\n initial_carry,\n timesteps_to_scan\n )\n final_token_idxs = final_carry[1]\n\n # --- Decode all tokens at once at the end ---\n final_frames = self.tokenizer.decode(\n final_token_idxs,\n video_hw=batch[""videos""].shape[2:4],\n )\n return final_frames\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, token_idxs, mask, action_tokens = carry\n step = x\n N = token_idxs.shape[2]\n\n # --- Construct + encode video ---\n vid_embed = self.dynamics.patch_embed(token_idxs) # (B, S, N, D)\n mask_token = self.dynamics.mask_token # (1, 1, 1, D,)\n mask_expanded = mask[..., None] # (B, S, N, 1) \n vid_embed = jnp.where(mask_expanded, mask_token, vid_embed)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed) / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jax.random.categorical(_rng, final_logits)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n token_idxs = jnp.where(mask, sampled_token_idxs, token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, token_idxs, new_mask, action_tokens)\n return new_carry, None\n\ndef restore_genie_components(\n train_state: TrainState,\n sharding: jax.sharding.NamedSharding,\n grain_iterator: grain.DataLoaderIterator,\n inputs: Dict[str, jax.Array],\n rng: jax.Array,\n args,\n):\n """"""Restore pre-trained Genie components""""""\n rng, _rng = jax.random.split(rng)\n\n # dummy values since we only use tx to initialize the dummy train states\n 
dummy_tx = optax.adamw(\n        learning_rate=optax.constant_schedule(args.max_lr),\n        b1=0.9,\n        b2=0.9,\n        weight_decay=1e-4,\n    )\n    handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n    handler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\n    \n\n    checkpoint_options = ocp.CheckpointManagerOptions(\n        step_format_fixed_length=6,\n    )\n    tokenizer_checkpoint_manager = ocp.CheckpointManager(\n        directory=args.tokenizer_checkpoint,\n        options=checkpoint_options,\n        handler_registry=handler_registry,\n    )\n    dummy_tokenizer = TokenizerVQVAE(\n        in_dim=args.image_channels,\n        model_dim=args.tokenizer_dim,\n        latent_dim=args.latent_patch_dim,\n        num_latents=args.num_patch_latents,\n        patch_size=args.patch_size,\n        num_blocks=args.tokenizer_num_blocks,\n        num_heads=args.tokenizer_num_heads,\n        dropout=args.dropout,\n        codebook_dropout=args.dropout,\n        param_dtype=args.param_dtype,\n        dtype=args.dtype,\n        use_flash_attention=args.use_flash_attention,\n    )\n    tokenizer_init_params = dummy_tokenizer.init(_rng, inputs)\n    dummy_tokenizer_train_state = TrainState.create(\n        apply_fn=dummy_tokenizer.apply, params=tokenizer_init_params, tx=dummy_tx\n    )\n    abstract_sharded_tokenizer_state = _create_abstract_sharded_pytree(\n        dummy_tokenizer_train_state, sharding\n    )\n    restored_tokenizer = tokenizer_checkpoint_manager.restore(\n        step=tokenizer_checkpoint_manager.latest_step(),\n        args=ocp.args.Composite(\n            model_state=ocp.args.StandardRestore(abstract_sharded_tokenizer_state),\n        ),\n    )[""model_state""]\n    restored_tokenizer_params = restored_tokenizer.params[""params""]\n    train_state.params[""params""][""tokenizer""].update(restored_tokenizer_params)\n    tokenizer_checkpoint_manager.close()\n\n    if args.lam_checkpoint:\n        lam_checkpoint_manager = ocp.CheckpointManager(\n            directory=args.lam_checkpoint,\n            options=checkpoint_options,\n            handler_registry=handler_registry,\n        )\n        dummy_lam = LatentActionModel(\n            in_dim=args.image_channels,\n            model_dim=args.lam_dim,\n            latent_dim=args.latent_patch_dim,\n            num_latents=args.num_latent_actions,\n            patch_size=args.lam_patch_size,\n            num_blocks=args.lam_num_blocks,\n            num_heads=args.lam_num_heads,\n            dropout=args.dropout,\n            codebook_dropout=args.dropout,\n            param_dtype=args.param_dtype,\n            dtype=args.dtype,\n            use_flash_attention=args.use_flash_attention,\n        )\n        lam_init_params = dummy_lam.init(_rng, inputs)\n        dummy_lam_train_state = TrainState.create(\n            apply_fn=dummy_lam.apply, params=lam_init_params, tx=dummy_tx\n        )\n        abstract_sharded_lam_state = _create_abstract_sharded_pytree(\n            dummy_lam_train_state, sharding\n        )\n        restored_lam = lam_checkpoint_manager.restore(\n            step=lam_checkpoint_manager.latest_step(),\n            args=ocp.args.Composite(\n                model_state=ocp.args.StandardRestore(abstract_sharded_lam_state),\n            ),\n        )[""model_state""]\n        restored_lam_params = restored_lam.params[""params""]\n        # Genie does not initialize all LAM modules, thus we omit those extra modules during restoration\n        # (f.srambical) FIXME: Currently, this is a small HBM memory crunch since the LAM's decoder is loaded into HBM and immediately discarded.\n        # A workaround would be to restore to host memory first, and only move the weights to HBM after pruning the decoder\n        restored_lam_params = {\n            k: v\n            for k, v in restored_lam_params.items()\n            if k in train_state.params[""params""][""lam""]\n        }\n        train_state.params[""params""][""lam""].update(restored_lam_params)\n        lam_checkpoint_manager.close()\n\n    return train_state\n\ndef _create_abstract_sharded_pytree(pytree_template, 
sharding_spec):\n """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, ""shape"") and hasattr(leaf_template, ""dtype""):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)",python,content +999,3027768,"genie.py",8205,0,"",python,selection_mouse +1000,3027892,"genie.py",8200,9,"step_temp",python,selection_mouse +1001,3041943,"TERMINAL",0,0,"2025-07-31 13:20:44.326328: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 13:20:44.326636: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 13:20:44.327036: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 13:20:44.327069: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1002,3052360,"genie.py",0,0,"",python,tab +1003,3086705,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 172, in \r\n ssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/dm_pix/_src/metrics.py"", line 221, in ssim\r\n chex.assert_type([a, b], float)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/chex/_src/asserts_internal.py"", line 279, in _chex_assert_fn\r\n host_assertion_fn(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/chex/_src/asserts_internal.py"", line 197, in _assert_on_host\r\n raise exception_type(error_msg)\r\nAssertionError: [Chex] Assertion assert_type failed: Error in type compatibility check: input 0 has type uint8 but expected .\r\n",,terminal_output +1004,3088432,"TERMINAL",0,0,"srun: error: hkn0602: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +1005,3156384,"genie.py",8210,0,"",python,selection_mouse +1006,3161936,"TERMINAL",0,0,"bash",,terminal_focus +1007,3163191,"utils/nn.py",0,0,"",python,tab +1008,3163342,"utils/nn.py",37,8117,"from functools import partial\n\nfrom flax import linen as nn\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\nclass PositionalEncoding(nn.Module):\n 
""""""https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/JAX/tutorial6/Transformers_and_MHAttention.html""""""\n\n d_model: int # Hidden dimensionality of the input.\n max_len: int = 5000 # Maximum length of a sequence to expect.\n\n def setup(self):\n # Create matrix of [SeqLen, HiddenDim] representing the positional encoding for max_len inputs\n self.pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n self.pe = self.pe.at[:, 0::2].set(jnp.sin(position * div_term))\n self.pe = self.pe.at[:, 1::2].set(jnp.cos(position * div_term))\n\n def __call__(self, x):\n x = x + self.pe[: x.shape[2]]\n return x\n\n\nclass STBlock(nn.Module):\n dim: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n @nn.remat\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(self.use_flash_attention, is_causal=False),\n )(z)\n x = x + z\n\n # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n causal_mask = jnp.tri(z.shape[-2])\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(self.use_flash_attention, is_causal=True),\n # FIXME (f.srambical): check whether we should still pass the mask if we set is_causal=True\n )(z, mask=causal_mask)\n x = x + z\n x = x.swapaxes(1, 2)\n\n # --- Feedforward ---\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n # FIXME (f.srambical): Here, the attention hidden dimension is the same as the FFN's. 
Usually, FFN hidden dimension is 4x model_dim\n z = nn.Dense(\n self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.gelu(z)\n x = x + z\n\n return x\n\n\nclass STTransformer(nn.Module):\n model_dim: int\n out_dim: int\n num_blocks: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n x = nn.Sequential(\n [\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.Dense(self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n ]\n )(x)\n for _ in range(self.num_blocks):\n x = STBlock(\n dim=self.model_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )(x)\n x = nn.Dense(\n self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n return x # (B, T, E)\n\n\ndef normalize(x):\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nn.Module):\n latent_dim: int\n num_latents: int\n dropout: float\n\n def setup(self):\n self.codebook = normalize(\n self.param(\n ""codebook"",\n nn.initializers.lecun_uniform(),\n (self.num_latents, self.latent_dim),\n )\n )\n self.drop = nn.Dropout(self.dropout, deterministic=False)\n\n def __call__(\n self, x: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x = normalize(x)\n codebook = normalize(self.codebook)\n distance = -jnp.matmul(x, codebook.T)\n if training:\n dropout_key = self.make_rng(""dropout"")\n distance = self.drop(distance, rng=dropout_key)\n\n # --- Get indices and embeddings ---\n indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]\n\n # --- Straight through estimator ---\n z_q = x + jax.lax.stop_gradient(z - x)\n return z_q, z, x, indices\n\n def get_codes(self, indices: jax.Array):\n return self.codebook[indices]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool):\n """"""\n Create an attention function that uses flash attention if enabled.\n\n Flax MultiHeadAttention provides tensors with shape (batch..., length, num_heads, head_dim)\n jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim).\n\n We need to reshape to ensure compatibility. cuDNN's flash attention additionally\n requires a sequence length that is a multiple of 4. We pad the sequence length to the nearest\n multiple of 4 and mask accordingly.\n """"""\n \n def attention_fn(query, key, value, bias=None, mask=None, **kwargs):\n implementation = 'cudnn' if use_flash_attention else None\n\n def _rearrange(x):\n return einops.rearrange(x, '... l h d -> (...) 
l h d')\n def _pad(x):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n def _fuse_masks(mask: jax.Array, attention_mask: jax.Array) -> jax.Array:\n mask_bool = mask.astype(jnp.bool_)\n expanded_mask = jnp.pad(mask_bool, ((0, pad_size), (0, pad_size)), constant_values=False)\n return jnp.logical_and(attention_mask, expanded_mask)\n \n original_shape = query.shape\n original_seq_len = query.shape[-3]\n \n # Pad to nearest multiple of 4\n target_seq_len = ((original_seq_len + 3) // 4) * 4\n pad_size = target_seq_len - original_seq_len\n\n query_4d = _pad(_rearrange(query))\n key_4d = _pad(_rearrange(key))\n value_4d = _pad(_rearrange(value))\n \n attention_mask = jnp.ones((target_seq_len, target_seq_len), dtype=jnp.bool_)\n attention_mask = attention_mask.at[original_seq_len:, :].set(False)\n attention_mask = attention_mask.at[:, original_seq_len:].set(False)\n\n mask_4d = _fuse_masks(mask, attention_mask) if mask is not None else attention_mask\n mask_4d = mask_4d[jnp.newaxis, jnp.newaxis, :, :] # (1, 1, seq_len, seq_len)\n \n bias_4d = _pad(_rearrange(bias)) if bias is not None else None\n \n output_4d = jax.nn.dot_product_attention(\n query=query_4d,\n key=key_4d,\n value=value_4d,\n bias=bias_4d,\n mask=mask_4d,\n implementation=implementation,\n is_causal=is_causal,\n **kwargs\n )\n return output_4d[..., :original_seq_len, :, :].reshape(original_shape)\n \n return attention_fn\n\n",python,content +1009,3165881,"sample.py",0,0,"",python,tab +1010,3181248,"sample.py",4569,0,"",python,selection_mouse +1011,3181622,"sample.py",4569,0,"\n",python,content +1012,3181940,"sample.py",4570,0,"video_batch = jnp.array(video_batch)\nprint(video_batch.dtype)\nvideo_batch = video_batch.astype(args.dtype) # / 255.0\nprint(video_batch.dtype)\nvideo_batch = video_batch / 255.0\nprint(video_batch.dtype)",python,content +1013,3182680,"sample.py",4655,0,"",python,selection_mouse +1014,3182828,"sample.py",4654,0,"",python,selection_command +1015,3183386,"sample.py",4629,0,"",python,selection_command +1016,3183708,"sample.py",4607,25,"",python,content +1017,3183831,"sample.py",4662,0,"",python,selection_command +1018,3184357,"sample.py",4662,25,"",python,content +1019,3184538,"sample.py",4696,0,"",python,selection_command +1020,3184946,"sample.py",4696,25,"",python,content +1021,3186065,"TERMINAL",0,0,"srun",,terminal_focus +1022,3187061,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +1023,3187642,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir \\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # 
--dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output +1024,3187966,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +1025,3188098,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +1026,3196793,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +1027,3198974,"TERMINAL",0,0,"2025-07-31 13:23:21.627575: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1028,3210397,"TERMINAL",0,0,"2025-07-31 13:23:32.995429: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1029,3221967,"TERMINAL",0,0,"2025-07-31 13:23:44.581485: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1030,3225936,"TERMINAL",0,0,"2025-07-31 13:23:48.586462: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1031,3229811,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 119000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/119000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/100000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 118000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/118000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/117000/metrics/metrics not found.\r\n",,terminal_output +1032,3274188,"TERMINAL",0,0,"2025-07-31 13:24:36.795096: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 13:24:36.795411: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 13:24:36.795801: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 13:24:36.795834: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1033,3324106,"TERMINAL",0,0,"SSIM: 0.33350828289985657\r\n",,terminal_output +1034,3325802,"TERMINAL",0,0,"]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +1035,3353704,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab +1036,3355009,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",734,0,"",shellscript,selection_mouse +1037,3356255,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",733,1,"",shellscript,content +1038,3356390,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",732,1,"",shellscript,content +1039,3356494,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",732,0,"1",shellscript,content +1040,3356496,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",733,0,"",shellscript,selection_keyboard +1041,3361465,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +1042,3362050,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=1 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir \\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
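The `attention_fn` wrapper recorded earlier in this session pads query/key/value along the sequence axis to the next multiple of 4 and masks out the padded positions, so that `jax.nn.dot_product_attention`'s fused path accepts arbitrary sequence lengths. A minimal, self-contained sketch of that trick, assuming `(batch, seq, heads, head_dim)` inputs; `padded_attention` and the shapes are illustrative, not the project's actual helper:

```python
import jax
import jax.numpy as jnp

def padded_attention(query, key, value, multiple=4):
    # Inputs are (batch, seq, heads, head_dim), the layout jax.nn.dot_product_attention expects.
    B, S, H, D = query.shape
    target = ((S + multiple - 1) // multiple) * multiple  # round S up to the next multiple
    pad = target - S
    pad_fn = lambda x: jnp.pad(x, ((0, 0), (0, pad), (0, 0), (0, 0)))
    q, k, v = pad_fn(query), pad_fn(key), pad_fn(value)
    # Boolean mask so padded positions neither attend nor get attended to.
    valid = jnp.arange(target) < S
    mask = (valid[:, None] & valid[None, :])[None, None, :, :]  # (1, 1, S', S')
    out = jax.nn.dot_product_attention(q, k, v, mask=mask)
    return out[:, :S]  # drop the padded rows again, as the recorded code does

q = k = v = jnp.ones((2, 10, 8, 64))        # S=10 is padded to 12 internally
print(padded_attention(q, k, v).shape)      # (2, 10, 8, 64)
```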
+1043,3362181,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +1044,3362286,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +1045,3364608,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +1046,3366720,"TERMINAL",0,0,"2025-07-31 13:26:09.372170: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1047,3376078,"TERMINAL",0,0,"bash",,terminal_focus +1048,3377213,"TERMINAL",0,0,"bash",,terminal_focus +1049,3378230,"TERMINAL",0,0,"2025-07-31 13:26:20.840994: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1050,3385138,"TERMINAL",0,0,"git checkout -b ""sample-ali-branch""",,terminal_command +1051,3385183,"TERMINAL",0,0,"]633;E;2025-07-31 13:26:27 git checkout -b ""sample-ali-branch"";47fb02a1-5160-42e3-a5e8-8fc5a2d3ac19]633;C",,terminal_output +1052,3385285,"TERMINAL",0,0,"Switched to a new branch 'sample-ali-branch'\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +1053,3385440,"",0,0,"Switched from branch 'before-nnx' to 'sample-ali-branch'",,git_branch_checkout +1054,3388495,"TERMINAL",0,0,"git status",,terminal_command +1055,3388563,"TERMINAL",0,0,"]633;E;2025-07-31 13:26:31 git status;47fb02a1-5160-42e3-a5e8-8fc5a2d3ac19]633;C",,terminal_output +1056,3388633,"TERMINAL",0,0,"On branch sample-ali-branch\r\nChanges not staged for commit:\r\n (use ""git add ..."" to update what will be committed)\r\n (use ""git restore ..."" to discard changes in working directory)\r\n\tmodified: sample.py\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tdiff.diff\r\n\tdiff.log\r\n\tlogs/\r\n\toverfit_dir.zip\r\n\tread_tf_record.py\r\n\trequirements-franz.txt\r\n\tscripts_cremers/\r\n\tscripts_horeka/\r\n\tslurm-3373409.out\r\n\tslurm-3373410.out\r\n\tslurm-3379613.out\r\n\tslurm-3379615.out\r\n\tslurm-3379616.out\r\n\tslurm/\r\n\tutils/logger_bak.py\r\n\tutils/nn_bak.py\r\n\tutils/visualizer.py\r\n\tweekend-job-requeuer.sh\r\n\tweekend-job-starter.sh\r\n\r\nno changes added to commit (use ""git add"" and/or ""git commit -a"")\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +1057,3389914,"TERMINAL",0,0,"2025-07-31 13:26:32.516208: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1058,3394161,"TERMINAL",0,0,"2025-07-31 13:26:36.785489: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1059,3394281,"TERMINAL",0,0,"git commit -am ""fixed sampling""",,terminal_command +1060,3394324,"TERMINAL",0,0,"]633;E;2025-07-31 13:26:36 git commit -am ""fixed sampling"";47fb02a1-5160-42e3-a5e8-8fc5a2d3ac19]633;C",,terminal_output +1061,3394586,"TERMINAL",0,0,"[sample-ali-branch 05f66c8] fixed sampling\r\n 1 file changed, 38 insertions(+), 4 deletions(-)\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +1062,3397391,"TERMINAL",0,0,"srun",,terminal_focus +1063,3397888,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 119000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/119000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/100000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 118000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/118000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/117000/metrics/metrics not found.\r\n",,terminal_output +1064,3441923,"TERMINAL",0,0,"2025-07-31 13:27:24.554777: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 13:27:24.555097: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 13:27:24.555495: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 13:27:24.555527: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1065,3463940,"TERMINAL",0,0,"SSIM: 0.5732285976409912\r\n",,terminal_output +1066,3465552,"TERMINAL",0,0,"]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +1067,3621337,"genie.py",0,0,"from typing import Dict, Any\n\nimport optax\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nfrom flax.training.train_state import TrainState\nimport orbax.checkpoint as ocp\n\nfrom models.dynamics import DynamicsMaskGIT\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\nimport grain\n\n\nclass Genie(nn.Module):\n """"""Genie model""""""\n\n # --- Tokenizer ---\n in_dim: int\n tokenizer_dim: int\n latent_patch_dim: int\n num_patch_latents: int\n patch_size: int\n tokenizer_num_blocks: int\n tokenizer_num_heads: int\n # --- LAM ---\n lam_dim: int\n latent_action_dim: int\n num_latent_actions: int\n lam_patch_size: int\n lam_num_blocks: int\n lam_num_heads: int\n lam_co_train: bool\n # --- Dynamics ---\n dyna_dim: int\n dyna_num_blocks: int\n dyna_num_heads: int\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n dropout: float = 0.0\n mask_limit: float = 0.0\n\n def setup(self):\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n num_latents=self.num_patch_latents,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\n lam_outputs = self.lam.vq_encode(batch[""videos""], training=False)\n latent_actions = jax.lax.cond(\n self.lam_co_train,\n lambda: lam_outputs[""z_q""],\n lambda: jax.lax.stop_gradient(lam_outputs[""z_q""])\n )\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(tokenizer_outputs[""indices""]),\n latent_actions=latent_actions,\n )\n outputs[""mask_rng""] = batch[""mask_rng""]\n dyna_outputs = self.dynamics(outputs, training)\n outputs.update(dyna_outputs)\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = 
self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )\n outputs[""lam_indices""] = lam_outputs[""indices""]\n return outputs\n\n @nn.compact\n def sample(\n self,\n batch: Dict[str, Any],\n seq_len: int,\n steps: int = 25,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> Any:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by \n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size \n T: number of input (conditioning) frames \n N: patches per frame \n S: sequence length \n A: action space \n D: model latent dimension\n """"""\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""] # (B, T, N)\n B, T, N = token_idxs.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs.dtype)\n token_idxs = jnp.concatenate([token_idxs, pad], axis=1) # (B, S, N)\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n \n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n\n def generation_step_fn(carry, step_t):\n rng, current_token_idxs = carry\n rng, step_rng = jax.random.split(rng)\n\n # Mask current and future frames (i.e., t >= step_t)\n mask = jnp.arange(seq_len) >= step_t # (S,)\n mask = jnp.broadcast_to(mask[None, :, None], (B, seq_len, N)) # (B, S, N)\n mask = mask.astype(bool)\n masked_token_idxs = current_token_idxs * ~mask\n\n # --- Initialize and run MaskGIT loop ---\n init_carry_maskgit = (\n step_rng,\n masked_token_idxs,\n mask,\n action_tokens,\n )\n final_carry_maskgit, _ = loop_fn(init_carry_maskgit, jnp.arange(steps))\n updated_token_idxs = final_carry_maskgit[1]\n new_carry = (rng, updated_token_idxs)\n return new_carry, None\n\n # --- Run the autoregressive generation using scan ---\n initial_carry = (batch[""rng""], token_idxs)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry, _ = jax.lax.scan(\n generation_step_fn,\n initial_carry,\n timesteps_to_scan\n )\n final_token_idxs = final_carry[1]\n\n # --- Decode all tokens at once at the end ---\n final_frames = self.tokenizer.decode(\n final_token_idxs,\n video_hw=batch[""videos""].shape[2:4],\n )\n return final_frames\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, token_idxs, mask, action_tokens = carry\n step = x\n N = token_idxs.shape[2]\n\n # --- Construct + encode video ---\n vid_embed = 
self.dynamics.patch_embed(token_idxs) # (B, S, N, D)\n mask_token = self.dynamics.mask_token # (1, 1, 1, D,)\n mask_expanded = mask[..., None] # (B, S, N, 1) \n vid_embed = jnp.where(mask_expanded, mask_token, vid_embed)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed) / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jax.random.categorical(_rng, final_logits)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n token_idxs = jnp.where(mask, sampled_token_idxs, token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, token_idxs, new_mask, action_tokens)\n return new_carry, None\n\ndef restore_genie_components(\n train_state: TrainState,\n sharding: jax.sharding.NamedSharding,\n grain_iterator: grain.DataLoaderIterator,\n inputs: Dict[str, jax.Array],\n rng: jax.Array,\n args,\n):\n """"""Restore pre-trained Genie components""""""\n rng, _rng = jax.random.split(rng)\n\n # dummy values since we only use tx to initialize the dummy train states\n dummy_tx = optax.adamw(\n learning_rate=optax.constant_schedule(args.max_lr),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n )\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\n \n\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n tokenizer_checkpoint_manager = ocp.CheckpointManager(\n directory=args.tokenizer_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n num_blocks=args.tokenizer_num_blocks,\n num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n tokenizer_init_params = dummy_tokenizer.init(_rng, inputs)\n dummy_tokenizer_train_state = TrainState.create(\n apply_fn=dummy_tokenizer.apply, params=tokenizer_init_params, tx=dummy_tx\n )\n abstract_sharded_tokenizer_state = _create_abstract_sharded_pytree(\n dummy_tokenizer_train_state, sharding\n )\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n step=tokenizer_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_sharded_tokenizer_state),\n ),\n )[""model_state""]\n restored_tokenizer_params = restored_tokenizer.params[""params""]\n 
train_state.params[""params""][""tokenizer""].update(restored_tokenizer_params)\n tokenizer_checkpoint_manager.close()\n\n if args.lam_checkpoint:\n lam_checkpoint_manager = ocp.CheckpointManager(\n directory=args.lam_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n lam_init_params = dummy_lam.init(_rng, inputs)\n dummy_lam_train_state = TrainState.create(\n apply_fn=dummy_lam.apply, params=lam_init_params, tx=dummy_tx\n )\n abstract_sharded_lam_state = _create_abstract_sharded_pytree(\n dummy_lam_train_state, sharding\n )\n restored_lam = lam_checkpoint_manager.restore(\n step=lam_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_sharded_lam_state),\n ),\n )[""model_state""]\n restored_lam_params = restored_lam.params[""params""]\n # Genie does not initialize all LAM modules, thus we omit those extra modules during restoration\n # (f.srambical) FIXME: Currently, this is a small HBM memory crunch since the LAM's decoder is loaded into HBM and immediately dicarded.\n # A workaround would be to restore to host memory first, and only move the weights to HBM after pruning the decoder\n restored_lam_params = {\n k: v\n for k, v in restored_lam_params.items()\n if k in train_state.params[""params""][""lam""]\n }\n train_state.params[""params""][""lam""].update(restored_lam_params)\n lam_checkpoint_manager.close()\n\n return train_state\n\ndef _create_abstract_sharded_pytree(pytree_template, sharding_spec):\n """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, ""shape"") and hasattr(leaf_template, ""dtype""):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)",python,tab +1068,3627501,"genie.py",7749,0,"",python,selection_mouse +1069,3627652,"genie.py",7745,4,"None",python,selection_mouse +1070,3628142,"genie.py",7805,0,"",python,selection_mouse +1071,3628286,"genie.py",7797,13,"mask_expanded",python,selection_mouse +1072,3628842,"genie.py",7817,0,"",python,selection_mouse +1073,3628994,"genie.py",7812,10,"mask_token",python,selection_mouse +1074,3635929,"genie.py",7804,0,"",python,selection_mouse +1075,3636068,"genie.py",7797,13,"mask_expanded",python,selection_mouse +1076,3648533,"genie.py",7587,0,"",python,selection_mouse +1077,3648670,"genie.py",7583,9,"vid_embed",python,selection_mouse +1078,3662609,"genie.py",7780,0,"",python,selection_mouse +1079,3662715,"genie.py",7775,9,"vid_embed",python,selection_mouse +1080,3664177,"genie.py",7724,0,"",python,selection_mouse +1081,3664368,"genie.py",7719,13,"mask_expanded",python,selection_mouse +1082,3666978,"genie.py",7590,0,"",python,selection_mouse +1083,3667117,"genie.py",7583,9,"vid_embed",python,selection_mouse +1084,3698730,"genie.py",2107,0,"",python,selection_mouse +1085,3699060,"models/dynamics.py",0,0,"from typing import Dict, Any\n\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\n\nfrom 
utils.nn import STTransformer\n\n\nclass DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:\n mask = None\n\n # --- Predict transition ---\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n logits = self.dynamics(vid_embed)\n return dict(token_logits=logits, mask=mask)\n",python,tab +1086,3707017,"models/dynamics.py",1221,0,"",python,selection_mouse +1087,3708831,"models/dynamics.py",1443,0,"",python,selection_mouse +1088,3726934,"genie.py",0,0,"",python,tab +1089,3764000,"genie.py",7800,0,"",python,selection_mouse +1090,3764180,"genie.py",7797,13,"mask_expanded",python,selection_mouse +1091,3766947,"genie.py",7738,0,"",python,selection_mouse +1092,3767133,"genie.py",7735,4,"mask",python,selection_mouse +1093,3774216,"genie.py",8673,0,"",python,selection_mouse +1094,3774355,"genie.py",8661,17,"final_token_probs",python,selection_mouse +1095,3775268,"genie.py",8685,0,"",python,selection_mouse +1096,3775393,"genie.py",8683,4,"mask",python,selection_mouse +1097,3806918,"genie.py",9237,0,"",python,selection_mouse +1098,3807047,"genie.py",9234,8,"new_mask",python,selection_mouse +1099,3808149,"genie.py",9170,0,"",python,selection_mouse +1100,3808317,"genie.py",9161,14,"mask_update_fn",python,selection_mouse +1101,3809244,"genie.py",9195,0,"",python,selection_mouse +1102,3809817,"genie.py",9178,0,"",python,selection_mouse +1103,3809945,"genie.py",9176,4,"mask",python,selection_mouse +1104,3819571,"genie.py",9135,0,"",python,selection_mouse +1105,3819699,"genie.py",9131,8,"idx_mask",python,selection_mouse +1106,3822214,"genie.py",9195,0,"",python,selection_mouse +1107,3825705,"genie.py",9258,0,"",python,selection_mouse +1108,3825706,"genie.py",9257,0,"",python,selection_command +1109,3875219,"genie.py",8930,0,"",python,selection_mouse +1110,3875340,"genie.py",8927,6,"arange",python,selection_mouse +1111,3876203,"genie.py",8841,0,"",python,selection_mouse +1112,3876358,"genie.py",8832,19,"num_unmasked_tokens",python,selection_mouse +1113,3878148,"genie.py",8889,0,"",python,selection_mouse +1114,3878296,"genie.py",8875,14,"unmasked_ratio",python,selection_mouse +1115,3879330,"genie.py",8937,0,"",python,selection_mouse +1116,3879469,"genie.py",8934,17,"final_token_probs",python,selection_mouse 
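The training-time masking in `DynamicsMaskGIT.__call__` opened above is short enough to isolate: sample a per-call mask rate in `[mask_limit, 1)`, Bernoulli-mask token embeddings at that rate, and always leave frame 0 (the conditioning frame) visible. A hedged, standalone sketch mirroring the recorded code; `random_mask` and the array sizes are illustrative:

```python
import jax
import jax.numpy as jnp

def random_mask(rng, vid_embed, mask_token, mask_limit=0.0):
    rng1, rng2 = jax.random.split(rng)
    mask_prob = jax.random.uniform(rng1, minval=mask_limit)        # one rate per call
    mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])
    mask = mask.at[:, 0].set(False)                                # frame 0 stays visible
    masked = jnp.where(mask[..., None], mask_token, vid_embed)     # broadcast mask token
    return masked, mask

rng = jax.random.PRNGKey(0)
vid_embed = jnp.zeros((2, 16, 64, 512))    # (B, T, N, D), illustrative sizes
mask_token = jnp.ones((1, 1, 1, 512))
masked, mask = random_mask(rng, vid_embed, mask_token, mask_limit=0.5)
print(float(mask.mean()))  # roughly the sampled mask rate, minus the protected frame 0
```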
+1117,3880697,"genie.py",8973,0,"",python,selection_mouse +1118,3880868,"genie.py",8965,19,"num_unmasked_tokens",python,selection_mouse +1119,3890006,"genie.py",8939,0,"",python,selection_mouse +1120,3890137,"genie.py",8934,17,"final_token_probs",python,selection_mouse +1121,3906537,"genie.py",8722,0,"",python,selection_mouse +1122,3907101,"genie.py",8684,0,"",python,selection_mouse +1123,3907252,"genie.py",8683,4,"mask",python,selection_mouse +1124,3921420,"genie.py",8681,0,"",python,selection_mouse +1125,3922304,"genie.py",8682,0,"",python,selection_mouse +1126,3951734,"genie.py",8674,0,"",python,selection_mouse +1127,3951915,"genie.py",8661,17,"final_token_probs",python,selection_mouse +1128,3988320,"genie.py",8971,0,"",python,selection_mouse +1129,3988503,"genie.py",8965,19,"num_unmasked_tokens",python,selection_mouse +1130,4251634,"genie.py",0,0,"",python,tab +1131,4259366,"genie.py",8682,0,"",python,selection_mouse +1132,4383034,"genie.py",8914,0,"",python,selection_mouse +1133,4383195,"genie.py",8912,8,"idx_mask",python,selection_mouse +1134,4449735,"genie.py",9194,0,"",python,selection_mouse +1135,4449746,"genie.py",9193,0,"",python,selection_command +1136,4449860,"genie.py",9193,1,")",python,selection_mouse +1137,4449898,"genie.py",9194,0,"",python,selection_command +1138,4449934,"genie.py",9185,9,"ted_idxs)",python,selection_mouse +1139,4449935,"genie.py",9097,97,"(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)",python,selection_mouse +1140,4449971,"genie.py",9006,188," jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)",python,selection_mouse +1141,4449971,"genie.py",8918,276,"sk = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)",python,selection_mouse +1142,4450006,"genie.py",8912,282,"idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)",python,selection_mouse +1143,4450007,"genie.py",8909,285," idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)",python,selection_mouse +1144,4450045,"genie.py",8906,288," idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)",python,selection_mouse +1145,4450046,"genie.py",8904,290," idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)",python,selection_mouse +1146,4450087,"genie.py",8824,370," num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n 
sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)",python,selection_mouse +1147,4701833,"genie.py",8626,0,"",python,selection_mouse +1148,4723469,"genie.py",8864,0,"",python,selection_mouse +1149,4723665,"genie.py",8864,1,"N",python,selection_mouse +1150,4902343,"genie.py",9290,0,"",python,selection_mouse +1151,5226902,"genie.py",8130,0,"",python,selection_mouse +1152,5227065,"genie.py",8125,14,"unmasked_ratio",python,selection_mouse +1153,5228273,"genie.py",8139,0,"",python,selection_mouse +1154,5228682,"genie.py",8136,0,"",python,selection_mouse +1155,5228849,"genie.py",8125,14,"unmasked_ratio",python,selection_mouse +1156,5229373,"genie.py",8140,0,"",python,selection_mouse +1157,5229378,"genie.py",8139,0,"",python,selection_command +1158,5229890,"genie.py",8134,0,"",python,selection_mouse +1159,5230026,"genie.py",8125,14,"unmasked_ratio",python,selection_mouse +1160,5230529,"genie.py",8140,0,"",python,selection_mouse +1161,5230529,"genie.py",8139,0,"",python,selection_command +1162,5231053,"genie.py",8132,0,"",python,selection_mouse +1163,5231189,"genie.py",8125,14,"unmasked_ratio",python,selection_mouse +1164,5231697,"genie.py",8140,0,"",python,selection_mouse +1165,5231702,"genie.py",8139,0,"",python,selection_command +1166,5232242,"genie.py",8133,0,"",python,selection_mouse +1167,5232372,"genie.py",8125,14,"unmasked_ratio",python,selection_mouse +1168,5232943,"genie.py",8140,0,"",python,selection_mouse +1169,5232949,"genie.py",8139,0,"",python,selection_command +1170,5233512,"genie.py",8196,0,"",python,selection_mouse +1171,5233643,"genie.py",8187,9,"vid_embed",python,selection_mouse +1172,5234166,"genie.py",8136,0,"",python,selection_mouse +1173,5234294,"genie.py",8125,14,"unmasked_ratio",python,selection_mouse +1174,5234813,"genie.py",8140,0,"",python,selection_mouse +1175,5234818,"genie.py",8139,0,"",python,selection_command +1176,5235354,"genie.py",8136,0,"",python,selection_mouse +1177,5235495,"genie.py",8125,14,"unmasked_ratio",python,selection_mouse +1178,5236170,"genie.py",8140,0,"",python,selection_mouse +1179,5236176,"genie.py",8139,0,"",python,selection_command +1180,5236704,"genie.py",8134,0,"",python,selection_mouse +1181,5236822,"genie.py",8125,14,"unmasked_ratio",python,selection_mouse +1182,5237566,"genie.py",8140,0,"",python,selection_mouse +1183,5237600,"genie.py",8139,0,"",python,selection_command +1184,5238124,"genie.py",8134,0,"",python,selection_mouse +1185,5238261,"genie.py",8125,14,"unmasked_ratio",python,selection_mouse +1186,5238989,"genie.py",8140,0,"",python,selection_mouse +1187,5238994,"genie.py",8139,0,"",python,selection_command +1188,5239571,"genie.py",8133,0,"",python,selection_mouse +1189,5239703,"genie.py",8125,14,"unmasked_ratio",python,selection_mouse +1190,5240537,"genie.py",8140,0,"",python,selection_mouse +1191,5240549,"genie.py",8139,0,"",python,selection_command +1192,5241139,"genie.py",8058,0,"",python,selection_mouse +1193,5241274,"genie.py",8058,1," ",python,selection_mouse +1194,5241807,"genie.py",8131,0,"",python,selection_mouse +1195,5241970,"genie.py",8125,14,"unmasked_ratio",python,selection_mouse +1196,5322947,"genie.py",7971,0,"",python,selection_mouse +1197,5348597,"genie.py",8069,0,"",python,selection_mouse +1198,5348753,"genie.py",8067,5,"steps",python,selection_mouse +1199,5349317,"genie.py",8065,0,"",python,selection_mouse +1200,5349450,"genie.py",8062,4,"self",python,selection_mouse 
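The span selected above is MaskGIT's confidence-based re-masking: after each step, only the most confident predictions (up to the schedule's cutoff) stay revealed and everything else is masked again for the next step. A small worked example with toy numbers and a single batch row:

```python
import jax
import jax.numpy as jnp

probs = jnp.array([[0.9, 0.1, 0.5, 0.7]])     # (B, P): per-token confidence
num_unmasked_tokens = 2
# Rank positions in the *sorted* order beyond the cutoff get re-masked.
idx_mask = jnp.arange(probs.shape[-1]) > num_unmasked_tokens
sorted_idxs = jnp.argsort(probs, axis=-1, descending=True)
mask = jnp.zeros_like(probs, dtype=bool)
new_mask = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))(mask, sorted_idxs)
print(new_mask)  # [[False  True False False]]: only the least confident token stays masked
```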
+1201,5350675,"genie.py",8140,0,"",python,selection_mouse +1202,5350681,"genie.py",8139,0,"",python,selection_command +1203,5351289,"genie.py",8069,0,"",python,selection_mouse +1204,5351437,"genie.py",8067,5,"steps",python,selection_mouse +1205,5412428,"genie.py",8051,0,"",python,selection_mouse +1206,5412549,"genie.py",8049,4,"step",python,selection_mouse +1207,5435129,"genie.py",8172,0,"",python,selection_mouse +1208,5435723,"genie.py",8033,0,"",python,selection_mouse +1209,5436322,"genie.py",8110,0,"",python,selection_mouse +1210,5436866,"genie.py",8035,0,"",python,selection_mouse +1211,5437476,"genie.py",8038,0,"",python,selection_mouse +1212,5437635,"genie.py",8035,3,"cos",python,selection_mouse +1213,5438329,"genie.py",8036,0,"",python,selection_mouse +1214,5438330,"genie.py",8035,3,"cos",python,selection_mouse +1215,5438852,"genie.py",7957,0,"",python,selection_mouse +1216,5439031,"genie.py",7957,3,"pad",python,selection_mouse +1217,5439554,"genie.py",8032,0,"",python,selection_mouse +1218,5439733,"genie.py",8031,3,"jnp",python,selection_mouse +1219,5468591,"genie.py",8683,0,"",python,selection_mouse +1220,5468725,"genie.py",8683,4,"mask",python,selection_mouse +1221,5469360,"genie.py",8675,0,"",python,selection_mouse +1222,5469513,"genie.py",8661,17,"final_token_probs",python,selection_mouse +1223,5542124,"genie.py",8590,72," = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n f",python,selection_command +1224,5542276,"genie.py",8661,18,"final_token_probs ",python,selection_command +1225,5542517,"genie.py",8590,72," = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n f",python,selection_command +1226,5542659,"genie.py",8661,18,"final_token_probs ",python,selection_command +1227,5542947,"genie.py",8661,17,"final_token_probs",python,selection_command +1228,5543197,"genie.py",8589,73,"s = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n f",python,selection_command +1229,5543486,"genie.py",8661,17,"final_token_probs",python,selection_command +1230,5543659,"genie.py",8589,73,"s = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n f",python,selection_command +1231,5543708,"genie.py",8661,17,"final_token_probs",python,selection_command +1232,5543864,"genie.py",8589,73,"s = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n f",python,selection_command +1233,5543904,"genie.py",8661,17,"final_token_probs",python,selection_command +1234,5544061,"genie.py",8589,73,"s = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n f",python,selection_command +1235,5544136,"genie.py",8661,17,"final_token_probs",python,selection_command +1236,5544260,"genie.py",8589,73,"s = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n f",python,selection_command +1237,5544359,"genie.py",8661,17,"final_token_probs",python,selection_command +1238,5544445,"genie.py",8589,73,"s = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n f",python,selection_command +1239,5544527,"genie.py",8661,17,"final_token_probs",python,selection_command +1240,5544640,"genie.py",8589,73,"s = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n f",python,selection_command +1241,5544721,"genie.py",8661,17,"final_token_probs",python,selection_command +1242,5544835,"genie.py",8589,73,"s = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n f",python,selection_command +1243,5544913,"genie.py",8661,17,"final_token_probs",python,selection_command +1244,5545077,"genie.py",8589,73,"s = gather_fn(jax.nn.softmax(final_logits), 
sampled_token_idxs)\n f",python,selection_command +1245,5545126,"genie.py",8661,17,"final_token_probs",python,selection_command +1246,5545251,"genie.py",8589,73,"s = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n f",python,selection_command +1247,5545373,"genie.py",8661,17,"final_token_probs",python,selection_command +1248,5545465,"genie.py",8589,73,"s = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n f",python,selection_command +1249,5545576,"genie.py",8661,17,"final_token_probs",python,selection_command +1250,5545674,"genie.py",8589,73,"s = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n f",python,selection_command +1251,5545785,"genie.py",8661,17,"final_token_probs",python,selection_command +1252,5545877,"genie.py",8589,73,"s = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n f",python,selection_command +1253,5545974,"genie.py",8661,17,"final_token_probs",python,selection_command +1254,5546110,"genie.py",8589,73,"s = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n f",python,selection_command +1255,5546217,"genie.py",8661,17,"final_token_probs",python,selection_command +1256,5547241,"genie.py",8677,0,"",python,selection_command +1257,5547597,"genie.py",8589,0,"",python,selection_command +1258,5547969,"genie.py",8677,0,"",python,selection_command +1259,5548165,"genie.py",8589,0,"",python,selection_command +1260,5548273,"genie.py",8677,0,"",python,selection_command +1261,5548411,"genie.py",8589,0,"",python,selection_command +1262,5548501,"genie.py",8677,0,"",python,selection_command +1263,5548650,"genie.py",8589,0,"",python,selection_command +1264,5548726,"genie.py",8677,0,"",python,selection_command +1265,5548840,"genie.py",8589,0,"",python,selection_command +1266,5548931,"genie.py",8677,0,"",python,selection_command +1267,5549085,"genie.py",8589,0,"",python,selection_command +1268,5549188,"genie.py",8677,0,"",python,selection_command +1269,5549368,"genie.py",8712,0,"",python,selection_command +1270,5549799,"genie.py",8677,0,"",python,selection_command +1271,5549983,"genie.py",8589,0,"",python,selection_command +1272,5550599,"genie.py",8677,0,"",python,selection_command +1273,5550795,"genie.py",8589,0,"",python,selection_command +1274,5550954,"genie.py",8677,0,"",python,selection_command +1275,5551145,"genie.py",8589,0,"",python,selection_command +1276,5551309,"genie.py",8677,0,"",python,selection_command +1277,5551537,"genie.py",8589,0,"",python,selection_command +1278,5552192,"genie.py",8677,0,"",python,selection_command +1279,5552390,"genie.py",8589,0,"",python,selection_command +1280,5552577,"genie.py",8677,0,"",python,selection_command +1281,5552830,"genie.py",8589,0,"",python,selection_command +1282,5553114,"genie.py",8677,0,"",python,selection_command +1283,5553319,"genie.py",8589,0,"",python,selection_command +1284,5553595,"genie.py",8677,0,"",python,selection_command +1285,5553842,"genie.py",8589,0,"",python,selection_command +1286,5554080,"genie.py",8677,0,"",python,selection_command +1287,5554314,"genie.py",8589,0,"",python,selection_command +1288,5554549,"genie.py",8677,0,"",python,selection_command +1289,5554996,"genie.py",8712,0,"",python,selection_command +1290,5555557,"genie.py",8677,0,"",python,selection_command +1291,5556002,"genie.py",8589,0,"",python,selection_command +1292,5557334,"genie.py",8677,0,"",python,selection_command +1293,5557586,"genie.py",8589,0,"",python,selection_command +1294,5557766,"genie.py",8677,0,"",python,selection_command 
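These repeated cursor hops land on the cosine schedule inside `MaskGITStep`. As a quick sanity check of how `unmasked_ratio`, which decays from roughly 1 to 0 over the loop, drives both the sampling temperature and the number of tokens kept per step, a throwaway sketch with illustrative constants:

```python
import jax.numpy as jnp

steps, N, temperature = 25, 64, 1.0  # illustrative values
for step in range(steps):
    unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (steps * 2))   # ~1 at step 0, 0 at the last step
    step_temp = temperature * (1.0 - unmasked_ratio)              # temperature ramps up over steps
    num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)  # kept tokens grow to N
    if step in (0, steps // 2, steps - 1):
        print(step, float(unmasked_ratio), float(step_temp), int(num_unmasked_tokens))
```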
+1295,5558005,"genie.py",8589,0,"",python,selection_command +1296,5558211,"genie.py",8677,0,"",python,selection_command +1297,5558504,"genie.py",8589,0,"",python,selection_command +1298,5558781,"genie.py",8677,0,"",python,selection_command +1299,5559027,"genie.py",8589,0,"",python,selection_command +1300,5559445,"genie.py",8677,0,"",python,selection_command +1301,5562590,"genie.py",8589,0,"",python,selection_command +1302,5615981,"genie.py",8687,0,"",python,selection_mouse +1303,5615982,"genie.py",8686,0,"",python,selection_command +1304,5641531,"genie.py",8767,0,"",python,selection_mouse +1305,5642075,"genie.py",8687,0,"",python,selection_mouse +1306,5642111,"genie.py",8686,0,"",python,selection_command +1307,5643039,"genie.py",8957,0,"",python,selection_mouse +1308,5643843,"genie.py",8865,0,"",python,selection_mouse +1309,5645402,"genie.py",8864,1,"N",python,selection_mouse +1310,5650569,"genie.py",9139,0,"",python,selection_mouse +1311,5651985,"genie.py",8917,0,"",python,selection_mouse +1312,5652101,"genie.py",8912,8,"idx_mask",python,selection_mouse +1313,5652906,"genie.py",8923,0,"",python,selection_mouse +1314,5681043,"genie.py",9290,0,"",python,selection_mouse +1315,5681648,"genie.py",8861,0,"",python,selection_mouse +1316,5682219,"genie.py",8863,0,"",python,selection_mouse +1317,5682822,"genie.py",8865,0,"",python,selection_mouse +1318,5683131,"genie.py",8864,1,"N",python,selection_mouse +1319,5683165,"genie.py",8863,2,"(N",python,selection_mouse +1320,5683534,"genie.py",8863,0,"",python,selection_mouse +1321,5684027,"genie.py",8864,0,"",python,selection_mouse +1322,5691769,"genie.py",8942,0,"",python,selection_mouse +1323,5691889,"genie.py",8934,17,"final_token_probs",python,selection_mouse +1324,5703962,"genie.py",8723,0,"",python,selection_mouse +1325,5703995,"genie.py",8722,0,"",python,selection_command +1326,5704576,"genie.py",8687,0,"",python,selection_mouse +1327,5704582,"genie.py",8686,0,"",python,selection_command +1328,5708421,"genie.py",8685,0,"",python,selection_mouse +1329,5708539,"genie.py",8683,4,"mask",python,selection_mouse +1330,5709825,"genie.py",8682,0,"",python,selection_mouse +1331,5709977,"genie.py",8681,1," ",python,selection_mouse +1332,5710160,"genie.py",8681,2," ~",python,selection_mouse +1333,5710173,"genie.py",8681,6," ~mask",python,selection_mouse +1334,5710685,"genie.py",8686,0,"",python,selection_mouse +1335,5710879,"genie.py",8683,4,"mask",python,selection_mouse +1336,5711293,"genie.py",8682,5,"~mask",python,selection_mouse +1337,5712321,"genie.py",8682,0,"",python,selection_mouse +1338,5712322,"genie.py",8681,1," ",python,selection_mouse +1339,5712589,"genie.py",8681,2," ~",python,selection_mouse +1340,5712625,"genie.py",8681,6," ~mask",python,selection_mouse +1341,5713188,"genie.py",8685,0,"",python,selection_mouse +1342,5713188,"genie.py",8683,4,"mask",python,selection_mouse +1343,5713524,"genie.py",8682,5,"~mask",python,selection_mouse +1344,5714170,"genie.py",8682,0,"",python,selection_mouse +1345,5714347,"genie.py",8681,1," ",python,selection_mouse +1346,5714574,"genie.py",8681,2," ~",python,selection_mouse +1347,5714610,"genie.py",8681,6," ~mask",python,selection_mouse +1348,5715162,"genie.py",8684,0,"",python,selection_mouse +1349,5715799,"genie.py",8683,1,"m",python,selection_mouse +1350,5716185,"genie.py",8682,2,"~m",python,selection_mouse +1351,5716738,"genie.py",8682,0,"",python,selection_mouse +1352,5720920,"genie.py",8719,0,"",python,selection_mouse +1353,5721098,"genie.py",8719,4,"only",python,selection_mouse 
+1354,5721393,"genie.py",8683,40,"mask\n # Update masked tokens only",python,selection_mouse +1355,5721715,"genie.py",8684,0,"",python,selection_mouse +1356,5722904,"genie.py",8683,4,"mask",python,selection_mouse +1357,5750916,"genie.py",8736,0,"",python,selection_mouse +1358,5751082,"genie.py",8732,10,"token_idxs",python,selection_mouse +1359,5757939,"genie.py",9156,0,"",python,selection_mouse +1360,5758159,"genie.py",9150,8,"new_mask",python,selection_mouse +1361,5759144,"genie.py",8740,0,"",python,selection_mouse +1362,5759271,"genie.py",8732,10,"token_idxs",python,selection_mouse +1363,5760584,"genie.py",9188,0,"",python,selection_mouse +1364,5761316,"genie.py",9107,0,"",python,selection_mouse +1365,5761439,"genie.py",9105,3,"msk",python,selection_mouse +1366,5761990,"genie.py",9113,0,"",python,selection_mouse +1367,5762152,"genie.py",9110,3,"ids",python,selection_mouse +1368,5762864,"genie.py",9110,0,"",python,selection_mouse +1369,5762865,"genie.py",9110,3,"ids",python,selection_mouse +1370,5763390,"genie.py",9103,0,"",python,selection_mouse +1371,5763541,"genie.py",9098,6,"lambda",python,selection_mouse +1372,5764053,"genie.py",9107,0,"",python,selection_mouse +1373,5764194,"genie.py",9105,3,"msk",python,selection_mouse +1374,5764943,"genie.py",9101,0,"",python,selection_mouse +1375,5765082,"genie.py",9098,6,"lambda",python,selection_mouse +1376,5767993,"genie.py",8683,0,"",python,selection_mouse +1377,5768215,"genie.py",8683,4,"mask",python,selection_mouse +1378,5787443,"genie.py",8917,0,"",python,selection_mouse +1379,5787589,"genie.py",8912,8,"idx_mask",python,selection_mouse +1380,5788754,"genie.py",9134,0,"",python,selection_mouse +1381,5788950,"genie.py",9131,8,"idx_mask",python,selection_mouse +1382,5789567,"genie.py",9118,0,"",python,selection_mouse +1383,5789692,"genie.py",9115,3,"msk",python,selection_mouse +1384,5790505,"genie.py",9100,0,"",python,selection_mouse +1385,5790665,"genie.py",9098,6,"lambda",python,selection_mouse +1386,5790911,"genie.py",9098,7,"lambda ",python,selection_mouse +1387,5790946,"genie.py",9098,10,"lambda msk",python,selection_mouse +1388,5790947,"genie.py",9098,11,"lambda msk,",python,selection_mouse +1389,5790999,"genie.py",9098,15,"lambda msk, ids",python,selection_mouse +1390,5791000,"genie.py",9098,95,"lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs",python,selection_mouse +1391,5791030,"genie.py",9098,96,"lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)",python,selection_mouse +1392,5791859,"genie.py",9098,41,"lambda msk, ids: msk.at[ids].set(idx_mask",python,selection_mouse +1393,5792376,"genie.py",9139,0,"",python,selection_mouse +1394,5792795,"genie.py",9138,1,"k",python,selection_mouse +1395,5792796,"genie.py",9136,3,"ask",python,selection_mouse +1396,5792819,"genie.py",9132,7,"dx_mask",python,selection_mouse +1397,5792820,"genie.py",9130,9,"(idx_mask",python,selection_mouse +1398,5792838,"genie.py",9128,11,"et(idx_mask",python,selection_mouse +1399,5792858,"genie.py",9123,16,"ds].set(idx_mask",python,selection_mouse +1400,5792875,"genie.py",9121,18,"[ids].set(idx_mask",python,selection_mouse +1401,5792894,"genie.py",9119,20,"at[ids].set(idx_mask",python,selection_mouse +1402,5792915,"genie.py",9117,22,"k.at[ids].set(idx_mask",python,selection_mouse +1403,5792932,"genie.py",9115,24,"msk.at[ids].set(idx_mask",python,selection_mouse +1404,5792968,"genie.py",9113,26,": msk.at[ids].set(idx_mask",python,selection_mouse +1405,5792974,"genie.py",9111,28,"ds: 
msk.at[ids].set(idx_mask",python,selection_mouse +1406,5793006,"genie.py",9109,30," ids: msk.at[ids].set(idx_mask",python,selection_mouse +1407,5793007,"genie.py",9108,31,", ids: msk.at[ids].set(idx_mask",python,selection_mouse +1408,5793013,"genie.py",9107,32,"k, ids: msk.at[ids].set(idx_mask",python,selection_mouse +1409,5793043,"genie.py",9106,33,"sk, ids: msk.at[ids].set(idx_mask",python,selection_mouse +1410,5793044,"genie.py",9105,34,"msk, ids: msk.at[ids].set(idx_mask",python,selection_mouse +1411,5793083,"genie.py",9103,36,"a msk, ids: msk.at[ids].set(idx_mask",python,selection_mouse +1412,5793121,"genie.py",9102,37,"da msk, ids: msk.at[ids].set(idx_mask",python,selection_mouse +1413,5793121,"genie.py",9101,38,"bda msk, ids: msk.at[ids].set(idx_mask",python,selection_mouse +1414,5793158,"genie.py",9100,39,"mbda msk, ids: msk.at[ids].set(idx_mask",python,selection_mouse +1415,5793237,"genie.py",9099,40,"ambda msk, ids: msk.at[ids].set(idx_mask",python,selection_mouse +1416,5793536,"genie.py",9099,0,"",python,selection_mouse +1417,5793537,"genie.py",9098,6,"lambda",python,selection_mouse +1418,5793766,"genie.py",9098,10,"lambda msk",python,selection_mouse +1419,5793777,"genie.py",9098,11,"lambda msk,",python,selection_mouse +1420,5793818,"genie.py",9098,15,"lambda msk, ids",python,selection_mouse +1421,5793819,"genie.py",9098,16,"lambda msk, ids:",python,selection_mouse +1422,5793855,"genie.py",9098,20,"lambda msk, ids: msk",python,selection_mouse +1423,5793897,"genie.py",9098,23,"lambda msk, ids: msk.at",python,selection_mouse +1424,5793898,"genie.py",9098,24,"lambda msk, ids: msk.at[",python,selection_mouse +1425,5793938,"genie.py",9098,27,"lambda msk, ids: msk.at[ids",python,selection_mouse +1426,5793976,"genie.py",9098,29,"lambda msk, ids: msk.at[ids].",python,selection_mouse +1427,5794010,"genie.py",9098,32,"lambda msk, ids: msk.at[ids].set",python,selection_mouse +1428,5794087,"genie.py",9098,33,"lambda msk, ids: msk.at[ids].set(",python,selection_mouse +1429,5794088,"genie.py",9098,41,"lambda msk, ids: msk.at[ids].set(idx_mask",python,selection_mouse +1430,5794376,"genie.py",9098,42,"lambda msk, ids: msk.at[ids].set(idx_mask)",python,selection_mouse +1431,5795050,"genie.py",9140,0,"",python,selection_mouse +1432,5795162,"genie.py",9139,2,"))",python,selection_mouse +1433,5795326,"genie.py",9139,2,"))",python,selection_mouse +1434,5795342,"genie.py",9139,55,"))\n new_mask = mask_update_fn(mask, sorted_idxs)",python,selection_mouse +1435,5795422,"genie.py",9127,14,"set(idx_mask))",python,selection_mouse +1436,5795423,"genie.py",9122,19,"ids].set(idx_mask))",python,selection_mouse +1437,5795427,"genie.py",9121,20,"[ids].set(idx_mask))",python,selection_mouse +1438,5795461,"genie.py",9119,22,"at[ids].set(idx_mask))",python,selection_mouse +1439,5795467,"genie.py",9115,26,"msk.at[ids].set(idx_mask))",python,selection_mouse +1440,5795498,"genie.py",9113,28,": msk.at[ids].set(idx_mask))",python,selection_mouse +1441,5795537,"genie.py",9110,31,"ids: msk.at[ids].set(idx_mask))",python,selection_mouse +1442,5795571,"genie.py",9109,32," ids: msk.at[ids].set(idx_mask))",python,selection_mouse +1443,5795611,"genie.py",9108,33,", ids: msk.at[ids].set(idx_mask))",python,selection_mouse +1444,5795611,"genie.py",9105,36,"msk, ids: msk.at[ids].set(idx_mask))",python,selection_mouse +1445,5795690,"genie.py",9104,37," msk, ids: msk.at[ids].set(idx_mask))",python,selection_mouse +1446,5795706,"genie.py",9098,43,"lambda msk, ids: msk.at[ids].set(idx_mask))",python,selection_mouse 
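The nested-`vmap` gather being inspected here pulls out, for every sampled token id, its softmax probability. A hedged equivalence check against `jnp.take_along_axis`; the shapes are illustrative:

```python
import jax
import jax.numpy as jnp

logits = jax.random.normal(jax.random.PRNGKey(0), (2, 3, 4, 10))  # (B, S, N, vocab)
idxs = jnp.argmax(logits, axis=-1)                                # (B, S, N) sampled ids
probs = jax.nn.softmax(logits)

gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))       # as in MaskGITStep
a = gather_fn(probs, idxs)
b = jnp.take_along_axis(probs, idxs[..., None], axis=-1)[..., 0]  # equivalent one-liner
print(bool(jnp.allclose(a, b)))  # True
```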
+1447,5796210,"genie.py",9100,0,"",python,selection_mouse +1448,5796210,"genie.py",9098,6,"lambda",python,selection_mouse +1449,5796464,"genie.py",9098,7,"lambda ",python,selection_mouse +1450,5796479,"genie.py",9098,10,"lambda msk",python,selection_mouse +1451,5796480,"genie.py",9098,11,"lambda msk,",python,selection_mouse +1452,5796517,"genie.py",9098,15,"lambda msk, ids",python,selection_mouse +1453,5796557,"genie.py",9098,17,"lambda msk, ids: ",python,selection_mouse +1454,5796558,"genie.py",9098,20,"lambda msk, ids: msk",python,selection_mouse +1455,5796598,"genie.py",9098,23,"lambda msk, ids: msk.at",python,selection_mouse +1456,5796637,"genie.py",9043,61,"-1, descending=True)\n mask_update_fn = jax.vmap(lambda",python,selection_mouse +1457,5796673,"genie.py",9044,60,"1, descending=True)\n mask_update_fn = jax.vmap(lambda",python,selection_mouse +1458,5796674,"genie.py",9046,58," descending=True)\n mask_update_fn = jax.vmap(lambda",python,selection_mouse +1459,5796715,"genie.py",9047,57,"descending=True)\n mask_update_fn = jax.vmap(lambda",python,selection_mouse +1460,5796752,"genie.py",9098,32,"lambda msk, ids: msk.at[ids].set",python,selection_mouse +1461,5796791,"genie.py",9098,33,"lambda msk, ids: msk.at[ids].set(",python,selection_mouse +1462,5796829,"genie.py",9098,41,"lambda msk, ids: msk.at[ids].set(idx_mask",python,selection_mouse +1463,5797652,"genie.py",9098,42,"lambda msk, ids: msk.at[ids].set(idx_mask)",python,selection_mouse +1464,5798353,"genie.py",9140,0,"",python,selection_mouse +1465,5799085,"genie.py",9116,0,"",python,selection_mouse +1466,5799251,"genie.py",9115,3,"msk",python,selection_mouse +1467,5800388,"genie.py",9140,0,"",python,selection_mouse +1468,5801602,"genie.py",9139,0,"",python,selection_mouse +1469,5805892,"genie.py",9131,8,"idx_mask",python,selection_mouse +1470,5806079,"genie.py",9064,78," mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n",python,selection_mouse +1471,5809735,"genie.py",9139,0,"",python,selection_mouse +1472,5809735,"genie.py",9131,8,"idx_mask",python,selection_mouse +1473,5809873,"genie.py",9064,78," mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n",python,selection_mouse +1474,5872291,"genie.py",9194,0,"",python,selection_mouse +1475,5872292,"genie.py",9193,0,"",python,selection_command +1476,5872964,"genie.py",9195,0,"",python,selection_mouse +1477,6137749,"genie.py",8936,0,"",python,selection_mouse +1478,6139001,"genie.py",8856,0,"",python,selection_mouse +1479,6139569,"genie.py",8860,0,"",python,selection_mouse +1480,6139719,"genie.py",8858,5,"round",python,selection_mouse +1481,6140826,"genie.py",8860,0,"",python,selection_mouse +1482,6140827,"genie.py",8858,5,"round",python,selection_mouse +1483,6141116,"genie.py",8824,80," num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n",python,selection_mouse +1484,6141811,"genie.py",8931,0,"",python,selection_mouse +1485,6141995,"genie.py",8927,6,"arange",python,selection_mouse +1486,6142569,"genie.py",8845,0,"",python,selection_mouse +1487,6142707,"genie.py",8832,19,"num_unmasked_tokens",python,selection_mouse +1488,6149562,"genie.py",8845,0,"",python,selection_mouse +1489,6151788,"genie.py",9003,0,"",python,selection_mouse +1490,6211120,"genie.py",8029,0,"",python,selection_mouse +1491,6363968,"genie.py",8210,0,"",python,selection_mouse +1492,6364539,"genie.py",8040,0,"",python,selection_mouse +1493,6365341,"genie.py",8113,0,"",python,selection_command +1494,6365736,"genie.py",8040,0,"",python,selection_command 
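The selections above keep landing on the batched mask update in genie.py: `mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))` followed by `new_mask = mask_update_fn(mask, sorted_idxs)`. A minimal sketch of what that line does, with dummy shapes and values (only the vmap'd lambda itself appears in the trace; everything else here is an assumption for illustration):

    import jax
    import jax.numpy as jnp

    B, N, k = 2, 8, 3                        # batch, tokens per frame, updates per step (assumed)
    mask = jnp.ones((B, N), dtype=bool)      # True = token still masked
    sorted_idxs = jnp.tile(jnp.arange(k), (B, 1))   # per-example scatter targets
    idx_mask = jnp.zeros((k,), dtype=bool)   # values written at those targets

    # vmap maps the per-example scatter over the leading batch axis of both
    # `mask` and `sorted_idxs`; `idx_mask` is closed over and shared across
    # the batch, exactly as in the lambda being inspected.
    mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))
    new_mask = mask_update_fn(mask, sorted_idxs)    # shape (B, N)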
+1495,6365897,"genie.py",8113,0,"",python,selection_command +1496,6366067,"genie.py",8040,0,"",python,selection_command +1497,6366164,"genie.py",8113,0,"",python,selection_command +1498,6366329,"genie.py",8040,0,"",python,selection_command +1499,6366409,"genie.py",8113,0,"",python,selection_command +1500,6366509,"genie.py",8040,0,"",python,selection_command +1501,6366638,"genie.py",8113,0,"",python,selection_command +1502,6366789,"genie.py",8040,0,"",python,selection_command +1503,6366888,"genie.py",8113,0,"",python,selection_command +1504,6367075,"genie.py",8175,0,"",python,selection_command +1505,6367221,"genie.py",8113,0,"",python,selection_command +1506,6367484,"genie.py",8040,0,"",python,selection_command +1507,6367677,"genie.py",8113,0,"",python,selection_command +1508,6367837,"genie.py",8040,0,"",python,selection_command +1509,6367944,"genie.py",8113,0,"",python,selection_command +1510,6368069,"genie.py",8040,0,"",python,selection_command +1511,6368160,"genie.py",8113,0,"",python,selection_command +1512,6368271,"genie.py",8040,0,"",python,selection_command +1513,6368350,"genie.py",8113,0,"",python,selection_command +1514,6368477,"genie.py",8040,0,"",python,selection_command +1515,6368541,"genie.py",8113,0,"",python,selection_command +1516,6368685,"genie.py",8040,0,"",python,selection_command +1517,6368762,"genie.py",8113,0,"",python,selection_command +1518,6368896,"genie.py",8040,0,"",python,selection_command +1519,6368991,"genie.py",8113,0,"",python,selection_command +1520,6369123,"genie.py",8040,0,"",python,selection_command +1521,6369200,"genie.py",8113,0,"",python,selection_command +1522,6369355,"genie.py",8040,0,"",python,selection_command +1523,6369433,"genie.py",8113,0,"",python,selection_command +1524,6369551,"genie.py",8040,0,"",python,selection_command +1525,6369633,"genie.py",8113,0,"",python,selection_command +1526,6369790,"genie.py",8040,0,"",python,selection_command +1527,6369851,"genie.py",8113,0,"",python,selection_command +1528,6370009,"genie.py",8040,0,"",python,selection_command +1529,6370065,"genie.py",8113,0,"",python,selection_command +1530,6370219,"genie.py",8040,0,"",python,selection_command +1531,6370297,"genie.py",8113,0,"",python,selection_command +1532,6370434,"genie.py",8040,0,"",python,selection_command +1533,6370488,"genie.py",8113,0,"",python,selection_command +1534,6370677,"genie.py",8175,0,"",python,selection_command +1535,6370846,"genie.py",8113,0,"",python,selection_command +1536,6370942,"genie.py",8175,0,"",python,selection_command +1537,6371089,"genie.py",8113,0,"",python,selection_command +1538,6371364,"genie.py",8040,0,"",python,selection_command +1539,6371479,"genie.py",8113,0,"",python,selection_command +1540,6371749,"genie.py",8175,0,"",python,selection_command +1541,6371976,"genie.py",8113,0,"",python,selection_command +1542,6372128,"genie.py",8175,0,"",python,selection_command +1543,6372253,"genie.py",8113,0,"",python,selection_command +1544,6372361,"genie.py",8175,0,"",python,selection_command +1545,6372492,"genie.py",8113,0,"",python,selection_command +1546,6372573,"genie.py",8175,0,"",python,selection_command +1547,6372719,"genie.py",8113,0,"",python,selection_command +1548,6372794,"genie.py",8175,0,"",python,selection_command +1549,6372959,"genie.py",8113,0,"",python,selection_command +1550,6372997,"genie.py",8175,0,"",python,selection_command +1551,6373197,"genie.py",8113,0,"",python,selection_command +1552,6373300,"genie.py",8175,0,"",python,selection_command +1553,6373485,"genie.py",8113,0,"",python,selection_command 
+1554,6373611,"genie.py",8175,0,"",python,selection_command +1555,6373917,"genie.py",8113,0,"",python,selection_command +1556,6373987,"genie.py",8175,0,"",python,selection_command +1557,6374266,"genie.py",8113,0,"",python,selection_command +1558,6374351,"genie.py",8175,0,"",python,selection_command +1559,6374555,"genie.py",8113,0,"",python,selection_command +1560,6374667,"genie.py",8175,0,"",python,selection_command +1561,6374824,"genie.py",8113,0,"",python,selection_command +1562,6374909,"genie.py",8175,0,"",python,selection_command +1563,6375085,"genie.py",8113,0,"",python,selection_command +1564,6375178,"genie.py",8175,0,"",python,selection_command +1565,6375343,"genie.py",8113,0,"",python,selection_command +1566,6375437,"genie.py",8175,0,"",python,selection_command +1567,6375615,"genie.py",8113,0,"",python,selection_command +1568,6375676,"genie.py",8175,0,"",python,selection_command +1569,6375841,"genie.py",8113,0,"",python,selection_command +1570,6375910,"genie.py",8175,0,"",python,selection_command +1571,6376104,"genie.py",8113,0,"",python,selection_command +1572,6376151,"genie.py",8175,0,"",python,selection_command +1573,6376364,"genie.py",8113,0,"",python,selection_command +1574,6376430,"genie.py",8175,0,"",python,selection_command +1575,6376586,"genie.py",8113,0,"",python,selection_command +1576,6376675,"genie.py",8175,0,"",python,selection_command +1577,6376877,"genie.py",8113,0,"",python,selection_command +1578,6376910,"genie.py",8175,0,"",python,selection_command +1579,6377207,"genie.py",8113,0,"",python,selection_command +1580,6377240,"genie.py",8175,0,"",python,selection_command +1581,6377546,"genie.py",8113,0,"",python,selection_command +1582,6377592,"genie.py",8175,0,"",python,selection_command +1583,6377861,"genie.py",8113,0,"",python,selection_command +1584,6377903,"genie.py",8175,0,"",python,selection_command +1585,6378188,"genie.py",8113,0,"",python,selection_command +1586,6378222,"genie.py",8175,0,"",python,selection_command +1587,6378489,"genie.py",8210,0,"",python,selection_command +1588,6378833,"genie.py",8175,0,"",python,selection_command +1589,6378881,"genie.py",8210,0,"",python,selection_command +1590,6379342,"genie.py",8175,0,"",python,selection_command +1591,6379391,"genie.py",8210,0,"",python,selection_command +1592,6380079,"genie.py",8175,0,"",python,selection_command +1593,6380249,"genie.py",8113,0,"",python,selection_command +1594,6380535,"genie.py",8040,0,"",python,selection_command +1595,6380615,"genie.py",8113,0,"",python,selection_command +1596,6380789,"genie.py",8040,0,"",python,selection_command +1597,6386371,"genie.py",8178,0,"",python,selection_mouse +1598,6387718,"genie.py",8101,0,"",python,selection_mouse +1599,6388565,"genie.py",8172,0,"",python,selection_mouse +1600,6390439,"genie.py",8105,0,"",python,selection_mouse +1601,6391202,"genie.py",8109,0,"",python,selection_mouse +1602,6391770,"genie.py",8172,0,"",python,selection_mouse +1603,6392405,"genie.py",8111,0,"",python,selection_mouse +1604,6418975,"genie.py",8938,0,"",python,selection_mouse +1605,6419114,"genie.py",8934,17,"final_token_probs",python,selection_mouse +1606,6419753,"genie.py",8860,0,"",python,selection_mouse +1607,6419910,"genie.py",8858,5,"round",python,selection_mouse +1608,6420648,"genie.py",8940,0,"",python,selection_mouse +1609,6420805,"genie.py",8934,17,"final_token_probs",python,selection_mouse +1610,6421980,"genie.py",9099,0,"",python,selection_mouse +1611,6422582,"genie.py",9178,0,"",python,selection_mouse +1612,6446236,"genie.py",8967,0,"",python,selection_mouse 
+1613,6446388,"genie.py",8965,19,"num_unmasked_tokens",python,selection_mouse +1614,6447509,"genie.py",8683,0,"",python,selection_mouse +1615,6449168,"genie.py",8682,0,"",python,selection_command +1616,6449322,"genie.py",8681,0,"",python,selection_command +1617,6449468,"genie.py",8680,0,"",python,selection_command +1618,6450684,"genie.py",8679,1,"",python,content +1619,6451517,"genie.py",8679,0,"-",python,content +1620,6451517,"genie.py",8680,0,"",python,selection_keyboard +1621,6469430,"genie.py",5792,0,"",python,selection_mouse +1622,6469542,"genie.py",5791,4,"mask",python,selection_mouse +1623,6475446,"genie.py",8680,0,"",python,selection_mouse +1624,6477722,"genie.py",8679,1,"",python,content +1625,6477959,"genie.py",8679,0,"+",python,content +1626,6477960,"genie.py",8680,0,"",python,selection_keyboard +1627,6478362,"genie.py",8681,0,"",python,selection_command +1628,6478543,"genie.py",8682,0,"",python,selection_command +1629,6478695,"genie.py",8683,0,"",python,selection_command +1630,6479035,"genie.py",8682,1,"",python,content +1631,6482314,"genie.py",9009,0,"",python,selection_mouse +1632,6482949,"genie.py",8846,0,"",python,selection_mouse +1633,6483087,"genie.py",8831,19,"num_unmasked_tokens",python,selection_mouse +1634,6483932,"genie.py",8679,0,"",python,selection_mouse +1635,6484868,"genie.py",8861,0,"",python,selection_mouse +1636,6485430,"genie.py",8864,0,"",python,selection_mouse +1637,6486041,"genie.py",8863,0,"",python,selection_mouse +1638,6486164,"genie.py",8863,1,"N",python,selection_mouse +1639,6512254,"genie.py",5820,0,"",python,selection_mouse +1640,6512848,"genie.py",5819,0,"",python,selection_command +1641,6513151,"genie.py",5818,1,"",python,content +1642,6513610,"genie.py",5818,0,"?",python,content +1643,6513611,"genie.py",5819,0,"",python,selection_keyboard +1644,6513960,"genie.py",5818,1,"",python,content +1645,6514234,"genie.py",5818,0,"=",python,content +1646,6514235,"genie.py",5819,0,"",python,selection_keyboard +1647,6525042,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +1648,6525958,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +1649,6527103,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=1 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir \\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
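The sbatch script echoed above drives sample.py through command-line flags, and the tyro UserWarnings printed at the start of each run come from its dataclass-based CLI. A hedged sketch of such an interface (field names mirror the flags in the script; the dataclass itself and its defaults are assumptions, not the actual sample.py):

    import dataclasses
    import tyro

    @dataclasses.dataclass
    class Args:
        checkpoint: str          # --checkpoint $dynamics_ckpt_dir
        data_dir: str            # --data_dir $array_records_dir
        seq_len: int = 16
        seed: int = 42
        batch_size: int = 4
        maskgit_steps: int = 1   # raised to 25 in the script edit that follows
        start_frame: int = 0

    if __name__ == "__main__":
        args = tyro.cli(Args)    # parses --seq_len=16 etc. from sys.argv
        print(args)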
+1650,6527253,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +1651,6527397,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +1652,6534187,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n# source .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\ndynamics_ckpt_dir=$1\necho $dynamics_ckpt_dir\n\nenv | grep SLURM\n\nsrun python sample.py \\n --checkpoint $dynamics_ckpt_dir \\n --seq_len=16 \\n --seed=42 \\n --batch_size=4 \\n --maskgit_steps=1 \\n --start_frame=0 \\n --data_dir $array_records_dir \\n\n # --dyna_dim=1024 \\n # --dyna_num_blocks=16 \\n # --dyna_num_heads=16 \\n # --dyna_ffn_dim=4096 \\n\n# srun python sample.py \\n # --checkpoint $dynamics_ckpt_dir \\n # --start_frame=0 \\n # --batch_size=12 \\n # --seq_len=2 \\n # --data_dir $array_records_dir\n",shellscript,tab +1653,6535648,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",732,1,"",shellscript,content +1654,6535689,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",732,0,"2",shellscript,content +1655,6535689,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",733,0,"",shellscript,selection_keyboard +1656,6535710,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: 
UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +1657,6536033,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",733,0,"5",shellscript,content +1658,6536034,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",734,0,"",shellscript,selection_keyboard +1659,6537931,"TERMINAL",0,0,"2025-07-31 14:19:00.579892: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1660,6538220,"TERMINAL",0,0,"^Csrun: interrupt (one more within 1 sec to abort)\r\nsrun: StepId=3387190.10 task 0: running\r\n",,terminal_output +1661,6538466,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3387190.10\r\nsrun: forcing job termination\r\nsrun: Job step aborted: Waiting up to 32 seconds for job step to finish.\r\nslurmstepd: error: *** STEP 3387190.10 ON hkn0602 CANCELLED AT 2025-07-31T14:19:01 ***\r\n",,terminal_output +1662,6538601,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3387190.10\r\nsrun: job abort in progress\r\n",,terminal_output +1663,6538756,"TERMINAL",0,0,"]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +1664,6539279,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +1665,6539562,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir \\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
+1666,6539696,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +1667,6539833,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +1668,6540792,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +1669,6542759,"TERMINAL",0,0,"2025-07-31 14:19:05.389446: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1670,6554305,"TERMINAL",0,0,"2025-07-31 14:19:16.957365: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1671,6565977,"TERMINAL",0,0,"2025-07-31 14:19:28.627994: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1672,6570157,"TERMINAL",0,0,"2025-07-31 14:19:32.807998: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1673,6574270,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/117000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/100000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 119000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/119000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 118000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/118000/metrics/metrics not found.\r\n",,terminal_output +1674,6629877,"TERMINAL",0,0,"2025-07-31 14:20:32.474509: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 14:20:32.474812: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 14:20:32.475207: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 14:20:32.475237: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1675,6678835,"TERMINAL",0,0,"SSIM: 0.33350828289985657\r\n",,terminal_output +1676,6680473,"TERMINAL",0,0,"]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +1677,7665894,"genie.py",0,0,"",python,tab +1678,7672207,"genie.py",8864,0,"",python,selection_mouse +1679,7672325,"genie.py",8863,1,"N",python,selection_mouse +1680,7673373,"genie.py",8944,0,"",python,selection_mouse +1681,7673493,"genie.py",8933,17,"final_token_probs",python,selection_mouse +1682,7674316,"genie.py",8939,0,"",python,selection_mouse +1683,7674664,"genie.py",8924,0,"",python,selection_mouse +1684,7675546,"genie.py",8792,0,"",python,selection_mouse +1685,7676292,"genie.py",8716,0,"",python,selection_mouse +1686,7676892,"genie.py",8679,0,"",python,selection_mouse +1687,7677629,"genie.py",8681,0,"",python,selection_mouse +1688,7678790,"genie.py",8682,0,"",python,selection_mouse +1689,7679139,"genie.py",8682,34,"mask\n # Update masked token",python,selection_mouse +1690,7679303,"genie.py",8682,33,"mask\n # Update masked toke",python,selection_mouse +1691,7679890,"genie.py",8681,1," ",python,selection_mouse +1692,7728165,"genie.py",9136,0,"",python,selection_mouse +1693,7728366,"genie.py",9130,8,"idx_mask",python,selection_mouse +1694,7769267,"genie.py",8963,0,"",python,selection_mouse +1695,7779039,"genie.py",8962,1,"",python,content +1696,7779307,"genie.py",8962,0,"<",python,content +1697,7779308,"genie.py",8963,0,"",python,selection_keyboard +1698,7779547,"genie.py",8963,0,"=",python,content +1699,7779548,"genie.py",8964,0,"",python,selection_keyboard +1700,7791305,"genie.py",8891,0,"",python,selection_mouse +1701,7791819,"genie.py",8972,0,"",python,selection_mouse +1702,7791944,"genie.py",8965,19,"num_unmasked_tokens",python,selection_mouse +1703,7794733,"genie.py",8972,0,"",python,selection_mouse +1704,7794734,"genie.py",8965,19,"num_unmasked_tokens",python,selection_mouse +1705,7795492,"genie.py",8964,0,"",python,selection_mouse +1706,7796479,"genie.py",8923,0,"",python,selection_mouse +1707,7796605,"genie.py",8922,3,"jnp",python,selection_mouse +1708,7796794,"genie.py",8922,4,"jnp.",python,selection_mouse +1709,7796832,"genie.py",8922,10,"jnp.arange",python,selection_mouse +1710,7796867,"genie.py",8922,11,"jnp.arange(",python,selection_mouse +1711,7796907,"genie.py",8922,28,"jnp.arange(final_token_probs",python,selection_mouse +1712,7797063,"genie.py",8922,29,"jnp.arange(final_token_probs.",python,selection_mouse +1713,7797087,"genie.py",8922,34,"jnp.arange(final_token_probs.shape",python,selection_mouse +1714,7797205,"genie.py",8922,35,"jnp.arange(final_token_probs.shape[",python,selection_mouse +1715,7797227,"genie.py",8922,36,"jnp.arange(final_token_probs.shape[-",python,selection_mouse +1716,7797239,"genie.py",8922,37,"jnp.arange(final_token_probs.shape[-1",python,selection_mouse +1717,7797291,"genie.py",8922,38,"jnp.arange(final_token_probs.shape[-1]",python,selection_mouse +1718,7797332,"genie.py",8922,39,"jnp.arange(final_token_probs.shape[-1])",python,selection_mouse 
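The `<` to `<=` to `>` edits around offset 8962 are probing the comparison that decides which confidence ranks get re-masked. A self-contained sketch of that ranking step with dummy probabilities (the trace confirms only the argsort with descending=True, the jnp.arange over final_token_probs.shape[-1], and the comparison operator being toggled):

    import jax.numpy as jnp

    final_token_probs = jnp.array([0.9, 0.2, 0.7, 0.4])
    num_unmasked_tokens = 2

    # Rank tokens by confidence, most confident first.
    sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)
    # Rank r stays masked iff r > num_unmasked_tokens; the off-by-one here is
    # exactly what the < / <= / > edits in the trace are toggling.
    idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens
    # Scatter the rank decision back to original token positions.
    new_mask = jnp.zeros_like(idx_mask).at[sorted_idxs].set(idx_mask)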
+1719,7813207,"genie.py",8965,0,"",python,selection_mouse +1720,7814052,"genie.py",8962,2,"",python,content +1721,7814519,"genie.py",8962,0,">",python,content +1722,7815416,"genie.py",8965,0,"",python,selection_mouse +1723,7815595,"genie.py",8964,19,"num_unmasked_tokens",python,selection_mouse +1724,8087234,"genie.py",8769,0,"",python,selection_mouse +1725,8088649,"genie.py",8792,0,"",python,selection_mouse +1726,8089153,"genie.py",8765,0,"",python,selection_mouse +1727,8089272,"genie.py",8764,0,"",python,selection_command +1728,8092177,"genie.py",8853,0,"",python,selection_mouse +1729,8092326,"genie.py",8853,3,"jnp",python,selection_mouse +1730,8092556,"genie.py",8853,4,"jnp.",python,selection_mouse +1731,8092572,"genie.py",8853,9,"jnp.round",python,selection_mouse +1732,8092604,"genie.py",8853,10,"jnp.round(",python,selection_mouse +1733,8092641,"genie.py",8853,13,"jnp.round(N *",python,selection_mouse +1734,8092642,"genie.py",8853,15,"jnp.round(N * (",python,selection_mouse +1735,8092677,"genie.py",8853,17,"jnp.round(N * (1.",python,selection_mouse +1736,8092677,"genie.py",8853,20,"jnp.round(N * (1.0 -",python,selection_mouse +1737,8092720,"genie.py",8853,21,"jnp.round(N * (1.0 - ",python,selection_mouse +1738,8092721,"genie.py",8853,35,"jnp.round(N * (1.0 - unmasked_ratio",python,selection_mouse +1739,8092754,"genie.py",8822,34,"\n num_unmasked_tokens = jnp",python,selection_mouse +1740,8092829,"genie.py",8853,36,"jnp.round(N * (1.0 - unmasked_ratio)",python,selection_mouse +1741,8092830,"genie.py",8853,38,"jnp.round(N * (1.0 - unmasked_ratio)).",python,selection_mouse +1742,8092869,"genie.py",8853,44,"jnp.round(N * (1.0 - unmasked_ratio)).astype",python,selection_mouse +1743,8092951,"genie.py",8853,45,"jnp.round(N * (1.0 - unmasked_ratio)).astype(",python,selection_mouse +1744,8092956,"genie.py",8853,48,"jnp.round(N * (1.0 - unmasked_ratio)).astype(int",python,selection_mouse +1745,8093087,"genie.py",8853,49,"jnp.round(N * (1.0 - unmasked_ratio)).astype(int)",python,selection_mouse +1746,8093345,"genie.py",8902,0,"",python,selection_mouse +1747,8093351,"genie.py",8901,0,"",python,selection_command +1748,8095192,"genie.py",8902,0,"",python,selection_command +1749,8095472,"genie.py",8902,0," ",python,content +1750,8095474,"genie.py",8903,0,"",python,selection_keyboard +1751,8095593,"genie.py",8903,0,"-",python,content +1752,8095594,"genie.py",8904,0,"",python,selection_keyboard +1753,8095692,"genie.py",8904,0," ",python,content +1754,8095693,"genie.py",8905,0,"",python,selection_keyboard +1755,8097245,"genie.py",8905,0,"()",python,content +1756,8097246,"genie.py",8906,0,"",python,selection_keyboard +1757,8119766,"genie.py",8905,2,"",python,content +1758,8119914,"genie.py",8904,1,"",python,content +1759,8120042,"genie.py",8903,1,"",python,content +1760,8120374,"genie.py",8902,1,"",python,content +1761,8121353,"genie.py",8792,0,"",python,selection_mouse +1762,8121789,"genie.py",8822,0,"",python,selection_mouse +1763,8122142,"genie.py",8822,0,"\n ",python,content +1764,8125040,"genie.py",8831,0,"s",python,content +1765,8125042,"genie.py",8832,0,"",python,selection_keyboard +1766,8125058,"genie.py",8832,0,"u",python,content +1767,8125059,"genie.py",8833,0,"",python,selection_keyboard +1768,8125291,"genie.py",8833,0,"m",python,content +1769,8125292,"genie.py",8834,0,"",python,selection_keyboard +1770,8125484,"genie.py",8834,0," ",python,content +1771,8125485,"genie.py",8835,0,"",python,selection_keyboard +1772,8125837,"genie.py",8834,1,"",python,content 
+1773,8126508,"genie.py",8834,0,"_",python,content +1774,8126509,"genie.py",8835,0,"",python,selection_keyboard +1775,8126808,"genie.py",8835,0,"m",python,content +1776,8126808,"genie.py",8836,0,"",python,selection_keyboard +1777,8126908,"genie.py",8836,0,"a",python,content +1778,8126909,"genie.py",8837,0,"",python,selection_keyboard +1779,8126956,"genie.py",8837,0,"s",python,content +1780,8126957,"genie.py",8838,0,"",python,selection_keyboard +1781,8127058,"genie.py",8838,0,"k",python,content +1782,8127059,"genie.py",8839,0,"",python,selection_keyboard +1783,8127304,"genie.py",8839,0," ",python,content +1784,8127305,"genie.py",8840,0,"",python,selection_keyboard +1785,8127680,"genie.py",8840,0,"=",python,content +1786,8127680,"genie.py",8841,0,"",python,selection_keyboard +1787,8127750,"genie.py",8841,0," ",python,content +1788,8127751,"genie.py",8842,0,"",python,selection_keyboard +1789,8129066,"genie.py",8801,0,"",python,selection_command +1790,8131291,"genie.py",8834,0,"",python,selection_mouse +1791,8131415,"genie.py",8831,8,"sum_mask",python,selection_mouse +1792,8131525,"genie.py",8823,20," sum_mask = \n",python,selection_mouse +1793,8132278,"genie.py",8831,0,"",python,selection_command +1794,8137648,"genie.py",8823,0,"",python,selection_command +1795,8139174,"genie.py",8823,0," sum_mask = jnp.sum(mask[0])\n",python,content +1796,8139177,"genie.py",8859,20,"",python,content +1797,8143437,"genie.py",8846,0,"",python,selection_mouse +1798,8143957,"genie.py",8836,0,"",python,selection_mouse +1799,8144072,"genie.py",8831,8,"sum_mask",python,selection_mouse +1800,8145075,"genie.py",8813,0,"",python,selection_mouse +1801,8145587,"genie.py",8837,0,"",python,selection_mouse +1802,8145716,"genie.py",8831,8,"sum_mask",python,selection_mouse +1803,8146363,"genie.py",8834,0,"",python,selection_mouse +1804,8146363,"genie.py",8831,8,"sum_mask",python,selection_mouse +1805,8147020,"genie.py",8834,0,"",python,selection_mouse +1806,8160622,"genie.py",8877,0,"",python,selection_mouse +1807,8160785,"genie.py",8867,19,"num_unmasked_tokens",python,selection_mouse +1808,8161890,"genie.py",8876,0,"",python,selection_mouse +1809,8161891,"genie.py",8867,19,"num_unmasked_tokens",python,selection_mouse +1810,8162933,"genie.py",8876,0,"",python,selection_mouse +1811,8162934,"genie.py",8867,19,"num_unmasked_tokens",python,selection_mouse +1812,8163812,"genie.py",8869,0,"",python,selection_mouse +1813,8164169,"genie.py",8843,0,"",python,selection_mouse +1814,8164936,"genie.py",8842,0,"",python,selection_mouse +1815,8171645,"genie.py",8842,0,"N",python,content +1816,8171647,"genie.py",8843,0,"",python,selection_keyboard +1817,8171901,"genie.py",8843,0,"-",python,content +1818,8171902,"genie.py",8844,0,"",python,selection_keyboard +1819,8172009,"genie.py",8844,0," ",python,content +1820,8172009,"genie.py",8845,0,"",python,selection_keyboard +1821,8174489,"genie.py",8843,0,"",python,selection_mouse +1822,8174707,"genie.py",8843,0," ",python,content +1823,8174708,"genie.py",8844,0,"",python,selection_keyboard +1824,8176075,"genie.py",8965,0,"",python,selection_mouse +1825,8176684,"genie.py",8850,0,"",python,selection_mouse +1826,8176840,"genie.py",8850,3,"sum",python,selection_mouse +1827,8177789,"genie.py",8907,0,"",python,selection_mouse +1828,8177934,"genie.py",8906,1," ",python,selection_mouse +1829,8178121,"genie.py",8904,3," * ",python,selection_mouse +1830,8178142,"genie.py",8862,45,"\n num_unmasked_tokens = jnp.round(N * ",python,selection_mouse +1831,8178179,"genie.py",8861,46,")\n num_unmasked_tokens = 
jnp.round(N * ",python,selection_mouse +1832,8185058,"genie.py",8840,0,"",python,selection_mouse +1833,8185574,"genie.py",8835,0,"",python,selection_mouse +1834,8185724,"genie.py",8831,8,"sum_mask",python,selection_mouse +1835,8186566,"genie.py",8843,0,"",python,selection_mouse +1836,8186750,"genie.py",8842,1,"N",python,selection_mouse +1837,8186939,"genie.py",8842,2,"N ",python,selection_mouse +1838,8186955,"genie.py",8842,4,"N - ",python,selection_mouse +1839,8186978,"genie.py",8842,7,"N - jnp",python,selection_mouse +1840,8187011,"genie.py",8842,8,"N - jnp.",python,selection_mouse +1841,8187060,"genie.py",8842,11,"N - jnp.sum",python,selection_mouse +1842,8187089,"genie.py",8842,12,"N - jnp.sum(",python,selection_mouse +1843,8187090,"genie.py",8842,16,"N - jnp.sum(mask",python,selection_mouse +1844,8187178,"genie.py",8842,17,"N - jnp.sum(mask[",python,selection_mouse +1845,8187245,"genie.py",8842,18,"N - jnp.sum(mask[0",python,selection_mouse +1846,8187323,"genie.py",8842,19,"N - jnp.sum(mask[0]",python,selection_mouse +1847,8187757,"genie.py",8842,20,"N - jnp.sum(mask[0])",python,selection_mouse +1848,8188368,"genie.py",8862,0,"",python,selection_mouse +1849,8189241,"genie.py",8842,0,"",python,selection_mouse +1850,8189387,"genie.py",8842,1,"N",python,selection_mouse +1851,8190122,"genie.py",8838,0,"",python,selection_mouse +1852,8190961,"genie.py",8831,0,"",python,selection_mouse +1853,8191142,"genie.py",8831,8,"sum_mask",python,selection_mouse +1854,8193458,"genie.py",8831,8,"n",python,content +1855,8193459,"genie.py",8832,0,"",python,selection_keyboard +1856,8193649,"genie.py",8832,0,"u",python,content +1857,8193650,"genie.py",8833,0,"",python,selection_keyboard +1858,8193796,"genie.py",8833,0,"m",python,content +1859,8193797,"genie.py",8834,0,"",python,selection_keyboard +1860,8194199,"genie.py",8834,0,"_",python,content +1861,8194200,"genie.py",8835,0,"",python,selection_keyboard +1862,8194474,"genie.py",8835,0,"t",python,content +1863,8194475,"genie.py",8836,0,"",python,selection_keyboard +1864,8194583,"genie.py",8836,0,"o",python,content +1865,8194585,"genie.py",8837,0,"",python,selection_keyboard +1866,8194622,"genie.py",8837,0,"k",python,content +1867,8194623,"genie.py",8838,0,"",python,selection_keyboard +1868,8194851,"genie.py",8838,0,"e",python,content +1869,8194852,"genie.py",8839,0,"",python,selection_keyboard +1870,8194890,"genie.py",8839,0,"n",python,content +1871,8194891,"genie.py",8840,0,"",python,selection_keyboard +1872,8195676,"genie.py",8839,1,"",python,content +1873,8195827,"genie.py",8838,1,"",python,content +1874,8195963,"genie.py",8837,1,"",python,content +1875,8196119,"genie.py",8836,1,"",python,content +1876,8196233,"genie.py",8835,1,"",python,content +1877,8196635,"genie.py",8835,0,"u",python,content +1878,8196636,"genie.py",8836,0,"",python,selection_keyboard +1879,8196745,"genie.py",8836,0,"n",python,content +1880,8196746,"genie.py",8837,0,"",python,selection_keyboard +1881,8196976,"genie.py",8837,0,"m",python,content +1882,8196977,"genie.py",8838,0,"",python,selection_keyboard +1883,8197117,"genie.py",8838,0,"a",python,content +1884,8197118,"genie.py",8839,0,"",python,selection_keyboard +1885,8197353,"genie.py",8839,0,"s",python,content +1886,8197354,"genie.py",8840,0,"",python,selection_keyboard +1887,8197421,"genie.py",8840,0,"k",python,content +1888,8197422,"genie.py",8841,0,"",python,selection_keyboard +1889,8197825,"genie.py",8841,0,"e",python,content +1890,8197826,"genie.py",8842,0,"",python,selection_keyboard 
+1891,8197909,"genie.py",8842,0,"d",python,content +1892,8197910,"genie.py",8843,0,"",python,selection_keyboard +1893,8198106,"genie.py",8843,0,"_",python,content +1894,8198106,"genie.py",8844,0,"",python,selection_keyboard +1895,8198308,"genie.py",8844,0,"t",python,content +1896,8198309,"genie.py",8845,0,"",python,selection_keyboard +1897,8198419,"genie.py",8845,0,"o",python,content +1898,8198420,"genie.py",8846,0,"",python,selection_keyboard +1899,8198455,"genie.py",8846,0,"k",python,content +1900,8198455,"genie.py",8847,0,"",python,selection_keyboard +1901,8198597,"genie.py",8847,0,"e",python,content +1902,8198598,"genie.py",8848,0,"",python,selection_keyboard +1903,8198707,"genie.py",8848,0,"n",python,content +1904,8198708,"genie.py",8849,0,"",python,selection_keyboard +1905,8198821,"genie.py",8849,0,"s",python,content +1906,8198822,"genie.py",8850,0,"",python,selection_keyboard +1907,8198985,"genie.py",8850,0,"_",python,content +1908,8198985,"genie.py",8851,0,"",python,selection_keyboard +1909,8199246,"genie.py",8851,0,"o",python,content +1910,8199247,"genie.py",8852,0,"",python,selection_keyboard +1911,8199397,"genie.py",8852,0,"l",python,content +1912,8199398,"genie.py",8853,0,"",python,selection_keyboard +1913,8199445,"genie.py",8853,0,"d",python,content +1914,8199447,"genie.py",8854,0,"",python,selection_keyboard +1915,8199977,"genie.py",8853,0,"",python,selection_command +1916,8202621,"genie.py",8850,0,"",python,selection_mouse +1917,8203183,"genie.py",8905,0,"",python,selection_mouse +1918,8203861,"genie.py",8919,0,"",python,selection_mouse +1919,8205451,"genie.py",8957,0,"\n ",python,content +1920,8206652,"genie.py",8966,0,"n",python,content +1921,8206652,"genie.py",8967,0,"",python,selection_keyboard +1922,8206838,"genie.py",8967,0,"u",python,content +1923,8206839,"genie.py",8968,0,"",python,selection_keyboard +1924,8207005,"genie.py",8968,0,"m",python,content +1925,8207006,"genie.py",8969,0,"",python,selection_keyboard +1926,8207309,"genie.py",8969,0,"_",python,content +1927,8207310,"genie.py",8970,0,"",python,selection_keyboard +1928,8207685,"genie.py",8970,0,"u",python,content +1929,8207685,"genie.py",8971,0,"",python,selection_keyboard +1930,8207827,"genie.py",8971,0,"n",python,content +1931,8207828,"genie.py",8972,0,"",python,selection_keyboard +1932,8208037,"genie.py",8972,0,"m",python,content +1933,8208038,"genie.py",8973,0,"",python,selection_keyboard +1934,8208112,"genie.py",8973,0,"a",python,content +1935,8208113,"genie.py",8974,0,"",python,selection_keyboard +1936,8208190,"genie.py",8974,0,"s",python,content +1937,8208190,"genie.py",8975,0,"",python,selection_keyboard +1938,8208228,"genie.py",8975,0,"k",python,content +1939,8208229,"genie.py",8976,0,"",python,selection_keyboard +1940,8208399,"genie.py",8976,0,"e",python,content +1941,8208400,"genie.py",8977,0,"",python,selection_keyboard +1942,8208491,"genie.py",8977,0,"d",python,content +1943,8208492,"genie.py",8978,0,"",python,selection_keyboard +1944,8208728,"genie.py",8978,0,"_",python,content +1945,8208729,"genie.py",8979,0,"",python,selection_keyboard +1946,8208936,"genie.py",8979,0,"t",python,content +1947,8208937,"genie.py",8980,0,"",python,selection_keyboard +1948,8209082,"genie.py",8980,0,"k",python,content +1949,8209083,"genie.py",8981,0,"",python,selection_keyboard +1950,8209480,"genie.py",8980,1,"",python,content +1951,8209679,"genie.py",8980,0,"o",python,content +1952,8209680,"genie.py",8981,0,"",python,selection_keyboard +1953,8209759,"genie.py",8981,0,"k",python,content 
+1954,8209760,"genie.py",8982,0,"",python,selection_keyboard +1955,8209856,"genie.py",8982,0,"e",python,content +1956,8209857,"genie.py",8983,0,"",python,selection_keyboard +1957,8209987,"genie.py",8983,0,"n",python,content +1958,8209988,"genie.py",8984,0,"",python,selection_keyboard +1959,8210133,"genie.py",8984,0,"s",python,content +1960,8210133,"genie.py",8985,0,"",python,selection_keyboard +1961,8210488,"genie.py",8985,0,"_",python,content +1962,8210489,"genie.py",8986,0,"",python,selection_keyboard +1963,8210812,"genie.py",8986,0,"d",python,content +1964,8210812,"genie.py",8987,0,"",python,selection_keyboard +1965,8211018,"genie.py",8987,0,"e",python,content +1966,8211019,"genie.py",8988,0,"",python,selection_keyboard +1967,8211501,"genie.py",8988,0,"l",python,content +1968,8211502,"genie.py",8989,0,"",python,selection_keyboard +1969,8211614,"genie.py",8989,0,"t",python,content +1970,8211614,"genie.py",8990,0,"",python,selection_keyboard +1971,8211847,"genie.py",8990,0,"a",python,content +1972,8211848,"genie.py",8991,0,"",python,selection_keyboard +1973,8212069,"genie.py",8991,0," ",python,content +1974,8212070,"genie.py",8992,0,"",python,selection_keyboard +1975,8212216,"genie.py",8992,0,"=",python,content +1976,8212217,"genie.py",8993,0,"",python,selection_keyboard +1977,8212333,"genie.py",8993,0," ",python,content +1978,8212334,"genie.py",8994,0,"",python,selection_keyboard +1979,8213324,"genie.py",8994,0,"n",python,content +1980,8213325,"genie.py",8995,0,"",python,selection_keyboard +1981,8213543,"genie.py",8995,0,"u",python,content +1982,8213544,"genie.py",8996,0,"",python,selection_keyboard +1983,8213748,"genie.py",8996,0,"m",python,content +1984,8213750,"genie.py",8997,0,"",python,selection_keyboard +1985,8215427,"genie.py",8994,3,"num_unmasked_tokens",python,content +1986,8215670,"genie.py",9013,0," ",python,content +1987,8215672,"genie.py",9014,0,"",python,selection_keyboard +1988,8215788,"genie.py",9014,0,"-",python,content +1989,8215789,"genie.py",9015,0,"",python,selection_keyboard +1990,8215906,"genie.py",9015,0," ",python,content +1991,8215907,"genie.py",9016,0,"",python,selection_keyboard +1992,8216537,"genie.py",9016,0,"n",python,content +1993,8216539,"genie.py",9017,0,"",python,selection_keyboard +1994,8216701,"genie.py",9017,0,"u",python,content +1995,8216702,"genie.py",9018,0,"",python,selection_keyboard +1996,8216866,"genie.py",9018,0,"m",python,content +1997,8216867,"genie.py",9019,0,"",python,selection_keyboard +1998,8218709,"genie.py",9016,3,"num_unmasked_tokens_old",python,content +1999,8219930,"genie.py",8986,0,"",python,selection_mouse +2000,8220039,"genie.py",8966,25,"num_unmasked_tokens_delta",python,selection_mouse +2001,8223488,"genie.py",8902,0,"",python,selection_mouse +2002,8223679,"genie.py",8886,19,"num_unmasked_tokens",python,selection_mouse +2003,8225379,"genie.py",9120,0,"",python,selection_mouse +2004,8226447,"genie.py",9120,0,"_",python,content +2005,8226448,"genie.py",9121,0,"",python,selection_keyboard +2006,8226640,"genie.py",9121,0,"d",python,content +2007,8226640,"genie.py",9122,0,"",python,selection_keyboard +2008,8226717,"genie.py",9122,0,"e",python,content +2009,8226719,"genie.py",9123,0,"",python,selection_keyboard +2010,8227392,"genie.py",9101,22,"num_unmasked_tokens_delta",python,content +2011,8228346,"genie.py",9110,0,"",python,selection_mouse +2012,8228510,"genie.py",9101,25,"num_unmasked_tokens_delta",python,selection_mouse +2013,8229544,"genie.py",8985,0,"",python,selection_mouse 
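Taken together, these edits leave the schedule bookkeeping as: count how many tokens are already unmasked, compare against the target for this step, and commit only the difference. A hedged sketch of that arithmetic (the cosine schedule is an assumption; the trace confirms only the jnp.round(), the N - jnp.sum(mask[0]) count, and the delta variable):

    import jax.numpy as jnp

    def tokens_to_unmask_this_step(step, total_steps, N, mask):
        # Target fraction still masked after this step (assumed cosine schedule).
        unmasked_ratio = jnp.cos(jnp.pi / 2 * (step + 1) / total_steps)
        num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)
        # Tokens already committed before this step; mask[0] marks the first
        # batch element's still-masked positions, as in the trace.
        num_unmasked_tokens_old = N - jnp.sum(mask[0])
        # How many additional tokens to unmask at this step.
        num_unmasked_tokens_delta = num_unmasked_tokens - num_unmasked_tokens_old
        return num_unmasked_tokens_delta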
+2014,8230146,"genie.py",8978,0,"",python,selection_mouse +2015,8230232,"genie.py",8966,25,"num_unmasked_tokens_delta",python,selection_mouse +2016,8234724,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +2017,8235023,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir \\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output +2018,8235200,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +2019,8235332,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output 
+2020,8238949,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +2021,8241044,"TERMINAL",0,0,"2025-07-31 14:47:23.691423: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2022,8252709,"TERMINAL",0,0,"2025-07-31 14:47:35.358739: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2023,8264114,"TERMINAL",0,0,"2025-07-31 14:47:46.739110: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2024,8268296,"TERMINAL",0,0,"2025-07-31 14:47:50.909421: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2025,8271891,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 119000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/119000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/100000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 118000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/118000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/117000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/020000/metrics/metrics not found.\r\n",,terminal_output +2026,8327697,"TERMINAL",0,0,"2025-07-31 14:48:50.352344: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 14:48:50.352591: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 14:48:50.352946: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 14:48:50.352966: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2027,8376030,"TERMINAL",0,0,"SSIM: 0.2639240026473999\r\n",,terminal_output +2028,8377348,"TERMINAL",0,0,"]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +2029,8514482,"genie.py",0,0,"",python,tab +2030,8518953,"genie.py",9431,0,"",python,selection_mouse +2031,8520736,"genie.py",9400,0,"",python,selection_mouse +2032,8524268,"genie.py",8891,0,"",python,selection_mouse +2033,8524937,"genie.py",8973,0,"",python,selection_mouse +2034,8535265,"genie.py",9230,0,"",python,selection_mouse +2035,8536215,"genie.py",8976,0,"",python,selection_mouse +2036,8538514,"genie.py",9053,0,"",python,selection_mouse +2037,8556520,"genie.py",9362,0,"",python,selection_mouse +2038,8557755,"genie.py",9067,0,"",python,selection_mouse +2039,8578695,"genie.py",9064,0,"",python,selection_mouse +2040,8579549,"genie.py",9094,0,"",python,selection_mouse +2041,8590888,"genie.py",9088,0,"",python,selection_mouse +2042,8591383,"genie.py",9083,0,"",python,selection_mouse +2043,8591540,"genie.py",9070,17,"final_token_probs",python,selection_mouse +2044,8620981,"genie.py",8682,0,"",python,selection_mouse +2045,8622269,"genie.py",8682,0,"~",python,content +2046,8622271,"genie.py",8683,0,"",python,selection_keyboard +2047,8623299,"genie.py",8684,0,"",python,selection_mouse +2048,8623437,"genie.py",8683,4,"mask",python,selection_mouse +2049,8623746,"genie.py",8682,5,"~mask",python,selection_mouse +2050,8624150,"genie.py",8682,0,"",python,selection_mouse +2051,8624151,"genie.py",8681,1," ",python,selection_mouse +2052,8624378,"genie.py",8681,2," ~",python,selection_mouse +2053,8624390,"genie.py",8681,6," ~mask",python,selection_mouse +2054,8624701,"genie.py",8687,0,"",python,selection_mouse +2055,8625258,"genie.py",8684,0,"",python,selection_mouse +2056,8625418,"genie.py",8683,4,"mask",python,selection_mouse +2057,8625643,"genie.py",8682,5,"~mask",python,selection_mouse +2058,8626605,"genie.py",8682,0,"",python,selection_mouse +2059,8626606,"genie.py",8681,1," ",python,selection_mouse +2060,8626840,"genie.py",8681,2," ~",python,selection_mouse +2061,8626883,"genie.py",8681,6," ~mask",python,selection_mouse +2062,8627360,"genie.py",8685,0,"",python,selection_mouse +2063,8627361,"genie.py",8683,4,"mask",python,selection_mouse +2064,8628220,"genie.py",8683,0,"",python,selection_mouse +2065,8628596,"genie.py",8680,0,"",python,selection_mouse +2066,8629859,"genie.py",8721,0,"",python,selection_mouse +2067,8630023,"genie.py",8719,4,"only",python,selection_mouse +2068,8630326,"genie.py",8683,40,"mask\n # Update masked tokens only",python,selection_mouse +2069,8630742,"genie.py",8686,0,"",python,selection_mouse +2070,8630743,"genie.py",8683,4,"mask",python,selection_mouse +2071,8632096,"genie.py",8682,5,"~mask",python,selection_mouse +2072,8632775,"genie.py",8682,0,"",python,selection_mouse +2073,8641358,"genie.py",8859,0,"",python,selection_mouse +2074,8642076,"genie.py",8842,0,"",python,selection_mouse +2075,8642197,"genie.py",8832,23,"num_unmasked_tokens_old",python,selection_mouse +2076,8642480,"genie.py",8832,24,"num_unmasked_tokens_old ",python,selection_mouse +2077,8642487,"genie.py",8832,26,"num_unmasked_tokens_old = ",python,selection_mouse +2078,8642505,"genie.py",8832,28,"num_unmasked_tokens_old = N ",python,selection_mouse +2079,8642519,"genie.py",8832,29,"num_unmasked_tokens_old = N -",python,selection_mouse 
+2080,8642539,"genie.py",8832,33,"num_unmasked_tokens_old = N - jnp",python,selection_mouse +2081,8642616,"genie.py",8832,34,"num_unmasked_tokens_old = N - jnp.",python,selection_mouse +2082,8642617,"genie.py",8832,37,"num_unmasked_tokens_old = N - jnp.sum",python,selection_mouse +2083,8642636,"genie.py",8832,38,"num_unmasked_tokens_old = N - jnp.sum(",python,selection_mouse +2084,8642670,"genie.py",8832,42,"num_unmasked_tokens_old = N - jnp.sum(mask",python,selection_mouse +2085,8642757,"genie.py",8832,43,"num_unmasked_tokens_old = N - jnp.sum(mask[",python,selection_mouse +2086,8642758,"genie.py",8832,44,"num_unmasked_tokens_old = N - jnp.sum(mask[0",python,selection_mouse +2087,8642786,"genie.py",8832,45,"num_unmasked_tokens_old = N - jnp.sum(mask[0]",python,selection_mouse +2088,8642872,"genie.py",8832,46,"num_unmasked_tokens_old = N - jnp.sum(mask[0])",python,selection_mouse +2089,8643256,"genie.py",8878,0,"",python,selection_mouse +2090,8643404,"genie.py",8877,1,")",python,selection_mouse +2091,8643427,"genie.py",8876,2,"])",python,selection_mouse +2092,8643549,"genie.py",8874,4,"[0])",python,selection_mouse +2093,8643550,"genie.py",8870,8,"mask[0])",python,selection_mouse +2094,8643550,"genie.py",8866,12,"sum(mask[0])",python,selection_mouse +2095,8643557,"genie.py",8862,16,"jnp.sum(mask[0])",python,selection_mouse +2096,8643558,"genie.py",8861,17," jnp.sum(mask[0])",python,selection_mouse +2097,8643558,"genie.py",8859,19," - jnp.sum(mask[0])",python,selection_mouse +2098,8643558,"genie.py",8856,22,"= N - jnp.sum(mask[0])",python,selection_mouse +2099,8643558,"genie.py",8832,46,"num_unmasked_tokens_old = N - jnp.sum(mask[0])",python,selection_mouse +2100,8644154,"genie.py",8841,0,"",python,selection_mouse +2101,8644154,"genie.py",8832,23,"num_unmasked_tokens_old",python,selection_mouse +2102,8644456,"genie.py",8832,26,"num_unmasked_tokens_old = ",python,selection_mouse +2103,8644457,"genie.py",8832,27,"num_unmasked_tokens_old = N",python,selection_mouse +2104,8644482,"genie.py",8832,28,"num_unmasked_tokens_old = N ",python,selection_mouse +2105,8644491,"genie.py",8832,33,"num_unmasked_tokens_old = N - jnp",python,selection_mouse +2106,8644523,"genie.py",8832,34,"num_unmasked_tokens_old = N - jnp.",python,selection_mouse +2107,8644560,"genie.py",8832,37,"num_unmasked_tokens_old = N - jnp.sum",python,selection_mouse +2108,8644561,"genie.py",8832,38,"num_unmasked_tokens_old = N - jnp.sum(",python,selection_mouse +2109,8644594,"genie.py",8832,42,"num_unmasked_tokens_old = N - jnp.sum(mask",python,selection_mouse +2110,8644629,"genie.py",8832,43,"num_unmasked_tokens_old = N - jnp.sum(mask[",python,selection_mouse +2111,8644665,"genie.py",8832,44,"num_unmasked_tokens_old = N - jnp.sum(mask[0",python,selection_mouse +2112,8644701,"genie.py",8832,45,"num_unmasked_tokens_old = N - jnp.sum(mask[0]",python,selection_mouse +2113,8644741,"genie.py",8832,46,"num_unmasked_tokens_old = N - jnp.sum(mask[0])",python,selection_mouse +2114,8645312,"genie.py",8878,0,"",python,selection_mouse +2115,8645638,"genie.py",8876,2,"])",python,selection_mouse +2116,8645655,"genie.py",8874,4,"[0])",python,selection_mouse +2117,8645667,"genie.py",8870,8,"mask[0])",python,selection_mouse +2118,8645689,"genie.py",8866,12,"sum(mask[0])",python,selection_mouse +2119,8645727,"genie.py",8865,13,".sum(mask[0])",python,selection_mouse +2120,8645728,"genie.py",8862,16,"jnp.sum(mask[0])",python,selection_mouse +2121,8645766,"genie.py",8860,18,"- jnp.sum(mask[0])",python,selection_mouse +2122,8645766,"genie.py",8858,20,"N - 
jnp.sum(mask[0])",python,selection_mouse +2123,8645801,"genie.py",8855,23," = N - jnp.sum(mask[0])",python,selection_mouse +2124,8645802,"genie.py",8832,46,"num_unmasked_tokens_old = N - jnp.sum(mask[0])",python,selection_mouse +2125,8646393,"genie.py",8846,0,"",python,selection_mouse +2126,8646393,"genie.py",8832,23,"num_unmasked_tokens_old",python,selection_mouse +2127,8646674,"genie.py",8832,24,"num_unmasked_tokens_old ",python,selection_mouse +2128,8646686,"genie.py",8832,26,"num_unmasked_tokens_old = ",python,selection_mouse +2129,8646704,"genie.py",8832,28,"num_unmasked_tokens_old = N ",python,selection_mouse +2130,8646724,"genie.py",8832,30,"num_unmasked_tokens_old = N - ",python,selection_mouse +2131,8646750,"genie.py",8832,33,"num_unmasked_tokens_old = N - jnp",python,selection_mouse +2132,8646787,"genie.py",8832,34,"num_unmasked_tokens_old = N - jnp.",python,selection_mouse +2133,8646824,"genie.py",8832,37,"num_unmasked_tokens_old = N - jnp.sum",python,selection_mouse +2134,8646825,"genie.py",8832,38,"num_unmasked_tokens_old = N - jnp.sum(",python,selection_mouse +2135,8646874,"genie.py",8832,42,"num_unmasked_tokens_old = N - jnp.sum(mask",python,selection_mouse +2136,8646916,"genie.py",8832,43,"num_unmasked_tokens_old = N - jnp.sum(mask[",python,selection_mouse +2137,8646956,"genie.py",8832,44,"num_unmasked_tokens_old = N - jnp.sum(mask[0",python,selection_mouse +2138,8647019,"genie.py",8832,45,"num_unmasked_tokens_old = N - jnp.sum(mask[0]",python,selection_mouse +2139,8647020,"genie.py",8832,46,"num_unmasked_tokens_old = N - jnp.sum(mask[0])",python,selection_mouse +2140,8647237,"genie.py",8878,0,"",python,selection_mouse +2141,8647724,"genie.py",8877,1,")",python,selection_mouse +2142,8647740,"genie.py",8875,3,"0])",python,selection_mouse +2143,8647755,"genie.py",8874,4,"[0])",python,selection_mouse +2144,8647777,"genie.py",8870,8,"mask[0])",python,selection_mouse +2145,8648319,"genie.py",8870,0,"",python,selection_mouse +2146,8648891,"genie.py",8865,0,"",python,selection_mouse +2147,8649046,"genie.py",8862,3,"jnp",python,selection_mouse +2148,8649608,"genie.py",8867,0,"",python,selection_mouse +2149,8649756,"genie.py",8866,3,"sum",python,selection_mouse +2150,8650238,"genie.py",8874,0,"",python,selection_mouse +2151,8650412,"genie.py",8870,4,"mask",python,selection_mouse +2152,8651359,"genie.py",8877,0,"",python,selection_mouse +2153,8658478,"genie.py",8858,0,"",python,selection_mouse +2154,8658708,"genie.py",8858,1,"N",python,selection_mouse +2155,8660128,"genie.py",8685,0,"",python,selection_mouse +2156,8660268,"genie.py",8683,4,"mask",python,selection_mouse +2157,8660832,"genie.py",8675,0,"",python,selection_mouse +2158,8661002,"genie.py",8661,17,"final_token_probs",python,selection_mouse +2159,8661595,"genie.py",8686,0,"",python,selection_mouse +2160,8661727,"genie.py",8683,4,"mask",python,selection_mouse +2161,8665063,"genie.py",9008,0,"",python,selection_mouse +2162,8665223,"genie.py",8995,19,"num_unmasked_tokens",python,selection_mouse +2163,8666108,"genie.py",8860,0,"",python,selection_mouse +2164,8674587,"genie.py",8893,0,"",python,selection_mouse +2165,8676405,"genie.py",9075,0,"",python,selection_mouse +2166,8677559,"genie.py",9002,0,"",python,selection_mouse +2167,8677732,"genie.py",8995,19,"num_unmasked_tokens",python,selection_mouse +2168,8683371,"genie.py",8861,0,"",python,selection_mouse +2169,8684090,"genie.py",8858,0,"",python,selection_mouse +2170,8684245,"genie.py",8858,1,"N",python,selection_mouse +2171,8684756,"genie.py",8922,0,"",python,selection_mouse 
+2172,8684918,"genie.py",8922,1," ",python,selection_mouse +2173,8685497,"genie.py",8860,0,"",python,selection_mouse +2174,8686661,"genie.py",8877,0,"",python,selection_mouse +2175,8687191,"genie.py",8878,0,"",python,selection_mouse +2176,8687392,"genie.py",8875,3,"0])",python,selection_mouse +2177,8687407,"genie.py",8873,5,"k[0])",python,selection_mouse +2178,8687437,"genie.py",8872,6,"sk[0])",python,selection_mouse +2179,8687452,"genie.py",8823,55,"\n num_unmasked_tokens_old = N - jnp.sum(mask[0])",python,selection_mouse +2180,8687657,"genie.py",8864,14,"p.sum(mask[0])",python,selection_mouse +2181,8687725,"genie.py",8863,15,"np.sum(mask[0])",python,selection_mouse +2182,8688073,"genie.py",8862,16,"jnp.sum(mask[0])",python,selection_mouse +2183,8707653,"genie.py",8832,0,"",python,selection_mouse +2184,8708918,"genie.py",8836,0,"",python,selection_mouse +2185,8709168,"genie.py",8836,1,"u",python,selection_mouse +2186,8709188,"genie.py",8836,2,"un",python,selection_mouse +2187,8709197,"genie.py",8836,3,"unm",python,selection_mouse +2188,8709229,"genie.py",8836,4,"unma",python,selection_mouse +2189,8709267,"genie.py",8836,5,"unmas",python,selection_mouse +2190,8709268,"genie.py",8836,6,"unmask",python,selection_mouse +2191,8709307,"genie.py",8836,7,"unmaske",python,selection_mouse +2192,8709320,"genie.py",8836,8,"unmasked",python,selection_mouse +2193,8709356,"genie.py",8836,9,"unmasked_",python,selection_mouse +2194,8709439,"genie.py",8836,10,"unmasked_t",python,selection_mouse +2195,8709440,"genie.py",8836,11,"unmasked_to",python,selection_mouse +2196,8709472,"genie.py",8836,12,"unmasked_tok",python,selection_mouse +2197,8709524,"genie.py",8836,13,"unmasked_toke",python,selection_mouse +2198,8709537,"genie.py",8836,14,"unmasked_token",python,selection_mouse +2199,8709569,"genie.py",8836,15,"unmasked_tokens",python,selection_mouse +2200,8709604,"genie.py",8836,16,"unmasked_tokens_",python,selection_mouse +2201,8709689,"genie.py",8836,17,"unmasked_tokens_o",python,selection_mouse +2202,8709690,"genie.py",8836,18,"unmasked_tokens_ol",python,selection_mouse +2203,8709774,"genie.py",8836,19,"unmasked_tokens_old",python,selection_mouse +2204,8710351,"genie.py",8855,0,"",python,selection_mouse +2205,8710716,"genie.py",8857,0,"",python,selection_mouse +2206,8711279,"genie.py",8862,0,"",python,selection_mouse +2207,8711440,"genie.py",8862,3,"jnp",python,selection_mouse +2208,8711640,"genie.py",8862,7,"jnp.sum",python,selection_mouse +2209,8711651,"genie.py",8862,8,"jnp.sum(",python,selection_mouse +2210,8711668,"genie.py",8862,12,"jnp.sum(mask",python,selection_mouse +2211,8711704,"genie.py",8862,14,"jnp.sum(mask[0",python,selection_mouse +2212,8711739,"genie.py",8862,16,"jnp.sum(mask[0])",python,selection_mouse +2213,8712813,"genie.py",8878,0,"",python,selection_mouse +2214,8713514,"genie.py",8877,1,")",python,selection_mouse +2215,8713528,"genie.py",8875,3,"0])",python,selection_mouse +2216,8713542,"genie.py",8874,4,"[0])",python,selection_mouse +2217,8713566,"genie.py",8873,5,"k[0])",python,selection_mouse +2218,8713604,"genie.py",8872,6,"sk[0])",python,selection_mouse +2219,8713605,"genie.py",8871,7,"ask[0])",python,selection_mouse +2220,8713649,"genie.py",8870,8,"mask[0])",python,selection_mouse +2221,8713685,"genie.py",8869,9,"(mask[0])",python,selection_mouse +2222,8713685,"genie.py",8868,10,"m(mask[0])",python,selection_mouse +2223,8713771,"genie.py",8867,11,"um(mask[0])",python,selection_mouse +2224,8713771,"genie.py",8866,12,"sum(mask[0])",python,selection_mouse 
+2225,8713772,"genie.py",8865,13,".sum(mask[0])",python,selection_mouse +2226,8713857,"genie.py",8864,14,"p.sum(mask[0])",python,selection_mouse +2227,8713870,"genie.py",8863,15,"np.sum(mask[0])",python,selection_mouse +2228,8713948,"genie.py",8862,16,"jnp.sum(mask[0])",python,selection_mouse +2229,8715057,"genie.py",8862,0,"",python,selection_mouse +2230,8715538,"genie.py",8862,1,"j",python,selection_mouse +2231,8715555,"genie.py",8862,3,"jnp",python,selection_mouse +2232,8715565,"genie.py",8862,5,"jnp.s",python,selection_mouse +2233,8715590,"genie.py",8862,7,"jnp.sum",python,selection_mouse +2234,8715623,"genie.py",8862,9,"jnp.sum(m",python,selection_mouse +2235,8715624,"genie.py",8862,11,"jnp.sum(mas",python,selection_mouse +2236,8715662,"genie.py",8862,12,"jnp.sum(mask",python,selection_mouse +2237,8715663,"genie.py",8862,14,"jnp.sum(mask[0",python,selection_mouse +2238,8715702,"genie.py",8862,15,"jnp.sum(mask[0]",python,selection_mouse +2239,8715742,"genie.py",8862,16,"jnp.sum(mask[0])",python,selection_mouse +2240,8716056,"genie.py",8878,0,"",python,selection_mouse +2241,8716855,"genie.py",8877,1,")",python,selection_mouse +2242,8716856,"genie.py",8875,3,"0])",python,selection_mouse +2243,8716870,"genie.py",8873,5,"k[0])",python,selection_mouse +2244,8716892,"genie.py",8871,7,"ask[0])",python,selection_mouse +2245,8716907,"genie.py",8823,55,"\n num_unmasked_tokens_old = N - jnp.sum(mask[0])",python,selection_mouse +2246,8717208,"genie.py",8861,17," jnp.sum(mask[0])",python,selection_mouse +2247,8717220,"genie.py",8860,18,"- jnp.sum(mask[0])",python,selection_mouse +2248,8717297,"genie.py",8859,19," - jnp.sum(mask[0])",python,selection_mouse +2249,8717373,"genie.py",8858,20,"N - jnp.sum(mask[0])",python,selection_mouse +2250,8724739,"genie.py",8937,0,"",python,selection_mouse +2251,8725267,"genie.py",8878,0,"",python,selection_mouse +2252,8726480,"genie.py",8878,0," ",python,content +2253,8726481,"genie.py",8879,0,"",python,selection_keyboard +2254,8726576,"genie.py",8879,0,"#",python,content +2255,8726577,"genie.py",8880,0,"",python,selection_keyboard +2256,8726616,"genie.py",8880,0," ",python,content +2257,8726616,"genie.py",8881,0,"",python,selection_keyboard +2258,8727405,"genie.py",8881,0,"N",python,content +2259,8727406,"genie.py",8882,0,"",python,selection_keyboard +2260,8727870,"genie.py",8882,0," ",python,content +2261,8727871,"genie.py",8883,0,"",python,selection_keyboard +2262,8728134,"genie.py",8883,0,"-",python,content +2263,8728135,"genie.py",8884,0,"",python,selection_keyboard +2264,8728273,"genie.py",8884,0," ",python,content +2265,8728274,"genie.py",8885,0,"",python,selection_keyboard +2266,8729064,"genie.py",8885,0,"n",python,content +2267,8729065,"genie.py",8886,0,"",python,selection_keyboard +2268,8729246,"genie.py",8886,0,"u",python,content +2269,8729247,"genie.py",8887,0,"",python,selection_keyboard +2270,8729411,"genie.py",8887,0,"m",python,content +2271,8729412,"genie.py",8888,0,"",python,selection_keyboard +2272,8729728,"genie.py",8888,0,"_",python,content +2273,8729728,"genie.py",8889,0,"",python,selection_keyboard +2274,8730076,"genie.py",8889,0,"o",python,content +2275,8730077,"genie.py",8890,0,"",python,selection_keyboard +2276,8730409,"genie.py",8889,1,"",python,content +2277,8730560,"genie.py",8889,0,"t",python,content +2278,8730560,"genie.py",8890,0,"",python,selection_keyboard +2279,8730631,"genie.py",8890,0,"o",python,content +2280,8730632,"genie.py",8891,0,"",python,selection_keyboard +2281,8730708,"genie.py",8891,0,"k",python,content 
+2282,8730709,"genie.py",8892,0,"",python,selection_keyboard +2283,8730830,"genie.py",8892,0,"e",python,content +2284,8730831,"genie.py",8893,0,"",python,selection_keyboard +2285,8730953,"genie.py",8893,0,"n",python,content +2286,8730954,"genie.py",8894,0,"",python,selection_keyboard +2287,8731088,"genie.py",8894,0,"s",python,content +2288,8731089,"genie.py",8895,0,"",python,selection_keyboard +2289,8731355,"genie.py",8895,0,"_",python,content +2290,8731356,"genie.py",8896,0,"",python,selection_keyboard +2291,8731686,"genie.py",8896,0,"t",python,content +2292,8731687,"genie.py",8897,0,"",python,selection_keyboard +2293,8731810,"genie.py",8897,0,"o",python,content +2294,8731811,"genie.py",8898,0,"",python,selection_keyboard +2295,8732467,"genie.py",8898,0,"_",python,content +2296,8732468,"genie.py",8899,0,"",python,selection_keyboard +2297,8732779,"genie.py",8899,0,"p",python,content +2298,8732780,"genie.py",8900,0,"",python,selection_keyboard +2299,8733340,"genie.py",8900,0,"r",python,content +2300,8733340,"genie.py",8901,0,"",python,selection_keyboard +2301,8733496,"genie.py",8901,0,"e",python,content +2302,8733497,"genie.py",8902,0,"",python,selection_keyboard +2303,8733996,"genie.py",8902,0,"d",python,content +2304,8733997,"genie.py",8903,0,"",python,selection_keyboard +2305,8734129,"genie.py",8903,0,"i",python,content +2306,8734130,"genie.py",8904,0,"",python,selection_keyboard +2307,8734277,"genie.py",8904,0,"c",python,content +2308,8734278,"genie.py",8905,0,"",python,selection_keyboard +2309,8734413,"genie.py",8905,0,"t",python,content +2310,8734414,"genie.py",8906,0,"",python,selection_keyboard +2311,8734687,"genie.py",8906,0," ",python,content +2312,8734688,"genie.py",8907,0,"",python,selection_keyboard +2313,8735551,"genie.py",8907,0,"=",python,content +2314,8735552,"genie.py",8908,0,"",python,selection_keyboard +2315,8735588,"genie.py",8908,0," ",python,content +2316,8735589,"genie.py",8909,0,"",python,selection_keyboard +2317,8736072,"genie.py",8909,0,"n",python,content +2318,8736073,"genie.py",8910,0,"",python,selection_keyboard +2319,8736389,"genie.py",8910,0,"u",python,content +2320,8736390,"genie.py",8911,0,"",python,selection_keyboard +2321,8736518,"genie.py",8911,0,"m",python,content +2322,8736519,"genie.py",8912,0,"",python,selection_keyboard +2323,8736784,"genie.py",8912,0,"_",python,content +2324,8736785,"genie.py",8913,0,"",python,selection_keyboard +2325,8737031,"genie.py",8913,0,"t",python,content +2326,8737032,"genie.py",8914,0,"",python,selection_keyboard +2327,8737088,"genie.py",8914,0,"o",python,content +2328,8737089,"genie.py",8915,0,"",python,selection_keyboard +2329,8737162,"genie.py",8915,0,"k",python,content +2330,8737163,"genie.py",8916,0,"",python,selection_keyboard +2331,8737308,"genie.py",8916,0,"e",python,content +2332,8737309,"genie.py",8917,0,"",python,selection_keyboard +2333,8737393,"genie.py",8917,0,"n",python,content +2334,8737394,"genie.py",8918,0,"",python,selection_keyboard +2335,8737529,"genie.py",8918,0,"s",python,content +2336,8737530,"genie.py",8919,0,"",python,selection_keyboard +2337,8737742,"genie.py",8919,0,"_",python,content +2338,8737743,"genie.py",8920,0,"",python,selection_keyboard +2339,8738036,"genie.py",8920,0,"p",python,content +2340,8738036,"genie.py",8921,0,"",python,selection_keyboard +2341,8738236,"genie.py",8921,0,"t",python,content +2342,8738237,"genie.py",8922,0,"",python,selection_keyboard +2343,8738711,"genie.py",8922,0,"e",python,content +2344,8738712,"genie.py",8923,0,"",python,selection_keyboard 
+2345,8738972,"genie.py",8922,1,"",python,content +2346,8739109,"genie.py",8921,1,"",python,content +2347,8739186,"genie.py",8921,0,"r",python,content +2348,8739187,"genie.py",8922,0,"",python,selection_keyboard +2349,8739374,"genie.py",8922,0,"e",python,content +2350,8739375,"genie.py",8923,0,"",python,selection_keyboard +2351,8740741,"genie.py",8923,0,"d",python,content +2352,8740742,"genie.py",8924,0,"",python,selection_keyboard +2353,8740859,"genie.py",8924,0,"i",python,content +2354,8740860,"genie.py",8925,0,"",python,selection_keyboard +2355,8740960,"genie.py",8925,0,"c",python,content +2356,8740961,"genie.py",8926,0,"",python,selection_keyboard +2357,8741159,"genie.py",8926,0,"t",python,content +2358,8741160,"genie.py",8927,0,"",python,selection_keyboard +2359,8741299,"genie.py",8927,0,"e",python,content +2360,8741300,"genie.py",8928,0,"",python,selection_keyboard +2361,8741352,"genie.py",8928,0,"d",python,content +2362,8741353,"genie.py",8929,0,"",python,selection_keyboard +2363,8747334,"genie.py",8823,0,"",python,selection_mouse +2364,8747873,"genie.py",8863,0,"",python,selection_mouse +2365,8748461,"genie.py",8872,0,"",python,selection_mouse +2366,8748629,"genie.py",8870,4,"mask",python,selection_mouse +2367,8748842,"genie.py",8869,5,"(mask",python,selection_mouse +2368,8748870,"genie.py",8866,8,"sum(mask",python,selection_mouse +2369,8750054,"genie.py",8850,0,"",python,selection_mouse +2370,8750154,"genie.py",8832,23,"num_unmasked_tokens_old",python,selection_mouse +2371,8751414,"genie.py",8879,0,"",python,selection_mouse +2372,8751609,"genie.py",8877,2,") ",python,selection_mouse +2373,8751622,"genie.py",8876,3,"]) ",python,selection_mouse +2374,8751638,"genie.py",8875,4,"0]) ",python,selection_mouse +2375,8751659,"genie.py",8874,5,"[0]) ",python,selection_mouse +2376,8751674,"genie.py",8873,6,"k[0]) ",python,selection_mouse +2377,8751705,"genie.py",8871,8,"ask[0]) ",python,selection_mouse +2378,8751720,"genie.py",8870,9,"mask[0]) ",python,selection_mouse +2379,8751739,"genie.py",8869,10,"(mask[0]) ",python,selection_mouse +2380,8751739,"genie.py",8868,11,"m(mask[0]) ",python,selection_mouse +2381,8751755,"genie.py",8867,12,"um(mask[0]) ",python,selection_mouse +2382,8751796,"genie.py",8866,13,"sum(mask[0]) ",python,selection_mouse +2383,8751796,"genie.py",8865,14,".sum(mask[0]) ",python,selection_mouse +2384,8751837,"genie.py",8864,15,"p.sum(mask[0]) ",python,selection_mouse +2385,8751883,"genie.py",8863,16,"np.sum(mask[0]) ",python,selection_mouse +2386,8751883,"genie.py",8862,17,"jnp.sum(mask[0]) ",python,selection_mouse +2387,8751888,"genie.py",8861,18," jnp.sum(mask[0]) ",python,selection_mouse +2388,8751909,"genie.py",8860,19,"- jnp.sum(mask[0]) ",python,selection_mouse +2389,8751923,"genie.py",8879,87,"# N - num_tokens_to_predict = num_tokens_predicted\n num_unmasked_tokens = jnp.ro",python,selection_mouse +2390,8751945,"genie.py",8879,86,"# N - num_tokens_to_predict = num_tokens_predicted\n num_unmasked_tokens = jnp.r",python,selection_mouse +2391,8751986,"genie.py",8879,85,"# N - num_tokens_to_predict = num_tokens_predicted\n num_unmasked_tokens = jnp.",python,selection_mouse +2392,8752024,"genie.py",8879,84,"# N - num_tokens_to_predict = num_tokens_predicted\n num_unmasked_tokens = jnp",python,selection_mouse +2393,8752444,"genie.py",8963,0,"",python,selection_mouse +2394,8764406,"genie.py",8949,0,"",python,selection_mouse +2395,8764548,"genie.py",8938,19,"num_unmasked_tokens",python,selection_mouse +2396,8767671,"genie.py",9009,0,"",python,selection_mouse 
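[Editor's note] The genie.py activity recorded above (sequences 2029-2362) repeatedly selects `final_token_probs`, `mask`, and the line `num_unmasked_tokens_old = N - jnp.sum(mask[0])`, then types the comment `# N - num_tokens_to_predict = num_tokens_predicted`. A minimal runnable sketch of that bookkeeping, under assumptions the log does not confirm: `mask` is taken to be a boolean (batch, N) array with True marking still-masked token positions, and the shapes below are hypothetical.

    import jax.numpy as jnp

    N = 16                                          # hypothetical tokens per frame
    mask = jnp.array([[True] * 12 + [False] * 4])   # (batch, N); 4 tokens already unmasked

    # True entries are still masked, so N - sum(mask[0]) counts the tokens
    # already committed: N - num_tokens_to_predict = num_tokens_predicted.
    num_unmasked_tokens_old = N - jnp.sum(mask[0])  # -> 4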
+2397,8768588,"genie.py",9009,0," ",python,content +2398,8768590,"genie.py",9010,0,"",python,selection_keyboard +2399,8768719,"genie.py",9010,0,"#",python,content +2400,8768720,"genie.py",9011,0,"",python,selection_keyboard +2401,8768809,"genie.py",9011,0," ",python,content +2402,8768809,"genie.py",9012,0,"",python,selection_keyboard +2403,8769129,"genie.py",9012,0,"h",python,content +2404,8769130,"genie.py",9013,0,"",python,selection_keyboard +2405,8769550,"genie.py",9012,1,"",python,content +2406,8769832,"genie.py",9012,0,"n",python,content +2407,8769832,"genie.py",9013,0,"",python,selection_keyboard +2408,8769987,"genie.py",9013,0,"u",python,content +2409,8769988,"genie.py",9014,0,"",python,selection_keyboard +2410,8770139,"genie.py",9014,0,"m",python,content +2411,8770140,"genie.py",9015,0,"",python,selection_keyboard +2412,8770479,"genie.py",9015,0,"_",python,content +2413,8770480,"genie.py",9016,0,"",python,selection_keyboard +2414,8770754,"genie.py",9016,0,"t",python,content +2415,8770755,"genie.py",9017,0,"",python,selection_keyboard +2416,8770901,"genie.py",9017,0,"k",python,content +2417,8770902,"genie.py",9018,0,"",python,selection_keyboard +2418,8771238,"genie.py",9017,1,"",python,content +2419,8771393,"genie.py",9017,0,"o",python,content +2420,8771394,"genie.py",9018,0,"",python,selection_keyboard +2421,8771448,"genie.py",9018,0,"k",python,content +2422,8771449,"genie.py",9019,0,"",python,selection_keyboard +2423,8771571,"genie.py",9019,0,"e",python,content +2424,8771571,"genie.py",9020,0,"",python,selection_keyboard +2425,8771687,"genie.py",9020,0,"n",python,content +2426,8771688,"genie.py",9021,0,"",python,selection_keyboard +2427,8771840,"genie.py",9021,0,"s",python,content +2428,8771841,"genie.py",9022,0,"",python,selection_keyboard +2429,8772768,"genie.py",9022,0,"_",python,content +2430,8772768,"genie.py",9023,0,"",python,selection_keyboard +2431,8773042,"genie.py",9023,0,"s",python,content +2432,8773043,"genie.py",9024,0,"",python,selection_keyboard +2433,8773547,"genie.py",9024,0,"h",python,content +2434,8773548,"genie.py",9025,0,"",python,selection_keyboard +2435,8773817,"genie.py",9025,0,"u",python,content +2436,8773817,"genie.py",9026,0,"",python,selection_keyboard +2437,8774239,"genie.py",9025,1,"",python,content +2438,8775236,"genie.py",9025,0,"o",python,content +2439,8775236,"genie.py",9026,0,"",python,selection_keyboard +2440,8775336,"genie.py",9026,0,"u",python,content +2441,8775337,"genie.py",9027,0,"",python,selection_keyboard +2442,8775484,"genie.py",9027,0,"l",python,content +2443,8775484,"genie.py",9028,0,"",python,selection_keyboard +2444,8775581,"genie.py",9028,0,"d",python,content +2445,8775581,"genie.py",9029,0,"",python,selection_keyboard +2446,8775900,"genie.py",9029,0,"_",python,content +2447,8775901,"genie.py",9030,0,"",python,selection_keyboard +2448,8776236,"genie.py",9030,0,"b",python,content +2449,8776237,"genie.py",9031,0,"",python,selection_keyboard +2450,8776354,"genie.py",9031,0,"e",python,content +2451,8776355,"genie.py",9032,0,"",python,selection_keyboard +2452,8776549,"genie.py",9032,0,"_",python,content +2453,8776550,"genie.py",9033,0,"",python,selection_keyboard +2454,8776810,"genie.py",9033,0,"p",python,content +2455,8776811,"genie.py",9034,0,"",python,selection_keyboard +2456,8776953,"genie.py",9034,0,"r",python,content +2457,8776954,"genie.py",9035,0,"",python,selection_keyboard +2458,8777075,"genie.py",9035,0,"e",python,content +2459,8777076,"genie.py",9036,0,"",python,selection_keyboard 
+2460,8777170,"genie.py",9036,0,"d",python,content +2461,8777170,"genie.py",9037,0,"",python,selection_keyboard +2462,8777359,"genie.py",9037,0,"i",python,content +2463,8777359,"genie.py",9038,0,"",python,selection_keyboard +2464,8777513,"genie.py",9038,0,"c",python,content +2465,8777514,"genie.py",9039,0,"",python,selection_keyboard +2466,8777673,"genie.py",9039,0,"t",python,content +2467,8777674,"genie.py",9040,0,"",python,selection_keyboard +2468,8777869,"genie.py",9040,0,"s",python,content +2469,8777869,"genie.py",9041,0,"",python,selection_keyboard +2470,8778253,"genie.py",9040,1,"",python,content +2471,8778331,"genie.py",9040,0,"d",python,content +2472,8778331,"genie.py",9041,0,"",python,selection_keyboard +2473,8778706,"genie.py",9040,1,"",python,content +2474,8778748,"genie.py",9040,0,"e",python,content +2475,8778749,"genie.py",9041,0,"",python,selection_keyboard +2476,8778812,"genie.py",9041,0,"d",python,content +2477,8778813,"genie.py",9042,0,"",python,selection_keyboard +2478,8780253,"genie.py",8954,0,"",python,selection_mouse +2479,8780456,"genie.py",8938,19,"num_unmasked_tokens",python,selection_mouse +2480,8784187,"genie.py",9155,0,"",python,selection_mouse +2481,8784364,"genie.py",9155,17,"final_token_probs",python,selection_mouse +2482,8784908,"genie.py",9071,0,"",python,selection_mouse +2483,8785018,"genie.py",9051,25,"num_unmasked_tokens_delta",python,selection_mouse +2484,8786691,"genie.py",9124,0,"",python,selection_mouse +2485,8787517,"genie.py",9124,0," ",python,content +2486,8787518,"genie.py",9125,0,"",python,selection_keyboard +2487,8787845,"genie.py",9125,0,"#",python,content +2488,8787845,"genie.py",9126,0,"",python,selection_keyboard +2489,8787904,"genie.py",9126,0," ",python,content +2490,8787905,"genie.py",9127,0,"",python,selection_keyboard +2491,8788185,"genie.py",9127,0,"h",python,content +2492,8788186,"genie.py",9128,0,"",python,selection_keyboard +2493,8788308,"genie.py",9128,0,"o",python,content +2494,8788309,"genie.py",9129,0,"",python,selection_keyboard +2495,8788384,"genie.py",9129,0,"w",python,content +2496,8788385,"genie.py",9130,0,"",python,selection_keyboard +2497,8788496,"genie.py",9130,0," ",python,content +2498,8788497,"genie.py",9131,0,"",python,selection_keyboard +2499,8788570,"genie.py",9131,0,"m",python,content +2500,8788571,"genie.py",9132,0,"",python,selection_keyboard +2501,8788692,"genie.py",9132,0,"a",python,content +2502,8788692,"genie.py",9133,0,"",python,selection_keyboard +2503,8788838,"genie.py",9133,0,"n",python,content +2504,8788839,"genie.py",9134,0,"",python,selection_keyboard +2505,8788947,"genie.py",9134,0,"y",python,content +2506,8788948,"genie.py",9135,0,"",python,selection_keyboard +2507,8789048,"genie.py",9135,0," ",python,content +2508,8789049,"genie.py",9136,0,"",python,selection_keyboard +2509,8789728,"genie.py",9136,0,"p",python,content +2510,8789729,"genie.py",9137,0,"",python,selection_keyboard +2511,8789782,"genie.py",9137,0,"r",python,content +2512,8789783,"genie.py",9138,0,"",python,selection_keyboard +2513,8789942,"genie.py",9138,0,"e",python,content +2514,8789943,"genie.py",9139,0,"",python,selection_keyboard +2515,8790052,"genie.py",9139,0,"d",python,content +2516,8790053,"genie.py",9140,0,"",python,selection_keyboard +2517,8790157,"genie.py",9140,0,"i",python,content +2518,8790158,"genie.py",9141,0,"",python,selection_keyboard +2519,8790336,"genie.py",9141,0,"c",python,content +2520,8790337,"genie.py",9142,0,"",python,selection_keyboard +2521,8790525,"genie.py",9142,0,"t",python,content 
+2522,8790526,"genie.py",9143,0,"",python,selection_keyboard +2523,8790597,"genie.py",9143,0," ",python,content +2524,8790598,"genie.py",9144,0,"",python,selection_keyboard +2525,8790741,"genie.py",9144,0,"t",python,content +2526,8790742,"genie.py",9145,0,"",python,selection_keyboard +2527,8790790,"genie.py",9145,0,"h",python,content +2528,8790791,"genie.py",9146,0,"",python,selection_keyboard +2529,8790934,"genie.py",9146,0,"i",python,content +2530,8790935,"genie.py",9147,0,"",python,selection_keyboard +2531,8791003,"genie.py",9147,0,"s",python,content +2532,8791004,"genie.py",9148,0,"",python,selection_keyboard +2533,8791106,"genie.py",9148,0," ",python,content +2534,8791107,"genie.py",9149,0,"",python,selection_keyboard +2535,8792366,"genie.py",9149,0,"i",python,content +2536,8792367,"genie.py",9150,0,"",python,selection_keyboard +2537,8792504,"genie.py",9150,0,"t",python,content +2538,8792505,"genie.py",9151,0,"",python,selection_keyboard +2539,8792691,"genie.py",9151,0,"e",python,content +2540,8792692,"genie.py",9152,0,"",python,selection_keyboard +2541,8792766,"genie.py",9152,0,"r",python,content +2542,8792767,"genie.py",9153,0,"",python,selection_keyboard +2543,8792949,"genie.py",9153,0,"a",python,content +2544,8792950,"genie.py",9154,0,"",python,selection_keyboard +2545,8793128,"genie.py",9154,0,"t",python,content +2546,8793129,"genie.py",9155,0,"",python,selection_keyboard +2547,8793233,"genie.py",9155,0,"i",python,content +2548,8793234,"genie.py",9156,0,"",python,selection_keyboard +2549,8793303,"genie.py",9156,0,"o",python,content +2550,8793304,"genie.py",9157,0,"",python,selection_keyboard +2551,8793443,"genie.py",9157,0,"n",python,content +2552,8793444,"genie.py",9158,0,"",python,selection_keyboard +2553,8797809,"genie.py",9197,0,"",python,selection_mouse +2554,8797971,"genie.py",9189,17,"final_token_probs",python,selection_mouse +2555,8799559,"genie.py",9197,0,"",python,selection_mouse +2556,8799559,"genie.py",9189,17,"final_token_probs",python,selection_mouse +2557,8800700,"genie.py",9200,0,"",python,selection_mouse +2558,8800701,"genie.py",9189,17,"final_token_probs",python,selection_mouse +2559,8801396,"genie.py",9282,0,"",python,selection_mouse +2560,8801917,"genie.py",9199,0,"",python,selection_mouse +2561,8802079,"genie.py",9189,17,"final_token_probs",python,selection_mouse +2562,8802874,"genie.py",9185,0,"",python,selection_mouse +2563,8802981,"genie.py",9182,6,"arange",python,selection_mouse +2564,8803472,"genie.py",9286,0,"",python,selection_mouse +2565,8803643,"genie.py",9280,17,"final_token_probs",python,selection_mouse +2566,8808296,"genie.py",9288,0,"",python,selection_mouse +2567,8808365,"genie.py",9280,17,"final_token_probs",python,selection_mouse +2568,8808868,"genie.py",9287,0,"",python,selection_mouse +2569,8808955,"genie.py",9280,17,"final_token_probs",python,selection_mouse +2570,8809515,"genie.py",9199,0,"",python,selection_mouse +2571,8809667,"genie.py",9189,17,"final_token_probs",python,selection_mouse +2572,8811166,"genie.py",9233,0,"",python,selection_mouse +2573,8811342,"genie.py",9220,25,"num_unmasked_tokens_delta",python,selection_mouse +2574,8835142,"genie.py",9196,0,"",python,selection_mouse +2575,8835266,"genie.py",9189,17,"final_token_probs",python,selection_mouse +2576,8876994,"genie.py",9196,0,"",python,selection_mouse +2577,8876995,"genie.py",9189,17,"final_token_probs",python,selection_mouse +2578,8877884,"genie.py",9196,0,"",python,selection_mouse +2579,8877885,"genie.py",9189,17,"final_token_probs",python,selection_mouse 
+2580,8878741,"genie.py",9196,0,"",python,selection_mouse +2581,8878741,"genie.py",9189,17,"final_token_probs",python,selection_mouse +2582,8879602,"genie.py",9183,0,"",python,selection_mouse +2583,8879764,"genie.py",9182,6,"arange",python,selection_mouse +2584,8880416,"genie.py",9082,0,"",python,selection_mouse +2585,8880596,"genie.py",9079,19,"num_unmasked_tokens",python,selection_mouse +2586,8881128,"genie.py",9198,0,"",python,selection_mouse +2587,8881303,"genie.py",9189,17,"final_token_probs",python,selection_mouse +2588,8884315,"genie.py",9198,0,"",python,selection_mouse +2589,8884753,"genie.py",9189,17,"final_token_probs",python,selection_mouse +2590,8993435,"genie.py",9184,0,"",python,selection_mouse +2591,8993573,"genie.py",9182,6,"arange",python,selection_mouse +2592,8994600,"genie.py",9184,0,"",python,selection_mouse +2593,8994601,"genie.py",9182,6,"arange",python,selection_mouse +2594,8994767,"genie.py",9159,87," idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens_delta\n",python,selection_mouse +2595,8995339,"genie.py",9184,0,"",python,selection_mouse +2596,9040069,"genie.py",9182,6,"arange",python,selection_mouse +2597,9040697,"genie.py",9189,0,"",python,selection_mouse +2598,9050985,"genie.py",9245,0,"",python,selection_mouse +2599,9051540,"genie.py",9128,0,"",python,selection_mouse +2600,9051676,"genie.py",9127,3,"how",python,selection_mouse +2601,9051903,"genie.py",9127,8,"how many",python,selection_mouse +2602,9051954,"genie.py",9127,16,"how many predict",python,selection_mouse +2603,9052051,"genie.py",9127,17,"how many predict ",python,selection_mouse +2604,9052070,"genie.py",9127,21,"how many predict this",python,selection_mouse +2605,9052170,"genie.py",9127,22,"how many predict this ",python,selection_mouse +2606,9052246,"genie.py",9127,31,"how many predict this iteration",python,selection_mouse +2607,9052715,"genie.py",9153,0,"",python,selection_mouse +2608,9053170,"genie.py",9245,0,"",python,selection_mouse +2609,9073266,"genie.py",9220,0,"",python,selection_mouse +2610,9073408,"genie.py",9220,25,"num_unmasked_tokens_delta",python,selection_mouse +2611,9215398,"genie.py",9428,0,"",python,selection_mouse +2612,9216688,"genie.py",9441,0,"",python,selection_mouse +2613,9217405,"genie.py",9446,0,"",python,selection_mouse +2614,9266735,"genie.py",9313,0,"",python,selection_mouse +2615,9266912,"genie.py",9308,10,"descending",python,selection_mouse +2616,9267069,"genie.py",9308,11,"descending=",python,selection_mouse +2617,9267103,"genie.py",9308,15,"descending=True",python,selection_mouse +2618,9271052,"genie.py",9314,0,"",python,selection_mouse +2619,9271175,"genie.py",9308,10,"descending",python,selection_mouse +2620,9271316,"genie.py",9308,11,"descending=",python,selection_mouse +2621,9271351,"genie.py",9308,15,"descending=True",python,selection_mouse +2622,9271739,"genie.py",9308,15,"",python,content +2623,9271932,"genie.py",9307,1,"",python,content +2624,9272161,"genie.py",9306,1,"",python,content +2625,9273774,"genie.py",9197,0,"",python,selection_mouse +2626,9274268,"genie.py",9346,0,"",python,selection_mouse +2627,9274405,"genie.py",9342,6,"lambda",python,selection_mouse +2628,9274909,"genie.py",9275,0,"",python,selection_mouse +2629,9275036,"genie.py",9272,7,"argsort",python,selection_mouse +2630,9276711,"genie.py",9219,0,"",python,selection_mouse +2631,9277477,"genie.py",9218,1,"",python,content +2632,9278396,"genie.py",9218,0,"<",python,content +2633,9278397,"genie.py",9219,0,"",python,selection_keyboard 
+2634,9278527,"genie.py",9219,0,"=",python,content +2635,9278528,"genie.py",9220,0,"",python,selection_keyboard +2636,9280335,"genie.py",9274,0,"",python,selection_mouse +2637,9280452,"genie.py",9273,7,"argsort",python,selection_mouse +2638,9281026,"genie.py",9338,0,"",python,selection_mouse +2639,9281118,"genie.py",9338,4,"vmap",python,selection_mouse +2640,9283585,"genie.py",9440,0,"",python,selection_mouse +2641,9284135,"genie.py",9439,0,"",python,selection_mouse +2642,9300086,"genie.py",9138,0,"",python,selection_mouse +2643,9300173,"genie.py",9136,7,"predict",python,selection_mouse +2644,9300383,"genie.py",9136,12,"predict this",python,selection_mouse +2645,9300416,"genie.py",9136,22,"predict this iteration",python,selection_mouse +2646,9300827,"genie.py",9154,0,"",python,selection_mouse +2647,9300828,"genie.py",9149,9,"iteration",python,selection_mouse +2648,9301037,"genie.py",9148,10," iteration",python,selection_mouse +2649,9301067,"genie.py",9144,14,"this iteration",python,selection_mouse +2650,9301102,"genie.py",9136,22,"predict this iteration",python,selection_mouse +2651,9301191,"genie.py",9135,23," predict this iteration",python,selection_mouse +2652,9301200,"genie.py",9131,27,"many predict this iteration",python,selection_mouse +2653,9301284,"genie.py",9130,28," many predict this iteration",python,selection_mouse +2654,9301319,"genie.py",9127,31,"how many predict this iteration",python,selection_mouse +2655,9301416,"genie.py",9126,32," how many predict this iteration",python,selection_mouse +2656,9301492,"genie.py",9125,33,"# how many predict this iteration",python,selection_mouse +2657,9301853,"genie.py",9125,0,"",python,selection_mouse +2658,9301854,"genie.py",9124,1," ",python,selection_mouse +2659,9302045,"genie.py",9124,2," #",python,selection_mouse +2660,9302049,"genie.py",9124,3," # ",python,selection_mouse +2661,9302064,"genie.py",9124,6," # how",python,selection_mouse +2662,9302091,"genie.py",9124,11," # how many",python,selection_mouse +2663,9302174,"genie.py",9124,19," # how many predict",python,selection_mouse +2664,9302285,"genie.py",9124,20," # how many predict ",python,selection_mouse +2665,9302301,"genie.py",9124,24," # how many predict this",python,selection_mouse +2666,9302405,"genie.py",9124,25," # how many predict this ",python,selection_mouse +2667,9302437,"genie.py",9124,34," # how many predict this iteration",python,selection_mouse +2668,9303210,"genie.py",9155,0,"",python,selection_mouse +2669,9303211,"genie.py",9149,9,"iteration",python,selection_mouse +2670,9303448,"genie.py",9144,14,"this iteration",python,selection_mouse +2671,9303481,"genie.py",9143,15," this iteration",python,selection_mouse +2672,9303488,"genie.py",9136,22,"predict this iteration",python,selection_mouse +2673,9303569,"genie.py",9135,23," predict this iteration",python,selection_mouse +2674,9303586,"genie.py",9131,27,"many predict this iteration",python,selection_mouse +2675,9303686,"genie.py",9130,28," many predict this iteration",python,selection_mouse +2676,9303717,"genie.py",9127,31,"how many predict this iteration",python,selection_mouse +2677,9303941,"genie.py",9126,32," how many predict this iteration",python,selection_mouse +2678,9304518,"genie.py",9126,0,"",python,selection_mouse +2679,9304519,"genie.py",9126,1," ",python,selection_mouse +2680,9304775,"genie.py",9126,4," how",python,selection_mouse +2681,9304791,"genie.py",9126,5," how ",python,selection_mouse +2682,9304820,"genie.py",9126,9," how many",python,selection_mouse +2683,9304857,"genie.py",9126,17," how many 
predict",python,selection_mouse +2684,9304933,"genie.py",9126,18," how many predict ",python,selection_mouse +2685,9304933,"genie.py",9126,22," how many predict this",python,selection_mouse +2686,9304979,"genie.py",9126,23," how many predict this ",python,selection_mouse +2687,9305012,"genie.py",9126,32," how many predict this iteration",python,selection_mouse +2688,9305487,"genie.py",9154,0,"",python,selection_mouse +2689,9305487,"genie.py",9149,9,"iteration",python,selection_mouse +2690,9305660,"genie.py",9148,10," iteration",python,selection_mouse +2691,9305697,"genie.py",9144,14,"this iteration",python,selection_mouse +2692,9305697,"genie.py",9136,22,"predict this iteration",python,selection_mouse +2693,9305775,"genie.py",9131,27,"many predict this iteration",python,selection_mouse +2694,9305801,"genie.py",9130,28," many predict this iteration",python,selection_mouse +2695,9305819,"genie.py",9127,31,"how many predict this iteration",python,selection_mouse +2696,9306002,"genie.py",9126,32," how many predict this iteration",python,selection_mouse +2697,9306119,"genie.py",9125,33,"# how many predict this iteration",python,selection_mouse +2698,9306628,"genie.py",9125,0,"",python,selection_mouse +2699,9306629,"genie.py",9124,1," ",python,selection_mouse +2700,9306897,"genie.py",9124,2," #",python,selection_mouse +2701,9306913,"genie.py",9124,3," # ",python,selection_mouse +2702,9306950,"genie.py",9124,6," # how",python,selection_mouse +2703,9307026,"genie.py",9124,7," # how ",python,selection_mouse +2704,9307031,"genie.py",9124,11," # how many",python,selection_mouse +2705,9307032,"genie.py",9124,12," # how many ",python,selection_mouse +2706,9307039,"genie.py",9124,19," # how many predict",python,selection_mouse +2707,9307166,"genie.py",9124,20," # how many predict ",python,selection_mouse +2708,9307167,"genie.py",9124,24," # how many predict this",python,selection_mouse +2709,9307195,"genie.py",9124,25," # how many predict this ",python,selection_mouse +2710,9307210,"genie.py",9124,34," # how many predict this iteration",python,selection_mouse +2711,9307791,"genie.py",9154,0,"",python,selection_mouse +2712,9307791,"genie.py",9149,9,"iteration",python,selection_mouse +2713,9307987,"genie.py",9148,10," iteration",python,selection_mouse +2714,9308006,"genie.py",9144,14,"this iteration",python,selection_mouse +2715,9308018,"genie.py",9143,15," this iteration",python,selection_mouse +2716,9308043,"genie.py",9136,22,"predict this iteration",python,selection_mouse +2717,9308126,"genie.py",9135,23," predict this iteration",python,selection_mouse +2718,9308127,"genie.py",9131,27,"many predict this iteration",python,selection_mouse +2719,9308204,"genie.py",9130,28," many predict this iteration",python,selection_mouse +2720,9308205,"genie.py",9127,31,"how many predict this iteration",python,selection_mouse +2721,9308372,"genie.py",9126,32," how many predict this iteration",python,selection_mouse +2722,9309194,"genie.py",9125,33,"# how many predict this iteration",python,selection_mouse +2723,9310061,"genie.py",9125,0,"",python,selection_mouse +2724,9310205,"genie.py",9124,1," ",python,selection_mouse +2725,9310431,"genie.py",9124,3," # ",python,selection_mouse +2726,9310445,"genie.py",9124,6," # how",python,selection_mouse +2727,9310482,"genie.py",9124,122," # how many predict this iteration\n idx_mask = jnp.arange(final_token_probs.shape[-1]) <= num_unmasked_tokens_delta",python,selection_mouse +2728,9310762,"genie.py",9124,24," # how many predict this",python,selection_mouse 
+2729,9310969,"genie.py",9124,25," # how many predict this ",python,selection_mouse +2730,9311350,"genie.py",9149,0,"",python,selection_mouse +2731,9366073,"genie.py",9221,0,"",python,selection_mouse +2732,9367654,"genie.py",9221,0,"N",python,content +2733,9367656,"genie.py",9222,0,"",python,selection_keyboard +2734,9368126,"genie.py",9222,0,"-",python,content +2735,9368127,"genie.py",9223,0,"",python,selection_keyboard +2736,9369237,"genie.py",9248,0,"",python,selection_mouse +2737,9369432,"genie.py",9247,1,"a",python,selection_mouse +2738,9369501,"genie.py",9246,2,"ta",python,selection_mouse +2739,9369501,"genie.py",9245,3,"lta",python,selection_mouse +2740,9369580,"genie.py",9244,4,"elta",python,selection_mouse +2741,9369717,"genie.py",9243,5,"delta",python,selection_mouse +2742,9369929,"genie.py",9242,6,"_delta",python,selection_mouse +2743,9385602,"genie.py",9242,6,"",python,content +2744,9391365,"genie.py",8875,0,"",python,selection_mouse +2745,9391958,"genie.py",9228,0,"",python,selection_mouse +2746,9392509,"genie.py",9223,0,"",python,selection_mouse +2747,9393108,"genie.py",9220,0,"",python,selection_mouse +2748,9393490,"genie.py",9220,1," ",python,selection_mouse +2749,9393716,"genie.py",9159,84," idx_mask = jnp.arange(final_token_probs.shape[-1]) <= N-num_unmasked_tokens\n",python,selection_mouse +2750,9394434,"genie.py",9221,0,"",python,selection_mouse +2751,9394435,"genie.py",9221,1,"N",python,selection_mouse +2752,9395036,"genie.py",9227,0,"",python,selection_mouse +2753,9395184,"genie.py",9223,19,"num_unmasked_tokens",python,selection_mouse +2754,9399105,"genie.py",9436,0,"",python,selection_mouse +2755,9401159,"genie.py",9110,0,"",python,selection_mouse +2756,9401288,"genie.py",9101,23,"num_unmasked_tokens_old",python,selection_mouse +2757,9403225,"genie.py",9071,0,"",python,selection_mouse +2758,9403352,"genie.py",9051,25,"num_unmasked_tokens_delta",python,selection_mouse +2759,9404206,"genie.py",9071,0,"",python,selection_mouse +2760,9404844,"genie.py",9070,0,"",python,selection_command +2761,9405677,"genie.py",9051,0,"",python,selection_command +2762,9406075,"genie.py",9051,0,"#",python,content +2763,9406076,"genie.py",9052,0,"",python,selection_keyboard +2764,9406192,"genie.py",9052,0," ",python,content +2765,9406192,"genie.py",9053,0,"",python,selection_keyboard +2766,9406613,"genie.py",9052,0,"",python,selection_command +2767,9406718,"genie.py",8939,0,"",python,selection_command +2768,9407916,"genie.py",8938,0,"",python,selection_command +2769,9408696,"genie.py",8832,0,"",python,selection_command +2770,9409636,"genie.py",8832,0,"#",python,content +2771,9409637,"genie.py",8833,0,"",python,selection_keyboard +2772,9409705,"genie.py",8833,0," ",python,content +2773,9409706,"genie.py",8834,0,"",python,selection_keyboard +2774,9409950,"genie.py",8833,0,"",python,selection_command +2775,9411651,"genie.py",8596,0,"",python,selection_mouse +2776,9411798,"genie.py",8593,9,"gather_fn",python,selection_mouse +2777,9412314,"genie.py",8719,0,"",python,selection_mouse +2778,9412828,"genie.py",8685,0,"",python,selection_mouse +2779,9435918,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +2780,9436738,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +2781,9436824,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# 
source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir \\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output +2782,9436972,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +2783,9437108,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +2784,9441424,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +2785,9443576,"TERMINAL",0,0,"2025-07-31 15:07:26.229633: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2786,9455236,"TERMINAL",0,0,"2025-07-31 15:07:37.755158: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2787,9467007,"TERMINAL",0,0,"2025-07-31 15:07:49.555399: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2788,9471044,"TERMINAL",0,0,"2025-07-31 15:07:53.697008: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2789,9474741,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/100000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 119000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/119000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/117000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 118000\r\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/118000/metrics/metrics not found.\r\n",,terminal_output +2790,9529876,"TERMINAL",0,0,"2025-07-31 15:08:52.498839: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:08:52.499090: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:08:52.499450: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:08:52.499470: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2791,9577997,"TERMINAL",0,0,"SSIM: 0.3171235918998718\r\n",,terminal_output +2792,9579431,"TERMINAL",0,0,"]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +2793,9681479,"genie.py",0,0,"",python,tab +2794,9708211,"genie.py",8771,0,"",python,selection_mouse +2795,9715627,"genie.py",8623,0,"",python,selection_mouse +2796,9716199,"genie.py",8723,0,"",python,selection_mouse +2797,9716200,"genie.py",8722,0,"",python,selection_command +2798,9760648,"genie.py",8723,0,"",python,selection_mouse +2799,9760652,"genie.py",8722,0,"",python,selection_command +2800,9762977,"genie.py",8686,0,"",python,selection_command +2801,9763084,"genie.py",8722,0,"",python,selection_command +2802,9763664,"genie.py",8758,0,"",python,selection_command +2803,9764522,"genie.py",8722,0,"",python,selection_command +2804,9764749,"genie.py",8686,0,"",python,selection_command +2805,9764983,"genie.py",8722,0,"",python,selection_command +2806,9765119,"genie.py",8758,0,"",python,selection_command +2807,9765828,"genie.py",8757,0,"",python,selection_command +2808,9766371,"genie.py",8721,0,"",python,selection_command +2809,9766758,"genie.py",8686,0,"",python,selection_command +2810,9767042,"genie.py",8721,0,"",python,selection_command +2811,9767227,"genie.py",8757,0,"",python,selection_command +2812,9770681,"genie.py",8766,0,"",python,selection_mouse +2813,9770845,"genie.py",8761,18,"sampled_token_idxs",python,selection_mouse +2814,9771580,"genie.py",8757,0,"",python,selection_mouse +2815,9771724,"genie.py",8755,4,"mask",python,selection_mouse +2816,9772874,"genie.py",8684,0,"",python,selection_mouse +2817,9773012,"genie.py",8683,4,"mask",python,selection_mouse +2818,9773630,"genie.py",8707,0,"",python,selection_mouse +2819,9774211,"genie.py",8674,0,"",python,selection_mouse +2820,9774356,"genie.py",8661,17,"final_token_probs",python,selection_mouse +2821,9780429,"genie.py",8684,0,"",python,selection_mouse +2822,9780557,"genie.py",8683,4,"mask",python,selection_mouse 
+2823,9785785,"genie.py",8759,0,"",python,selection_mouse +2824,9785929,"genie.py",8755,4,"mask",python,selection_mouse +2825,9786636,"genie.py",8757,0,"",python,selection_mouse +2826,9787470,"genie.py",8601,0,"",python,selection_mouse +2827,9787641,"genie.py",8593,9,"gather_fn",python,selection_mouse +2828,9788338,"genie.py",8687,0,"",python,selection_mouse +2829,9788343,"genie.py",8686,0,"",python,selection_command +2830,9788481,"genie.py",8683,4,"mask",python,selection_mouse +2831,9788517,"genie.py",8684,3,"ask",python,selection_command +2832,9789274,"genie.py",8759,0,"",python,selection_mouse +2833,9790676,"genie.py",8687,0,"",python,selection_mouse +2834,9790682,"genie.py",8686,0,"",python,selection_command +2835,9790819,"genie.py",8683,4,"mask",python,selection_mouse +2836,9790820,"genie.py",8684,3,"ask",python,selection_command +2837,9791535,"genie.py",8721,0,"",python,selection_mouse +2838,9791683,"genie.py",8719,4,"only",python,selection_mouse +2839,9792293,"genie.py",8757,0,"",python,selection_mouse +2840,9792436,"genie.py",8755,4,"mask",python,selection_mouse +2841,9794437,"genie.py",9274,0,"",python,selection_mouse +2842,9794614,"genie.py",9273,7,"argsort",python,selection_mouse +2843,9795260,"genie.py",9181,0,"",python,selection_mouse +2844,9795850,"genie.py",9261,0,"",python,selection_mouse +2845,9795963,"genie.py",9255,11,"sorted_idxs",python,selection_mouse +2846,9796681,"genie.py",9176,0,"",python,selection_mouse +2847,9796848,"genie.py",9171,8,"idx_mask",python,selection_mouse +2848,9803577,"genie.py",8439,0,"",python,selection_mouse +2849,9803734,"genie.py",8433,18,"sampled_token_idxs",python,selection_mouse +2850,9807714,"genie.py",8583,0,"",python,selection_mouse +2851,9807882,"genie.py",8573,17,"final_token_probs",python,selection_mouse +2852,9811271,"genie.py",8599,0,"",python,selection_mouse +2853,9811392,"genie.py",8593,9,"gather_fn",python,selection_mouse +2854,9813554,"genie.py",8589,0,"",python,selection_mouse +2855,9813717,"genie.py",8573,17,"final_token_probs",python,selection_mouse +2856,9818788,"genie.py",8471,0,"",python,selection_mouse +2857,9820205,"genie.py",8469,0,"",python,selection_mouse +2858,9820994,"genie.py",8585,0,"",python,selection_mouse +2859,9852931,"genie.py",9224,0,"",python,selection_mouse +2860,9853076,"genie.py",9224,1," ",python,selection_mouse +2861,9853657,"genie.py",9227,0,"",python,selection_mouse +2862,9853816,"genie.py",9227,19,"num_unmasked_tokens",python,selection_mouse +2863,9854423,"genie.py",9225,0,"",python,selection_mouse +2864,9854590,"genie.py",9225,1,"N",python,selection_mouse +2865,9855183,"genie.py",9232,0,"",python,selection_mouse +2866,9855328,"genie.py",9227,19,"num_unmasked_tokens",python,selection_mouse +2867,9893715,"genie.py",9234,0,"",python,selection_mouse +2868,9894304,"genie.py",9225,0,"",python,selection_mouse +2869,9894445,"genie.py",9225,1,"N",python,selection_mouse +2870,9895049,"genie.py",9096,0,"",python,selection_mouse +2871,9896550,"genie.py",9045,118,"",python,content +2872,9896556,"genie.py",9053,0,"",python,selection_command +2873,9896591,"genie.py",8940,0,"",python,selection_command +2874,9896946,"genie.py",8832,0,"",python,selection_command +2875,9897241,"genie.py",8824,108,"",python,content +2876,9897252,"genie.py",8832,0,"",python,selection_command +2877,9897735,"genie.py",8936,0,"",python,selection_command +2878,9898120,"genie.py",8906,30,"",python,content +2879,9898256,"genie.py",8904,2,"",python,content +2880,9899377,"genie.py",8903,0,"",python,selection_command 
+2881,9899453,"genie.py",8984,0,"",python,selection_command +2882,9900129,"genie.py",8988,0,"",python,selection_command +2883,9901296,"genie.py",8987,0,"",python,selection_command +2884,9901395,"genie.py",8903,0,"",python,selection_command +2885,9901855,"genie.py",8904,0,"",python,selection_command +2886,9902566,"genie.py",8904,0,"#",python,content +2887,9902567,"genie.py",8905,0,"",python,selection_keyboard +2888,9902979,"genie.py",8905,0," ",python,content +2889,9902979,"genie.py",8906,0,"",python,selection_keyboard +2890,9903965,"genie.py",8906,0,"t",python,content +2891,9903965,"genie.py",8907,0,"",python,selection_keyboard +2892,9904045,"genie.py",8907,0,"o",python,content +2893,9904046,"genie.py",8908,0,"",python,selection_keyboard +2894,9904089,"genie.py",8908,0,"k",python,content +2895,9904090,"genie.py",8909,0,"",python,selection_keyboard +2896,9904209,"genie.py",8909,0,"e",python,content +2897,9904210,"genie.py",8910,0,"",python,selection_keyboard +2898,9904343,"genie.py",8910,0,"n",python,content +2899,9904344,"genie.py",8911,0,"",python,selection_keyboard +2900,9904449,"genie.py",8911,0,"s",python,content +2901,9904449,"genie.py",8912,0,"",python,selection_keyboard +2902,9904547,"genie.py",8912,0," ",python,content +2903,9904548,"genie.py",8913,0,"",python,selection_keyboard +2904,9905126,"genie.py",8913,0,"t",python,content +2905,9905127,"genie.py",8914,0,"",python,selection_keyboard +2906,9905504,"genie.py",8913,1,"",python,content +2907,9905753,"genie.py",8912,1,"",python,content +2908,9905887,"genie.py",8911,1,"",python,content +2909,9906040,"genie.py",8910,1,"",python,content +2910,9906175,"genie.py",8909,1,"",python,content +2911,9906308,"genie.py",8908,1,"",python,content +2912,9906453,"genie.py",8907,1,"",python,content +2913,9907006,"genie.py",8906,1,"",python,content +2914,9907357,"genie.py",8905,1,"",python,content +2915,9907489,"genie.py",8904,1,"",python,content +2916,9907956,"genie.py",8903,1,"",python,content +2917,9908088,"genie.py",8902,0,"",python,selection_command +2918,9909531,"genie.py",8823,0,"\n ",python,content +2919,9910045,"genie.py",8832,0,"#",python,content +2920,9910046,"genie.py",8833,0,"",python,selection_keyboard +2921,9910312,"genie.py",8833,0," ",python,content +2922,9910312,"genie.py",8834,0,"",python,selection_keyboard +2923,9910476,"genie.py",8834,0,"n",python,content +2924,9910477,"genie.py",8835,0,"",python,selection_keyboard +2925,9910831,"genie.py",8835,0,"u",python,content +2926,9910832,"genie.py",8836,0,"",python,selection_keyboard +2927,9911000,"genie.py",8836,0,"m",python,content +2928,9911001,"genie.py",8837,0,"",python,selection_keyboard +2929,9911215,"genie.py",8837,0,"b",python,content +2930,9911215,"genie.py",8838,0,"",python,selection_keyboard +2931,9911316,"genie.py",8838,0,"e",python,content +2932,9911317,"genie.py",8839,0,"",python,selection_keyboard +2933,9911393,"genie.py",8839,0,"r",python,content +2934,9911394,"genie.py",8840,0,"",python,selection_keyboard +2935,9911501,"genie.py",8840,0," ",python,content +2936,9911501,"genie.py",8841,0,"",python,selection_keyboard +2937,9911564,"genie.py",8841,0,"o",python,content +2938,9911565,"genie.py",8842,0,"",python,selection_keyboard +2939,9911637,"genie.py",8842,0,"f",python,content +2940,9911638,"genie.py",8843,0,"",python,selection_keyboard +2941,9911754,"genie.py",8843,0," ",python,content +2942,9911754,"genie.py",8844,0,"",python,selection_keyboard +2943,9911890,"genie.py",8844,0,"t",python,content +2944,9911890,"genie.py",8845,0,"",python,selection_keyboard 
+2945,9912026,"genie.py",8845,0,"o",python,content +2946,9912027,"genie.py",8846,0,"",python,selection_keyboard +2947,9912417,"genie.py",8846,0,"k",python,content +2948,9912418,"genie.py",8847,0,"",python,selection_keyboard +2949,9912552,"genie.py",8847,0,"e",python,content +2950,9912553,"genie.py",8848,0,"",python,selection_keyboard +2951,9912611,"genie.py",8848,0,"n",python,content +2952,9912612,"genie.py",8849,0,"",python,selection_keyboard +2953,9912794,"genie.py",8849,0,"s",python,content +2954,9912795,"genie.py",8850,0,"",python,selection_keyboard +2955,9912862,"genie.py",8850,0," ",python,content +2956,9912863,"genie.py",8851,0,"",python,selection_keyboard +2957,9913035,"genie.py",8851,0,"t",python,content +2958,9913036,"genie.py",8852,0,"",python,selection_keyboard +2959,9913079,"genie.py",8852,0,"h",python,content +2960,9913079,"genie.py",8853,0,"",python,selection_keyboard +2961,9913288,"genie.py",8853,0,"a",python,content +2962,9913289,"genie.py",8854,0,"",python,selection_keyboard +2963,9913489,"genie.py",8854,0,"t",python,content +2964,9913490,"genie.py",8855,0,"",python,selection_keyboard +2965,9913543,"genie.py",8855,0," ",python,content +2966,9913543,"genie.py",8856,0,"",python,selection_keyboard +2967,9913828,"genie.py",8856,0,"s",python,content +2968,9913828,"genie.py",8857,0,"",python,selection_keyboard +2969,9913953,"genie.py",8857,0,"h",python,content +2970,9913954,"genie.py",8858,0,"",python,selection_keyboard +2971,9914407,"genie.py",8857,1,"",python,content +2972,9914557,"genie.py",8856,1,"",python,content +2973,9915037,"genie.py",8856,0,"s",python,content +2974,9915038,"genie.py",8857,0,"",python,selection_keyboard +2975,9915100,"genie.py",8857,0,"h",python,content +2976,9915101,"genie.py",8858,0,"",python,selection_keyboard +2977,9915264,"genie.py",8858,0,"o",python,content +2978,9915264,"genie.py",8859,0,"",python,selection_keyboard +2979,9915387,"genie.py",8859,0,"u",python,content +2980,9915387,"genie.py",8860,0,"",python,selection_keyboard +2981,9915539,"genie.py",8860,0,"l",python,content +2982,9915540,"genie.py",8861,0,"",python,selection_keyboard +2983,9915688,"genie.py",8861,0,"d",python,content +2984,9915689,"genie.py",8862,0,"",python,selection_keyboard +2985,9915756,"genie.py",8862,0," ",python,content +2986,9915757,"genie.py",8863,0,"",python,selection_keyboard +2987,9916030,"genie.py",8863,0,"b",python,content +2988,9916031,"genie.py",8864,0,"",python,selection_keyboard +2989,9916142,"genie.py",8864,0,"e",python,content +2990,9916143,"genie.py",8865,0,"",python,selection_keyboard +2991,9916339,"genie.py",8865,0," ",python,content +2992,9916339,"genie.py",8866,0,"",python,selection_keyboard +2993,9916573,"genie.py",8866,0,"u",python,content +2994,9916574,"genie.py",8867,0,"",python,selection_keyboard +2995,9916713,"genie.py",8867,0,"n",python,content +2996,9916714,"genie.py",8868,0,"",python,selection_keyboard +2997,9916920,"genie.py",8868,0,"m",python,content +2998,9916921,"genie.py",8869,0,"",python,selection_keyboard +2999,9917092,"genie.py",8869,0,"a",python,content +3000,9917093,"genie.py",8870,0,"",python,selection_keyboard +3001,9917228,"genie.py",8870,0,"k",python,content +3002,9917229,"genie.py",8871,0,"",python,selection_keyboard +3003,9917461,"genie.py",8871,0,"e",python,content +3004,9917462,"genie.py",8872,0,"",python,selection_keyboard +3005,9918126,"genie.py",8871,1,"",python,content +3006,9918250,"genie.py",8870,1,"",python,content +3007,9918357,"genie.py",8870,0,"s",python,content 
+3008,9918357,"genie.py",8871,0,"",python,selection_keyboard +3009,9918626,"genie.py",8871,0,"e",python,content +3010,9918627,"genie.py",8872,0,"",python,selection_keyboard +3011,9918941,"genie.py",8871,1,"",python,content +3012,9919145,"genie.py",8871,0,"k",python,content +3013,9919146,"genie.py",8872,0,"",python,selection_keyboard +3014,9919285,"genie.py",8872,0,"e",python,content +3015,9919286,"genie.py",8873,0,"",python,selection_keyboard +3016,9919414,"genie.py",8873,0,"d",python,content +3017,9919415,"genie.py",8874,0,"",python,selection_keyboard +3018,9919482,"genie.py",8874,0," ",python,content +3019,9919483,"genie.py",8875,0,"",python,selection_keyboard +3020,9919873,"genie.py",8875,0,"a",python,content +3021,9919874,"genie.py",8876,0,"",python,selection_keyboard +3022,9920033,"genie.py",8876,0,"f",python,content +3023,9920034,"genie.py",8877,0,"",python,selection_keyboard +3024,9920230,"genie.py",8877,0,"t",python,content +3025,9920231,"genie.py",8878,0,"",python,selection_keyboard +3026,9920497,"genie.py",8878,0,"e",python,content +3027,9920498,"genie.py",8879,0,"",python,selection_keyboard +3028,9920597,"genie.py",8879,0,"r",python,content +3029,9920598,"genie.py",8880,0,"",python,selection_keyboard +3030,9920630,"genie.py",8880,0," ",python,content +3031,9920630,"genie.py",8881,0,"",python,selection_keyboard +3032,9920773,"genie.py",8881,0,"t",python,content +3033,9920773,"genie.py",8882,0,"",python,selection_keyboard +3034,9920857,"genie.py",8882,0,"h",python,content +3035,9920858,"genie.py",8883,0,"",python,selection_keyboard +3036,9920973,"genie.py",8883,0,"i",python,content +3037,9920974,"genie.py",8884,0,"",python,selection_keyboard +3038,9921085,"genie.py",8884,0,"s",python,content +3039,9921086,"genie.py",8885,0,"",python,selection_keyboard +3040,9921184,"genie.py",8885,0," ",python,content +3041,9921185,"genie.py",8886,0,"",python,selection_keyboard +3042,9921458,"genie.py",8886,0,"s",python,content +3043,9921459,"genie.py",8887,0,"",python,selection_keyboard +3044,9921632,"genie.py",8887,0,"t",python,content +3045,9921633,"genie.py",8888,0,"",python,selection_keyboard +3046,9921841,"genie.py",8888,0,"e",python,content +3047,9921842,"genie.py",8889,0,"",python,selection_keyboard +3048,9921947,"genie.py",8889,0,"p",python,content +3049,9921948,"genie.py",8890,0,"",python,selection_keyboard +3050,9922252,"genie.py",8889,0,"",python,selection_command +3051,9922771,"genie.py",8956,0,"",python,selection_command +3052,9922947,"genie.py",8970,0,"\n ",python,content +3053,9944074,"genie.py",8979,0,"#",python,content +3054,9944075,"genie.py",8980,0,"",python,selection_keyboard +3055,9944982,"genie.py",8980,0," ",python,content +3056,9944983,"genie.py",8981,0,"",python,selection_keyboard +3057,9949024,"genie.py",8981,0,"m",python,content +3058,9949026,"genie.py",8982,0,"",python,selection_keyboard +3059,9949259,"genie.py",8982,0,"a",python,content +3060,9949260,"genie.py",8983,0,"",python,selection_keyboard +3061,9949625,"genie.py",8982,1,"",python,content +3062,9949746,"genie.py",8981,1,"",python,content +3063,9949954,"genie.py",8981,0,"i",python,content +3064,9949955,"genie.py",8982,0,"",python,selection_keyboard +3065,9950061,"genie.py",8982,0,"d",python,content +3066,9950062,"genie.py",8983,0,"",python,selection_keyboard +3067,9950459,"genie.py",8983,0,"x",python,content +3068,9950460,"genie.py",8984,0,"",python,selection_keyboard +3069,9950578,"genie.py",8984,0," ",python,content +3070,9950579,"genie.py",8985,0,"",python,selection_keyboard 
+3071,9950740,"genie.py",8985,0,"m",python,content +3072,9950741,"genie.py",8986,0,"",python,selection_keyboard +3073,9950823,"genie.py",8986,0,"a",python,content +3074,9950824,"genie.py",8987,0,"",python,selection_keyboard +3075,9950893,"genie.py",8987,0,"s",python,content +3076,9950894,"genie.py",8988,0,"",python,selection_keyboard +3077,9950970,"genie.py",8988,0,"k",python,content +3078,9950970,"genie.py",8989,0,"",python,selection_keyboard +3079,9951086,"genie.py",8989,0," ",python,content +3080,9951087,"genie.py",8990,0,"",python,selection_keyboard +3081,9951825,"genie.py",8990,0,"t",python,content +3082,9951825,"genie.py",8991,0,"",python,selection_keyboard +3083,9952380,"genie.py",8991,0,"e",python,content +3084,9952381,"genie.py",8992,0,"",python,selection_keyboard +3085,9952514,"genie.py",8992,0,"l",python,content +3086,9952515,"genie.py",8993,0,"",python,selection_keyboard +3087,9952638,"genie.py",8993,0,"l",python,content +3088,9952639,"genie.py",8994,0,"",python,selection_keyboard +3089,9952840,"genie.py",8994,0,"i",python,content +3090,9952841,"genie.py",8995,0,"",python,selection_keyboard +3091,9952945,"genie.py",8995,0,"n",python,content +3092,9952946,"genie.py",8996,0,"",python,selection_keyboard +3093,9952978,"genie.py",8996,0,"g",python,content +3094,9952978,"genie.py",8997,0,"",python,selection_keyboard +3095,9953088,"genie.py",8997,0," ",python,content +3096,9953089,"genie.py",8998,0,"",python,selection_keyboard +3097,9953195,"genie.py",8998,0,"u",python,content +3098,9953195,"genie.py",8999,0,"",python,selection_keyboard +3099,9953276,"genie.py",8999,0,"s",python,content +3100,9953277,"genie.py",9000,0,"",python,selection_keyboard +3101,9953379,"genie.py",9000,0," ",python,content +3102,9953380,"genie.py",9001,0,"",python,selection_keyboard +3103,9960516,"genie.py",8998,3,"",python,content +3104,9961123,"genie.py",8990,8,"",python,content +3105,9961894,"genie.py",8990,0,"w",python,content +3106,9961895,"genie.py",8991,0,"",python,selection_keyboard +3107,9961929,"genie.py",8991,0,"i",python,content +3108,9961930,"genie.py",8992,0,"",python,selection_keyboard +3109,9962397,"genie.py",8992,0,"t",python,content +3110,9962398,"genie.py",8993,0,"",python,selection_keyboard +3111,9962471,"genie.py",8993,0,"h",python,content +3112,9962472,"genie.py",8994,0,"",python,selection_keyboard +3113,9962575,"genie.py",8994,0," ",python,content +3114,9962576,"genie.py",8995,0,"",python,selection_keyboard +3115,9970442,"genie.py",8995,0,"n",python,content +3116,9970443,"genie.py",8996,0,"",python,selection_keyboard +3117,9970585,"genie.py",8996,0,"u",python,content +3118,9970585,"genie.py",8997,0,"",python,selection_keyboard +3119,9970773,"genie.py",8997,0,"m",python,content +3120,9970775,"genie.py",8998,0,"",python,selection_keyboard +3121,9971086,"genie.py",8998,0,"_",python,content +3122,9971087,"genie.py",8999,0,"",python,selection_keyboard +3123,9971456,"genie.py",8999,0,"u",python,content +3124,9971456,"genie.py",9000,0,"",python,selection_keyboard +3125,9971610,"genie.py",9000,0,"n",python,content +3126,9971611,"genie.py",9001,0,"",python,selection_keyboard +3127,9971809,"genie.py",9001,0,"m",python,content +3128,9971809,"genie.py",9002,0,"",python,selection_keyboard +3129,9971913,"genie.py",9002,0,"a",python,content +3130,9971914,"genie.py",9003,0,"",python,selection_keyboard +3131,9972299,"genie.py",9003,0,"s",python,content +3132,9972299,"genie.py",9004,0,"",python,selection_keyboard +3133,9972334,"genie.py",9004,0,"k",python,content 
+3134,9972335,"genie.py",9005,0,"",python,selection_keyboard +3135,9972541,"genie.py",9005,0,"e",python,content +3136,9972542,"genie.py",9006,0,"",python,selection_keyboard +3137,9972696,"genie.py",9006,0,"d",python,content +3138,9972697,"genie.py",9007,0,"",python,selection_keyboard +3139,9972797,"genie.py",9007,0," ",python,content +3140,9972798,"genie.py",9008,0,"",python,selection_keyboard +3141,9972950,"genie.py",9008,0,"t",python,content +3142,9972951,"genie.py",9009,0,"",python,selection_keyboard +3143,9973061,"genie.py",9009,0,"o",python,content +3144,9973062,"genie.py",9010,0,"",python,selection_keyboard +3145,9973153,"genie.py",9010,0,"k",python,content +3146,9973154,"genie.py",9011,0,"",python,selection_keyboard +3147,9973207,"genie.py",9011,0,"e",python,content +3148,9973208,"genie.py",9012,0,"",python,selection_keyboard +3149,9973363,"genie.py",9012,0,"n",python,content +3150,9973364,"genie.py",9013,0,"",python,selection_keyboard +3151,9973434,"genie.py",9013,0,"s",python,content +3152,9973434,"genie.py",9014,0,"",python,selection_keyboard +3153,9973546,"genie.py",9014,0," ",python,content +3154,9973547,"genie.py",9015,0,"",python,selection_keyboard +3155,9973836,"genie.py",9015,0,"f",python,content +3156,9973836,"genie.py",9016,0,"",python,selection_keyboard +3157,9977267,"genie.py",9016,0,"a",python,content +3158,9977268,"genie.py",9017,0,"",python,selection_keyboard +3159,9977383,"genie.py",9017,0,"l",python,content +3160,9977384,"genie.py",9018,0,"",python,selection_keyboard +3161,9977431,"genie.py",9018,0,"s",python,content +3162,9977432,"genie.py",9019,0,"",python,selection_keyboard +3163,9977606,"genie.py",9019,0,"e",python,content +3164,9977607,"genie.py",9020,0,"",python,selection_keyboard +3165,9977698,"genie.py",9020,0," ",python,content +3166,9977698,"genie.py",9021,0,"",python,selection_keyboard +3167,9991954,"genie.py",9021,0,"n",python,content +3168,9991955,"genie.py",9022,0,"",python,selection_keyboard +3169,9992303,"genie.py",9021,1,"",python,content +3170,9992410,"genie.py",9021,0,"a",python,content +3171,9992411,"genie.py",9022,0,"",python,selection_keyboard +3172,9992553,"genie.py",9022,0,"n",python,content +3173,9992553,"genie.py",9023,0,"",python,selection_keyboard +3174,9992618,"genie.py",9023,0,"d",python,content +3175,9992618,"genie.py",9024,0,"",python,selection_keyboard +3176,9992718,"genie.py",9024,0," ",python,content +3177,9992719,"genie.py",9025,0,"",python,selection_keyboard +3178,9992868,"genie.py",9025,0,"t",python,content +3179,9992871,"genie.py",9026,0,"",python,selection_keyboard +3180,9993670,"genie.py",9026,0,"r",python,content +3181,9993670,"genie.py",9027,0,"",python,selection_keyboard +3182,9993778,"genie.py",9027,0,"u",python,content +3183,9993779,"genie.py",9028,0,"",python,selection_keyboard +3184,9993921,"genie.py",9028,0,"e",python,content +3185,9993922,"genie.py",9029,0,"",python,selection_keyboard +3186,9994024,"genie.py",9029,0," ",python,content +3187,9994025,"genie.py",9030,0,"",python,selection_keyboard +3188,9994370,"genie.py",9030,0,"b",python,content +3189,9994371,"genie.py",9031,0,"",python,selection_keyboard +3190,9994485,"genie.py",9031,0,"e",python,content +3191,9994485,"genie.py",9032,0,"",python,selection_keyboard +3192,9994658,"genie.py",9032,0,"f",python,content +3193,9994659,"genie.py",9033,0,"",python,selection_keyboard +3194,9994789,"genie.py",9033,0,"o",python,content +3195,9994790,"genie.py",9034,0,"",python,selection_keyboard +3196,9994897,"genie.py",9034,0,"r",python,content 
+3197,9994898,"genie.py",9035,0,"",python,selection_keyboard +3198,9995044,"genie.py",9035,0,"e",python,content +3199,9995045,"genie.py",9036,0,"",python,selection_keyboard +3200,9997957,"genie.py",8998,0,"",python,selection_mouse +3201,9998104,"genie.py",8995,12,"num_unmasked",python,selection_mouse +3202,9998333,"genie.py",8995,13,"num_unmasked ",python,selection_mouse +3203,9998344,"genie.py",8995,19,"num_unmasked tokens",python,selection_mouse +3204,9998649,"genie.py",8995,20,"num_unmasked tokens ",python,selection_mouse +3205,9998672,"genie.py",8995,25,"num_unmasked tokens false",python,selection_mouse +3206,9998749,"genie.py",8995,26,"num_unmasked tokens false ",python,selection_mouse +3207,9998755,"genie.py",8995,29,"num_unmasked tokens false and",python,selection_mouse +3208,9999739,"genie.py",8995,29,"",python,content +3209,10000410,"genie.py",8994,1,"",python,content +3210,10000793,"genie.py",8995,0,"",python,selection_command +3211,10000981,"genie.py",8996,0,"",python,selection_command +3212,10001135,"genie.py",8997,0,"",python,selection_command +3213,10001374,"genie.py",8998,0,"",python,selection_command +3214,10001560,"genie.py",8999,0,"",python,selection_command +3215,10002170,"genie.py",8999,0,",",python,content +3216,10002171,"genie.py",9000,0,"",python,selection_keyboard +3217,10002260,"genie.py",9000,0," ",python,content +3218,10002261,"genie.py",9001,0,"",python,selection_keyboard +3219,10002424,"genie.py",9001,0,"t",python,content +3220,10002425,"genie.py",9002,0,"",python,selection_keyboard +3221,10002775,"genie.py",9002,0,"r",python,content +3222,10002776,"genie.py",9003,0,"",python,selection_keyboard +3223,10002893,"genie.py",9003,0,"u",python,content +3224,10002894,"genie.py",9004,0,"",python,selection_keyboard +3225,10002991,"genie.py",9004,0,"e",python,content +3226,10002992,"genie.py",9005,0,"",python,selection_keyboard +3227,10003319,"genie.py",9005,0,",",python,content +3228,10003319,"genie.py",9006,0,"",python,selection_keyboard +3229,10003401,"genie.py",9006,0," ",python,content +3230,10003402,"genie.py",9007,0,"",python,selection_keyboard +3231,10003564,"genie.py",9007,0,"t",python,content +3232,10003565,"genie.py",9008,0,"",python,selection_keyboard +3233,10003999,"genie.py",9008,0,"r",python,content +3234,10003999,"genie.py",9009,0,"",python,selection_keyboard +3235,10004127,"genie.py",9009,0,"u",python,content +3236,10004128,"genie.py",9010,0,"",python,selection_keyboard +3237,10004210,"genie.py",9010,0,"e",python,content +3238,10004211,"genie.py",9011,0,"",python,selection_keyboard +3239,10004638,"genie.py",9011,0," ",python,content +3240,10004638,"genie.py",9012,0,"",python,selection_keyboard +3241,10014696,"genie.py",9007,5,"",python,content +3242,10014840,"genie.py",9005,2,"",python,content +3243,10015272,"genie.py",9001,4,"",python,content +3244,10015578,"genie.py",8999,2,"",python,content +3245,10015923,"genie.py",8995,4,"",python,content +3246,10016460,"genie.py",8995,7,"",python,content +3247,10016634,"genie.py",8995,9,"",python,content +3248,10017809,"genie.py",8995,0,"\n ",python,content +3249,10018326,"genie.py",8990,5,"",python,content +3250,10018500,"genie.py",8985,5,"",python,content +3251,10018779,"genie.py",8981,4,"",python,content +3252,10019239,"genie.py",8981,0,"N",python,content +3253,10019240,"genie.py",8982,0,"",python,selection_keyboard +3254,10019567,"genie.py",8982,0,"-",python,content +3255,10019568,"genie.py",8983,0,"",python,selection_keyboard +3256,10020239,"genie.py",8983,0,"n",python,content 
+3257,10020240,"genie.py",8984,0,"",python,selection_keyboard +3258,10020391,"genie.py",8984,0,"u",python,content +3259,10020392,"genie.py",8985,0,"",python,selection_keyboard +3260,10020538,"genie.py",8985,0,"m",python,content +3261,10020539,"genie.py",8986,0,"",python,selection_keyboard +3262,10020835,"genie.py",8986,0,"_",python,content +3263,10020835,"genie.py",8987,0,"",python,selection_keyboard +3264,10021261,"genie.py",8987,0,"u",python,content +3265,10021261,"genie.py",8988,0,"",python,selection_keyboard +3266,10021364,"genie.py",8988,0,"n",python,content +3267,10021365,"genie.py",8989,0,"",python,selection_keyboard +3268,10021823,"genie.py",8989,0,"m",python,content +3269,10021824,"genie.py",8990,0,"",python,selection_keyboard +3270,10021964,"genie.py",8990,0,"a",python,content +3271,10021965,"genie.py",8991,0,"",python,selection_keyboard +3272,10022097,"genie.py",8991,0,"s",python,content +3273,10022098,"genie.py",8992,0,"",python,selection_keyboard +3274,10022223,"genie.py",8992,0,"k",python,content +3275,10022224,"genie.py",8993,0,"",python,selection_keyboard +3276,10022364,"genie.py",8993,0,"e",python,content +3277,10022364,"genie.py",8994,0,"",python,selection_keyboard +3278,10022451,"genie.py",8994,0,"d",python,content +3279,10022452,"genie.py",8995,0,"",python,selection_keyboard +3280,10022635,"genie.py",8995,0,"_",python,content +3281,10022636,"genie.py",8996,0,"",python,selection_keyboard +3282,10022827,"genie.py",8996,0,"t",python,content +3283,10022828,"genie.py",8997,0,"",python,selection_keyboard +3284,10022931,"genie.py",8997,0,"o",python,content +3285,10022932,"genie.py",8998,0,"",python,selection_keyboard +3286,10023032,"genie.py",8998,0,"k",python,content +3287,10023033,"genie.py",8999,0,"",python,selection_keyboard +3288,10023186,"genie.py",8999,0,"e",python,content +3289,10023187,"genie.py",9000,0,"",python,selection_keyboard +3290,10023269,"genie.py",9000,0,"n",python,content +3291,10023270,"genie.py",9001,0,"",python,selection_keyboard +3292,10023376,"genie.py",9001,0,"s",python,content +3293,10023376,"genie.py",9002,0,"",python,selection_keyboard +3294,10023517,"genie.py",9002,0," ",python,content +3295,10023518,"genie.py",9003,0,"",python,selection_keyboard +3296,10023690,"genie.py",9003,0,"i",python,content +3297,10023691,"genie.py",9004,0,"",python,selection_keyboard +3298,10023792,"genie.py",9004,0,"s",python,content +3299,10023793,"genie.py",9005,0,"",python,selection_keyboard +3300,10023892,"genie.py",9005,0," ",python,content +3301,10023893,"genie.py",9006,0,"",python,selection_keyboard +3302,10024515,"genie.py",9006,0,"h",python,content +3303,10024516,"genie.py",9007,0,"",python,selection_keyboard +3304,10024660,"genie.py",9007,0,"o",python,content +3305,10024660,"genie.py",9008,0,"",python,selection_keyboard +3306,10024733,"genie.py",9008,0,"w",python,content +3307,10024734,"genie.py",9009,0,"",python,selection_keyboard +3308,10024823,"genie.py",9009,0," ",python,content +3309,10024824,"genie.py",9010,0,"",python,selection_keyboard +3310,10024954,"genie.py",9010,0,"m",python,content +3311,10024955,"genie.py",9011,0,"",python,selection_keyboard +3312,10025059,"genie.py",9011,0,"a",python,content +3313,10025060,"genie.py",9012,0,"",python,selection_keyboard +3314,10025160,"genie.py",9012,0,"n",python,content +3315,10025160,"genie.py",9013,0,"",python,selection_keyboard +3316,10025262,"genie.py",9013,0,"y",python,content +3317,10025262,"genie.py",9014,0,"",python,selection_keyboard +3318,10025440,"genie.py",9014,0," ",python,content 
+3319,10025440,"genie.py",9015,0,"",python,selection_keyboard +3320,10025755,"genie.py",9015,0,"t",python,content +3321,10025755,"genie.py",9016,0,"",python,selection_keyboard +3322,10025797,"genie.py",9016,0,"o",python,content +3323,10025799,"genie.py",9017,0,"",python,selection_keyboard +3324,10025911,"genie.py",9017,0,"k",python,content +3325,10025912,"genie.py",9018,0,"",python,selection_keyboard +3326,10026015,"genie.py",9018,0,"e",python,content +3327,10026016,"genie.py",9019,0,"",python,selection_keyboard +3328,10026124,"genie.py",9019,0,"n",python,content +3329,10026125,"genie.py",9020,0,"",python,selection_keyboard +3330,10026204,"genie.py",9020,0,"s",python,content +3331,10026204,"genie.py",9021,0,"",python,selection_keyboard +3332,10026243,"genie.py",9021,0," ",python,content +3333,10026244,"genie.py",9022,0,"",python,selection_keyboard +3334,10026411,"genie.py",9022,0,"h",python,content +3335,10026412,"genie.py",9023,0,"",python,selection_keyboard +3336,10026428,"genie.py",9023,0,"s",python,content +3337,10026429,"genie.py",9024,0,"",python,selection_keyboard +3338,10026850,"genie.py",9023,1,"",python,content +3339,10026981,"genie.py",9022,1,"",python,content +3340,10027089,"genie.py",9022,0,"s",python,content +3341,10027090,"genie.py",9023,0,"",python,selection_keyboard +3342,10027196,"genie.py",9023,0,"h",python,content +3343,10027197,"genie.py",9024,0,"",python,selection_keyboard +3344,10027374,"genie.py",9024,0,"o",python,content +3345,10027375,"genie.py",9025,0,"",python,selection_keyboard +3346,10027468,"genie.py",9025,0,"u",python,content +3347,10027469,"genie.py",9026,0,"",python,selection_keyboard +3348,10027579,"genie.py",9026,0,"l",python,content +3349,10027579,"genie.py",9027,0,"",python,selection_keyboard +3350,10027680,"genie.py",9027,0,"d",python,content +3351,10027681,"genie.py",9028,0,"",python,selection_keyboard +3352,10027757,"genie.py",9028,0," ",python,content +3353,10027757,"genie.py",9029,0,"",python,selection_keyboard +3354,10027907,"genie.py",9029,0,"b",python,content +3355,10027908,"genie.py",9030,0,"",python,selection_keyboard +3356,10027939,"genie.py",9030,0,"e",python,content +3357,10027940,"genie.py",9031,0,"",python,selection_keyboard +3358,10028069,"genie.py",9031,0," ",python,content +3359,10028070,"genie.py",9032,0,"",python,selection_keyboard +3360,10028269,"genie.py",9032,0,"e",python,content +3361,10028270,"genie.py",9033,0,"",python,selection_keyboard +3362,10028604,"genie.py",9032,1,"",python,content +3363,10028860,"genie.py",9032,0,"l",python,content +3364,10028861,"genie.py",9033,0,"",python,selection_keyboard +3365,10028949,"genie.py",9033,0,"e",python,content +3366,10028950,"genie.py",9034,0,"",python,selection_keyboard +3367,10029025,"genie.py",9034,0,"f",python,content +3368,10029026,"genie.py",9035,0,"",python,selection_keyboard +3369,10029252,"genie.py",9035,0,"t",python,content +3370,10029253,"genie.py",9036,0,"",python,selection_keyboard +3371,10029320,"genie.py",9036,0," ",python,content +3372,10029321,"genie.py",9037,0,"",python,selection_keyboard +3373,10029504,"genie.py",9037,0,"m",python,content +3374,10029505,"genie.py",9038,0,"",python,selection_keyboard +3375,10029608,"genie.py",9038,0,"a",python,content +3376,10029609,"genie.py",9039,0,"",python,selection_keyboard +3377,10029680,"genie.py",9039,0,"s",python,content +3378,10029681,"genie.py",9040,0,"",python,selection_keyboard +3379,10029855,"genie.py",9040,0,"k",python,content +3380,10029856,"genie.py",9041,0,"",python,selection_keyboard 
+3381,10029952,"genie.py",9041,0,"e",python,content +3382,10029953,"genie.py",9042,0,"",python,selection_keyboard +3383,10030023,"genie.py",9042,0,"d",python,content +3384,10030024,"genie.py",9043,0,"",python,selection_keyboard +3385,10030197,"genie.py",9043,0," ",python,content +3386,10030198,"genie.py",9044,0,"",python,selection_keyboard +3387,10030756,"genie.py",9044,0,"a",python,content +3388,10030757,"genie.py",9045,0,"",python,selection_keyboard +3389,10030945,"genie.py",9045,0,"f",python,content +3390,10030946,"genie.py",9046,0,"",python,selection_keyboard +3391,10031141,"genie.py",9046,0,"t",python,content +3392,10031142,"genie.py",9047,0,"",python,selection_keyboard +3393,10031344,"genie.py",9047,0,"e",python,content +3394,10031345,"genie.py",9048,0,"",python,selection_keyboard +3395,10031416,"genie.py",9048,0,"r",python,content +3396,10031417,"genie.py",9049,0,"",python,selection_keyboard +3397,10031504,"genie.py",9049,0," ",python,content +3398,10031505,"genie.py",9050,0,"",python,selection_keyboard +3399,10031668,"genie.py",9050,0,"t",python,content +3400,10031669,"genie.py",9051,0,"",python,selection_keyboard +3401,10031757,"genie.py",9051,0,"h",python,content +3402,10031758,"genie.py",9052,0,"",python,selection_keyboard +3403,10032080,"genie.py",9052,0,"i",python,content +3404,10032081,"genie.py",9053,0,"",python,selection_keyboard +3405,10032256,"genie.py",9053,0,"s",python,content +3406,10032257,"genie.py",9054,0,"",python,selection_keyboard +3407,10032348,"genie.py",9054,0," ",python,content +3408,10032348,"genie.py",9055,0,"",python,selection_keyboard +3409,10032541,"genie.py",9055,0,"s",python,content +3410,10032542,"genie.py",9056,0,"",python,selection_keyboard +3411,10032745,"genie.py",9056,0,"t",python,content +3412,10032745,"genie.py",9057,0,"",python,selection_keyboard +3413,10032941,"genie.py",9057,0,"e",python,content +3414,10032941,"genie.py",9058,0,"",python,selection_keyboard +3415,10033005,"genie.py",9058,0,"p",python,content +3416,10033006,"genie.py",9059,0,"",python,selection_keyboard +3417,10033413,"genie.py",9059,0,".",python,content +3418,10033413,"genie.py",9060,0,"",python,selection_keyboard +3419,10043740,"genie.py",9060,0,"\n ",python,content +3420,10044002,"genie.py",9069,0,"#",python,content +3421,10044003,"genie.py",9070,0,"",python,selection_keyboard +3422,10044891,"genie.py",9070,0," ",python,content +3423,10044892,"genie.py",9071,0,"",python,selection_keyboard +3424,10056469,"genie.py",9071,0,"m",python,content +3425,10056471,"genie.py",9072,0,"",python,selection_keyboard +3426,10056809,"genie.py",9072,0,"a",python,content +3427,10056810,"genie.py",9073,0,"",python,selection_keyboard +3428,10056921,"genie.py",9073,0,"s",python,content +3429,10056922,"genie.py",9074,0,"",python,selection_keyboard +3430,10056977,"genie.py",9074,0,"k",python,content +3431,10056978,"genie.py",9075,0,"",python,selection_keyboard +3432,10057127,"genie.py",9075,0," ",python,content +3433,10057128,"genie.py",9076,0,"",python,selection_keyboard +3434,10057393,"genie.py",9076,0,"t",python,content +3435,10057394,"genie.py",9077,0,"",python,selection_keyboard +3436,10057654,"genie.py",9077,0,"h",python,content +3437,10057654,"genie.py",9078,0,"",python,selection_keyboard +3438,10057846,"genie.py",9078,0,"o",python,content +3439,10057847,"genie.py",9079,0,"",python,selection_keyboard +3440,10057980,"genie.py",9079,0,"s",python,content +3441,10057981,"genie.py",9080,0,"",python,selection_keyboard +3442,10058145,"genie.py",9080,0,"e",python,content 
+3443,10058145,"genie.py",9081,0,"",python,selection_keyboard +3444,10058281,"genie.py",9081,0," ",python,content +3445,10058282,"genie.py",9082,0,"",python,selection_keyboard +3446,10058413,"genie.py",9082,0,"t",python,content +3447,10058414,"genie.py",9083,0,"",python,selection_keyboard +3448,10058514,"genie.py",9083,0,"o",python,content +3449,10058515,"genie.py",9084,0,"",python,selection_keyboard +3450,10058550,"genie.py",9084,0,"k",python,content +3451,10058551,"genie.py",9085,0,"",python,selection_keyboard +3452,10058722,"genie.py",9085,0,"e",python,content +3453,10058723,"genie.py",9086,0,"",python,selection_keyboard +3454,10058792,"genie.py",9086,0,"n",python,content +3455,10058793,"genie.py",9087,0,"",python,selection_keyboard +3456,10058890,"genie.py",9087,0,"s",python,content +3457,10058891,"genie.py",9088,0,"",python,selection_keyboard +3458,10059756,"genie.py",9088,0," ",python,content +3459,10059757,"genie.py",9089,0,"",python,selection_keyboard +3460,10060451,"genie.py",9089,0,"i",python,content +3461,10060452,"genie.py",9090,0,"",python,selection_keyboard +3462,10060621,"genie.py",9090,0,".",python,content +3463,10060622,"genie.py",9091,0,"",python,selection_keyboard +3464,10060748,"genie.py",9091,0,"e",python,content +3465,10060749,"genie.py",9092,0,"",python,selection_keyboard +3466,10060800,"genie.py",9092,0,".",python,content +3467,10060801,"genie.py",9093,0,"",python,selection_keyboard +3468,10060900,"genie.py",9093,0," ",python,content +3469,10060901,"genie.py",9094,0,"",python,selection_keyboard +3470,10062168,"genie.py",9094,0,"t",python,content +3471,10062169,"genie.py",9095,0,"",python,selection_keyboard +3472,10062430,"genie.py",9095,0,"r",python,content +3473,10062432,"genie.py",9096,0,"",python,selection_keyboard +3474,10062772,"genie.py",9096,0,"u",python,content +3475,10062773,"genie.py",9097,0,"",python,selection_keyboard +3476,10064838,"genie.py",9097,0,"e",python,content +3477,10064839,"genie.py",9098,0,"",python,selection_keyboard +3478,10066213,"genie.py",9098,0," ",python,content +3479,10066214,"genie.py",9099,0,"",python,selection_keyboard +3480,10066400,"genie.py",9099,0,"a",python,content +3481,10066401,"genie.py",9100,0,"",python,selection_keyboard +3482,10066488,"genie.py",9100,0,"n",python,content +3483,10066489,"genie.py",9101,0,"",python,selection_keyboard +3484,10066634,"genie.py",9101,0,"d",python,content +3485,10066635,"genie.py",9102,0,"",python,selection_keyboard +3486,10066742,"genie.py",9102,0," ",python,content +3487,10066743,"genie.py",9103,0,"",python,selection_keyboard +3488,10069152,"genie.py",9103,0,"f",python,content +3489,10069153,"genie.py",9104,0,"",python,selection_keyboard +3490,10069365,"genie.py",9104,0,"l",python,content +3491,10069366,"genie.py",9105,0,"",python,selection_keyboard +3492,10069475,"genie.py",9105,0,"a",python,content +3493,10069475,"genie.py",9106,0,"",python,selection_keyboard +3494,10069546,"genie.py",9106,0,"s",python,content +3495,10069547,"genie.py",9107,0,"",python,selection_keyboard +3496,10069780,"genie.py",9107,0,"e",python,content +3497,10069781,"genie.py",9108,0,"",python,selection_keyboard +3498,10069816,"genie.py",9108,0," ",python,content +3499,10069817,"genie.py",9109,0,"",python,selection_keyboard +3500,10070010,"genie.py",9109,0,"f",python,content +3501,10070011,"genie.py",9110,0,"",python,selection_keyboard +3502,10070283,"genie.py",9109,1,"",python,content +3503,10070427,"genie.py",9108,1,"",python,content +3504,10070549,"genie.py",9107,1,"",python,content 
+3505,10070689,"genie.py",9106,1,"",python,content +3506,10070830,"genie.py",9105,1,"",python,content +3507,10070974,"genie.py",9104,1,"",python,content +3508,10071145,"genie.py",9104,0,"a",python,content +3509,10071145,"genie.py",9105,0,"",python,selection_keyboard +3510,10071257,"genie.py",9105,0,"s",python,content +3511,10071258,"genie.py",9106,0,"",python,selection_keyboard +3512,10071292,"genie.py",9106,0,"l",python,content +3513,10071293,"genie.py",9107,0,"",python,selection_keyboard +3514,10071901,"genie.py",9106,1,"",python,content +3515,10072235,"genie.py",9105,1,"",python,content +3516,10072480,"genie.py",9105,0,"l",python,content +3517,10072481,"genie.py",9106,0,"",python,selection_keyboard +3518,10072604,"genie.py",9106,0,"s",python,content +3519,10072605,"genie.py",9107,0,"",python,selection_keyboard +3520,10072785,"genie.py",9107,0,"e",python,content +3521,10072786,"genie.py",9108,0,"",python,selection_keyboard +3522,10072863,"genie.py",9108,0," ",python,content +3523,10072864,"genie.py",9109,0,"",python,selection_keyboard +3524,10073002,"genie.py",9109,0,"f",python,content +3525,10073003,"genie.py",9110,0,"",python,selection_keyboard +3526,10073086,"genie.py",9110,0,"o",python,content +3527,10073087,"genie.py",9111,0,"",python,selection_keyboard +3528,10073198,"genie.py",9111,0,"r",python,content +3529,10073199,"genie.py",9112,0,"",python,selection_keyboard +3530,10073234,"genie.py",9112,0," ",python,content +3531,10073234,"genie.py",9113,0,"",python,selection_keyboard +3532,10073377,"genie.py",9113,0,"r",python,content +3533,10073378,"genie.py",9114,0,"",python,selection_keyboard +3534,10073618,"genie.py",9114,0,"e",python,content +3535,10073620,"genie.py",9115,0,"",python,selection_keyboard +3536,10073801,"genie.py",9115,0,"s",python,content +3537,10073802,"genie.py",9116,0,"",python,selection_keyboard +3538,10074124,"genie.py",9116,0,"t",python,content +3539,10074125,"genie.py",9117,0,"",python,selection_keyboard +3540,10079264,"genie.py",9260,0,"",python,selection_mouse +3541,10079878,"genie.py",9177,0,"",python,selection_mouse +3542,10080069,"genie.py",9177,1,"<",python,selection_mouse +3543,10080087,"genie.py",9177,2,"<=",python,selection_mouse +3544,10080165,"genie.py",9177,3,"<= ",python,selection_mouse +3545,10080451,"genie.py",9177,2,"<=",python,selection_mouse +3546,10081102,"genie.py",9179,0,"",python,selection_mouse +3547,10081271,"genie.py",9179,1," ",python,selection_mouse +3548,10081634,"genie.py",9178,2,"= ",python,selection_mouse +3549,10082125,"genie.py",9178,0,"",python,selection_mouse +3550,10082126,"genie.py",9177,2,"<=",python,selection_mouse +3551,10083017,"genie.py",9178,0,"",python,selection_mouse +3552,10083227,"genie.py",9177,2,"<=",python,selection_mouse +3553,10083417,"genie.py",9118,84," idx_mask = jnp.arange(final_token_probs.shape[-1]) <= N-num_unmasked_tokens\n",python,selection_mouse +3554,10084030,"genie.py",9178,0,"",python,selection_mouse +3555,10084030,"genie.py",9177,2,"<=",python,selection_mouse +3556,10085055,"genie.py",9262,0,"",python,selection_mouse +3557,10085584,"genie.py",9263,0,"",python,selection_mouse +3558,10085792,"genie.py",9260,3,"-1)",python,selection_mouse +3559,10085812,"genie.py",9254,9," axis=-1)",python,selection_mouse +3560,10085829,"genie.py",9249,14,"robs, axis=-1)",python,selection_mouse +3561,10085866,"genie.py",9242,21,"token_probs, axis=-1)",python,selection_mouse +3562,10085901,"genie.py",9235,28,"(final_token_probs, axis=-1)",python,selection_mouse +3563,10085901,"genie.py",9232,31,"ort(final_token_probs, 
axis=-1)",python,selection_mouse +3564,10085901,"genie.py",9229,34,"rgsort(final_token_probs, axis=-1)",python,selection_mouse +3565,10085936,"genie.py",9226,37,"p.argsort(final_token_probs, axis=-1)",python,selection_mouse +3566,10085936,"genie.py",9224,39,"jnp.argsort(final_token_probs, axis=-1)",python,selection_mouse +3567,10085977,"genie.py",9223,40," jnp.argsort(final_token_probs, axis=-1)",python,selection_mouse +3568,10085978,"genie.py",9222,41,"= jnp.argsort(final_token_probs, axis=-1)",python,selection_mouse +3569,10086014,"genie.py",9221,42," = jnp.argsort(final_token_probs, axis=-1)",python,selection_mouse +3570,10086048,"genie.py",9220,43,"s = jnp.argsort(final_token_probs, axis=-1)",python,selection_mouse +3571,10086081,"genie.py",9219,44,"xs = jnp.argsort(final_token_probs, axis=-1)",python,selection_mouse +3572,10086115,"genie.py",9218,45,"dxs = jnp.argsort(final_token_probs, axis=-1)",python,selection_mouse +3573,10086147,"genie.py",9217,46,"idxs = jnp.argsort(final_token_probs, axis=-1)",python,selection_mouse +3574,10086180,"genie.py",9216,47,"_idxs = jnp.argsort(final_token_probs, axis=-1)",python,selection_mouse +3575,10086180,"genie.py",9215,48,"d_idxs = jnp.argsort(final_token_probs, axis=-1)",python,selection_mouse +3576,10086212,"genie.py",9213,50,"ted_idxs = jnp.argsort(final_token_probs, axis=-1)",python,selection_mouse +3577,10086245,"genie.py",9212,51,"rted_idxs = jnp.argsort(final_token_probs, axis=-1)",python,selection_mouse +3578,10086309,"genie.py",9211,52,"orted_idxs = jnp.argsort(final_token_probs, axis=-1)",python,selection_mouse +3579,10086346,"genie.py",9127,136,"dx_mask = jnp.arange(final_token_probs.shape[-1]) <= N-num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1)",python,selection_mouse +3580,10087539,"genie.py",9235,0,"",python,selection_mouse +3581,10088158,"genie.py",9215,0,"",python,selection_mouse +3582,10088294,"genie.py",9210,11,"sorted_idxs",python,selection_mouse +3583,10088576,"genie.py",9210,12,"sorted_idxs ",python,selection_mouse +3584,10088616,"genie.py",9210,14,"sorted_idxs = ",python,selection_mouse +3585,10088617,"genie.py",9210,17,"sorted_idxs = jnp",python,selection_mouse +3586,10088663,"genie.py",9210,18,"sorted_idxs = jnp.",python,selection_mouse +3587,10088663,"genie.py",9210,25,"sorted_idxs = jnp.argsort",python,selection_mouse +3588,10088700,"genie.py",9210,26,"sorted_idxs = jnp.argsort(",python,selection_mouse +3589,10088706,"genie.py",9210,43,"sorted_idxs = jnp.argsort(final_token_probs",python,selection_mouse +3590,10088967,"genie.py",9210,44,"sorted_idxs = jnp.argsort(final_token_probs,",python,selection_mouse +3591,10088988,"genie.py",9210,45,"sorted_idxs = jnp.argsort(final_token_probs, ",python,selection_mouse +3592,10089007,"genie.py",9210,49,"sorted_idxs = jnp.argsort(final_token_probs, axis",python,selection_mouse +3593,10089074,"genie.py",9210,50,"sorted_idxs = jnp.argsort(final_token_probs, axis=",python,selection_mouse +3594,10089110,"genie.py",9210,51,"sorted_idxs = jnp.argsort(final_token_probs, axis=-",python,selection_mouse +3595,10089146,"genie.py",9210,52,"sorted_idxs = jnp.argsort(final_token_probs, axis=-1",python,selection_mouse +3596,10089183,"genie.py",9210,53,"sorted_idxs = jnp.argsort(final_token_probs, axis=-1)",python,selection_mouse +3597,10090245,"genie.py",9179,0,"",python,selection_mouse +3598,10094486,"genie.py",9099,0,"",python,selection_mouse +3599,10095437,"genie.py",9098,0,"",python,selection_command +3600,10095994,"genie.py",9099,0,"",python,selection_command 
+3601,10096170,"genie.py",9100,0,"",python,selection_command +3602,10096331,"genie.py",9101,0,"",python,selection_command +3603,10096674,"genie.py",9102,0,"",python,selection_command +3604,10096866,"genie.py",9101,1,"",python,content +3605,10097014,"genie.py",9100,1,"",python,content +3606,10097150,"genie.py",9099,1,"",python,content +3607,10097292,"genie.py",9098,1,"",python,content +3608,10098477,"genie.py",9098,0,";",python,content +3609,10098478,"genie.py",9099,0,"",python,selection_keyboard +3610,10100076,"genie.py",9114,0,"",python,selection_mouse +3611,10101972,"genie.py",9392,0,"",python,selection_mouse +3612,10103081,"genie.py",9148,0,"",python,selection_mouse +3613,10108910,"genie.py",9103,0,"",python,selection_mouse +3614,10109034,"genie.py",9100,5,"false",python,selection_mouse +3615,10109235,"genie.py",9100,6,"false ",python,selection_mouse +3616,10109254,"genie.py",9100,9,"false for",python,selection_mouse +3617,10109289,"genie.py",9100,14,"false for rest",python,selection_mouse +3618,10109750,"genie.py",9113,0,"",python,selection_mouse +3619,10110697,"genie.py",9128,0,"",python,selection_mouse +3620,10110849,"genie.py",9123,8,"idx_mask",python,selection_mouse +3621,10146248,"genie.py",9107,0,"",python,selection_mouse +3622,10147342,"genie.py",9161,0,"",python,selection_mouse +3623,10149795,"genie.py",9127,0,"",python,selection_mouse +3624,10149911,"genie.py",9123,8,"idx_mask",python,selection_mouse +3625,10216580,"genie.py",9211,0,"",python,selection_mouse +3626,10217561,"genie.py",9210,0,"",python,selection_command +3627,10217690,"genie.py",9126,0,"",python,selection_command +3628,10217866,"genie.py",9072,0,"",python,selection_command +3629,10218175,"genie.py",8982,0,"",python,selection_command +3630,10218339,"genie.py",8902,0,"",python,selection_command +3631,10218575,"genie.py",8970,0,"\n ",python,content +3632,10218991,"genie.py",8979,0,"j",python,content +3633,10218991,"genie.py",8980,0,"",python,selection_keyboard +3634,10219085,"genie.py",8980,0,"a",python,content +3635,10219086,"genie.py",8981,0,"",python,selection_keyboard +3636,10219260,"genie.py",8981,0,"x",python,content +3637,10219261,"genie.py",8982,0,"",python,selection_keyboard +3638,10219366,"genie.py",8982,0,".",python,content +3639,10219367,"genie.py",8983,0,"",python,selection_keyboard +3640,10219544,"genie.py",8983,0,"d",python,content +3641,10219545,"genie.py",8984,0,"",python,selection_keyboard +3642,10219712,"genie.py",8984,0,"e",python,content +3643,10219714,"genie.py",8985,0,"",python,selection_keyboard +3644,10219824,"genie.py",8985,0,"b",python,content +3645,10219825,"genie.py",8986,0,"",python,selection_keyboard +3646,10219950,"genie.py",8986,0,"u",python,content +3647,10219952,"genie.py",8987,0,"",python,selection_keyboard +3648,10220007,"genie.py",8987,0,"g",python,content +3649,10220008,"genie.py",8988,0,"",python,selection_keyboard +3650,10220245,"genie.py",8988,0,".",python,content +3651,10220245,"genie.py",8989,0,"",python,selection_keyboard +3652,10221537,"genie.py",8989,0,"breakpoint",python,content +3653,10222288,"genie.py",8999,0,"()",python,content +3654,10222289,"genie.py",9000,0,"",python,selection_keyboard +3655,10222376,"genie.py",9000,1,")",python,content +3656,10222377,"genie.py",9001,0,"",python,selection_keyboard +3657,10223176,"genie.py",9000,0,"",python,selection_command +3658,10224035,"genie.py",9031,0,"",python,selection_command +3659,10224201,"genie.py",9121,0,"",python,selection_command +3660,10224325,"genie.py",9175,0,"",python,selection_command 
+3661,10224587,"genie.py",9229,0,"\n jax.debug.breakpoint()",python,content +3662,10224588,"genie.py",9238,0,"",python,selection_command +3663,10224785,"genie.py",9269,0,"",python,selection_command +3664,10225097,"genie.py",9322,0,"\n jax.debug.breakpoint()",python,content +3665,10225098,"genie.py",9331,0,"",python,selection_command +3666,10225280,"genie.py",9362,0,"",python,selection_command +3667,10225561,"genie.py",9431,0,"\n jax.debug.breakpoint()",python,content +3668,10225561,"genie.py",9440,0,"",python,selection_command +3669,10226074,"genie.py",9471,0,"",python,selection_command +3670,10226288,"genie.py",9515,0,"\n jax.debug.breakpoint()",python,content +3671,10226317,"genie.py",9524,0,"",python,selection_command +3672,10229405,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +3673,10229496,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir \\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
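The four `jax.debug.breakpoint()` calls inserted above are the JAX-native way to pause inside jit-compiled code, where a plain `pdb.set_trace()` would only see abstract tracers. Each call drops into the `(jdb)` prompt that appears further down, where `l` lists the surrounding source, a bare expression prints its runtime array value, and `n`/`c` step or continue. A minimal sketch:

```python
import jax
import jax.numpy as jnp

@jax.jit
def masked_update(mask, sampled, current):
    out = jnp.where(mask, sampled, current)
    # Opens the (jdb) prompt at runtime; `mask`, `sampled`, `out` can be
    # printed there as concrete arrays even though this function is jitted.
    jax.debug.breakpoint()
    return out

masked_update(jnp.array([True, False]), jnp.array([9, 9]), jnp.array([0, 1]))
```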
+3674,10229624,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +3675,10229784,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +3676,10233363,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +3677,10235626,"TERMINAL",0,0,"2025-07-31 15:20:38.172686: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3678,10247301,"TERMINAL",0,0,"2025-07-31 15:20:49.866903: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3679,10259078,"TERMINAL",0,0,"2025-07-31 15:21:01.666599: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3680,10263379,"TERMINAL",0,0,"2025-07-31 15:21:05.873163: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3681,10267477,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/100000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 119000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/119000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/117000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 118000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/118000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/020000/metrics/metrics not found.\r\n",,terminal_output +3682,10323263,"TERMINAL",0,0,"2025-07-31 15:22:05.907561: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:22:05.907815: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:22:05.908176: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:22:05.908195: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3683,10339155,"TERMINAL",0,0,"Entering jdb:\r\n",,terminal_output +3684,11095031,"TERMINAL",0,0,"l",,terminal_output +3685,11095161,"TERMINAL",0,0,"\r\n(jdb) > /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py(252)\r\n # Update masked tokens only\r\n token_idxs = jnp.where(mask, sampled_token_idxs, token_idxs)\r\n \r\n # --- Update mask ---\r\n # number of tokens that should be unmasked after this step\r\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\r\n-> jax.debug.breakpoint()\r\n # N-num_unmasked_tokens is how many tokens should be left masked after this step.\r\n # mask those tokens i.e. true; false for rest\r\n idx_mask = jnp.arange(final_token_probs.shape[-1]) <= N-num_unmasked_tokens\r\n jax.debug.breakpoint()\r\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1)\r\n",,terminal_output +3686,11097954,"TERMINAL",0,0,"n",,terminal_output +3687,11098123,"TERMINAL",0,0,"u",,terminal_output +3688,11098308,"TERMINAL",0,0,"m",,terminal_output +3689,11098825,"TERMINAL",0,0,"_",,terminal_output +3690,11100005,"TERMINAL",0,0,"u",,terminal_output +3691,11100177,"TERMINAL",0,0,"n",,terminal_output +3692,11100636,"TERMINAL",0,0,"m",,terminal_output +3693,11100865,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +3694,11101072,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3695,11101202,"TERMINAL",0,0,"[?25lk[?25h",,terminal_output +3696,11101334,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3697,11101423,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3698,11101627,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +3699,11101906,"TERMINAL",0,0,"[?25lt[?25h[?25lo[?25h",,terminal_output +3700,11101978,"TERMINAL",0,0,"[?25lk[?25h",,terminal_output +3701,11102140,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3702,11102232,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +3703,11102348,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3704,11102454,"TERMINAL",0,0,"\r\n(jdb) Array(2, dtype=int32)\r\n",,terminal_output +3705,11108996,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +3706,11109082,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +3707,11109322,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +3708,11109390,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +3709,11109531,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3710,11109597,"TERMINAL",0,0,"[?25lk[?25h",,terminal_output +3711,11109754,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3712,11109824,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3713,11110018,"TERMINAL",0,0,"[?25l:[?25h",,terminal_output +3714,11110803,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +3715,11111032,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +3716,11111520,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +3717,11112032,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +3718,11112285,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +3719,11112516,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +3720,11112603,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +3721,11112859,"TERMINAL",0,0,"\r\n(jdb) Array(0.9980267, dtype=float32)\r\n",,terminal_output 
+3722,11119759,"TERMINAL",0,0,"[?25lN[?25h",,terminal_output +3723,11119972,"TERMINAL",0,0,"\r\n(jdb) 920\r\n",,terminal_output +3724,11127405,"TERMINAL",0,0,"[?25lN * (1.0 - unmasked_ratio)[?25h",,terminal_output +3725,11127709,"TERMINAL",0,0,"\r\n",,terminal_output +3726,11127842,"TERMINAL",0,0,"(jdb) Array(1.8154097, dtype=float32)\r\n",,terminal_output +3727,11145829,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +3728,11146039,"TERMINAL",0,0,"\r\n",,terminal_output +3729,11146482,"TERMINAL",0,0,"(jdb) Entering jdb:\r\n",,terminal_output +3730,11150170,"genie.py",0,0,"",python,tab +3731,11152082,"genie.py",0,0,"",python,tab +3732,11157730,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +3733,11158115,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3734,11158599,"TERMINAL",0,0,"[?25lx[?25h",,terminal_output +3735,11158809,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +3736,11159128,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +3737,11159178,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +3738,11159256,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3739,11159321,"TERMINAL",0,0,"[?25lk[?25h",,terminal_output +3740,11159616,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +3741,11159704,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3742,11159969,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +3743,11160075,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +3744,11160180,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +3745,11160266,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3746,11160384,"TERMINAL",0,0,"\r\n(jdb) (920,)\r\n",,terminal_output +3747,11184261,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +3748,11186179,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +3749,11186347,"TERMINAL",0,0,"\r\n",,terminal_output +3750,11186832,"TERMINAL",0,0,"(jdb) Entering jdb:\r\n",,terminal_output +3751,11188824,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3752,11188945,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +3753,11189386,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +3754,11189700,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +3755,11189887,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3756,11190035,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3757,11190206,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +3758,11190539,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +3759,11190668,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3760,11190978,"TERMINAL",0,0,"[?25lx[?25h",,terminal_output +3761,11191293,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3762,11191538,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +3763,11191710,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3764,11192169,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +3765,11192218,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +3766,11192423,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +3767,11192551,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3768,11193154,"TERMINAL",0,0,"\r\n(jdb) (4, 16, 920)\r\n",,terminal_output +3769,11241308,"genie.py",0,0,"",python,tab +3770,11241309,"genie.py",9272,0,"",python,selection_mouse +3771,11241986,"genie.py",9161,0,"",python,selection_mouse +3772,11242103,"genie.py",9154,8,"idx_mask",python,selection_mouse +3773,11243220,"genie.py",9260,0,"",python,selection_mouse +3774,11243228,"genie.py",9259,0,"",python,selection_command +3775,11243796,"genie.py",9203,0,"",python,selection_mouse +3776,11246210,"genie.py",9203,0,"*",python,content +3777,11246212,"genie.py",9204,0,"",python,selection_keyboard +3778,11247320,"genie.py",9180,0,"",python,selection_mouse 
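The jdb probes above pin down the unmasking schedule at this MaskGIT step: with N = 920 tokens per frame and unmasked_ratio ≈ 0.9980267, N * (1.0 - unmasked_ratio) ≈ 1.8154, which rounds to 2 tokens to unmask. A minimal offline recheck, assuming only jax.numpy and the values the debugger printed:

    import jax.numpy as jnp

    N = 920                     # tokens per frame; matches idx_mask.shape == (920,)
    unmasked_ratio = 0.9980267  # schedule value printed by jdb above

    # Number of tokens that should be unmasked after this step.
    num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)
    print(N * (1.0 - unmasked_ratio))  # ~1.8154, printed as 1.8154097 in the session
    print(num_unmasked_tokens)         # 2, matching Array(2, dtype=int32)

    # N - num_unmasked_tokens tokens stay masked; True marks them.
    idx_mask = jnp.arange(N) <= N - num_unmasked_tokens
    print(idx_mask.shape)              # (920,)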
+3779,11247442,"genie.py",9176,17,"final_token_probs",python,selection_mouse +3780,11247755,"genie.py",9176,85,"final_token_probs.shape[-1]*) <= N-num_unmasked_tokens\n jax.debug.breakpoint()",python,selection_mouse +3781,11248270,"genie.py",9176,26,"final_token_probs.shape[-1",python,selection_mouse +3782,11248701,"genie.py",9176,27,"final_token_probs.shape[-1]",python,selection_mouse +3783,11249423,"genie.py",9203,0,"",python,selection_mouse +3784,11249895,"genie.py",9202,1,"]",python,selection_mouse +3785,11249936,"genie.py",9201,2,"1]",python,selection_mouse +3786,11249937,"genie.py",9200,3,"-1]",python,selection_mouse +3787,11249972,"genie.py",9198,5,"e[-1]",python,selection_mouse +3788,11249973,"genie.py",9197,6,"pe[-1]",python,selection_mouse +3789,11250009,"genie.py",9196,7,"ape[-1]",python,selection_mouse +3790,11250044,"genie.py",9195,8,"hape[-1]",python,selection_mouse +3791,11250045,"genie.py",9194,9,"shape[-1]",python,selection_mouse +3792,11250082,"genie.py",9192,11,"s.shape[-1]",python,selection_mouse +3793,11250082,"genie.py",9191,12,"bs.shape[-1]",python,selection_mouse +3794,11250083,"genie.py",9190,13,"obs.shape[-1]",python,selection_mouse +3795,11250119,"genie.py",9189,14,"robs.shape[-1]",python,selection_mouse +3796,11250120,"genie.py",9188,15,"probs.shape[-1]",python,selection_mouse +3797,11250154,"genie.py",9187,16,"_probs.shape[-1]",python,selection_mouse +3798,11250154,"genie.py",9186,17,"n_probs.shape[-1]",python,selection_mouse +3799,11250155,"genie.py",9185,18,"en_probs.shape[-1]",python,selection_mouse +3800,11250191,"genie.py",9184,19,"ken_probs.shape[-1]",python,selection_mouse +3801,11250191,"genie.py",9183,20,"oken_probs.shape[-1]",python,selection_mouse +3802,11250230,"genie.py",9182,21,"token_probs.shape[-1]",python,selection_mouse +3803,11250266,"genie.py",9181,22,"_token_probs.shape[-1]",python,selection_mouse +3804,11250302,"genie.py",9180,23,"l_token_probs.shape[-1]",python,selection_mouse +3805,11250340,"genie.py",9179,24,"al_token_probs.shape[-1]",python,selection_mouse +3806,11250341,"genie.py",9178,25,"nal_token_probs.shape[-1]",python,selection_mouse +3807,11250379,"genie.py",9177,26,"inal_token_probs.shape[-1]",python,selection_mouse +3808,11250456,"genie.py",9176,27,"final_token_probs.shape[-1]",python,selection_mouse +3809,11250535,"genie.py",9175,28,"(final_token_probs.shape[-1]",python,selection_mouse +3810,11250884,"genie.py",9176,27,"final_token_probs.shape[-1]",python,selection_mouse +3811,11258836,"genie.py",9202,0,"",python,selection_mouse +3812,11259884,"genie.py",9201,1,"",python,content +3813,11260024,"genie.py",9200,1,"",python,content +3814,11260873,"genie.py",9200,0,"1",python,content +3815,11260874,"genie.py",9201,0,"",python,selection_keyboard +3816,11262371,"genie.py",9202,0,"",python,selection_command +3817,11263162,"genie.py",9203,0,"",python,selection_command +3818,11263953,"genie.py",9203,0,"N",python,content +3819,11263954,"genie.py",9204,0,"",python,selection_keyboard +3820,11264969,"genie.py",9145,0,"",python,selection_mouse +3821,11265570,"genie.py",9261,0,"",python,selection_mouse +3822,11266069,"genie.py",9205,0,"",python,selection_mouse +3823,11267130,"genie.py",9172,0,"",python,selection_mouse +3824,11267286,"genie.py",9169,6,"arange",python,selection_mouse +3825,11267502,"genie.py",9169,24,"arange(final_token_probs",python,selection_mouse +3826,11267668,"genie.py",9169,30,"arange(final_token_probs.shape",python,selection_mouse 
+3827,11267769,"genie.py",9169,31,"arange(final_token_probs.shape[",python,selection_mouse +3828,11267787,"genie.py",9169,32,"arange(final_token_probs.shape[1",python,selection_mouse +3829,11267809,"genie.py",9169,33,"arange(final_token_probs.shape[1]",python,selection_mouse +3830,11267823,"genie.py",9169,34,"arange(final_token_probs.shape[1]*",python,selection_mouse +3831,11267867,"genie.py",9169,35,"arange(final_token_probs.shape[1]*N",python,selection_mouse +3832,11267949,"genie.py",9169,36,"arange(final_token_probs.shape[1]*N)",python,selection_mouse +3833,11268470,"genie.py",9205,0,"",python,selection_mouse +3834,11268658,"genie.py",9205,1," ",python,selection_mouse +3835,11268806,"genie.py",9204,2,") ",python,selection_mouse +3836,11268821,"genie.py",9202,4,"*N) ",python,selection_mouse +3837,11268834,"genie.py",9194,12,"shape[1]*N) ",python,selection_mouse +3838,11268868,"genie.py",9176,30,"final_token_probs.shape[1]*N) ",python,selection_mouse +3839,11269035,"genie.py",9175,31,"(final_token_probs.shape[1]*N) ",python,selection_mouse +3840,11269053,"genie.py",9169,37,"arange(final_token_probs.shape[1]*N) ",python,selection_mouse +3841,11269219,"genie.py",9168,38,".arange(final_token_probs.shape[1]*N) ",python,selection_mouse +3842,11269302,"genie.py",9165,41,"jnp.arange(final_token_probs.shape[1]*N) ",python,selection_mouse +3843,11269601,"genie.py",9167,0,"",python,selection_mouse +3844,11269601,"genie.py",9165,3,"jnp",python,selection_mouse +3845,11269779,"genie.py",9165,10,"jnp.arange",python,selection_mouse +3846,11269812,"genie.py",9165,11,"jnp.arange(",python,selection_mouse +3847,11269846,"genie.py",9165,28,"jnp.arange(final_token_probs",python,selection_mouse +3848,11269920,"genie.py",9165,29,"jnp.arange(final_token_probs.",python,selection_mouse +3849,11269936,"genie.py",9165,34,"jnp.arange(final_token_probs.shape",python,selection_mouse +3850,11270017,"genie.py",9165,35,"jnp.arange(final_token_probs.shape[",python,selection_mouse +3851,11270018,"genie.py",9165,36,"jnp.arange(final_token_probs.shape[1",python,selection_mouse +3852,11270054,"genie.py",9165,37,"jnp.arange(final_token_probs.shape[1]",python,selection_mouse +3853,11270154,"genie.py",9165,38,"jnp.arange(final_token_probs.shape[1]*",python,selection_mouse +3854,11270234,"genie.py",9165,39,"jnp.arange(final_token_probs.shape[1]*N",python,selection_mouse +3855,11270337,"genie.py",9165,40,"jnp.arange(final_token_probs.shape[1]*N)",python,selection_mouse +3856,11270964,"genie.py",9205,0,"",python,selection_mouse +3857,11271168,"genie.py",9205,1," ",python,selection_mouse +3858,11271359,"genie.py",9204,2,") ",python,selection_mouse +3859,11271385,"genie.py",9203,3,"N) ",python,selection_mouse +3860,11271420,"genie.py",9202,4,"*N) ",python,selection_mouse +3861,11271421,"genie.py",9200,6,"1]*N) ",python,selection_mouse +3862,11271456,"genie.py",9199,7,"[1]*N) ",python,selection_mouse +3863,11271457,"genie.py",9194,12,"shape[1]*N) ",python,selection_mouse +3864,11271533,"genie.py",9193,13,".shape[1]*N) ",python,selection_mouse +3865,11271539,"genie.py",9176,30,"final_token_probs.shape[1]*N) ",python,selection_mouse +3866,11271789,"genie.py",9175,31,"(final_token_probs.shape[1]*N) ",python,selection_mouse +3867,11271810,"genie.py",9169,37,"arange(final_token_probs.shape[1]*N) ",python,selection_mouse +3868,11271917,"genie.py",9168,38,".arange(final_token_probs.shape[1]*N) ",python,selection_mouse +3869,11271956,"genie.py",9165,41,"jnp.arange(final_token_probs.shape[1]*N) ",python,selection_mouse 
+3870,11272379,"genie.py",9166,0,"",python,selection_mouse +3871,11272379,"genie.py",9165,3,"jnp",python,selection_mouse +3872,11272576,"genie.py",9165,4,"jnp.",python,selection_mouse +3873,11272616,"genie.py",9165,10,"jnp.arange",python,selection_mouse +3874,11272656,"genie.py",9165,11,"jnp.arange(",python,selection_mouse +3875,11272656,"genie.py",9165,28,"jnp.arange(final_token_probs",python,selection_mouse +3876,11272739,"genie.py",9165,29,"jnp.arange(final_token_probs.",python,selection_mouse +3877,11272753,"genie.py",9165,34,"jnp.arange(final_token_probs.shape",python,selection_mouse +3878,11272837,"genie.py",9165,35,"jnp.arange(final_token_probs.shape[",python,selection_mouse +3879,11272862,"genie.py",9165,36,"jnp.arange(final_token_probs.shape[1",python,selection_mouse +3880,11272875,"genie.py",9165,37,"jnp.arange(final_token_probs.shape[1]",python,selection_mouse +3881,11272932,"genie.py",9165,38,"jnp.arange(final_token_probs.shape[1]*",python,selection_mouse +3882,11273119,"genie.py",9165,39,"jnp.arange(final_token_probs.shape[1]*N",python,selection_mouse +3883,11274249,"genie.py",9204,0,"",python,selection_mouse +3884,11274946,"genie.py",9261,0,"",python,selection_mouse +3885,11275589,"genie.py",9354,0,"",python,selection_mouse +3886,11276170,"genie.py",9304,0,"",python,selection_mouse +3887,11276351,"genie.py",9296,17,"final_token_probs",python,selection_mouse +3888,11278269,"genie.py",9463,0,"",python,selection_mouse +3889,11279323,"genie.py",9167,0,"",python,selection_mouse +3890,11279934,"genie.py",9285,0,"",python,selection_mouse +3891,11280507,"genie.py",9275,0,"",python,selection_mouse +3892,11280660,"genie.py",9270,11,"sorted_idxs",python,selection_mouse +3893,11281487,"genie.py",9354,0,"",python,selection_mouse +3894,11295348,"genie.py",9463,0,"",python,selection_mouse +3895,11296221,"genie.py",9354,0,"",python,selection_mouse +3896,11296901,"genie.py",9463,0,"",python,selection_mouse +3897,11299671,"genie.py",9463,0,"\n ",python,content +3898,11306824,"genie.py",9472,0,"m",python,content +3899,11306826,"genie.py",9473,0,"",python,selection_keyboard +3900,11306898,"genie.py",9473,0,"a",python,content +3901,11306899,"genie.py",9474,0,"",python,selection_keyboard +3902,11307008,"genie.py",9474,0,"s",python,content +3903,11307009,"genie.py",9475,0,"",python,selection_keyboard +3904,11307041,"genie.py",9475,0,"k",python,content +3905,11307042,"genie.py",9476,0,"",python,selection_keyboard +3906,11307486,"genie.py",9476,0,"_",python,content +3907,11307487,"genie.py",9477,0,"",python,selection_keyboard +3908,11308158,"genie.py",9477,0,"f",python,content +3909,11308159,"genie.py",9478,0,"",python,selection_keyboard +3910,11308162,"genie.py",9478,0,"l",python,content +3911,11308163,"genie.py",9479,0,"",python,selection_keyboard +3912,11308837,"genie.py",9479,0,"a",python,content +3913,11308838,"genie.py",9480,0,"",python,selection_keyboard +3914,11308942,"genie.py",9480,0,"o",python,content +3915,11308944,"genie.py",9481,0,"",python,selection_keyboard +3916,11309440,"genie.py",9481,0,"t",python,content +3917,11309441,"genie.py",9482,0,"",python,selection_keyboard +3918,11309702,"genie.py",9481,1,"",python,content +3919,11309878,"genie.py",9480,1,"",python,content +3920,11309953,"genie.py",9480,0,"t",python,content +3921,11309954,"genie.py",9481,0,"",python,selection_keyboard +3922,11310136,"genie.py",9481,0," ",python,content +3923,11310138,"genie.py",9482,0,"",python,selection_keyboard +3924,11310272,"genie.py",9482,0,"=",python,content 
+3925,11310274,"genie.py",9483,0,"",python,selection_keyboard +3926,11310403,"genie.py",9483,0," ",python,content +3927,11310404,"genie.py",9484,0,"",python,selection_keyboard +3928,11310694,"genie.py",9484,0,"e",python,content +3929,11310694,"genie.py",9485,0,"",python,selection_keyboard +3930,11310774,"genie.py",9485,0,"i",python,content +3931,11310775,"genie.py",9486,0,"",python,selection_keyboard +3932,11311438,"genie.py",9486,0,"n",python,content +3933,11311439,"genie.py",9487,0,"",python,selection_keyboard +3934,11312022,"genie.py",9487,0,"o",python,content +3935,11312023,"genie.py",9488,0,"",python,selection_keyboard +3936,11312139,"genie.py",9488,0,"p",python,content +3937,11312140,"genie.py",9489,0,"",python,selection_keyboard +3938,11312290,"genie.py",9489,0,"s",python,content +3939,11312291,"genie.py",9490,0,"",python,selection_keyboard +3940,11312409,"genie.py",9490,0,".",python,content +3941,11312410,"genie.py",9491,0,"",python,selection_keyboard +3942,11314092,"genie.py",9491,0,"r",python,content +3943,11314094,"genie.py",9492,0,"",python,selection_keyboard +3944,11314290,"genie.py",9492,0,"e",python,content +3945,11314291,"genie.py",9493,0,"",python,selection_keyboard +3946,11314474,"genie.py",9493,0,"a",python,content +3947,11314474,"genie.py",9494,0,"",python,selection_keyboard +3948,11314703,"genie.py",9494,0,"d",python,content +3949,11314704,"genie.py",9495,0,"",python,selection_keyboard +3950,11315319,"genie.py",9494,1,"",python,content +3951,11315822,"genie.py",9494,0,"r",python,content +3952,11315823,"genie.py",9495,0,"",python,selection_keyboard +3953,11316066,"genie.py",9495,0,"a",python,content +3954,11316067,"genie.py",9496,0,"",python,selection_keyboard +3955,11316179,"genie.py",9496,0,"n",python,content +3956,11316180,"genie.py",9497,0,"",python,selection_keyboard +3957,11316346,"genie.py",9497,0,"g",python,content +3958,11316347,"genie.py",9498,0,"",python,selection_keyboard +3959,11316523,"genie.py",9498,0,"e",python,content +3960,11316524,"genie.py",9499,0,"",python,selection_keyboard +3961,11317371,"genie.py",9498,0,"",python,selection_command +3962,11318162,"genie.py",0,0,"",python,selection_command +3963,11318900,"genie.py",29,0,"",python,selection_command +3964,11319058,"genie.py",30,0,"",python,selection_command +3965,11319214,"genie.py",43,0,"",python,selection_command +3966,11319401,"genie.py",54,0,"",python,selection_command +3967,11319546,"genie.py",78,0,"",python,selection_command +3968,11319682,"genie.py",102,0,"",python,selection_command +3969,11319806,"genie.py",151,0,"",python,selection_command +3970,11321172,"genie.py",181,0,"\n",python,content +3971,11321444,"genie.py",182,0,"i",python,content +3972,11321445,"genie.py",183,0,"",python,selection_keyboard +3973,11321513,"genie.py",183,0,"m",python,content +3974,11321515,"genie.py",184,0,"",python,selection_keyboard +3975,11321709,"genie.py",184,0,"p",python,content +3976,11321710,"genie.py",185,0,"",python,selection_keyboard +3977,11321753,"genie.py",185,0,"o",python,content +3978,11321754,"genie.py",186,0,"",python,selection_keyboard +3979,11322188,"genie.py",182,4,"import",python,content +3980,11322504,"genie.py",188,0," ",python,content +3981,11322506,"genie.py",189,0,"",python,selection_keyboard +3982,11322645,"genie.py",189,0,"e",python,content +3983,11322646,"genie.py",190,0,"",python,selection_keyboard +3984,11322729,"genie.py",190,0,"i",python,content +3985,11322730,"genie.py",191,0,"",python,selection_keyboard +3986,11322858,"genie.py",191,0,"n",python,content 
+3987,11322859,"genie.py",192,0,"",python,selection_keyboard +3988,11323575,"genie.py",192,0,"o",python,content +3989,11323576,"genie.py",193,0,"",python,selection_keyboard +3990,11323697,"genie.py",193,0,"p",python,content +3991,11323700,"genie.py",194,0,"",python,selection_keyboard +3992,11323756,"genie.py",194,0,"s",python,content +3993,11323757,"genie.py",195,0,"",python,selection_keyboard +3994,11324263,"genie.py",194,0,"",python,selection_command +3995,11326153,"genie.py",9498,0,"",python,selection_command +3996,11327095,"genie.py",9513,0,"",python,selection_command +3997,11328285,"genie.py",9513,0,"()",python,content +3998,11328286,"genie.py",9514,0,"",python,selection_keyboard +3999,11330423,"genie.py",9514,0,"a",python,content +4000,11330425,"genie.py",9515,0,"",python,selection_keyboard +4001,11330816,"genie.py",9514,1,"",python,content +4002,11331004,"genie.py",9514,0,"m",python,content +4003,11331004,"genie.py",9515,0,"",python,selection_keyboard +4004,11331075,"genie.py",9515,0,"a",python,content +4005,11331076,"genie.py",9516,0,"",python,selection_keyboard +4006,11331116,"genie.py",9516,0,"s",python,content +4007,11331117,"genie.py",9517,0,"",python,selection_keyboard +4008,11331218,"genie.py",9517,0,"k",python,content +4009,11331219,"genie.py",9518,0,"",python,selection_keyboard +4010,11331399,"genie.py",9518,0,",",python,content +4011,11331400,"genie.py",9519,0,"",python,selection_keyboard +4012,11331511,"genie.py",9519,0," ",python,content +4013,11331512,"genie.py",9520,0,"",python,selection_keyboard +4014,11331845,"genie.py",9520,0,"""""",python,content +4015,11331846,"genie.py",9521,0,"",python,selection_keyboard +4016,11334788,"genie.py",9521,0,"b",python,content +4017,11334789,"genie.py",9522,0,"",python,selection_keyboard +4018,11335758,"genie.py",9522,0," ",python,content +4019,11335759,"genie.py",9523,0,"",python,selection_keyboard +4020,11337289,"genie.py",9523,0,"t",python,content +4021,11337290,"genie.py",9524,0,"",python,selection_keyboard +4022,11337436,"genie.py",9524,0," ",python,content +4023,11337437,"genie.py",9525,0,"",python,selection_keyboard +4024,11339589,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +4025,11339698,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +4026,11339750,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +4027,11339845,"TERMINAL",0,0,"[?25lk[?25h",,terminal_output +4028,11340075,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +4029,11340785,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +4030,11340840,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +4031,11340949,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +4032,11341028,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +4033,11341153,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +4034,11341284,"TERMINAL",0,0,"\r\n(jdb) (4, 16, 920)\r\n",,terminal_output +4035,11346840,"genie.py",0,0,"",python,tab +4036,11348526,"genie.py",9525,0,"n",python,content +4037,11348527,"genie.py",9526,0,"",python,selection_keyboard +4038,11349445,"genie.py",9526,0," ",python,content +4039,11349445,"genie.py",9527,0,"",python,selection_keyboard +4040,11349760,"genie.py",9527,0,"-",python,content +4041,11349761,"genie.py",9528,0,"",python,selection_keyboard +4042,11350071,"genie.py",9528,0,">",python,content +4043,11350072,"genie.py",9529,0,"",python,selection_keyboard +4044,11350348,"genie.py",9529,0," ",python,content +4045,11350348,"genie.py",9530,0,"",python,selection_keyboard +4046,11352903,"genie.py",9530,0,"()",python,content +4047,11352904,"genie.py",9531,0,"",python,selection_keyboard 
+4048,11353359,"genie.py",9531,0,"b",python,content +4049,11353360,"genie.py",9532,0,"",python,selection_keyboard +4050,11354765,"genie.py",9531,1,"",python,content +4051,11355103,"genie.py",9530,2,"",python,content +4052,11356043,"genie.py",9530,0,"n",python,content +4053,11356043,"genie.py",9531,0,"",python,selection_keyboard +4054,11356500,"genie.py",9530,1,"",python,content +4055,11356645,"genie.py",9530,0,"b",python,content +4056,11356646,"genie.py",9531,0,"",python,selection_keyboard +4057,11356858,"genie.py",9531,0," ",python,content +4058,11356859,"genie.py",9532,0,"",python,selection_keyboard +4059,11357311,"genie.py",9532,0,"()",python,content +4060,11357312,"genie.py",9533,0,"",python,selection_keyboard +4061,11358391,"genie.py",9533,0,"t",python,content +4062,11358392,"genie.py",9534,0,"",python,selection_keyboard +4063,11358473,"genie.py",9534,0," ",python,content +4064,11358474,"genie.py",9535,0,"",python,selection_keyboard +4065,11358643,"genie.py",9535,0,"n",python,content +4066,11358644,"genie.py",9536,0,"",python,selection_keyboard +4067,11359333,"genie.py",9535,0,"",python,selection_command +4068,11365801,"genie.py",9539,0,"\n mask_flat = einops.rearange(mask, ""b t n -> b (t n)"")",python,content +4069,11365815,"genie.py",9548,0,"",python,selection_command +4070,11366180,"genie.py",9549,0,"",python,selection_command +4071,11366647,"genie.py",9550,0,"",python,selection_command +4072,11366756,"genie.py",9551,0,"",python,selection_command +4073,11366932,"genie.py",9552,0,"",python,selection_command +4074,11367092,"genie.py",9553,0,"",python,selection_command +4075,11368168,"genie.py",9552,0,"",python,selection_command +4076,11368323,"genie.py",9551,0,"",python,selection_command +4077,11368455,"genie.py",9550,0,"",python,selection_command +4078,11368599,"genie.py",9549,0,"",python,selection_command +4079,11368751,"genie.py",9548,0,"",python,selection_command +4080,11369198,"genie.py",9548,1,"",python,content +4081,11369364,"genie.py",9548,1,"",python,content +4082,11369543,"genie.py",9548,1,"",python,content +4083,11369940,"genie.py",9548,1,"",python,content +4084,11374227,"genie.py",9548,0,"i",python,content +4085,11374228,"genie.py",9549,0,"",python,selection_keyboard +4086,11375280,"genie.py",9549,0,"d",python,content +4087,11375280,"genie.py",9550,0,"",python,selection_keyboard +4088,11375708,"genie.py",9550,0,"x",python,content +4089,11375709,"genie.py",9551,0,"",python,selection_keyboard +4090,11375878,"genie.py",9551,0,"s",python,content +4091,11375879,"genie.py",9552,0,"",python,selection_keyboard +4092,11376388,"genie.py",9551,0,"",python,selection_command +4093,11377127,"genie.py",9576,0,"",python,selection_mouse +4094,11377271,"genie.py",9576,4,"mask",python,selection_mouse +4095,11378323,"genie.py",9576,4,"",python,content +4096,11378658,"genie.py",9576,0,"s",python,content +4097,11378659,"genie.py",9577,0,"",python,selection_keyboard +4098,11380121,"genie.py",9577,0,"o",python,content +4099,11380122,"genie.py",9578,0,"",python,selection_keyboard +4100,11381069,"genie.py",9576,2,"sorted_idxs",python,content +4101,11381549,"genie.py",9586,0,"",python,selection_command +4102,11384182,"genie.py",9736,0,"",python,selection_mouse +4103,11385151,"genie.py",9644,0,"",python,selection_mouse +4104,11386242,"genie.py",9643,0,"",python,selection_command +4105,11388054,"genie.py",9647,0,"_",python,content +4106,11388055,"genie.py",9648,0,"",python,selection_keyboard +4107,11388874,"genie.py",9648,0,"f",python,content 
+4108,11388875,"genie.py",9649,0,"",python,selection_keyboard +4109,11388952,"genie.py",9649,0,"l",python,content +4110,11388952,"genie.py",9650,0,"",python,selection_keyboard +4111,11389597,"genie.py",9643,7,"mask_flat",python,content +4112,11390938,"genie.py",9664,1,"",python,content +4113,11391089,"genie.py",9663,1,"",python,content +4114,11391223,"genie.py",9662,1,"",python,content +4115,11391379,"genie.py",9661,1,"",python,content +4116,11391529,"genie.py",9654,7,"",python,content +4117,11391951,"genie.py",9654,0,"i",python,content +4118,11391951,"genie.py",9655,0,"",python,selection_keyboard +4119,11392041,"genie.py",9655,0,"d",python,content +4120,11392041,"genie.py",9656,0,"",python,selection_keyboard +4121,11393781,"genie.py",9654,2,"idxs_flat",python,content +4122,11394691,"genie.py",9688,0,"",python,selection_mouse +4123,11395000,"genie.py",9687,0,"",python,selection_command +4124,11395226,"genie.py",9629,0,"",python,selection_mouse +4125,11395864,"genie.py",9625,0,"",python,selection_mouse +4126,11397597,"genie.py",9695,0,"",python,selection_mouse +4127,11397633,"genie.py",9694,0,"",python,selection_command +4128,11398296,"genie.py",9582,0,"",python,selection_mouse +4129,11399125,"genie.py",9649,0,"",python,selection_mouse +4130,11399795,"genie.py",9659,0,"",python,selection_mouse +4131,11418086,"genie.py",9275,0,"",python,selection_mouse +4132,11418090,"genie.py",9274,0,"",python,selection_command +4133,11418742,"genie.py",9307,0,"",python,selection_mouse +4134,11419859,"genie.py",9291,0,"",python,selection_mouse +4135,11419988,"genie.py",9284,11,"sorted_idxs",python,selection_mouse +4136,11420612,"genie.py",9305,0,"",python,selection_mouse +4137,11420758,"genie.py",9302,7,"argsort",python,selection_mouse +4138,11421547,"genie.py",9311,0,"",python,selection_mouse +4139,11421724,"genie.py",9310,17,"final_token_probs",python,selection_mouse +4140,11422468,"genie.py",9196,0,"",python,selection_mouse +4141,11422614,"genie.py",9190,17,"final_token_probs",python,selection_mouse +4142,11425076,"genie.py",9275,0,"",python,selection_mouse +4143,11425080,"genie.py",9274,0,"",python,selection_command +4144,11425867,"genie.py",9319,0,"",python,selection_mouse +4145,11426608,"genie.py",9275,0,"",python,selection_mouse +4146,11426609,"genie.py",9274,0,"",python,selection_command +4147,11430141,"TERMINAL",0,0,"[?25lf[?25h",,terminal_output +4148,11430290,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +4149,11430354,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +4150,11430444,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +4151,11430725,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +4152,11431052,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +4153,11431746,"TERMINAL",0,0,"[?25lt[?25h[?25lo[?25h",,terminal_output +4154,11432016,"TERMINAL",0,0,"[?25lk[?25h",,terminal_output +4155,11432857,"TERMINAL",0,0,"[?25le[?25h[?25ln[?25h",,terminal_output +4156,11433181,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +4157,11433509,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +4158,11433750,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +4159,11434316,"TERMINAL",0,0,"[?25lr[?25h[?25lo[?25h",,terminal_output +4160,11434609,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +4161,11434696,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +4162,11434853,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +4163,11434981,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +4164,11435141,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +4165,11435262,"TERMINAL",0,0,"[?25la[?25h",,terminal_output 
+4166,11435350,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +4167,11435505,"TERMINAL",0,0,"[?25le[?25h\r\n(jdb) (4, 16, 920)\r\n",,terminal_output +4168,11436939,"genie.py",0,0,"",python,tab +4169,11436940,"genie.py",9315,0,"",python,selection_mouse +4170,11437729,"genie.py",9310,17,"final_token_probs",python,selection_mouse +4171,11439706,"genie.py",9583,0,"",python,selection_mouse +4172,11442073,"genie.py",9201,0,"",python,selection_mouse +4173,11442910,"genie.py",9275,0,"",python,selection_mouse +4174,11442910,"genie.py",9274,0,"",python,selection_command +4175,11443831,"genie.py",9275,0,"\n idxs_flat = einops.rearange(sorted_idxs, ""b t n -> b (t n)"")",python,content +4176,11443852,"genie.py",9284,0,"",python,selection_command +4177,11450023,"genie.py",9386,0,"",python,selection_mouse +4178,11450204,"genie.py",9379,17,"final_token_probs",python,selection_mouse +4179,11452403,"genie.py",9293,0,"",python,selection_mouse +4180,11453298,"genie.py",9284,9,"",python,content +4181,11453664,"genie.py",9284,0,"final_token_probs",python,content +4182,11454365,"genie.py",9301,0,"_",python,content +4183,11454366,"genie.py",9302,0,"",python,selection_keyboard +4184,11454714,"genie.py",9302,0,"f",python,content +4185,11454715,"genie.py",9303,0,"",python,selection_keyboard +4186,11454865,"genie.py",9303,0,"l",python,content +4187,11454866,"genie.py",9304,0,"",python,selection_keyboard +4188,11454904,"genie.py",9304,0,"a",python,content +4189,11454905,"genie.py",9305,0,"",python,selection_keyboard +4190,11455122,"genie.py",9305,0,"t",python,content +4191,11455123,"genie.py",9306,0,"",python,selection_keyboard +4192,11456140,"genie.py",9330,0,"",python,selection_mouse +4193,11456337,"genie.py",9325,11,"sorted_idxs",python,selection_mouse +4194,11456648,"genie.py",9325,11,"",python,content +4195,11456888,"genie.py",9325,0,"final_token_probs",python,content +4196,11458024,"genie.py",9412,0,"",python,selection_mouse +4197,11458676,"genie.py",9456,0,"",python,selection_mouse +4198,11459627,"genie.py",9416,0,"",python,selection_mouse +4199,11460433,"genie.py",9415,0,"",python,selection_mouse +4200,11461220,"genie.py",9415,0,"_",python,content +4201,11461221,"genie.py",9416,0,"",python,selection_keyboard +4202,11461402,"genie.py",9416,0,"f",python,content +4203,11461403,"genie.py",9417,0,"",python,selection_keyboard +4204,11461494,"genie.py",9417,0,"l",python,content +4205,11461495,"genie.py",9418,0,"",python,selection_keyboard +4206,11461572,"genie.py",9418,0,"a",python,content +4207,11461574,"genie.py",9419,0,"",python,selection_keyboard +4208,11461775,"genie.py",9419,0,"t",python,content +4209,11461776,"genie.py",9420,0,"",python,selection_keyboard +4210,11462202,"genie.py",9419,0,"",python,selection_command +4211,11473939,"genie.py",9273,0,"",python,selection_mouse +4212,11474487,"genie.py",9300,0,"",python,selection_mouse +4213,11474632,"genie.py",9284,22,"final_token_probs_flat",python,selection_mouse +4214,11475175,"genie.py",9378,0,"",python,selection_mouse +4215,11475312,"genie.py",9372,11,"sorted_idxs",python,selection_mouse +4216,11476701,"genie.py",9582,0,"",python,selection_mouse +4217,11476820,"genie.py",9579,9,"mask_flat",python,selection_mouse +4218,11478189,"genie.py",9668,0,"",python,selection_mouse +4219,11478940,"genie.py",9676,0,"",python,selection_mouse +4220,11479812,"genie.py",9383,0,"",python,selection_mouse +4221,11480760,"genie.py",9677,0,"",python,selection_mouse +4222,11482886,"genie.py",9633,69,"",python,content +4223,11482899,"genie.py",9641,0,"",python,selection_command 
+4224,11483401,"genie.py",9642,0,"",python,selection_command +4225,11483942,"genie.py",9688,0,"",python,selection_command +4226,11484466,"genie.py",9687,0,"",python,selection_command +4227,11484834,"genie.py",9678,9,"",python,content +4228,11485237,"genie.py",9678,0,"s",python,content +4229,11485238,"genie.py",9679,0,"",python,selection_keyboard +4230,11485306,"genie.py",9679,0,"o",python,content +4231,11485307,"genie.py",9680,0,"",python,selection_keyboard +4232,11486486,"genie.py",9678,2,"sorted_idxs",python,content +4233,11486894,"genie.py",9688,0,"",python,selection_command +4234,11489310,"genie.py",9690,0,"\n ",python,content +4235,11491704,"genie.py",9691,8,"",python,content +4236,11491874,"genie.py",9633,0,"",python,selection_command +4237,11492106,"genie.py",9571,0,"",python,selection_command +4238,11492494,"genie.py",9633,0,"",python,selection_command +4239,11492887,"genie.py",9691,0,"",python,selection_command +4240,11494171,"genie.py",9691,0,"\n mask_flat = einops.rearange(mask, ""b t n -> b (t n)"")",python,content +4241,11494184,"genie.py",9700,0,"",python,selection_command +4242,11494889,"genie.py",9691,0,"",python,selection_command +4243,11495141,"genie.py",9691,1,"",python,content +4244,11495142,"genie.py",9699,0,"",python,selection_command +4245,11495494,"genie.py",9700,0,"",python,selection_command +4246,11495676,"genie.py",9701,0,"",python,selection_command +4247,11495831,"genie.py",9702,0,"",python,selection_command +4248,11495972,"genie.py",9703,0,"",python,selection_command +4249,11496279,"genie.py",9702,0,"",python,selection_command +4250,11496397,"genie.py",9701,0,"",python,selection_command +4251,11496567,"genie.py",9700,0,"",python,selection_command +4252,11496684,"genie.py",9699,0,"",python,selection_command +4253,11496807,"genie.py",9698,0,"",python,selection_command +4254,11497198,"genie.py",9699,0,"",python,selection_command +4255,11497456,"genie.py",9699,10,"",python,content +4256,11497738,"genie.py",9699,0,"n",python,content +4257,11497739,"genie.py",9700,0,"",python,selection_keyboard +4258,11497878,"genie.py",9700,0,"e",python,content +4259,11497879,"genie.py",9701,0,"",python,selection_keyboard +4260,11498108,"genie.py",9701,0,"w",python,content +4261,11498109,"genie.py",9702,0,"",python,selection_keyboard +4262,11499380,"genie.py",9699,3,"new_mask",python,content +4263,11500066,"genie.py",9707,0," ",python,content +4264,11500067,"genie.py",9708,0,"",python,selection_keyboard +4265,11502525,"genie.py",9726,0,"n",python,content +4266,11502526,"genie.py",9727,0,"",python,selection_keyboard +4267,11502634,"genie.py",9727,0,"e",python,content +4268,11502635,"genie.py",9728,0,"",python,selection_keyboard +4269,11502803,"genie.py",9728,0,"w",python,content +4270,11502804,"genie.py",9729,0,"",python,selection_keyboard +4271,11503665,"genie.py",9729,0,"_",python,content +4272,11503666,"genie.py",9730,0,"",python,selection_keyboard +4273,11505605,"genie.py",9739,0,"",python,selection_command +4274,11506292,"genie.py",9739,0,"(",python,content +4275,11506293,"genie.py",9740,0,"",python,selection_keyboard +4276,11507062,"genie.py",9742,0,"",python,selection_command +4277,11507880,"genie.py",9743,0,")",python,content +4278,11507882,"genie.py",9744,0,"",python,selection_keyboard +4279,11509324,"genie.py",9751,1,"",python,content +4280,11509761,"genie.py",9750,1,"",python,content +4281,11510482,"genie.py",9753,0,"",python,selection_command +4282,11510677,"genie.py",9752,1,"",python,content +4283,11510914,"genie.py",9751,0,"",python,selection_command 
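The edits above flatten the per-frame token grids before ranking: mask, sorted_idxs, and final_token_probs all carry shape (b, t, n) = (4, 16, 920) per the jdb probes, and idx_mask is rewritten to range over shape[1] * N, i.e. the flattened (t n) axis of length 14720. A sketch of the intended reshapes, assuming those shapes (the session spells the function rearange; the correct name, used here, is rearrange):

    import einops
    import jax.numpy as jnp

    b, t, n = 4, 16, 920
    mask = jnp.zeros((b, t, n), dtype=bool)
    sorted_idxs = jnp.zeros((b, t, n), dtype=jnp.int32)
    final_token_probs = jnp.zeros((b, t, n))

    # "b t n -> b (t n)": merge the time and token axes into one of length t * n.
    mask_flat = einops.rearrange(mask, "b t n -> b (t n)")
    idxs_flat = einops.rearrange(sorted_idxs, "b t n -> b (t n)")
    final_token_probs_flat = einops.rearrange(final_token_probs, "b t n -> b (t n)")
    print(mask_flat.shape)  # (4, 14720) == (b, t * n)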
+4284,11511110,"genie.py",9750,0,"",python,selection_command +4285,11511782,"genie.py",9750,0,"t",python,content +4286,11511783,"genie.py",9751,0,"",python,selection_keyboard +4287,11513317,"genie.py",9649,0,"",python,selection_mouse +4288,11514177,"genie.py",9649,0,"_",python,content +4289,11514178,"genie.py",9650,0,"",python,selection_keyboard +4290,11514577,"genie.py",9650,0,"f",python,content +4291,11514578,"genie.py",9651,0,"",python,selection_keyboard +4292,11514694,"genie.py",9651,0,"l",python,content +4293,11514694,"genie.py",9652,0,"",python,selection_keyboard +4294,11514832,"genie.py",9652,0,"a",python,content +4295,11514832,"genie.py",9653,0,"",python,selection_keyboard +4296,11515012,"genie.py",9653,0,"t",python,content +4297,11515012,"genie.py",9654,0,"",python,selection_keyboard +4298,11516137,"genie.py",9739,0,"",python,selection_mouse +4299,11516906,"genie.py",9739,0,"_",python,content +4300,11516907,"genie.py",9740,0,"",python,selection_keyboard +4301,11517116,"genie.py",9740,0,"f",python,content +4302,11517117,"genie.py",9741,0,"",python,selection_keyboard +4303,11517266,"genie.py",9741,0,"l",python,content +4304,11517267,"genie.py",9742,0,"",python,selection_keyboard +4305,11517372,"genie.py",9742,0,"a",python,content +4306,11517373,"genie.py",9743,0,"",python,selection_keyboard +4307,11517506,"genie.py",9743,0,"t",python,content +4308,11517507,"genie.py",9744,0,"",python,selection_keyboard +4309,11517838,"genie.py",9743,0,"",python,selection_command +4310,11518492,"genie.py",9659,0,"",python,selection_mouse +4311,11519185,"genie.py",9721,0,"",python,selection_mouse +4312,11519698,"genie.py",9788,0,"",python,selection_mouse +4313,11521433,"genie.py",9563,0,"",python,selection_mouse +4314,11522816,"genie.py",9485,0,"",python,selection_command +4315,11522986,"genie.py",9454,0,"",python,selection_command +4316,11524100,"genie.py",9387,0,"",python,selection_command +4317,11524258,"genie.py",9299,0,"",python,selection_command +4318,11524402,"genie.py",9268,0,"",python,selection_command +4319,11524763,"genie.py",9245,31,"",python,content +4320,11524786,"genie.py",9253,0,"",python,selection_command +4321,11524837,"genie.py",9168,0,"",python,selection_command +4322,11525001,"genie.py",9114,0,"",python,selection_command +4323,11525167,"genie.py",9024,0,"",python,selection_command +4324,11526440,"genie.py",8993,0,"",python,selection_command +4325,11526755,"genie.py",8985,31,"",python,content +4326,11526794,"genie.py",8993,0,"",python,selection_command +4327,11526829,"genie.py",9083,0,"",python,selection_command +4328,11526974,"genie.py",9137,0,"",python,selection_command +4329,11527140,"genie.py",9222,0,"",python,selection_command +4330,11527414,"genie.py",9310,0,"",python,selection_command +4331,11527561,"genie.py",9377,0,"",python,selection_command +4332,11527900,"genie.py",9369,31,"",python,content +4333,11527937,"genie.py",9377,0,"",python,selection_command +4334,11528041,"genie.py",9455,0,"",python,selection_command +4335,11528395,"genie.py",9447,31,"",python,content +4336,11528398,"genie.py",9455,0,"",python,selection_command +4337,11528545,"genie.py",9517,0,"",python,selection_command +4338,11528753,"genie.py",9580,0,"",python,selection_command +4339,11528893,"genie.py",9650,0,"",python,selection_command +4340,11531158,"TERMINAL",0,0,"^[[A",,terminal_output +4341,11531702,"TERMINAL",0,0,"^[[B",,terminal_output +4342,11532815,"TERMINAL",0,0,"^Csrun: interrupt (one more within 1 sec to abort)\r\nsrun: StepId=3387190.14 task 0: running\r\n",,terminal_output 
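The SystemExit traceback that follows looks alarming but is expected: jax.debug.breakpoint() runs the CLI debugger inside a host callback, so quitting it (or interrupting the run, as with the ^C above) raises SystemExit inside that callback, which XLA then reports as an INTERNAL CpuCallback error. A hypothetical minimal repro, for context only (interactive; typing q at the (jdb) prompt reproduces the same error shape):

    import jax
    import jax.numpy as jnp

    @jax.jit
    def f(x):
        y = x * 2
        jax.debug.breakpoint()  # opens (jdb); quitting calls sys.exit(0) in the callback
        return y

    f(jnp.arange(4))  # quitting jdb here surfaces as "CpuCallback error ... SystemExit: 0"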
+4343,11533367,"TERMINAL",0,0,"(jdb) *** SyntaxError: invalid syntax\r\nERROR:2025-07-31 15:42:15,943:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nE0731 15:42:16.002002 2629845 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: INTERNAL: CpuCallback error calling 
callback: Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 172, in \r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 139, in _autoreg_sample\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/traceback_util.py"", line 182, in reraise_with_filtered_traceback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 292, in cache_miss\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 153, in _python_pjit_helper\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 1877, in _pjit_call_impl_python\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/profiler.py"", line 354, in wrapper\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/interpreters/pxla.py"", line 1297, in __call__\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/callback.py"", line 782, in _wrapped_callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 202, in _callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 99, in debug_callback_impl\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 162, in run\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 145, in cmdloop\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\nSystemExit: 0\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 172, in \r\n vid = _autoreg_sample(rng, video_batch, action_batch)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 139, in _autoreg_sample\r\n generated_vid = sampling_fn(\r\njaxlib._jax.XlaRuntimeError: INTERNAL: CpuCallback error calling callback: Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 172, in \r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 139, in _autoreg_sample\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/traceback_util.py"", line 182, in reraise_with_filtered_traceback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 292, in cache_miss\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 153, in _python_pjit_helper\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 1877, in _pjit_call_impl_python\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/profiler.py"", line 354, in wrapper\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/interpreters/pxla.py"", line 1297, in __call__\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/callback.py"", line 782, in _wrapped_callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 202, in _callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 99, in debug_callback_impl\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 162, in run\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 145, in cmdloop\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\nSystemExit: 0\r\n",,terminal_output +4344,11534790,"TERMINAL",0,0,"(jdb) 
",,terminal_output +4345,11534958,"TERMINAL",0,0,"srun: error: hkn0602: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +4346,11539115,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +4347,11543146,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir \\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output +4348,11543309,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output 
+4349,11543442,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +4350,11547012,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +4351,11549037,"TERMINAL",0,0,"2025-07-31 15:42:31.655205: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4352,11560492,"TERMINAL",0,0,"2025-07-31 15:42:43.138571: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4353,11571962,"TERMINAL",0,0,"2025-07-31 15:42:54.573519: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4354,11576351,"TERMINAL",0,0,"2025-07-31 15:42:59.004783: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4355,11580254,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 119000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/119000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 118000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/118000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/100000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/117000/metrics/metrics not found.\r\n",,terminal_output +4356,11632492,"TERMINAL",0,0,"jax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 172, in \r\n vid = _autoreg_sample(rng, video_batch, action_batch)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 139, in _autoreg_sample\r\n generated_vid = sampling_fn(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 131, in _sampling_wrapper\r\n return module.sample(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 192, in sample\r\n final_carry, _ = jax.lax.scan(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 184, in generation_step_fn\r\n final_carry_maskgit, _ = loop_fn(init_carry_maskgit, jnp.arange(steps))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/axes_scan.py"", line 165, in scan_fn\r\n _, out_pvals, _ = pe.trace_to_jaxpr_nounits(f_flat, in_pvals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/axes_scan.py"", line 135, in body_fn\r\n broadcast_out, c, ys = fn(broadcast_in, c, *xs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 256, in __call__\r\n final_token_probs_flat = einops.rearange(final_token_probs, ""b t n -> b (t n)"")\r\nAttributeError: module 'einops' has no attribute 'rearange'. Did you mean: 'rearrange'?\r\n",,terminal_output +4357,11633830,"TERMINAL",0,0,"srun: error: hkn0602: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +4358,11646989,"genie.py",0,0,"",python,tab +4359,11646990,"genie.py",9911,0,"",python,selection_mouse +4360,11648405,"genie.py",9602,0,"",python,selection_mouse +4361,11650016,"genie.py",9602,0,"r",python,content +4362,11650017,"genie.py",9603,0,"",python,selection_keyboard +4363,11651353,"genie.py",9478,0,"",python,selection_mouse +4364,11651841,"genie.py",9478,0,"r",python,content +4365,11651842,"genie.py",9479,0,"",python,selection_keyboard +4366,11652801,"genie.py",9257,0,"",python,selection_mouse +4367,11653154,"genie.py",9257,0,"r",python,content +4368,11653155,"genie.py",9258,0,"",python,selection_keyboard +4369,11659286,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +4370,11659956,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py 
\\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir \\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\nGpuFreq=control_disabled\r\n",,terminal_output +4371,11661272,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +4372,11663306,"TERMINAL",0,0,"2025-07-31 15:44:25.957670: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4373,11674872,"TERMINAL",0,0,"2025-07-31 15:44:37.464309: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4374,11686958,"TERMINAL",0,0,"2025-07-31 15:44:49.479695: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4375,11691181,"TERMINAL",0,0,"2025-07-31 15:44:53.801338: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4376,11694955,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 119000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/119000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/100000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/117000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 118000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/118000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/020000/metrics/metrics not found.\r\n",,terminal_output +4377,11736357,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/einops/einops.py"", line 531, in reduce\r\n recipe = _prepare_transformation_recipe(pattern, reduction, axes_names=tuple(axes_lengths), ndim=len(shape))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/einops/einops.py"", line 406, in 
_prepare_transformation_recipe\r\n raise EinopsError(""Could not infer sizes for {}"".format(unknown))\r\neinops.EinopsError: Could not infer sizes for {'t', 'n'}\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 172, in \r\n vid = _autoreg_sample(rng, video_batch, action_batch)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 139, in _autoreg_sample\r\n generated_vid = sampling_fn(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 131, in _sampling_wrapper\r\n return module.sample(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 192, in sample\r\n final_carry, _ = jax.lax.scan(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 184, in generation_step_fn\r\n final_carry_maskgit, _ = loop_fn(init_carry_maskgit, jnp.arange(steps))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/axes_scan.py"", line 165, in scan_fn\r\n _, out_pvals, _ = pe.trace_to_jaxpr_nounits(f_flat, in_pvals)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/axes_scan.py"", line 135, in body_fn\r\n broadcast_out, c, ys = fn(broadcast_in, c, *xs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 261, in __call__\r\n new_mask = einops.rearrange(new_mask_flat, ""b (t n) -> b t n"")\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/einops/einops.py"", line 600, in rearrange\r\n return reduce(tensor, pattern, reduction=""rearrange"", **axes_lengths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/einops/einops.py"", line 542, in reduce\r\n raise EinopsError(message + ""\n {}"".format(e))\r\neinops.EinopsError: Error while processing rearrange-reduction pattern ""b (t n) -> b t n"".\r\n Input tensor shape: (4, 14720). 
Additional info: {}.\r\n Could not infer sizes for {'t', 'n'}\r\n",,terminal_output +4378,11737549,"TERMINAL",0,0,"srun: error: hkn0602: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +4379,11750092,"genie.py",0,0,"",python,tab +4380,11750093,"genie.py",9643,0,"",python,selection_mouse +4381,11780005,"genie.py",9675,0,"",python,selection_mouse +4382,11781296,"genie.py",9643,0,"",python,selection_mouse +4383,11791469,"genie.py",9643,0,",",python,content +4384,11791470,"genie.py",9644,0,"",python,selection_keyboard +4385,11791506,"genie.py",9644,0," ",python,content +4386,11791507,"genie.py",9645,0,"",python,selection_keyboard +4387,11792070,"genie.py",9645,0,"N",python,content +4388,11792071,"genie.py",9646,0,"",python,selection_keyboard +4389,11793002,"genie.py",9645,1,"",python,content +4390,11793264,"genie.py",9645,0,"n",python,content +4391,11793265,"genie.py",9646,0,"",python,selection_keyboard +4392,11794738,"genie.py",9646,0,"=",python,content +4393,11794739,"genie.py",9647,0,"",python,selection_keyboard +4394,11795049,"genie.py",9647,0,"N",python,content +4395,11795050,"genie.py",9648,0,"",python,selection_keyboard +4396,11797701,"genie.py",9638,0,"",python,selection_mouse +4397,11798433,"genie.py",9641,0,"",python,selection_mouse +4398,11798610,"genie.py",9641,1,"n",python,selection_mouse +4399,11799494,"genie.py",9631,0,"",python,selection_mouse +4400,11799658,"genie.py",9631,1,"n",python,selection_mouse +4401,11802302,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +4402,11803553,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir \\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
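The run above failed with `EinopsError: Could not infer sizes for {'t', 'n'}` on the pattern `"b (t n) -> b t n"`, and the genie.py edits that follow append `, n=N` to the call. When a merged axis is split, einops needs at least one component size in order to solve for the rest. A minimal reproduction, using the input shape (4, 14720) from the traceback; with seq_len 16, n = 14720 / 16 = 920 is an inferred illustration, not a value taken from the code:

```python
import numpy as np
import einops

B, T, N = 4, 16, 920          # 16 * 920 = 14720, the flat size in the traceback
flat = np.zeros((B, T * N))

# Fails: two unknowns on the right-hand side of the split.
# einops.rearrange(flat, "b (t n) -> b t n")
#   -> EinopsError: Could not infer sizes for {'t', 'n'}

# Works: fixing either axis lets einops infer the other,
# mirroring the `, n=N` added to genie.py above.
out = einops.rearrange(flat, "b (t n) -> b t n", n=N)
assert out.shape == (B, T, N)
```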
+4403,11803641,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +4404,11803745,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +4405,11805960,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +4406,11808053,"TERMINAL",0,0,"2025-07-31 15:46:50.687187: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4407,11819569,"TERMINAL",0,0,"2025-07-31 15:47:02.184229: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4408,11830826,"TERMINAL",0,0,"2025-07-31 15:47:13.478171: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4409,11835112,"TERMINAL",0,0,"2025-07-31 15:47:17.761357: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4410,11838923,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 119000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/119000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/100000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/117000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 118000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/118000/metrics/metrics not found.\r\n",,terminal_output +4411,11882997,"TERMINAL",0,0,"2025-07-31 15:48:05.617176: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:48:05.617423: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:48:05.617774: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:48:05.617792: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4412,11898244,"TERMINAL",0,0,"Entering jdb:\r\n",,terminal_output +4413,11919801,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +4414,11919969,"TERMINAL",0,0,"\r\n(jdb) > /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py(262)\r\n final_token_probs_flat = einops.rearrange(final_token_probs, ""b t n -> b (t n)"")\r\n sorted_idxs = jnp.argsort(final_token_probs_flat, axis=-1)\r\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\r\n mask_flat = einops.rearrange(mask, ""b t n -> b (t n)"")\r\n new_mask_flat = mask_update_fn(mask_flat, sorted_idxs)\r\n new_mask = einops.rearrange(new_mask_flat, ""b (t n) -> b t n"", n=N)\r\n-> jax.debug.breakpoint()\r\n \r\n new_carry = (rng, token_idxs, new_mask, action_tokens)\r\n return new_carry, None\r\n \r\n",,terminal_output +4415,11925260,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +4416,11925721,"TERMINAL",0,0,"\r\n",,terminal_output +4417,11926446,"TERMINAL",0,0,"(jdb) Entering jdb:\r\n",,terminal_output +4418,11929845,"TERMINAL",0,0,"ERROR:2025-07-31 15:48:52,452:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nE0731 15:48:52.469726 2647302 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: INTERNAL: CpuCallback error calling callback: Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 172, in \r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 139, in _autoreg_sample\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/traceback_util.py"", line 182, in reraise_with_filtered_traceback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 292, in cache_miss\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 153, in _python_pjit_helper\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 1877, in _pjit_call_impl_python\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/profiler.py"", line 354, in wrapper\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/interpreters/pxla.py"", line 1297, in __call__\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/callback.py"", line 782, in _wrapped_callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 202, in _callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 99, in debug_callback_impl\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 162, in run\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 145, in cmdloop\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\nSystemExit: 0\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 172, in \r\n vid = _autoreg_sample(rng, video_batch, action_batch)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 139, in _autoreg_sample\r\n generated_vid = sampling_fn(\r\njaxlib._jax.XlaRuntimeError: INTERNAL: CpuCallback error calling callback: Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 172, in \r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 139, in _autoreg_sample\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/traceback_util.py"", line 182, in reraise_with_filtered_traceback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 292, in cache_miss\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 153, in _python_pjit_helper\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 1877, in _pjit_call_impl_python\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/profiler.py"", line 354, in wrapper\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/interpreters/pxla.py"", line 1297, in __call__\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/callback.py"", line 782, in _wrapped_callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 202, in _callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 99, in debug_callback_impl\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 162, in run\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 145, in cmdloop\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\nSystemExit: 0\r\n",,terminal_output +4419,11930877,"genie.py",0,0,"",python,tab +4420,11930879,"genie.py",9668,0,"",python,selection_mouse +4421,11930956,"genie.py",9667,0,"",python,selection_command +4422,11931234,"TERMINAL",0,0,"(jdb) ",,terminal_output +4423,11931457,"TERMINAL",0,0,"srun: error: hkn0602: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +4424,11931629,"genie.py",9650,31,"",python,content +4425,11933699,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +4426,11935164,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=42 \\r\n --batch_size=4 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir \\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
+4427,11935302,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +4428,11935437,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +4429,11936713,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +4430,11938815,"TERMINAL",0,0,"2025-07-31 15:49:01.468701: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4431,11950267,"TERMINAL",0,0,"2025-07-31 15:49:12.815934: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4432,11961701,"TERMINAL",0,0,"2025-07-31 15:49:24.329456: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4433,11965799,"TERMINAL",0,0,"2025-07-31 15:49:28.390386: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4434,11969552,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 119000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/119000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/100000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/117000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 118000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/118000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/020000/metrics/metrics not found.\r\n",,terminal_output +4435,12013114,"TERMINAL",0,0,"2025-07-31 15:50:15.757625: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:50:15.757889: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:50:15.758257: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:50:15.758277: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4436,12062458,"TERMINAL",0,0,"SSIM: 0.5539136528968811\r\n",,terminal_output +4437,12063878,"TERMINAL",0,0,"]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +4438,12153335,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab +4439,12155056,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",688,0,"",shellscript,selection_mouse +4440,12156037,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",687,1,"",shellscript,content +4441,12156194,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",686,1,"",shellscript,content +4442,12156589,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",686,0,"6",shellscript,content +4443,12156589,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",687,0,"",shellscript,selection_keyboard +4444,12156900,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",687,0,"9",shellscript,content +4445,12156901,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",688,0,"",shellscript,selection_keyboard +4446,12157396,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",704,0,"",shellscript,selection_command +4447,12157595,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",725,0,"",shellscript,selection_command +4448,12157796,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",750,0,"",shellscript,selection_command +4449,12158180,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",725,0,"",shellscript,selection_command +4450,12158507,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",750,0,"",shellscript,selection_command +4451,12160224,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",755,1,"",shellscript,content +4452,12160284,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",755,0,"1",shellscript,content +4453,12160285,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",756,0,"",shellscript,selection_keyboard +4454,12160375,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",756,0,"2",shellscript,content +4455,12160376,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",757,0,"",shellscript,selection_keyboard +4456,12160888,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",732,0,"",shellscript,selection_command +4457,12161309,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",757,0,"",shellscript,selection_command +4458,12163916,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +4459,12164739,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p 
$CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=69 \\r\n --batch_size=4 \\r\n --maskgit_steps=25 \\r\n --start_frame=12 \\r\n --data_dir $array_records_dir \\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output +4460,12164856,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +4461,12164987,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +4462,12166080,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +4463,12168220,"TERMINAL",0,0,"2025-07-31 15:52:50.875543: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4464,12179717,"TERMINAL",0,0,"2025-07-31 15:53:02.350881: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4465,12191265,"TERMINAL",0,0,"2025-07-31 15:53:13.917951: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4466,12195587,"TERMINAL",0,0,"2025-07-31 15:53:18.192141: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4467,12199331,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 119000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/119000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/100000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/117000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 118000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/118000/metrics/metrics not found.\r\n",,terminal_output +4468,12229143,"genie.py",0,0,"",python,tab +4469,12229144,"genie.py",9539,0,"",python,selection_mouse +4470,12242817,"TERMINAL",0,0,"2025-07-31 
15:54:05.472281: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:54:05.472557: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:54:05.472910: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:54:05.472941: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4471,12271053,"TERMINAL",0,0,"SSIM: 0.854654848575592\r\n",,terminal_output +4472,12272439,"TERMINAL",0,0,"]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +4473,12291761,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab +4474,12298142,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",756,1,"",shellscript,content +4475,12298356,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",755,1,"",shellscript,content +4476,12299276,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",755,0,"0",shellscript,content +4477,12299277,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",756,0,"",shellscript,selection_keyboard +4478,12300312,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",731,0,"",shellscript,selection_command +4479,12300516,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",710,0,"",shellscript,selection_command +4480,12300768,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",709,1,"",shellscript,content +4481,12301117,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",708,1,"",shellscript,content +4482,12301223,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",708,0,"1",shellscript,content +4483,12301224,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",709,0,"",shellscript,selection_keyboard +4484,12301304,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",709,0,"2",shellscript,content +4485,12301305,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",710,0,"",shellscript,selection_keyboard +4486,12301593,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",710,0," ",shellscript,content +4487,12301594,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",711,0,"",shellscript,selection_keyboard +4488,12304969,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +4489,12305235,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source 
.venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=69 \\r\n --batch_size=12 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir \\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output +4490,12305364,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +4491,12305446,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +4492,12306588,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +4493,12308666,"TERMINAL",0,0,"2025-07-31 15:55:11.319391: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4494,12324346,"genie.py",0,0,"",python,tab +4495,12324924,"genie.py",0,0,"",python,tab +4496,12333660,"TERMINAL",0,0,"2025-07-31 15:55:36.308053: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4497,12338723,"TERMINAL",0,0,"2025-07-31 15:55:41.336580: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4498,12342635,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/117000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/100000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 119000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/119000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 118000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/118000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838/020000/metrics/metrics not found.\r\n",,terminal_output +4499,12387674,"genie.py",5298,0,"",python,selection_mouse +4500,12387941,"TERMINAL",0,0,"2025-07-31 
15:56:30.556912: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:56:30.557183: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:56:30.557576: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 15:56:30.557607: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4501,12389786,"genie.py",4920,0,"",python,selection_mouse +4502,12389953,"genie.py",4917,7,"seq_len",python,selection_mouse +4503,12390817,"genie.py",4907,0,"",python,selection_mouse +4504,12390952,"genie.py",4901,9,"pad_shape",python,selection_mouse +4505,12394829,"genie.py",5839,0,"",python,selection_mouse +4506,12394960,"genie.py",5835,6,"step_t",python,selection_mouse +4507,12404896,"genie.py",5756,0,"",python,selection_mouse +4508,12405033,"genie.py",5755,3,"and",python,selection_mouse +4509,12405374,"genie.py",5755,4,"and ",python,selection_mouse +4510,12405388,"genie.py",5755,10,"and future",python,selection_mouse +4511,12405474,"genie.py",5727,31,"\n # Mask current and",python,selection_mouse +4512,12405806,"genie.py",5755,17,"and future frames",python,selection_mouse +4513,12407049,"genie.py",5755,17,"",python,content +4514,12409029,"genie.py",5755,0,"f",python,content +4515,12409031,"genie.py",5756,0,"",python,selection_keyboard +4516,12409263,"genie.py",5756,0,"r",python,content +4517,12409264,"genie.py",5757,0,"",python,selection_keyboard +4518,12409477,"genie.py",5757,0,"a",python,content +4519,12409478,"genie.py",5758,0,"",python,selection_keyboard +4520,12409545,"genie.py",5758,0,"m",python,content +4521,12409546,"genie.py",5759,0,"",python,selection_keyboard +4522,12409640,"genie.py",5759,0,"e",python,content +4523,12409641,"genie.py",5760,0,"",python,selection_keyboard +4524,12410166,"genie.py",5759,0,"",python,selection_command +4525,12410286,"genie.py",5760,0,"",python,selection_command +4526,12410807,"genie.py",5761,0,"",python,selection_command +4527,12410814,"genie.py",5762,0,"",python,selection_command +4528,12410852,"genie.py",5763,0,"",python,selection_command +4529,12410883,"genie.py",5764,0,"",python,selection_command +4530,12410927,"genie.py",5765,0,"",python,selection_command +4531,12410961,"genie.py",5766,0,"",python,selection_command +4532,12410968,"genie.py",5767,0,"",python,selection_command +4533,12410996,"genie.py",5768,0,"",python,selection_command +4534,12411032,"genie.py",5769,0,"",python,selection_command +4535,12411081,"genie.py",5770,0,"",python,selection_command +4536,12412305,"genie.py",5770,1,"=",python,content +4537,12413100,"genie.py",5901,0,"",python,selection_mouse 
+4538,12413673,"genie.py",5780,0,"",python,selection_mouse +4539,12413674,"genie.py",5779,0,"",python,selection_command +4540,12464885,"genie.py",6288,0,"",python,selection_mouse +4541,12465042,"genie.py",6282,18,"init_carry_maskgit",python,selection_mouse +4542,12466037,"genie.py",6314,0,"",python,selection_mouse +4543,12466202,"genie.py",6313,5,"steps",python,selection_mouse +4544,12466748,"genie.py",6311,0,"",python,selection_mouse +4545,12466871,"genie.py",6306,6,"arange",python,selection_mouse +4546,12468819,"genie.py",6376,0,"",python,selection_mouse +4547,12468824,"genie.py",6375,0,"",python,selection_command +4548,12468920,"genie.py",6376,0,"",python,selection_mouse +4549,12468955,"genie.py",6375,0,"",python,selection_command +4550,12470124,"genie.py",6314,0,"",python,selection_mouse +4551,12470253,"genie.py",6313,5,"steps",python,selection_mouse +4552,12472683,"genie.py",6314,0,"",python,selection_mouse +4553,12478649,"genie.py",6277,0,"",python,selection_mouse +4554,12478754,"genie.py",6274,7,"loop_fn",python,selection_mouse +4555,12483384,"genie.py",5392,0,"",python,selection_mouse +4556,12495549,"TERMINAL",0,0,"SSIM: 0.5181312561035156\r\n",,terminal_output +4557,12496956,"TERMINAL",0,0,"]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +4558,12506858,"genie.py",5508,0,"",python,selection_mouse +4559,12506862,"genie.py",5507,0,"",python,selection_command +4560,12508140,"genie.py",5568,0,"",python,selection_mouse +4561,12508257,"genie.py",5568,5,"steps",python,selection_mouse +4562,12510164,"genie.py",6276,0,"",python,selection_mouse +4563,12510316,"genie.py",6274,7,"loop_fn",python,selection_mouse +4564,12511241,"genie.py",6317,0,"",python,selection_mouse +4565,12511391,"genie.py",6313,5,"steps",python,selection_mouse +4566,12511572,"genie.py",6306,12,"arange(steps",python,selection_mouse +4567,12511647,"genie.py",6302,16,"jnp.arange(steps",python,selection_mouse +4568,12511651,"genie.py",6301,17," jnp.arange(steps",python,selection_mouse +4569,12511671,"genie.py",6300,18,", jnp.arange(steps",python,selection_mouse +4570,12511749,"genie.py",6282,36,"init_carry_maskgit, jnp.arange(steps",python,selection_mouse +4571,12511901,"genie.py",6300,18,", jnp.arange(steps",python,selection_mouse +4572,12511935,"genie.py",6301,17," jnp.arange(steps",python,selection_mouse +4573,12511971,"genie.py",6302,16,"jnp.arange(steps",python,selection_mouse +4574,12512476,"genie.py",6302,0,"",python,selection_mouse +4575,12512477,"genie.py",6302,3,"jnp",python,selection_mouse +4576,12512720,"genie.py",6302,10,"jnp.arange",python,selection_mouse +4577,12512800,"genie.py",6302,11,"jnp.arange(",python,selection_mouse +4578,12512801,"genie.py",6302,16,"jnp.arange(steps",python,selection_mouse +4579,12512933,"genie.py",6302,17,"jnp.arange(steps)",python,selection_mouse +4580,12514069,"genie.py",6319,0,"",python,selection_mouse +4581,12520002,"genie.py",6755,0,"",python,selection_mouse +4582,12520038,"genie.py",6754,0,"",python,selection_command +4583,12525056,"genie.py",6616,0,"",python,selection_mouse +4584,12535046,"genie.py",6595,0,"",python,selection_mouse +4585,12535172,"genie.py",6585,17,"timesteps_to_scan",python,selection_mouse +4586,12536646,"genie.py",6693,0,"",python,selection_mouse +4587,12536777,"genie.py",6679,18,"generation_step_fn",python,selection_mouse +4588,12538758,"genie.py",5673,0,"",python,selection_mouse +4589,12538913,"genie.py",5671,5,"carry",python,selection_mouse +4590,12539472,"genie.py",5628,0,"",python,selection_mouse 
+4591,12539605,"genie.py",5624,6,"step_t",python,selection_mouse +4592,12653584,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +4593,12655643,"TERMINAL",0,0,"q",,terminal_output +4594,12655946,"TERMINAL",0,0,"ue",,terminal_output +4595,12656034,"TERMINAL",0,0,"ue",,terminal_output +4596,12656353,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0602.localdomain: Thu Jul 31 16:00:58 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3386718 accelerat train_to tum_cte0 PD\t0:00\t 2 (Priority)3386719 accelerat train_to tum_cte0 PD\t0:00\t 2 (Priority)3386722 accelerat train_dy tum_cte0 PD\t0:00\t 8 (Priority)3387190 accelerat interact tum_cte0 R 3:28:19\t 1 hkn0602",,terminal_output +4597,12657405,"TERMINAL",0,0,"921",,terminal_output +4598,12657471,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +4599,12789165,"genie.py",0,0,"",python,tab +4600,12793471,"TERMINAL",0,0,"bash",,terminal_focus +4601,12796085,"TERMINAL",0,0,"srun",,terminal_focus +4602,12796735,"TERMINAL",0,0,"c",,terminal_output +4603,12796912,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +4604,12797078,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +4605,12797685,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +4606,12797789,"TERMINAL",0,0,"",,terminal_output +4607,12798403,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +4608,12798856,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +4609,12798965,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +4610,12799051,"TERMINAL",0,0,"",,terminal_output +4611,12799855,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +4612,12800078,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +4613,12800773,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +4614,12800837,"TERMINAL",0,0,"e.py ",,terminal_output +4615,12802344,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +4616,12802433,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +4617,12802647,"TERMINAL",0,0,"n",,terminal_output +4618,12803406,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +4619,12803492,"TERMINAL",0,0,"e.py ",,terminal_output +4620,12805017,"TERMINAL",0,0,"[?25l.[1@_[?25h",,terminal_output +4621,12805400,"TERMINAL",0,0,"[?25l.[1@b[?25h",,terminal_output +4622,12805574,"TERMINAL",0,0,"[?25lp.[1@a[?25h[1@k",,terminal_output +4623,12806092,"TERMINAL",0,0,"[?25l.[1@_[?25h",,terminal_output +4624,12806653,"TERMINAL",0,0,"[?25lk_[?25h",,terminal_output +4625,12806764,"TERMINAL",0,0,"",,terminal_output +4626,12806864,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +4627,12807031,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +4628,12807666,"TERMINAL",0,0,"[?25lp.[1@f[?25h",,terminal_output +4629,12807733,"TERMINAL",0,0,"[1@i",,terminal_output +4630,12807982,"TERMINAL",0,0,"[?25l.[1@x[?25h",,terminal_output +4631,12808260,"TERMINAL",0,0,"[?25lp.[1@e[?25h",,terminal_output +4632,12808313,"TERMINAL",0,0,"[1@d",,terminal_output +4633,12808541,"TERMINAL",0,0,"[?25l.[1@_[?25h",,terminal_output +4634,12808962,"TERMINAL",0,0,"[?25l.[1@m[?25h",,terminal_output +4635,12809076,"TERMINAL",0,0,"[?25l.[1@a[?25h",,terminal_output +4636,12809236,"TERMINAL",0,0,"[?25l.[1@s[?25h",,terminal_output +4637,12809323,"TERMINAL",0,0,"[?25l.[1@k[?25h",,terminal_output +4638,12809575,"TERMINAL",0,0,"[?25lp.[1@g[?25h",,terminal_output +4639,12809634,"TERMINAL",0,0,"[1@i",,terminal_output +4640,12809773,"TERMINAL",0,0,"[?25l.[1@t[?25h",,terminal_output +4641,12810512,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) 
[tum_cte0515@hkn0602 jafar]$ ",,terminal_output +4642,12943294,"TERMINAL",0,0,"cp genie.py genie_fixed_maskgit.py ",,terminal_output +4643,12943904,"TERMINAL",0,0,"queue",,terminal_output +4644,12944294,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.0_3365838",,terminal_output +4645,12945642,"TERMINAL",0,0,"\r\n\r",,terminal_output +4646,12946696,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.5_3365839",,terminal_output +4647,12947097,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.5_3365839\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ndynamics_ckpt_dir=$1\r\necho $dynamics_ckpt_dir\r\n\r\nenv | grep SLURM\r\n\r\nsrun python sample.py \\r\n --checkpoint $dynamics_ckpt_dir \\r\n --seq_len=16 \\r\n --seed=69 \\r\n --batch_size=12 \\r\n --maskgit_steps=25 \\r\n --start_frame=0 \\r\n --data_dir $array_records_dir \\r\n\r\n # --dyna_dim=1024 \\r\n # --dyna_num_blocks=16 \\r\n # --dyna_num_heads=16 \\r\n # --dyna_ffn_dim=4096 \\r\n\r\n# srun python sample.py \\r\n # --checkpoint $dynamics_ckpt_dir \\r\n # --start_frame=0 \\r\n # --batch_size=12 \\r\n # --seq_len=2 \\r\n # --data_dir $array_records_dir\r\n",,terminal_output 
+4648,12947259,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.5_3365839\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2516643\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0602\r\nSLURM_JOB_START_TIME=1753957959\r\nSLURM_STEP_NODELIST=hkn0602\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753975959\r\nSLURM_PMI2_SRUN_PORT=34727\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3387190\r\nSLURM_PTY_PORT=43475\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=39\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0602\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn0602\r\nSLURM_SRUN_COMM_PORT=35307\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3387190\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0602\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=35307\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0602\r\n",,terminal_output +4649,12947388,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +4650,12949173,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +4651,12951260,"TERMINAL",0,0,"2025-07-31 16:05:53.911659: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4652,12976912,"TERMINAL",0,0,"2025-07-31 16:06:19.389287: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4653,12981924,"TERMINAL",0,0,"2025-07-31 16:06:24.572740: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4654,12985714,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.5_3365839/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 80000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.5_3365839/080000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.5_3365839/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 115000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.5_3365839/115000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 116000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.5_3365839/116000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 117000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.5_3365839/117000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.5_3365839/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 100000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/dynamics-masked_lim_0.5_3365839/100000/metrics/metrics not found.\r\n",,terminal_output +4655,13033868,"TERMINAL",0,0,"2025-07-31 16:07:16.356458: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 16:07:16.356698: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 16:07:16.357042: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-31 16:07:16.357070: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +4656,13141470,"TERMINAL",0,0,"SSIM: 0.5225618481636047\r\n",,terminal_output +4657,13142913,"TERMINAL",0,0,"]0;tum_cte0515@hkn0602:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0602 jafar]$ ",,terminal_output +4658,17301617,"",0,0,"Switched from branch 'sample-ali-branch' to 'main'",,git_branch_checkout +4659,17466665,"",0,0,"Switched from branch 'main' to 'maskgit-sampling-iterative-unmasking-fix'",,git_branch_checkout diff --git a/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-3ccbecba-82d0-462f-a78a-0ad16dfe3f6b1754830643122-2025_08_10-14.58.12.168/source.csv b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-3ccbecba-82d0-462f-a78a-0ad16dfe3f6b1754830643122-2025_08_10-14.58.12.168/source.csv new file mode 100644 index 0000000000000000000000000000000000000000..761f3908c1dd1386bf21a0ea21fda211283e7478 --- /dev/null +++ b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-3ccbecba-82d0-462f-a78a-0ad16dfe3f6b1754830643122-2025_08_10-14.58.12.168/source.csv @@ -0,0 +1,3995 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +2,427,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"2:58:12 PM [info] Activating crowd-code\n2:58:12 PM [info] Recording started\n2:58:12 PM [info] Initializing git provider using file system watchers...\n2:58:12 PM [info] Git repository found\n2:58:12 PM [info] Git provider initialized successfully\n",Log,tab +3,552,"extension-output-pdoom-org.crowd-code-#1-crowd-code",245,0,"2:58:12 PM [info] Initial git state: [object Object]\n",Log,content +4,7165,"TERMINAL",0,0,"queue",,terminal_command +5,7265,"TERMINAL",0,0,"]633;E;2025-08-10 14:58:19 queue;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 14:58:19 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3404607 accelerat train_to tum_cte0 R 1-21:28:49\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]",,terminal_output +6,8308,"TERMINAL",0,0,"2050",,terminal_output +7,9375,"TERMINAL",0,0,"11",,terminal_output +8,10168,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +9,11774,"TERMINAL",0,0,"idling",,terminal_command +10,11842,"TERMINAL",0,0,"]633;E;2025-08-10 14:58:23 idling;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1990.localdomain: Sun Aug 10 14:58:23 2025Partition dev_cpuonly: 12 nodes idle\rPartition cpuonly: 166 nodes idle\rPartition dev_accelerated:\t 1 nodes idle\rPartition accelerated: 69 nodes idle\rPartition dev_accelerated-h100 :\t 1 nodes idle\rPartition accelerated-h100:\t 6 nodes idle\rPartition large:\t 7 nodes idle",,terminal_output +11,12873,"TERMINAL",0,0,"4",,terminal_output +12,13931,"TERMINAL",0,0,"57",,terminal_output +13,14990,"TERMINAL",0,0,"6",,terminal_output +14,16024,"TERMINAL",0,0,"8",,terminal_output +15,16465,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +16,18379,"TERMINAL",0,0,"bash",,terminal_focus +17,19708,"TERMINAL",0,0,"bash",,terminal_focus +18,219992,"TERMINAL",0,0,"git branch",,terminal_command +19,220064,"TERMINAL",0,0,"]633;E;2025-08-10 15:01:52 git branch;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C[?1h=\r add-wandb-name-and-tags\r\n before-nnx\r\n causal-spatiotemporal-kv-cache\r\n causal-st-transformer\r\n 
causal-transformer-dynamics-model\r\n causal-transformer-nnx-no-kv-cache\r\n convert-to-jax-array-in-iter\r\n correct-batched-sampling\r\n dev\r\n dont-let-tf-see-gpu\r\n feat/explicit-image-dims\r\n fix-action-padding-lam-future-information-access\r\n fix-sampling\r\n fix-transformer-forwardpass\r\n fix/spatiotemporal-pe-once-in-STTransformer\r\n grad-norm-log-and-clip\r\n grain-dataloader\r\n logging-variants\r\n lr-schedules\r\n* main\r\n maskgit-different-maskprob-per-sample\r\n maskgit-sampling-iterative-unmasking-fix\r\n metrics-logging-for-dynamics-model\r\n monkey-patch\r\n new-arch-sampling\r\n preprocess_video\r\n refactor-tmp\r\n revised-dataloader\r\n runner\r\n runner-grain\r\n sample-ali-branch\r\n:",,terminal_output +20,221302,"TERMINAL",0,0,"\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +21,226887,"TERMINAL",0,0,"clear",,terminal_command +22,230619,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command +23,230647,"TERMINAL",0,0,"]633;E;2025-08-10 15:02:02 source .venv/bin/activate;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +24,242488,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=384 \\n --init_lr=0 \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n 
--tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +25,255859,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",38,0,"",shellscript,selection_mouse +26,255862,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",37,0,"",shellscript,selection_command +27,257087,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",38,0,"",shellscript,selection_command +28,257503,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",37,1,"",shellscript,content +29,257508,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",37,0,"1",shellscript,content +30,257509,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",38,0,"",shellscript,selection_keyboard +31,257527,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",38,0,"2",shellscript,content +32,257528,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",39,0,"",shellscript,selection_keyboard +33,258093,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",38,0,"",shellscript,selection_command +34,272350,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",39,0,"",shellscript,selection_command +35,272520,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",38,1,"",shellscript,content +36,272647,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",37,1,"",shellscript,content +37,273214,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",37,0,"8",shellscript,content +38,273216,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",38,0,"",shellscript,selection_keyboard +39,273331,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",37,0,"",shellscript,selection_command +40,322001,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1943,0,"",shellscript,selection_mouse +41,322888,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1945,0,"",shellscript,selection_mouse +42,324176,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1944,1,"",shellscript,content +43,324304,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1943,1,"",shellscript,content +44,324689,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1943,0,"2",shellscript,content +45,324690,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1944,0,"",shellscript,selection_keyboard +46,325009,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1944,0,"0",shellscript,content +47,325010,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1945,0,"",shellscript,selection_keyboard +48,325251,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1944,0,"",shellscript,selection_command +49,369689,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1431,0,"",shellscript,selection_mouse +50,371488,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1515,0,"",shellscript,selection_mouse +51,372381,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1515,0,"h",shellscript,content +52,372383,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1516,0,"",shellscript,selection_keyboard +53,372533,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1516,0,"o",shellscript,content 
+54,372534,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1517,0,"",shellscript,selection_keyboard +55,372682,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1517,0,"l",shellscript,content +56,372683,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1518,0,"",shellscript,selection_keyboard +57,372839,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1518,0,"i",shellscript,content +58,372840,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1519,0,"",shellscript,selection_keyboard +59,372898,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1519,0,"d",shellscript,content +60,372899,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1520,0,"",shellscript,selection_keyboard +61,373080,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1520,0,"a",shellscript,content +62,373081,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1521,0,"",shellscript,selection_keyboard +63,373267,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1521,0,"y",shellscript,content +64,373268,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1522,0,"",shellscript,selection_keyboard +65,373842,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1522,0,"/",shellscript,content +66,373843,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1523,0,"",shellscript,selection_keyboard +67,375451,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1515,0,"",shellscript,selection_mouse +68,379736,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1515,0,"m",shellscript,content +69,379737,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1516,0,"",shellscript,selection_keyboard +70,379947,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1516,0,"i",shellscript,content +71,379950,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1517,0,"",shellscript,selection_keyboard +72,380032,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1517,0,"h",shellscript,content +73,380033,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1518,0,"",shellscript,selection_keyboard +74,380128,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1518,0,"i",shellscript,content +75,380129,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1519,0,"",shellscript,selection_keyboard +76,380217,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1519,0,"r",shellscript,content +77,380217,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1520,0,"",shellscript,selection_keyboard +78,380905,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1520,0,"/",shellscript,content +79,380906,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1521,0,"",shellscript,selection_keyboard +80,383429,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1518,0,"",shellscript,selection_mouse +81,383549,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1515,5,"mihir",shellscript,selection_mouse +82,384284,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1585,0,"",shellscript,selection_mouse 
+83,386058,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1518,0,"",shellscript,selection_mouse +84,386221,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1515,5,"mihir",shellscript,selection_mouse +85,386957,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1585,0,"",shellscript,selection_mouse +86,389055,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1528,0,"",shellscript,selection_mouse +87,389681,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1584,0,"",shellscript,selection_mouse +88,392662,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",446,0,"",shellscript,selection_mouse +89,392822,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",440,28,"train_dynamics_causal_8_node",shellscript,selection_mouse +90,418287,"slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/big_run/tokenizer/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/big_run/tokenizer/%x_%j.log\n#SBATCH --job-name=train_tokenizer_1e-4\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=384 \\n --init_lr=0 \\n --max_lr=1e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=1000 \\n --log \\n --name=tokenizer-8-nodes-$slurm_job_id \\n --tags tokenizer big-run 1e-4 \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid",shellscript,tab +91,459606,"slurm/utils/mihir/dataset_sizes.md",0,0,"\n# Genie 1 - Dataset Size, Batch Size, and Steps per Epoch\n\n## Dataset Details\n\n- **Total video hours:** 5,000 hours\n- **Frame rate:** 10 frames per second (fps)\n- **Total frames:** \n 5,000 hours × 60 min/hour × 60 sec/min × 10 fps = **180,000,000 frames**\n- **Sample length:** 16 frames per sample \n (Each training sample is a 16-frame video clip)\n\n- **Total number of samples:** \n 180,000,000 frames ÷ 16 frames/sample = 
**11,250,000 samples**\n\n---\n\n## Steps per Epoch for Different Batch Sizes\n\n| Batch Size | Steps per Epoch | Formula Used |\n|------------|----------------|-------------------------------------|\n| 48 | 234,375 | 11,250,000 ÷ 48 |\n| 96 | 117,188 | 11,250,000 ÷ 96 |\n| 192 | 58,594 | 11,250,000 ÷ 192 |\n| 384 | 29,297 | 11,250,000 ÷ 384 |\n| 768 | 14,649 | 11,250,000 ÷ 768 |\n\n\n### actual num_frames: 105.286.463\n\n| Batch Size | Steps per Epoch | Formula Used |\n|------------|----------------|-------------------------------------|\n| 48 | 137,091 | 6,580,403 ÷ 48 |\n| 96 | 68,545 | 6,580,403 ÷ 96 |\n| 192 | 34,272 | 6,580,403 ÷ 192 |\n| 384 | 17,136 | 6,580,403 ÷ 384 |\n| 768 | 8,568 | 6,580,403 ÷ 768 |\n\n*All values are rounded up to the nearest integer.*\n",markdown,tab +92,471991,"slurm/utils/mihir/dataset_sizes.md",1399,0,"",markdown,selection_mouse +93,472177,"slurm/utils/mihir/dataset_sizes.md",1399,1,"1",markdown,selection_mouse +94,472193,"slurm/utils/mihir/dataset_sizes.md",1399,2,"17",markdown,selection_mouse +95,472207,"slurm/utils/mihir/dataset_sizes.md",1399,3,"17,",markdown,selection_mouse +96,472222,"slurm/utils/mihir/dataset_sizes.md",1399,4,"17,1",markdown,selection_mouse +97,472256,"slurm/utils/mihir/dataset_sizes.md",1399,5,"17,13",markdown,selection_mouse +98,472299,"slurm/utils/mihir/dataset_sizes.md",1399,6,"17,136",markdown,selection_mouse +99,472340,"slurm/utils/mihir/dataset_sizes.md",1399,7,"17,136 ",markdown,selection_mouse +100,472816,"slurm/utils/mihir/dataset_sizes.md",1406,0,"",markdown,selection_mouse +101,473338,"slurm/utils/mihir/dataset_sizes.md",1405,1," ",markdown,selection_mouse +102,473351,"slurm/utils/mihir/dataset_sizes.md",1404,2,"6 ",markdown,selection_mouse +103,473381,"slurm/utils/mihir/dataset_sizes.md",1403,3,"36 ",markdown,selection_mouse +104,473422,"slurm/utils/mihir/dataset_sizes.md",1402,4,"136 ",markdown,selection_mouse +105,473466,"slurm/utils/mihir/dataset_sizes.md",1401,5,",136 ",markdown,selection_mouse +106,473521,"slurm/utils/mihir/dataset_sizes.md",1400,6,"7,136 ",markdown,selection_mouse +107,473617,"slurm/utils/mihir/dataset_sizes.md",1399,7,"17,136 ",markdown,selection_mouse +108,474273,"slurm/utils/mihir/dataset_sizes.md",1399,0,"",markdown,selection_mouse +109,474734,"slurm/utils/mihir/dataset_sizes.md",1399,1,"1",markdown,selection_mouse +110,474757,"slurm/utils/mihir/dataset_sizes.md",1399,2,"17",markdown,selection_mouse +111,474774,"slurm/utils/mihir/dataset_sizes.md",1399,3,"17,",markdown,selection_mouse +112,474823,"slurm/utils/mihir/dataset_sizes.md",1399,4,"17,1",markdown,selection_mouse +113,474867,"slurm/utils/mihir/dataset_sizes.md",1399,5,"17,13",markdown,selection_mouse +114,475309,"slurm/utils/mihir/dataset_sizes.md",1404,0,"",markdown,selection_mouse +115,475477,"slurm/utils/mihir/dataset_sizes.md",1402,3,"136",markdown,selection_mouse +116,475683,"slurm/utils/mihir/dataset_sizes.md",1401,4,",136",markdown,selection_mouse +117,476166,"slurm/utils/mihir/dataset_sizes.md",1401,0,"",markdown,selection_mouse +118,476586,"slurm/utils/mihir/dataset_sizes.md",1399,0,"",markdown,selection_mouse +119,476732,"slurm/utils/mihir/dataset_sizes.md",1399,2,"17",markdown,selection_mouse +120,476990,"slurm/utils/mihir/dataset_sizes.md",1399,3,"17,",markdown,selection_mouse +121,477034,"slurm/utils/mihir/dataset_sizes.md",1399,6,"17,136",markdown,selection_mouse +122,477437,"slurm/utils/mihir/dataset_sizes.md",1404,0,"",markdown,selection_mouse 
+123,477437,"slurm/utils/mihir/dataset_sizes.md",1402,3,"136",markdown,selection_mouse +124,477611,"slurm/utils/mihir/dataset_sizes.md",1384,70,"| 384 | 17,136 | 6,580,403 ÷ 384 |\n",markdown,selection_mouse +125,478343,"slurm/utils/mihir/dataset_sizes.md",1400,0,"",markdown,selection_mouse +126,478840,"slurm/utils/mihir/dataset_sizes.md",1399,0,"",markdown,selection_mouse +127,478976,"slurm/utils/mihir/dataset_sizes.md",1399,2,"17",markdown,selection_mouse +128,479242,"slurm/utils/mihir/dataset_sizes.md",1399,3,"17,",markdown,selection_mouse +129,479825,"slurm/utils/mihir/dataset_sizes.md",1402,0,"",markdown,selection_mouse +130,480466,"slurm/utils/mihir/dataset_sizes.md",1403,0,"",markdown,selection_mouse +131,480600,"slurm/utils/mihir/dataset_sizes.md",1402,3,"136",markdown,selection_mouse +132,480796,"slurm/utils/mihir/dataset_sizes.md",1401,4,",136",markdown,selection_mouse +133,480830,"slurm/utils/mihir/dataset_sizes.md",1399,6,"17,136",markdown,selection_mouse +134,481462,"slurm/utils/mihir/dataset_sizes.md",1400,0,"",markdown,selection_mouse +135,481636,"slurm/utils/mihir/dataset_sizes.md",1399,2,"17",markdown,selection_mouse +136,481878,"slurm/utils/mihir/dataset_sizes.md",1399,3,"17,",markdown,selection_mouse +137,481982,"slurm/utils/mihir/dataset_sizes.md",1399,6,"17,136",markdown,selection_mouse +138,482384,"slurm/utils/mihir/dataset_sizes.md",1402,0,"",markdown,selection_mouse +139,482385,"slurm/utils/mihir/dataset_sizes.md",1402,3,"136",markdown,selection_mouse +140,482636,"slurm/utils/mihir/dataset_sizes.md",1401,4,",136",markdown,selection_mouse +141,482696,"slurm/utils/mihir/dataset_sizes.md",1399,6,"17,136",markdown,selection_mouse +142,483153,"slurm/utils/mihir/dataset_sizes.md",1399,0,"",markdown,selection_mouse +143,483153,"slurm/utils/mihir/dataset_sizes.md",1399,2,"17",markdown,selection_mouse +144,483414,"slurm/utils/mihir/dataset_sizes.md",1399,3,"17,",markdown,selection_mouse +145,483839,"slurm/utils/mihir/dataset_sizes.md",1402,0,"",markdown,selection_mouse +146,483840,"slurm/utils/mihir/dataset_sizes.md",1402,3,"136",markdown,selection_mouse +147,484104,"slurm/utils/mihir/dataset_sizes.md",1401,4,",136",markdown,selection_mouse +148,484160,"slurm/utils/mihir/dataset_sizes.md",1399,6,"17,136",markdown,selection_mouse +149,484545,"slurm/utils/mihir/dataset_sizes.md",1400,0,"",markdown,selection_mouse +150,484546,"slurm/utils/mihir/dataset_sizes.md",1399,2,"17",markdown,selection_mouse +151,484745,"slurm/utils/mihir/dataset_sizes.md",1399,3,"17,",markdown,selection_mouse +152,484787,"slurm/utils/mihir/dataset_sizes.md",1399,6,"17,136",markdown,selection_mouse +153,485138,"slurm/utils/mihir/dataset_sizes.md",1404,0,"",markdown,selection_mouse +154,485139,"slurm/utils/mihir/dataset_sizes.md",1402,3,"136",markdown,selection_mouse +155,485352,"slurm/utils/mihir/dataset_sizes.md",1401,4,",136",markdown,selection_mouse +156,485385,"slurm/utils/mihir/dataset_sizes.md",1399,6,"17,136",markdown,selection_mouse +157,485816,"slurm/utils/mihir/dataset_sizes.md",1400,0,"",markdown,selection_mouse +158,485816,"slurm/utils/mihir/dataset_sizes.md",1399,2,"17",markdown,selection_mouse +159,486026,"slurm/utils/mihir/dataset_sizes.md",1399,3,"17,",markdown,selection_mouse +160,486075,"slurm/utils/mihir/dataset_sizes.md",1399,6,"17,136",markdown,selection_mouse +161,486426,"slurm/utils/mihir/dataset_sizes.md",1404,0,"",markdown,selection_mouse +162,486427,"slurm/utils/mihir/dataset_sizes.md",1402,3,"136",markdown,selection_mouse 
+163,486652,"slurm/utils/mihir/dataset_sizes.md",1401,4,",136",markdown,selection_mouse +164,486694,"slurm/utils/mihir/dataset_sizes.md",1399,6,"17,136",markdown,selection_mouse +165,487063,"slurm/utils/mihir/dataset_sizes.md",1400,0,"",markdown,selection_mouse +166,487064,"slurm/utils/mihir/dataset_sizes.md",1399,2,"17",markdown,selection_mouse +167,487270,"slurm/utils/mihir/dataset_sizes.md",1399,6,"17,136",markdown,selection_mouse +168,487667,"slurm/utils/mihir/dataset_sizes.md",1405,0,"",markdown,selection_mouse +169,487668,"slurm/utils/mihir/dataset_sizes.md",1402,3,"136",markdown,selection_mouse +170,487870,"slurm/utils/mihir/dataset_sizes.md",1401,4,",136",markdown,selection_mouse +171,487897,"slurm/utils/mihir/dataset_sizes.md",1399,6,"17,136",markdown,selection_mouse +172,488282,"slurm/utils/mihir/dataset_sizes.md",1400,0,"",markdown,selection_mouse +173,488283,"slurm/utils/mihir/dataset_sizes.md",1399,2,"17",markdown,selection_mouse +174,488484,"slurm/utils/mihir/dataset_sizes.md",1399,3,"17,",markdown,selection_mouse +175,488506,"slurm/utils/mihir/dataset_sizes.md",1399,6,"17,136",markdown,selection_mouse +176,488875,"slurm/utils/mihir/dataset_sizes.md",1404,0,"",markdown,selection_mouse +177,488876,"slurm/utils/mihir/dataset_sizes.md",1402,3,"136",markdown,selection_mouse +178,489068,"slurm/utils/mihir/dataset_sizes.md",1401,4,",136",markdown,selection_mouse +179,489084,"slurm/utils/mihir/dataset_sizes.md",1402,52,"136 | 6,580,403 ÷ 384 |\n",markdown,selection_mouse +180,489107,"slurm/utils/mihir/dataset_sizes.md",1399,6,"17,136",markdown,selection_mouse +181,489140,"slurm/utils/mihir/dataset_sizes.md",1402,52,"136 | 6,580,403 ÷ 384 |\n",markdown,selection_mouse +182,489290,"slurm/utils/mihir/dataset_sizes.md",1399,0,"",markdown,selection_mouse +183,535376,"slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch",0,0,"",shellscript,tab +184,549832,"TERMINAL",0,0,"ls",,terminal_command +185,549883,"TERMINAL",0,0,"]633;E;2025-08-10 15:07:21 ls;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C",,terminal_output +186,550028,"TERMINAL",0,0,"data genie.py __pycache__ slurm train_dynamics.py\r\ndebug gifs README.md slurm-3373409.out train_lam.py\r\ndiff.diff input_pipeline read_tf_record.py slurm-3373410.out train_tokenizer.py\r\nframe-knoms.png LICENSE requirements-franz.txt slurm-3379613.out utils\r\nframe.png logs requirements.txt slurm-3379615.out wandb\r\nframes models sample.py slurm-3379616.out weekend-job-requeuer.sh\r\ngenerate_dataset.py overfit_dir scripts_cremers slurm-3400254.out weekend-job-starter.sh\r\ngenie_fixed_maskgit.py overfit_dir.zip scripts_horeka tests\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +187,554395,"TERMINAL",0,0,"",,terminal_focus +188,558667,"TERMINAL",0,0,"source /home/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/bin/activate",,terminal_command +189,558726,"TERMINAL",0,0,"]633;E;2025-08-10 15:07:30 source /home/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/bin/activate;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +190,562644,"TERMINAL",0,0,"cd $ws_dir",,terminal_command +191,562650,"TERMINAL",0,0,"]633;E;2025-08-10 15:07:34 cd $ws_dir;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared]633;D;0",,terminal_output +192,563066,"TERMINAL",0,0,"ls",,terminal_command +193,563114,"TERMINAL",0,0,"]633;E;2025-08-10 15:07:35 
ls;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C",,terminal_output +194,563217,"TERMINAL",0,0,"checkpoints data huggingface possibly_corrupt_files_in_this_workspace.txt\r\ncount_items.sh data_new logs scripts\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared]633;D;0",,terminal_output +195,565597,"TERMINAL",0,0,"cd checkpoints/",,terminal_command +196,566078,"TERMINAL",0,0,"ls",,terminal_command +197,566123,"TERMINAL",0,0,"]633;E;2025-08-10 15:07:38 ls;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C",,terminal_output +198,566261,"TERMINAL",0,0,"0000 3292334 3297582 3300663 3313565 tokenizer\r\n3290283 3292335 3297586 3300672 3313570 tokenizer_ckpt_dir\r\n3290284 3292336 3297606 3301025 3313571 train_dynamics_lr_schedule_const\r\n3290295 3292337 3297671 3301026 3313572 train_dynamics_lr_schedule_cos\r\n3290296 3292338 3297693 3301027 3316022 train_dynamics_lr_schedule_wsd\r\n3290366 3292339 3297706 3301029 big-runs train_dyn_causal_180M\r\n3290367 3294600 3297727 3301030 causal train_dyn_causal_255M\r\n3290391 3294601 3299016 3301031 causal-fr train_dyn_causal_356M\r\n3290392 3294602 3299062 3306801 causal-fr-flashattn train_dyn_causal_500M\r\n3290439 3294603 3299063 3307618 checkpoints_alfred train_dyn_new_arch-bugfixed-spatial-shift\r\n3290440 3296502 3299065 3307619 coinrun train_dyn_new_arch-bugfixed-temporal-shift\r\n3291405 3296540 3299066 3309662 debug train_dyn_yolorun_new_arch\r\n3292213 3296571 3299068 3309663 dyn train_lam_minecraft_overfit_sample\r\n3292221 3296573 3299069 3309699 dynamics_ckpt_dir train_tokenizer_batch_size_scaling_16_node\r\n3292258 3296574 3299258 3310436 interactive train_tokenizer_minecraft_overfit_sample\r\n3292328 3296575 3299259 3310437 lam wrap\r\n3292329 3297569 3299272 3311671 lam-1-action\r\n3292330 3297575 3299579 3311672 lam_ckpt_dir\r\n3292331 3297576 3300233 3313562 lam_main_test\r\n3292332 3297577 3300290 3313563 maskgit-maskprob-fix\r\n3292333 3297578 3300658 3313564 maskgit-speedrun\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints]633;D;0",,terminal_output +199,571724,"TERMINAL",0,0,"mkdir mihir",,terminal_command +200,571769,"TERMINAL",0,0,"]633;E;2025-08-10 15:07:43 mkdir mihir;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C",,terminal_output +201,571830,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints]633;D;0",,terminal_output +202,578067,"TERMINAL",0,0,"cd mihir/",,terminal_command +203,578873,"TERMINAL",0,0,"ls",,terminal_command +204,588584,"TERMINAL",0,0,"mkdir holiday",,terminal_command +205,588623,"TERMINAL",0,0,"]633;E;2025-08-10 15:08:00 mkdir holiday;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir]633;D;0",,terminal_output +206,591622,"TERMINAL",0,0,"cd holiday/",,terminal_command +207,591654,"TERMINAL",0,0,"]633;E;2025-08-10 15:08:03 cd holiday/;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday]633;D;0",,terminal_output +208,597574,"TERMINAL",0,0,"mkdir tokenizer",,terminal_command +209,599136,"TERMINAL",0,0,"cd tokenizer/",,terminal_command +210,599794,"TERMINAL",0,0,"ls",,terminal_command +211,622720,"TERMINAL",0,0,"cp -r /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607 3404607",,terminal_command +212,622770,"TERMINAL",0,0,"]633;E;2025-08-10 15:08:34 cp -r 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607 3404607;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C",,terminal_output +213,630267,"TERMINAL",0,0,"bash",,terminal_focus +214,632263,"TERMINAL",0,0,"cp",,terminal_focus +215,634401,"TERMINAL",0,0,"bash",,terminal_focus +216,638020,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +217,639176,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1663,0,"",shellscript,selection_mouse +218,640173,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1665,0,"",shellscript,selection_mouse +219,640325,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1665,2,"=/",shellscript,selection_mouse +220,640326,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1665,3,"=/h",shellscript,selection_mouse +221,640340,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1665,9,"=/hkfs/wo",shellscript,selection_mouse +222,640356,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1665,116,"=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n",shellscript,selection_mouse +223,640439,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1665,133,"=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n\nenv | grep SLURM",shellscript,selection_mouse +224,640440,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1665,116,"=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n",shellscript,selection_mouse +225,640726,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1665,85,"=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_token",shellscript,selection_mouse +226,640770,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1665,115,"=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +227,641923,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1665,115,"",shellscript,content +228,642438,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1665,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607",shellscript,content +229,644542,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1665,0,"",shellscript,selection_mouse +230,645816,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1665,0,"=",shellscript,content +231,645818,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1666,0,"",shellscript,selection_keyboard +232,658724,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/tokenizer]633;D;0",,terminal_output +233,659998,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1800,0,"",shellscript,selection_mouse +234,665904,"TERMINAL",0,0,"bash",,terminal_focus +235,667924,"TERMINAL",0,0,"ls",,terminal_command +236,669367,"TERMINAL",0,0,"cd 3404607/",,terminal_command +237,669973,"TERMINAL",0,0,"ls",,terminal_command +238,672043,"TERMINAL",0,0,"bash",,terminal_focus 
+239,673221,"TERMINAL",0,0,"bash",,terminal_focus +240,675186,"TERMINAL",0,0,"ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607",,terminal_command +241,675221,"TERMINAL",0,0,"]633;E;2025-08-10 15:09:27 ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C020000 040000 060000 080000 100000 120000 138000 139000 140000\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/tokenizer/3404607]633;D;0",,terminal_output +242,686512,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +243,722423,"TERMINAL",0,0,"bash",,terminal_focus +244,726084,"TERMINAL",0,0,"runner",,terminal_command +245,728780,"TERMINAL",0,0,"sync-runner",,terminal_command +246,728837,"TERMINAL",0,0,"]633;E;2025-08-10 15:10:20 sync-runner;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;Csending incremental file list\r\n",,terminal_output +247,730243,"TERMINAL",0,0,"./\r\n",,terminal_output +248,731299,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch\r\n",,terminal_output +249,731446,"TERMINAL",0,0,"\r\nsent 28,704 bytes received 174 bytes 8,250.86 bytes/sec\r\ntotal size is 220,516,673 speedup is 7,636.15\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +250,731912,".venv/bin/activate",0,0,"# Copyright (c) 2020-202x The virtualenv developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# ""Software""), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n# This file must be used with ""source bin/activate"" *from bash*\n# you cannot run it directly\n\nif ! [ -z ""${SCRIPT_PATH+_}"" ] ; then\n _OLD_SCRIPT_PATH=""$SCRIPT_PATH""\nfi\n\n# Get script path (only used if environment is relocatable).\nif [ -n ""${BASH_VERSION:+x}"" ] ; then\n SCRIPT_PATH=""${BASH_SOURCE[0]}""\n if [ ""$SCRIPT_PATH"" = ""$0"" ]; then\n # Only bash has a reasonably robust check for source'dness.\n echo ""You must source this script: \$ source $0"" >&2\n exit 33\n fi\nelif [ -n ""${ZSH_VERSION:+x}"" ] ; then\n SCRIPT_PATH=""${(%):-%x}""\nelif [ -n ""${KSH_VERSION:+x}"" ] ; then\n SCRIPT_PATH=""${.sh.file}""\nfi\n\ndeactivate () {\n unset -f pydoc >/dev/null 2>&1 || true\n\n # reset old environment variables\n # ! [ -z ${VAR+_} ] returns true if VAR is declared at all\n if ! 
[ -z ""${_OLD_VIRTUAL_PATH:+_}"" ] ; then\n PATH=""$_OLD_VIRTUAL_PATH""\n export PATH\n unset _OLD_VIRTUAL_PATH\n fi\n if ! [ -z ""${_OLD_VIRTUAL_PYTHONHOME+_}"" ] ; then\n PYTHONHOME=""$_OLD_VIRTUAL_PYTHONHOME""\n export PYTHONHOME\n unset _OLD_VIRTUAL_PYTHONHOME\n fi\n\n # The hash command must be called to get it to forget past\n # commands. Without forgetting past commands the $PATH changes\n # we made may not be respected\n hash -r 2>/dev/null\n\n if ! [ -z ""${_OLD_VIRTUAL_PS1+_}"" ] ; then\n PS1=""$_OLD_VIRTUAL_PS1""\n export PS1\n unset _OLD_VIRTUAL_PS1\n fi\n\n unset VIRTUAL_ENV\n unset VIRTUAL_ENV_PROMPT\n if [ ! ""${1-}"" = ""nondestructive"" ] ; then\n # Self destruct!\n unset -f deactivate\n fi\n}\n\n# unset irrelevant variables\ndeactivate nondestructive\n\nVIRTUAL_ENV='/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv'\nif ([ ""$OSTYPE"" = ""cygwin"" ] || [ ""$OSTYPE"" = ""msys"" ]) && $(command -v cygpath &> /dev/null) ; then\n VIRTUAL_ENV=$(cygpath -u ""$VIRTUAL_ENV"")\nfi\nexport VIRTUAL_ENV\n\n# Unset the `SCRIPT_PATH` variable, now that the `VIRTUAL_ENV` variable\n# has been set. This is important for relocatable environments.\nif ! [ -z ""${_OLD_SCRIPT_PATH+_}"" ] ; then\n SCRIPT_PATH=""$_OLD_SCRIPT_PATH""\n export SCRIPT_PATH\n unset _OLD_SCRIPT_PATH\nelse\n unset SCRIPT_PATH\nfi\n\n_OLD_VIRTUAL_PATH=""$PATH""\nPATH=""$VIRTUAL_ENV/bin:$PATH""\nexport PATH\n\nif [ ""xjafar"" != x ] ; then\n VIRTUAL_ENV_PROMPT=""jafar""\nelse\n VIRTUAL_ENV_PROMPT=$(basename ""$VIRTUAL_ENV"")\nfi\nexport VIRTUAL_ENV_PROMPT\n\n# unset PYTHONHOME if set\nif ! [ -z ""${PYTHONHOME+_}"" ] ; then\n _OLD_VIRTUAL_PYTHONHOME=""$PYTHONHOME""\n unset PYTHONHOME\nfi\n\nif [ -z ""${VIRTUAL_ENV_DISABLE_PROMPT-}"" ] ; then\n _OLD_VIRTUAL_PS1=""${PS1-}""\n PS1=""(${VIRTUAL_ENV_PROMPT}) ${PS1-}""\n export PS1\nfi\n\n# Make sure to unalias pydoc if it's already there\nalias pydoc 2>/dev/null >/dev/null && unalias pydoc || true\n\npydoc () {\n python -m pydoc ""$@""\n}\n\n# The hash command must be called to get it to forget past\n# commands. 
Without forgetting past commands the $PATH changes\n# we made may not be respected\nhash -r 2>/dev/null\n",plaintext,tab +251,734231,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +252,751066,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2166,0,"",shellscript,selection_mouse +253,751915,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2166,0,"m",shellscript,content +254,751917,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2167,0,"",shellscript,selection_keyboard +255,751995,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2167,0,"a",shellscript,content +256,751996,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2168,0,"",shellscript,selection_keyboard +257,752068,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2168,0,"i",shellscript,content +258,752069,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2169,0,"",shellscript,selection_keyboard +259,752116,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2169,0,"n",shellscript,content +260,752117,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2170,0,"",shellscript,selection_keyboard +261,752260,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2170,0," ",shellscript,content +262,752261,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2171,0,"",shellscript,selection_keyboard +263,752741,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2170,0,"",shellscript,selection_command +264,756308,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2169,0,"",shellscript,selection_command +265,756462,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2168,0,"",shellscript,selection_command +266,756593,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2167,0,"",shellscript,selection_command +267,756729,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2166,0,"",shellscript,selection_command +268,757164,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2166,0,"ü",shellscript,content +269,757166,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2167,0,"",shellscript,selection_keyboard +270,757408,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2167,0,"p",shellscript,content +271,757409,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2168,0,"",shellscript,selection_keyboard +272,757512,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2168,0,"s",shellscript,content +273,757513,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2169,0,"",shellscript,selection_keyboard +274,757800,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2168,1,"",shellscript,content +275,757910,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2167,1,"",shellscript,content +276,758049,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2166,1,"",shellscript,content +277,758453,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2166,0,"ü",shellscript,content +278,758454,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2167,0,"",shellscript,selection_keyboard 
+279,758749,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2166,1,"",shellscript,content +280,758947,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2166,0,"p",shellscript,content +281,758948,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2167,0,"",shellscript,selection_keyboard +282,759152,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2167,0,"o",shellscript,content +283,759153,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2168,0,"",shellscript,selection_keyboard +284,759203,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2168,0,"s",shellscript,content +285,759204,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2169,0,"",shellscript,selection_keyboard +286,759424,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2169,0,"t",shellscript,content +287,759425,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2170,0,"",shellscript,selection_keyboard +288,759985,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2170,0,"-",shellscript,content +289,759987,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2171,0,"",shellscript,selection_keyboard +290,760273,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2171,0,"l",shellscript,content +291,760274,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2172,0,"",shellscript,selection_keyboard +292,760467,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2172,0,"a",shellscript,content +293,760468,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2173,0,"",shellscript,selection_keyboard +294,760579,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2173,0,"u",shellscript,content +295,760580,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2174,0,"",shellscript,selection_keyboard +296,760673,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2174,0,"n",shellscript,content +297,760674,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2175,0,"",shellscript,selection_keyboard +298,760771,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2175,0,"c",shellscript,content +299,760772,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2176,0,"",shellscript,selection_keyboard +300,760879,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2176,0,"h",shellscript,content +301,760880,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2177,0,"",shellscript,selection_keyboard +302,761105,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2177,0,"-",shellscript,content +303,761106,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2178,0,"",shellscript,selection_keyboard +304,761970,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2281,0,"",shellscript,selection_mouse +305,786711,"TERMINAL",0,0,"bash",,terminal_focus +306,787797,"TERMINAL",0,0,"bash",,terminal_focus +307,793953,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",,terminal_command +308,793996,"TERMINAL",0,0,"]633;E;2025-08-10 15:11:26 sbatch slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;CSubmitted batch job 
3412343\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +309,795985,"TERMINAL",0,0,"queue",,terminal_command +310,796036,"TERMINAL",0,0,"]633;E;2025-08-10 15:11:28 queue;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C",,terminal_output +311,796129,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:11:28 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3412343 accelerat train_dy tum_cte0 PD\t0:00\t 8 (None)3404607 accelerat train_to tum_cte0 R 1-21:41:58\t 8 hkn[0802,0804-0806,0808,0810,0813-0814] ",,terminal_output +312,797457,"TERMINAL",0,0,"99",,terminal_output +313,798209,"TERMINAL",0,0,"302:00",,terminal_output +314,799335,"TERMINAL",0,0,"11",,terminal_output +315,800337,"TERMINAL",0,0,"22",,terminal_output +316,801588,"TERMINAL",0,0,"33",,terminal_output +317,803035,"TERMINAL",0,0,"44",,terminal_output +318,803544,"TERMINAL",0,0,"55",,terminal_output +319,804490,"TERMINAL",0,0,"66",,terminal_output +320,805566,"TERMINAL",0,0,"77",,terminal_output +321,806952,"TERMINAL",0,0,"88",,terminal_output +322,808055,"TERMINAL",0,0,"99",,terminal_output +323,809663,"TERMINAL",0,0,"4010",,terminal_output +324,810291,"TERMINAL",0,0,"11[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +325,810855,"TERMINAL",0,0,"idling",,terminal_command +326,810938,"TERMINAL",0,0,"]633;E;2025-08-10 15:11:42 idling;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1990.localdomain: Sun Aug 10 15:11:42 2025Partition dev_cpuonly: 12 nodes idle\rPartition cpuonly: 161 nodes idle\rPartition dev_accelerated:\t 3 nodes idle\rPartition accelerated: 65 nodes idle\rPartition dev_accelerated-h100 :\t 1 nodes idle\rPartition accelerated-h100:\t 6 nodes idle\rPartition large:\t 7 nodes idle",,terminal_output +327,812002,"TERMINAL",0,0,"4",,terminal_output +328,812283,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +329,814660,"TERMINAL",0,0,"dev",,terminal_command +330,815949,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +331,824726,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit-maskprob-fix/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit-maskprob-fix/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_maskprob_fix_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource 
.venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\n# CHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/$job_name/$slurm_job_id\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n --restore_ckpt \\n --wandb_id=3371237 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=384 \\n --init_lr=0 \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-maskprob-fix-8-node-3371237 \\n --tags dynamics maskprob-fix 8-node \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +332,830801,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +333,837287,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +334,839532,"TERMINAL",0,0,"bash",,terminal_focus +335,840128,"TERMINAL",0,0,"bash",,terminal_focus +336,841771,"TERMINAL",0,0,"queue",,terminal_command +337,841861,"TERMINAL",0,0,"]633;E;2025-08-10 15:12:13 queue;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:12:13 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3404607 accelerat train_to tum_cte0 R 1-21:42:43\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]3412343 accelerat train_dy tum_cte0 R\t0:24\t 8 hkn[0405-0407,0410,0413,0415,0417-0418]",,terminal_output +338,842998,"TERMINAL",0,0,"445",,terminal_output +339,843961,"TERMINAL",0,0,"567",,terminal_output +340,844639,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +341,846698,"TERMINAL",0,0,"scancel 3412343",,terminal_command +342,846782,"TERMINAL",0,0,"]633;E;2025-08-10 15:12:18 scancel 3412343;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar",,terminal_output +343,848096,"TERMINAL",0,0,"queue",,terminal_command +344,848172,"TERMINAL",0,0,"]633;E;2025-08-10 15:12:20 queue;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:12:20 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3412343 accelerat train_dy tum_cte0 CG\t0:29\t 8 hkn[0405-0407,0410,0413,0415,0417-0418]3404607 accelerat train_to tum_cte0 R 1-21:42:50\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]",,terminal_output +345,849242,"TERMINAL",0,0,"11",,terminal_output +346,849406,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +347,850557,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +348,851693,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2000,0,"",shellscript,selection_mouse 
+349,852232,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1962,0,"",shellscript,selection_mouse +350,853414,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1960,0,"",shellscript,selection_mouse +351,854180,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2016,0,"",shellscript,selection_mouse +352,858451,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_type: str = ""maskgit"" # supported options: maskgit, causal\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(\n model: Genie, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n """"""Compute masked dynamics loss""""""\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt = 
gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@nnx.jit\ndef train_step(\n model: Genie, optimizer: nnx.Optimizer, inputs: dict\n) -> tuple[jax.Array, jax.Array, dict]:\n """"""Update state and compute metrics""""""\n\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n return dynamics_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(model)\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n decode=False,\n rngs=rngs,\n )\n\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n 
wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n optimizer = nnx.Optimizer(genie, tx)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n 
)\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n # NOTE: We have to remove the (unused) tokenizer vq dropout due to flax.nnx lazily initializing modules.\n # Specifically, the first dynamics model checkpoint will contain the vq dropout module,\n # but the first full restore will fail due to nnx not initializing the module when\n # dropout is set to 0.0.\n del optimizer.model.tokenizer.vq.drop\n\n # --- TRAIN LOOP ---\n dataloader = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in grain_iterator\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +353,888716,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=01:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/yoloruns/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/yoloruns/%x_%j.log\n#SBATCH --job-name=train_dynamics_overfit_sample_causal\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n# source .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/causal-fr/overfit-seed69-1-no-noise-5/$job_name/$slurm_job_id\nmkdir -p 
$CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3388151\n\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n --seed=69 \\n --num_steps=5000 \\n --warmup_steps=0 \\n --wsd_decay_steps=0 \\n --ckpt_dir $CHECKPOINT_DIR \\n --dyna_type=causal \\n --batch_size=8 \\n --init_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=100 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-overfit-final-$slurm_job_id \\n --tags dynamics causal overfit \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir \\n --dyna_dim=128 \\n --dyna_num_blocks=2 \\n --dyna_num_heads=4\n\n #--no-use-flash-attention \\n ",shellscript,tab +354,891884,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1212,0,"",shellscript,selection_mouse +355,891885,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1211,0,"",shellscript,selection_command +356,892050,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1211,1,"\",shellscript,selection_mouse +357,892052,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1212,0,"",shellscript,selection_command +358,892082,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1211,1,"\",shellscript,selection_mouse +359,892098,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1209,3,"l \",shellscript,selection_mouse +360,892139,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1208,4,"al \",shellscript,selection_mouse +361,892140,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1207,5,"sal \",shellscript,selection_mouse +362,892184,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1206,6,"usal \",shellscript,selection_mouse +363,892213,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1205,7,"ausal \",shellscript,selection_mouse +364,892295,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1204,8,"causal \",shellscript,selection_mouse +365,892296,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1203,9,"=causal \",shellscript,selection_mouse +366,892297,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1202,10,"e=causal \",shellscript,selection_mouse +367,892333,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1201,11,"pe=causal \",shellscript,selection_mouse +368,892373,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1200,12,"ype=causal \",shellscript,selection_mouse +369,892374,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1199,13,"type=causal \",shellscript,selection_mouse +370,892415,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1198,14,"_type=causal \",shellscript,selection_mouse +371,892416,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1197,15,"a_type=causal \",shellscript,selection_mouse +372,892416,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1195,17,"yna_type=causal \",shellscript,selection_mouse +373,892455,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1194,18,"dyna_type=causal \",shellscript,selection_mouse 
+374,892456,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1193,19,"-dyna_type=causal \",shellscript,selection_mouse +375,892470,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1192,20,"--dyna_type=causal \",shellscript,selection_mouse +376,892502,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1191,21," --dyna_type=causal \",shellscript,selection_mouse +377,892585,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1190,22," --dyna_type=causal \",shellscript,selection_mouse +378,892628,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1189,23," --dyna_type=causal \",shellscript,selection_mouse +379,892715,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1188,24," --dyna_type=causal \",shellscript,selection_mouse +380,900410,"slurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch",1188,0,"",shellscript,selection_command +381,904845,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +382,905953,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1977,0,"",shellscript,selection_mouse +383,906549,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1976,0,"",shellscript,selection_command +384,907006,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1980,0,"\n --dyna_type=causal \",shellscript,content +385,907009,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1985,0,"",shellscript,selection_command +386,909549,"train_dynamics.py",0,0,"",python,tab +387,920230,"train_dynamics.py",1809,0,"",python,selection_mouse +388,920232,"train_dynamics.py",1808,0,"",python,selection_command +389,920736,"train_dynamics.py",1716,0,"",python,selection_mouse +390,920743,"train_dynamics.py",1715,0,"",python,selection_command +391,921445,"train_dynamics.py",1785,0,"",python,selection_mouse +392,921450,"train_dynamics.py",1784,0,"",python,selection_command +393,922579,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +394,937542,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource 
.venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=320 \\n --init_lr=0 \\n --dyna_type=causal \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +395,938566,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",772,0,"",shellscript,selection_mouse +396,940266,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2022,0,"",shellscript,selection_mouse +397,940753,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1998,0,"",shellscript,selection_mouse +398,941958,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1997,0,"",shellscript,selection_command +399,942265,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1997,6,"",shellscript,content +400,942886,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1997,0,",",shellscript,content +401,942887,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1998,0,"",shellscript,selection_keyboard +402,943005,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1998,0,"a",shellscript,content +403,943006,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1999,0,"",shellscript,selection_keyboard +404,943102,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1999,0,"s",shellscript,content +405,943103,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2000,0,"",shellscript,selection_keyboard +406,943664,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1999,1,"",shellscript,content +407,943836,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1998,1,"",shellscript,content +408,943952,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1997,1,"",shellscript,content +409,944237,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1997,0,"m",shellscript,content +410,944238,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1998,0,"",shellscript,selection_keyboard +411,944632,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1997,1,"",shellscript,content +412,944946,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1997,0,"m",shellscript,content +413,944946,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1998,0,"",shellscript,selection_keyboard 
+414,945106,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1998,0,"a",shellscript,content +415,945107,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1999,0,"",shellscript,selection_keyboard +416,945181,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1999,0,"s",shellscript,content +417,945182,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2000,0,"",shellscript,selection_keyboard +418,945285,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2000,0,"k",shellscript,content +419,945286,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2001,0,"",shellscript,selection_keyboard +420,945531,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2001,0,"g",shellscript,content +421,945532,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2002,0,"",shellscript,selection_keyboard +422,945588,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2002,0,"i",shellscript,content +423,945589,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2003,0,"",shellscript,selection_keyboard +424,945602,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2003,0,"t",shellscript,content +425,945603,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2004,0,"",shellscript,selection_keyboard +426,948223,"train_dynamics.py",0,0,"",python,tab +427,950446,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +428,952262,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2178,0,"",shellscript,selection_mouse +429,952962,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2476,0,"",shellscript,selection_mouse +430,953754,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2475,1,"\n",shellscript,selection_mouse +431,953765,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2428,48,"ay_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse +432,953778,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2223,253," instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse +433,953798,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1882,594,"wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=320 \\n --init_lr=0 \\n --dyna_type=maskgit \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse +434,953814,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1588,888,"tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607\n\nenv | grep SLURM\n\nsrun python 
train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=320 \\n --init_lr=0 \\n --dyna_type=maskgit \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse +435,953839,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1560,916,"mkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=320 \\n --init_lr=0 \\n --dyna_type=maskgit \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse +436,953856,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1256,1220,"array_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=320 \\n --init_lr=0 \\n --dyna_type=maskgit \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse +437,953880,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1137,1339,"# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# 
tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=320 \\n --init_lr=0 \\n --dyna_type=maskgit \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse +438,953913,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1087,1389,"else\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=320 \\n --init_lr=0 \\n --dyna_type=maskgit \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse +439,953916,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",846,1630,"trap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id 
$SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=320 \\n --init_lr=0 \\n --dyna_type=maskgit \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse +440,954003,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",607,1869," echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=320 \\n --init_lr=0 \\n --dyna_type=maskgit \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse +441,954005,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",170,2306,"#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n 
restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=320 \\n --init_lr=0 \\n --dyna_type=maskgit \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse +442,954006,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",0,2476,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=320 \\n --init_lr=0 \\n 
--dyna_type=maskgit \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse +443,954657,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",844,0,"",shellscript,selection_mouse +444,955285,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",843,0,"",shellscript,selection_command +445,957750,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",878,1,"c",shellscript,selection_command +446,957777,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1161,2,"ca",shellscript,selection_command +447,957778,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1529,3,"cau",shellscript,selection_command +448,958176,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1529,4,"caus",shellscript,selection_command +449,958430,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1529,5,"causa",shellscript,selection_command +450,958431,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1529,6,"causal",shellscript,selection_command +451,962503,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1529,6,"maskgit",shellscript,content +452,962507,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2129,6,"causal",shellscript,selection_command +453,963248,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2129,6,"maskgit",shellscript,content +454,963252,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2180,6,"causal",shellscript,selection_command +455,963605,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",2180,6,"maskgit",shellscript,content +456,963609,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",259,6,"causal",shellscript,selection_command +457,964461,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",259,6,"maskgit",shellscript,content +458,964465,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",385,6,"causal",shellscript,selection_command +459,964732,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",385,6,"maskgit",shellscript,content +460,964735,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",457,6,"causal",shellscript,selection_command +461,965415,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",457,6,"maskgit",shellscript,content +462,983004,"TERMINAL",0,0,"bash",,terminal_focus +463,984272,"TERMINAL",0,0,"bash",,terminal_focus +464,993654,"TERMINAL",0,0,"runner",,terminal_command +465,996673,"TERMINAL",0,0,"sync-runner",,terminal_command +466,996723,"TERMINAL",0,0,"]633;E;2025-08-10 15:14:48 sync-runner;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;Csending incremental file list\r\n",,terminal_output +467,997112,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/\r\nslurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch\r\nslurm/jobs/mihir/horeka/maskgit_big_runs/\r\nslurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch\r\n\r\nsent 31,365 bytes received 193 
bytes 21,038.67 bytes/sec\r\ntotal size is 220,519,197 speedup is 6,987.74\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output
+468,1003888,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",,terminal_command
+469,1003931,"TERMINAL",0,0,"]633;E;2025-08-10 15:14:55 sbatch slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;CSubmitted batch job 3412349\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output
+470,1008378,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",,terminal_command
+471,1008398,"TERMINAL",0,0,"]633;E;2025-08-10 15:15:00 sbatch slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;CSubmitted batch job 3412350\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output
+472,1011769,"TERMINAL",0,0,"queue",,terminal_command
+473,1011845,"TERMINAL",0,0,"]633;E;2025-08-10 15:15:03 queue;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:15:03 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3412350 accelerat train_dy tum_cte0 PD\t0:00\t 8 (Priority)3412349 accelerat train_dy tum_cte0 PD\t0:00\t 8 (None)3404607 accelerat train_to tum_cte0 R 1-21:45:33\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]",,terminal_output
+477,1015819,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output
+478,1017242,"TERMINAL",0,0,"fqueue",,terminal_command
+479,1017307,"TERMINAL",0,0,"]633;E;2025-08-10 15:15:09 fqueue;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C[?1049h(B[?7hEvery 1.0s: squeue -o ""%.10i %.16P %.30j %.8u %.8T %.10M %.9l %.6D %R""hkn1990.localdomain: Sun Aug 10 15:15:09 2025JOBIDPARTITIONNAME USER STATE\t TIME TIME_LIMI NODES NODELIST(REASON)3412350\taccelerated train_dynamics_maskgit_8_node tum_cte0 PENDING\t 0:00 2-00:00:00\t8 (Priority)3412349\taccelerated train_dynamics_causal_8_node tum_cte0 PENDING\t 0:00 2-00:00:00\t8 (None)3404607\tacceleratedtrain_tokenizer_1e-4 tum_cte0 RUNNING 1-21:45:39 2-00:00:00\t8 hkn[0802,0804-0806,0808,0810,0813-0814]",,terminal_output
+491,1028984,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output
+492,1030159,"TERMINAL",0,0,"idling",,terminal_command
+493,1030262,"TERMINAL",0,0,"]633;E;2025-08-10 15:15:22 idling;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1990.localdomain: Sun Aug 10 15:15:22 2025Partition dev_cpuonly: 12 nodes idle\rPartition cpuonly: 161 nodes idle\rPartition dev_accelerated:\t 3 nodes idle\rPartition accelerated: 68 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 6 nodes idle\rPartition large:\t 7 nodes idle",,terminal_output
+496,1033414,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output
+497,1034764,"TERMINAL",0,0,"fqueue",,terminal_command
+498,1034847,"TERMINAL",0,0,"]633;E;2025-08-10 15:15:26 fqueue;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C[?1049h(B[?7hEvery 1.0s: squeue -o ""%.10i %.16P %.30j %.8u %.8T %.10M %.9l %.6D %R""hkn1990.localdomain: Sun Aug 10 15:15:26 2025JOBIDPARTITIONNAME USER STATE\t TIME TIME_LIMI NODES NODELIST(REASON)3404607\tacceleratedtrain_tokenizer_1e-4 tum_cte0 RUNNING 1-21:45:56 2-00:00:00\t8 hkn[0802,0804-0806,0808,0810,0813-0814]3412349\taccelerated train_dynamics_causal_8_node tum_cte0 RUNNING\t 0:01 2-00:00:00\t8 hkn[0405-0407,0410,0413,0415,0417-0418]3412350\taccelerated train_dynamics_maskgit_8_node tum_cte0 RUNNING\t 0:01 2-00:00:00\t8 hkn[0601-0605,0607-0608,0610]",,terminal_output
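The sbatch script captured above survives the 48-hour wall clock by requeueing itself: `#SBATCH --signal=b:usr1@300` asks Slurm to deliver SIGUSR1 to the batch shell 300 seconds before the time limit, and a bash trap turns that signal into `scontrol requeue`. A minimal sketch of the same pattern (the training command is a placeholder, not the recorded one):

    #!/usr/bin/env bash
    #SBATCH --requeue                # allow Slurm to requeue this job
    #SBATCH --signal=b:usr1@300      # deliver SIGUSR1 to the batch shell 300 s before timeout

    requeue_job() {
        echo "[$(date)] caught SIGUSR1, requeueing job $SLURM_JOB_ID"
        scontrol requeue "$SLURM_JOB_ID"
        exit 0
    }
    trap requeue_job SIGUSR1         # bash also accepts the lowercase 'sigusr1' spelling used in the recording

    srun python train.py &           # backgrounded: a foreground srun would block the shell from running the trap
    wait $!                          # 'wait' is interruptible, so the handler fires as soon as the signal arrives

This also explains the `&` / `child_pid=$!` / `wait $child_pid` tail of the recorded script: bash only runs a trap handler when it is not blocked on a foreground command, and `wait` is the one place it can be interrupted promptly.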
+599,1138311,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output
+600,1139758,"TERMINAL",0,0,"queue",,terminal_command
+601,1139838,"TERMINAL",0,0,"]633;E;2025-08-10 15:17:11 queue;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:17:11 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3404607 accelerat train_to tum_cte0 R 1-21:47:41\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]3412349 accelerat train_dy tum_cte0 R\t1:46\t 8 hkn[0405-0407,0410,0413,0415,0417-0418]3412350 accelerat train_dy tum_cte0 R\t1:46\t 8 hkn[0601-0605,0607-0608,0610]",,terminal_output
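Requeueing only helps if the restarted job knows to resume. The recorded script derives that from Slurm's restart counter: `scontrol show job` reports `Restarts=N`, which is 0 on the first launch and increments on every requeue, so the restore flag flips automatically. Isolated from the script:

    restart_count=$(scontrol show job "$SLURM_JOB_ID" | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)
    if [ "$restart_count" -eq 0 ]; then
        restore_ckpt_flag="--no-restore-ckpt"   # first launch: train from scratch
    else
        restore_ckpt_flag="--restore-ckpt"      # requeued run: resume from the saved checkpoint
    fi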
+630,1169753,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab
+647,1185872,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",471,0,"",shellscript,selection_mouse
+648,1185922,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",470,0,"",shellscript,selection_command
+663,1200688,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output
+664,1205647,"TERMINAL",0,0,"cd $ws_dir",,terminal_command
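Before the recording moves into the log directories, note the `cat $0` near the top of the sbatch script. Slurm runs a spooled copy of the submitted script (visible later in the log as `/var/spool/slurmd/job3412349/slurm_script`), and `$0` points at that copy, so `cat $0` prints the exact script text into the job's stdout log. That is why the `.log` file opened further down begins with a verbatim copy of the sbatch script:

    # Log the sbatch script
    cat $0   # $0 resolves to the spooled copy Slurm actually executes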
+665,1205659,"TERMINAL",0,0,"]633;E;2025-08-10 15:18:17 cd $ws_dir;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared]633;D;0",,terminal_output +666,1206326,"TERMINAL",0,0,"ls",,terminal_command +667,1206372,"TERMINAL",0,0,"]633;E;2025-08-10 15:18:18 ls;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;Ccheckpoints count_items.sh data data_new huggingface logs possibly_corrupt_files_in_this_workspace.txt scripts\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared]633;D;0",,terminal_output +668,1208210,"TERMINAL",0,0,"cd logs/",,terminal_command +669,1208251,"TERMINAL",0,0,"]633;E;2025-08-10 15:18:20 cd logs/;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs]633;D;0",,terminal_output +670,1208563,"TERMINAL",0,0,"ls",,terminal_command +671,1208607,"TERMINAL",0,0,"]633;E;2025-08-10 15:18:20 ls;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C3306965 logs_alfred logs_franz logs_mihir\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs]633;D;0",,terminal_output +672,1210802,"TERMINAL",0,0,"cd logs_mihir/",,terminal_command +673,1211042,"TERMINAL",0,0,"ls",,terminal_command +674,1211116,"TERMINAL",0,0,"]633;E;2025-08-10 15:18:23 ls;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C",,terminal_output +675,1211212,"TERMINAL",0,0,"big_run train_lam_action_space_scaling_6_3318549.log\r\nbig-runs train_lam_action_space_scaling_6_3320178.log\r\ncausal train_lam_action_space_scaling_6_3321528.log\r\nmaskgit train_lam_action_space_scaling_6_3329790.log\r\nmaskgit-maskprob-fix train_lam_action_space_scaling_6_3329805.log\r\ntrain_dyn_causal_180M_3372931.log train_lam_action_space_scaling_6_3331287.log\r\ntrain_dyn_causal_180M_3372963.log train_lam_action_space_scaling_8_3318550.log\r\ntrain_dyn_causal_180M_3372969.log train_lam_action_space_scaling_8_3329791.log\r\ntrain_dyn_causal_180M_3373107.log train_lam_action_space_scaling_8_3329806.log\r\ntrain_dyn_causal_255M_3372932.log train_lam_action_space_scaling_8_3331288.log\r\ntrain_dyn_causal_255M_3372970.log train_lam_minecraft_overfit_sample_3309655.log\r\ntrain_dyn_causal_255M_3373108.log train_lam_model_size_scaling_38M_3317098.log\r\ntrain_dyn_causal_356M_3372934.log train_lam_model_size_scaling_38M_3317115.log\r\ntrain_dyn_causal_356M_3372971.log train_lam_model_size_scaling_38M_3317231.log\r\ntrain_dyn_causal_356M_3373109.log train_tokenizer_batch_size_scaling_16_node_3321526.log\r\ntrain_dyn_causal_500M_3372936.log train_tokenizer_batch_size_scaling_1_node_3318551.log\r\ntrain_dyn_causal_500M_3372972.log train_tokenizer_batch_size_scaling_2_node_3318552.log\r\ntrain_dyn_causal_500M_3373110.log train_tokenizer_batch_size_scaling_2_node_3330806.log\r\ntrain_dyn_new_arch-bugfixed-spatial-shift_3359343.log train_tokenizer_batch_size_scaling_2_node_3330848.log\r\ntrain_dyn_new_arch-bugfixed-temporal-shift_3359349.log train_tokenizer_batch_size_scaling_2_node_3331282.log\r\ntrain_dyn_yolorun_3333026.log train_tokenizer_batch_size_scaling_4_node_3318553.log\r\ntrain_dyn_yolorun_3333448.log train_tokenizer_batch_size_scaling_4_node_3320175.log\r\ntrain_dyn_yolorun_3335345.log train_tokenizer_batch_size_scaling_4_node_3321524.log\r\ntrain_dyn_yolorun_3335362.log train_tokenizer_batch_size_scaling_8_node_3320176.log\r\ntrain_dyn_yolorun_3348592.log train_tokenizer_batch_size_scaling_8_node_3321525.log\r\ntrain_dyn_yolorun_new_arch_3351743.log 
train_tokenizer_minecraft_overfit_sample_3309656.log\r\ntrain_dyn_yolorun_new_arch_3352103.log train_tokenizer_model_size_scaling_127M_3317233.log\r\ntrain_dyn_yolorun_new_arch_3352115.log train_tokenizer_model_size_scaling_127M_3318554.log\r\ntrain_dyn_yolorun_new_arch_3358457.log train_tokenizer_model_size_scaling_140M_3313562.log\r\ntrain_lam_action_space_scaling_10_3320179.log train_tokenizer_model_size_scaling_140M_3316019.log\r\ntrain_lam_action_space_scaling_10_3321529.log train_tokenizer_model_size_scaling_200M_3313563.log\r\ntrain_lam_action_space_scaling_10_3329786.log train_tokenizer_model_size_scaling_200M_3316020.log\r\ntrain_lam_action_space_scaling_10_3329801.log train_tokenizer_model_size_scaling_227M_3317234.log\r\ntrain_lam_action_space_scaling_10_3331283.log train_tokenizer_model_size_scaling_227M_3318555.log\r\ntrain_lam_action_space_scaling_12_3318546.log train_tokenizer_model_size_scaling_227M_3320173.log\r\ntrain_lam_action_space_scaling_12_3320177.log train_tokenizer_model_size_scaling_227M_3321523.log\r\ntrain_lam_action_space_scaling_12_3321527.log train_tokenizer_model_size_scaling_37M_3313565.log\r\ntrain_lam_action_space_scaling_12_3329787.log train_tokenizer_model_size_scaling_37M_3316022.log\r\ntrain_lam_action_space_scaling_12_3329802.log train_tokenizer_model_size_scaling_37M_3317232.log\r\ntrain_lam_action_space_scaling_12_3331284.log train_tokenizer_model_size_scaling_37M_3317239.log\r\ntrain_lam_action_space_scaling_20_3318547.log train_tokenizer_model_size_scaling_37M_3318556.log\r\ntrain_lam_action_space_scaling_20_3329788.log train_tokenizer_model_size_scaling_74M_3318557.log\r\ntrain_lam_action_space_scaling_20_3329803.log train_tokenizer_model_size_scaling_74M_3320174.log\r\ntrain_lam_action_space_scaling_20_3331285.log train_tokenizer_model_size_scaling_74M_3321522.log\r\ntrain_lam_action_space_scaling_50_3320180.log train_tokenizer_model_size_scaling_80M_3313564.log\r\ntrain_lam_action_space_scaling_50_3329789.log train_tokenizer_model_size_scaling_80M_3316026.log\r\ntrain_lam_action_space_scaling_50_3329804.log yoloruns\r\ntrain_lam_action_space_scaling_50_3331286.log\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir]633;D;0",,terminal_output +676,1215127,"TERMINAL",0,0,"cd big_run/",,terminal_command +677,1215159,"TERMINAL",0,0,"]633;E;2025-08-10 15:18:27 cd big_run/;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/big_run]633;D;0",,terminal_output +678,1216370,"TERMINAL",0,0,"ls",,terminal_command +679,1216396,"TERMINAL",0,0,"]633;E;2025-08-10 15:18:28 ls;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;Ctokenizer tokenizer-lr-scaling\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/big_run]633;D;0",,terminal_output +680,1217922,"TERMINAL",0,0,"cd ..",,terminal_command +681,1217929,"TERMINAL",0,0,"]633;E;2025-08-10 15:18:30 cd ..;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir]633;D;0",,terminal_output +682,1220376,"TERMINAL",0,0,"cd big-runs/",,terminal_command +683,1220417,"TERMINAL",0,0,"]633;E;2025-08-10 15:18:32 cd big-runs/;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/big-runs]633;D;0",,terminal_output +684,1221039,"TERMINAL",0,0,"ls",,terminal_command 
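The session drives three monitoring helpers, `queue`, `fqueue`, and `idling`, whose definitions are not captured in the recording. Judging from the rendered output (`Every 1.0s: squeue --me`, the custom `squeue -o` format string, and the site utility `sinfo_t_idle` that appears on this cluster), they are plausibly thin `watch` wrappers along these lines (assumed reconstructions, not the user's actual dotfiles):

    queue()  { watch -n 1 'squeue --me'; }
    fqueue() { watch -n 1 'squeue -o "%.10i %.16P %.30j %.8u %.8T %.10M %.9l %.6D %R"'; }
    idling() { watch -n 1 sinfo_t_idle; }   # sinfo_t_idle: site tool listing idle nodes per partition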
+685,1230938,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +686,1236237,"TERMINAL",0,0,"bash",,terminal_focus +687,1238158,"TERMINAL",0,0,"bash",,terminal_focus +688,1241807,"TERMINAL",0,0,"cd ..",,terminal_command +689,1242195,"TERMINAL",0,0,"ls",,terminal_command +690,1242232,"TERMINAL",0,0,"]633;E;2025-08-10 15:18:54 ls;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;Cbig_run train_lam_action_space_scaling_6_3318549.log\r\nbig-runs train_lam_action_space_scaling_6_3320178.log\r\ncausal train_lam_action_space_scaling_6_3321528.log\r\nmaskgit train_lam_action_space_scaling_6_3329790.log\r\nmaskgit-maskprob-fix train_lam_action_space_scaling_6_3329805.log\r\ntrain_dyn_causal_180M_3372931.log train_lam_action_space_scaling_6_3331287.log\r\ntrain_dyn_causal_180M_3372963.log train_lam_action_space_scaling_8_3318550.log\r\ntrain_dyn_causal_180M_3372969.log train_lam_action_space_scaling_8_3329791.log\r\ntrain_dyn_causal_180M_3373107.log train_lam_action_space_scaling_8_3329806.log\r\ntrain_dyn_causal_255M_3372932.log train_lam_action_space_scaling_8_3331288.log\r\ntrain_dyn_causal_255M_3372970.log train_lam_minecraft_overfit_sample_3309655.log\r\ntrain_dyn_causal_255M_3373108.log train_lam_model_size_scaling_38M_3317098.log\r\ntrain_dyn_causal_356M_3372934.log train_lam_model_size_scaling_38M_3317115.log\r\ntrain_dyn_causal_356M_3372971.log train_lam_model_size_scaling_38M_3317231.log\r\ntrain_dyn_causal_356M_3373109.log train_tokenizer_batch_size_scaling_16_node_3321526.log\r\ntrain_dyn_causal_500M_3372936.log train_tokenizer_batch_size_scaling_1_node_3318551.log\r\ntrain_dyn_causal_500M_3372972.log train_tokenizer_batch_size_scaling_2_node_3318552.log\r\ntrain_dyn_causal_500M_3373110.log train_tokenizer_batch_size_scaling_2_node_3330806.log\r\ntrain_dyn_new_arch-bugfixed-spatial-shift_3359343.log train_tokenizer_batch_size_scaling_2_node_3330848.log\r\ntrain_dyn_new_arch-bugfixed-temporal-shift_3359349.log train_tokenizer_batch_size_scaling_2_node_3331282.log\r\ntrain_dyn_yolorun_3333026.log train_tokenizer_batch_size_scaling_4_node_3318553.log\r\ntrain_dyn_yolorun_3333448.log train_tokenizer_batch_size_scaling_4_node_3320175.log\r\ntrain_dyn_yolorun_3335345.log train_tokenizer_batch_size_scaling_4_node_3321524.log\r\ntrain_dyn_yolorun_3335362.log train_tokenizer_batch_size_scaling_8_node_3320176.log\r\ntrain_dyn_yolorun_3348592.log train_tokenizer_batch_size_scaling_8_node_3321525.log\r\ntrain_dyn_yolorun_new_arch_3351743.log train_tokenizer_minecraft_overfit_sample_3309656.log\r\ntrain_dyn_yolorun_new_arch_3352103.log train_tokenizer_model_size_scaling_127M_3317233.log\r\ntrain_dyn_yolorun_new_arch_3352115.log train_tokenizer_model_size_scaling_127M_3318554.log\r\ntrain_dyn_yolorun_new_arch_3358457.log train_tokenizer_model_size_scaling_140M_3313562.log\r\ntrain_lam_action_space_scaling_10_3320179.log train_tokenizer_model_size_scaling_140M_3316019.log\r\ntrain_lam_action_space_scaling_10_3321529.log train_tokenizer_model_size_scaling_200M_3313563.log\r\ntrain_lam_action_space_scaling_10_3329786.log train_tokenizer_model_size_scaling_200M_3316020.log\r\ntrain_lam_action_space_scaling_10_3329801.log train_tokenizer_model_size_scaling_227M_3317234.log\r\ntrain_lam_action_space_scaling_10_3331283.log train_tokenizer_model_size_scaling_227M_3318555.log\r\ntrain_lam_action_space_scaling_12_3318546.log train_tokenizer_model_size_scaling_227M_3320173.log\r\ntrain_lam_action_space_scaling_12_3320177.log 
train_tokenizer_model_size_scaling_227M_3321523.log\r\ntrain_lam_action_space_scaling_12_3321527.log train_tokenizer_model_size_scaling_37M_3313565.log\r\ntrain_lam_action_space_scaling_12_3329787.log train_tokenizer_model_size_scaling_37M_3316022.log\r\ntrain_lam_action_space_scaling_12_3329802.log train_tokenizer_model_size_scaling_37M_3317232.log\r\ntrain_lam_action_space_scaling_12_3331284.log train_tokenizer_model_size_scaling_37M_3317239.log\r\ntrain_lam_action_space_scaling_20_3318547.log train_tokenizer_model_size_scaling_37M_3318556.log\r\ntrain_lam_action_space_scaling_20_3329788.log train_tokenizer_model_size_scaling_74M_3318557.log\r\ntrain_lam_action_space_scaling_20_3329803.log train_tokenizer_model_size_scaling_74M_3320174.log\r\ntrain_lam_action_space_scaling_20_3331285.log train_tokenizer_model_size_scaling_74M_3321522.log\r\ntrain_lam_action_space_scaling_50_3320180.log train_tokenizer_model_size_scaling_80M_3313564.log\r\ntrain_lam_action_space_scaling_50_3329789.log train_tokenizer_model_size_scaling_80M_3316026.log\r\ntrain_lam_action_space_scaling_50_3329804.log yoloruns\r\ntrain_lam_action_space_scaling_50_3331286.log\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir]633;D;0",,terminal_output +691,1244599,"TERMINAL",0,0,"cd causal/",,terminal_command +692,1244800,"TERMINAL",0,0,"ls",,terminal_command +693,1244833,"TERMINAL",0,0,"]633;E;2025-08-10 15:18:56 ls;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;Cdynamics-cotraining\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal]633;D;0",,terminal_output +694,1247506,"TERMINAL",0,0,"cd dynamics-cotraining/",,terminal_command +695,1248033,"TERMINAL",0,0,"ls",,terminal_command +696,1248109,"TERMINAL",0,0,"]633;E;2025-08-10 15:19:00 ls;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C",,terminal_output +697,1248189,"TERMINAL",0,0,"train_dynamics_causal_2_node_3373407.log train_dynamics_causal_2_node_3393065.log train_dynamics_causal_8_node_3393060.log\r\ntrain_dynamics_causal_2_node_3373407.log_bak train_dynamics_causal_8_node_3373408.log train_dynamics_causal_8_node_3393061.log\r\ntrain_dynamics_causal_2_node_3388135.log train_dynamics_causal_8_node_3388140.log train_dynamics_causal_8_node_3393066.log\r\ntrain_dynamics_causal_2_node_3388147.log train_dynamics_causal_8_node_3389928.log train_dynamics_causal_8_node_3412343.log\r\ntrain_dynamics_causal_2_node_3389801.log train_dynamics_causal_8_node_3390458.log train_dynamics_causal_8_node_3412349.log\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining]633;D;0",,terminal_output +698,1252888,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412349.log",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job 
$SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=320 \\n --init_lr=0 \\n --dyna_type=causal \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n/var/spool/slurmd/job3412349/slurm_script: line 42: .venv/bin/activate: No such file or directory\nSLURM_JOB_USER=tum_cte0515\nSLURM_TASKS_PER_NODE=4(x8)\nSLURM_JOB_UID=999226\nSLURM_TASK_PID=1599003\nSLURM_JOB_GPUS=0,1,2,3\nSLURM_LOCALID=0\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs\nSLURMD_NODENAME=hkn0405\nSLURM_JOB_START_TIME=1754831725\nSLURM_CLUSTER_NAME=hk\nSLURM_JOB_END_TIME=1755004525\nSLURM_CPUS_ON_NODE=24\nSLURM_JOB_CPUS_PER_NODE=24(x8)\nSLURM_GPUS_ON_NODE=4\nSLURM_GTIDS=0\nSLURM_JOB_PARTITION=accelerated\nSLURM_TRES_PER_TASK=cpu=5\nSLURM_OOM_KILL_STEP=0\nSLURM_JOB_NUM_NODES=8\nSLURM_JOBID=3412349\nSLURM_JOB_QOS=normal\nSLURM_PROCID=0\nSLURM_CPUS_PER_TASK=5\nSLURM_NTASKS=32\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e12.hkn0405\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\nSLURM_SCRIPT_CONTEXT=prolog_task\nSLURM_NODELIST=hkn[0405-0407,0410,0413,0415,0417-0418]\nSLURM_JOB_ACCOUNT=hk-project-p0023960\nSLURM_PRIO_PROCESS=0\nSLURM_NPROCS=32\nSLURM_NNODES=8\nSLURM_SUBMIT_HOST=hkn1990.localdomain\nSLURM_JOB_ID=3412349\nSLURM_NODEID=0\nSLURM_CONF=/etc/slurm/slurm.conf\nSLURM_JOB_NAME=train_dynamics_causal_8_node\nSLURM_NTASKS_PER_NODE=4\nSLURM_JOB_GID=502226\nSLURM_JOB_NODELIST=hkn[0405-0407,0410,0413,0415,0417-0418]\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nwandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. 
Use `wandb login --relogin` to force relogin\nwandb: creating run\nwandb: Tracking run with wandb version 0.19.11\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/wandb/run-20250810_151635-3412349\nwandb: Run `wandb offline` to turn off syncing.\nwandb: Syncing run dynamics-causal-8-node-3412349\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/3412349\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 
80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not 
found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 
139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 
60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n[the orbax UserWarning above (type_handlers.py:1251, "Sharding info not provided when restoring") is emitted verbatim once per restoring process]\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\nStarting training from step 0...\n[the startup banner above is printed verbatim by each of the 32 processes]\n2025-08-10 15:18:43.612156: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n[this autotuning warning repeats six times between 15:18:43 and 15:18:49]\n2025-08-10 15:18:56.404772: W external/xla/xla/tsl/framework/bfc_allocator.cc:310] Allocator (GPU_1_bfc) ran out of memory trying to allocate 21.27GiB with freed_by_count=0. 
The caller indicates that this is not a failure, but this may mean that there could be performance gains if more memory were available.\n[this 21.27GiB allocator warning repeats for GPU_0_bfc through GPU_3_bfc on each node]\n2025-08-10 15:18:56.446257: W external/xla/xla/tsl/framework/bfc_allocator.cc:310] Allocator (GPU_2_bfc) ran out of memory trying to allocate 21.27GiB with freed_by_count=0. 
The caller indicates that this is not a failure, but this may mean that there could be performance gains if more memory were available.\n2025-08-10 15:18:56.446259: W external/xla/xla/tsl/framework/bfc_allocator.cc:310] Allocator (GPU_3_bfc) ran out of memory trying to allocate 21.27GiB with freed_by_count=0. The caller indicates that this is not a failure, but this may mean that there could be performance gains if more memory were available.\n2025-08-10 15:18:56.446390: W external/xla/xla/tsl/framework/bfc_allocator.cc:310] Allocator (GPU_1_bfc) ran out of memory trying to allocate 21.27GiB with freed_by_count=0. The caller indicates that this is not a failure, but this may mean that there could be performance gains if more memory were available.\n2025-08-10 15:18:56.451359: W external/xla/xla/tsl/framework/bfc_allocator.cc:310] Allocator (GPU_1_bfc) ran out of memory trying to allocate 21.27GiB with freed_by_count=0. The caller indicates that this is not a failure, but this may mean that there could be performance gains if more memory were available.\n2025-08-10 15:18:56.527548: W external/xla/xla/tsl/framework/bfc_allocator.cc:310] Allocator (GPU_0_bfc) ran out of memory trying to allocate 21.27GiB with freed_by_count=0. The caller indicates that this is not a failure, but this may mean that there could be performance gains if more memory were available.\n2025-08-10 15:18:56.531572: W external/xla/xla/tsl/framework/bfc_allocator.cc:310] Allocator (GPU_3_bfc) ran out of memory trying to allocate 21.27GiB with freed_by_count=0. The caller indicates that this is not a failure, but this may mean that there could be performance gains if more memory were available.\n",log,tab +699,1253404,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412349.log",845,0,"",log,selection_mouse +700,1253975,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412349.log",100999,0,"",log,selection_command +701,1255094,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412349.log",90522,0,"",log,selection_mouse +702,1274484,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412349.log",38655,0,"",log,selection_mouse +703,1274599,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412349.log",100999,0,"2025-08-10 15:19:12.571393: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3023] Can't reduce memory use below 20.35GiB (21852262786 bytes) by rematerialization; only reduced to 24.17GiB (25955090176 bytes), down from 26.21GiB (28145849496 bytes) originally\n2025-08-10 15:19:12.666368: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3023] Can't reduce memory use below 20.35GiB (21852262786 bytes) by rematerialization; only reduced to 24.17GiB (25955090176 bytes), down from 26.21GiB (28145849496 bytes) originally\n2025-08-10 15:19:12.845135: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3023] Can't reduce memory use below 20.35GiB (21852262786 bytes) by rematerialization; only reduced to 24.17GiB (25955090176 bytes), down from 26.21GiB (28145849496 bytes) originally\n2025-08-10 15:19:12.846737: W 
external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3023] Can't reduce memory use below 20.35GiB (21852262786 bytes) by rematerialization; only reduced to 24.17GiB (25955090176 bytes), down from 26.21GiB (28145849496 bytes) originally\n[this rematerialization warning repeats 32 times in total (once per process) between 15:19:12.571 and 15:19:13.794]\n2025-08-10 15:19:13.294037: W 
external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3023] Can't reduce memory use below 20.35GiB (21852262786 bytes) by rematerialization; only reduced to 24.17GiB (25955090176 bytes), down from 26.21GiB (28145849496 bytes) originally\n2025-08-10 15:19:13.308999: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3023] Can't reduce memory use below 20.35GiB (21852262786 bytes) by rematerialization; only reduced to 24.17GiB (25955090176 bytes), down from 26.21GiB (28145849496 bytes) originally\n2025-08-10 15:19:13.381452: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3023] Can't reduce memory use below 20.35GiB (21852262786 bytes) by rematerialization; only reduced to 24.17GiB (25955090176 bytes), down from 26.21GiB (28145849496 bytes) originally\n2025-08-10 15:19:13.794643: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3023] Can't reduce memory use below 20.35GiB (21852262786 bytes) by rematerialization; only reduced to 24.17GiB (25955090176 bytes), down from 26.21GiB (28145849496 bytes) originally\n",log,content +704,1279336,"TERMINAL",0,0,"queue",,terminal_command +705,1279386,"TERMINAL",0,0,"]633;E;2025-08-10 15:19:31 queue;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C",,terminal_output +706,1279466,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:19:31 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3404607 accelerat train_to tum_cte0 R 1-21:50:01\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]3412349 accelerat train_dy tum_cte0 R\t4:06\t 8 hkn[0405-0407,0410,0413,0415,0417-0418]3412350 accelerat train_dy tum_cte0 R\t4:06\t 8 hkn[0601-0605,0607-0608,0610]",,terminal_output +707,1280488,"TERMINAL",0,0,"2277",,terminal_output +708,1281318,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining]633;D;0",,terminal_output +709,1283803,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412349.log",0,0,"",log,tab +710,1299577,"TERMINAL",0,0,"cd ..",,terminal_command +711,1300153,"TERMINAL",0,0,"ls",,terminal_command +712,1300206,"TERMINAL",0,0,"]633;E;2025-08-10 15:19:52 ls;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;Cdynamics-cotraining\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal]633;D;0",,terminal_output +713,1301138,"TERMINAL",0,0,"cd ..",,terminal_command +714,1302775,"TERMINAL",0,0,"cd maskgit",,terminal_command +715,1303275,"TERMINAL",0,0,"ks",,terminal_command +716,1303318,"TERMINAL",0,0,"]633;E;2025-08-10 15:19:55 ks;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;Cbash: ks: command not found...\r\n",,terminal_output +717,1304527,"TERMINAL",0,0,"^C\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit]633;D;130",,terminal_output +718,1304910,"TERMINAL",0,0,"ls",,terminal_command +719,1304942,"TERMINAL",0,0,"]633;E;2025-08-10 15:19:56 ls;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;Cdynamics-cotraining\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit]633;D;0",,terminal_output +720,1306837,"TERMINAL",0,0,"cd dynamics-cotraining/",,terminal_command +721,1308123,"TERMINAL",0,0,"ls",,terminal_command 
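A minimal sketch (not part of the recording) of how the repeated orbax "Sharding info not provided when restoring" UserWarning above can be silenced: pass the target sharding explicitly at restore time instead of letting orbax read it back from the sharding file. `ckpt_path` and `abstract_state` (a pytree of jax.ShapeDtypeStruct) are assumed placeholders, not names from this project.

import jax
import orbax.checkpoint as ocp

# Assumed single-axis mesh over all visible devices; the real run shards
# over 32 devices across 8 nodes.
mesh = jax.sharding.Mesh(jax.devices(), ("data",))
sharding = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec("data"))

# One ArrayRestoreArgs per leaf tells orbax the desired sharding up front,
# so it no longer warns about populating sharding info from file.
restore_args = jax.tree_util.tree_map(
    lambda _: ocp.ArrayRestoreArgs(sharding=sharding), abstract_state
)
state = ocp.PyTreeCheckpointer().restore(
    ckpt_path, item=abstract_state, restore_args=restore_args
)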
+722,1310222,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/train_dynamics_maskgit_8_node_3412350.log",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_maskgit_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/maskgit/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=320 \\n --init_lr=0 \\n --dyna_type=maskgit \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-maskgit-8-node-$slurm_job_id \\n --tags dynamics maskgit 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n/var/spool/slurmd/job3412350/slurm_script: line 42: .venv/bin/activate: No such file or 
directory\nSLURM_JOB_USER=tum_cte0515\nSLURM_TASKS_PER_NODE=4(x8)\nSLURM_JOB_UID=999226\nSLURM_TASK_PID=2616551\nSLURM_JOB_GPUS=0,1,2,3\nSLURM_LOCALID=0\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs\nSLURMD_NODENAME=hkn0601\nSLURM_JOB_START_TIME=1754831725\nSLURM_CLUSTER_NAME=hk\nSLURM_JOB_END_TIME=1755004525\nSLURM_CPUS_ON_NODE=24\nSLURM_JOB_CPUS_PER_NODE=24(x8)\nSLURM_GPUS_ON_NODE=4\nSLURM_GTIDS=0\nSLURM_JOB_PARTITION=accelerated\nSLURM_TRES_PER_TASK=cpu=5\nSLURM_OOM_KILL_STEP=0\nSLURM_JOB_NUM_NODES=8\nSLURM_JOBID=3412350\nSLURM_JOB_QOS=normal\nSLURM_PROCID=0\nSLURM_CPUS_PER_TASK=5\nSLURM_NTASKS=32\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0601\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\nSLURM_SCRIPT_CONTEXT=prolog_task\nSLURM_NODELIST=hkn[0601-0605,0607-0608,0610]\nSLURM_JOB_ACCOUNT=hk-project-p0023960\nSLURM_PRIO_PROCESS=0\nSLURM_NPROCS=32\nSLURM_NNODES=8\nSLURM_SUBMIT_HOST=hkn1990.localdomain\nSLURM_JOB_ID=3412350\nSLURM_NODEID=0\nSLURM_CONF=/etc/slurm/slurm.conf\nSLURM_JOB_NAME=train_dynamics_maskgit_8_node\nSLURM_NTASKS_PER_NODE=4\nSLURM_JOB_GID=502226\nSLURM_JOB_NODELIST=hkn[0601-0605,0607-0608,0610]\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\n2025-08-10 15:16:24.821458: W external/xla/xla/service/platform_util.cc:220] unable to create StreamExecutor for CUDA:3: CUDA error: Failed call to cuDeviceGet: CUDA_ERROR_INVALID_DEVICE: invalid device ordinal\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 742, in backends\n backend = _init_backend(platform)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 828, in _init_backend\n backend = registration.factory()\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 528, in factory\n return xla_client.make_c_api_client(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jaxlib/xla_client.py"", line 153, in make_c_api_client\n return _xla.get_c_api_client(plugin_name, options, distributed_client)\njaxlib._jax.XlaRuntimeError: INTERNAL: no supported devices found for platform CUDA\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 157, in \n num_devices = jax.device_count()\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 907, in device_count\n return int(get_backend(backend).device_count())\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 876, in get_backend\n return _get_backend_uncached(platform)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 855, in _get_backend_uncached\n bs = backends()\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", 
line 758, in backends\n raise RuntimeError(err_msg)\nRuntimeError: Unable to initialize backend 'cuda': INTERNAL: no supported devices found for platform CUDA (you may need to uninstall the failing plugin package, or set JAX_PLATFORMS=cpu to skip this backend.)\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 742, in backends\n backend = _init_backend(platform)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 828, in _init_backend\n backend = registration.factory()\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 528, in factory\n return xla_client.make_c_api_client(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jaxlib/xla_client.py"", line 153, in make_c_api_client\n return _xla.get_c_api_client(plugin_name, options, distributed_client)\njaxlib._jax.XlaRuntimeError: INTERNAL: Getting local topologies failed: Error 1: GetKeyValue() timed out with key: cuda:local_topology/cuda/23 and duration: 2m\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 157, in \n num_devices = jax.device_count()\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 907, in device_count\n return int(get_backend(backend).device_count())\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 876, in get_backend\n return _get_backend_uncached(platform)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 855, in _get_backend_uncached\n bs = backends()\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 758, in backends\n raise RuntimeError(err_msg)\nRuntimeError: Unable to initialize backend 'cuda': INTERNAL: Getting local topologies failed: Error 1: GetKeyValue() timed out with key: cuda:local_topology/cuda/23 and duration: 2m (you may need to uninstall the failing plugin package, or set JAX_PLATFORMS=cpu to skip this backend.)\n",log,tab +723,1310730,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/train_dynamics_maskgit_8_node_3412350.log",834,0,"",log,selection_mouse +724,1310736,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/train_dynamics_maskgit_8_node_3412350.log",833,0,"",log,selection_command +725,1311387,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/train_dynamics_maskgit_8_node_3412350.log",8697,0,"",log,selection_command +726,1338534,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +727,1352205,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1964,0,"",shellscript,selection_mouse +728,1353293,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1963,0,"",shellscript,selection_command 
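The edits recorded next shrink --batch_size from 320 to 256 in both sbatch files, following the out-of-memory warnings in the logs above. A quick sketch of the per-device arithmetic, assuming the global batch is split evenly over the 8 nodes x 4 GPUs = 32 devices reported by the run:

devices = 8 * 4  # "Running on 32 devices." in the training log
for global_batch in (320, 256):
    print(f"global {global_batch} -> {global_batch // devices} per device")
# global 320 -> 10 per device
# global 256 -> 8 per device, i.e. a 20% cut in per-step activation memory

The allocator messages themselves suggest TF_GPU_ALLOCATOR=cuda_malloc_async for fragmentation; if one were to try that, both it and JAX's preallocation fraction would have to be set before jax is imported (the 0.9 value is an assumption to tune, not from this recording):

import os
os.environ["TF_GPU_ALLOCATOR"] = "cuda_malloc_async"  # hint printed in the log
os.environ.setdefault("XLA_PYTHON_CLIENT_MEM_FRACTION", "0.9")
import jax  # import deliberately after the env vars are set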
+729,1353558,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1962,0,"",shellscript,selection_command +730,1354525,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1961,0,"",shellscript,selection_command +731,1355332,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1961,4,"",shellscript,content +732,1357649,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1961,0,"2",shellscript,content +733,1357650,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1962,0,"",shellscript,selection_keyboard +734,1358280,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1962,0,"5",shellscript,content +735,1358281,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1963,0,"",shellscript,selection_keyboard +736,1358466,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1963,0,"6",shellscript,content +737,1358467,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1964,0,"",shellscript,selection_keyboard +738,1358739,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1964,0," ",shellscript,content +739,1358740,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",1965,0,"",shellscript,selection_keyboard +740,1361731,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +741,1363851,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1959,0,"",shellscript,selection_mouse +742,1363969,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1957,3,"320",shellscript,selection_mouse +743,1364975,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1957,3,"",shellscript,content +744,1366247,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1957,0,"2",shellscript,content +745,1366248,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1958,0,"",shellscript,selection_keyboard +746,1366655,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1958,0,"5",shellscript,content +747,1366656,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1959,0,"",shellscript,selection_keyboard +748,1366858,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1959,0,"6",shellscript,content +749,1366859,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1960,0,"",shellscript,selection_keyboard +750,1368000,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412349.log",0,0,"",log,tab +751,1368229,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412349.log",110023,0,"2025-08-10 15:19:49.901325: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_1_bfc) ran out of memory trying to allocate 18.85GiB (rounded to 20240685056)requested by op \nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. 
\nCurrent allocation summary follows.\nCurrent allocation summary follows.\n2025-08-10 15:19:49.906178: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] **************************************************__________________________________________________\nE0810 15:19:49.906727 496742 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes. [tf-allocator-allocation-error='']\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 365, in \n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes.\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\nFiltering out episode with length 14, which is shorter than the requested sequence length 16.\nFiltering out episode with length 12, which is shorter than the requested sequence length 16.\nFiltering out episode with length 4, which is shorter than the requested sequence length 16.\n[the 18.85GiB allocator warning and RESOURCE_EXHAUSTED traceback above repeat for GPU_0_bfc through GPU_3_bfc across processes between 15:19:49.901 and 15:19:51.964]\nE0810 15:19:51.964906 2484135 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes. [tf-allocator-allocation-error='']\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 365, in \n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes.\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 365, in \n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes.\nFiltering out episode with length 12, which is shorter than the requested sequence length 16.\n2025-08-10 15:19:52.013626: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_2_bfc) ran out of memory trying to allocate 18.85GiB (rounded to 20240685056)requested by op \nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \nCurrent allocation summary follows.\nCurrent allocation summary follows.\n2025-08-10 15:19:52.018256: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] **************************************************__________________________________________________\nE0810 15:19:52.018897 2209051 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes. [tf-allocator-allocation-error='']\n2025-08-10 15:19:52.020178: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_1_bfc) ran out of memory trying to allocate 18.85GiB (rounded to 20240685056)requested by op \nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \nCurrent allocation summary follows.\nCurrent allocation summary follows.\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 365, in \n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes.\n2025-08-10 15:19:52.025120: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] **************************************************__________________________________________________\nE0810 15:19:52.025662 2484133 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes. [tf-allocator-allocation-error='']\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 365, in \n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes.\n2025-08-10 15:19:52.033793: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_2_bfc) ran out of memory trying to allocate 18.85GiB (rounded to 20240685056)requested by op \nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \nCurrent allocation summary follows.\nCurrent allocation summary follows.\n2025-08-10 15:19:52.038463: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] **************************************************__________________________________________________\nE0810 15:19:52.039104 2484134 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes. [tf-allocator-allocation-error='']\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 365, in \n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes.\n2025-08-10 15:19:52.060007: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_2_bfc) ran out of memory trying to allocate 18.85GiB (rounded to 20240685056)requested by op \nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \nCurrent allocation summary follows.\nCurrent allocation summary follows.\n2025-08-10 15:19:52.065088: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] **************************************************__________________________________________________\nE0810 15:19:52.065729 1599074 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes. [tf-allocator-allocation-error='']\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 365, in \n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes.\n2025-08-10 15:19:52.092369: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_3_bfc) ran out of memory trying to allocate 18.85GiB (rounded to 20240685056)requested by op \nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \nCurrent allocation summary follows.\nCurrent allocation summary follows.\n2025-08-10 15:19:52.097082: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] **************************************************__________________________________________________\nE0810 15:19:52.097737 3566869 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes. [tf-allocator-allocation-error='']\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 365, in \n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes.\n2025-08-10 15:19:52.105967: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_0_bfc) ran out of memory trying to allocate 18.85GiB (rounded to 20240685056)requested by op \nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \nCurrent allocation summary follows.\nCurrent allocation summary follows.\n2025-08-10 15:19:52.110549: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] **************************************************__________________________________________________\nE0810 15:19:52.111155 2175907 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes. [tf-allocator-allocation-error='']\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 365, in \n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes.\nFiltering out episode with length 15, which is shorter than the requested sequence length 16.\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\n2025-08-10 15:19:52.188956: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_0_bfc) ran out of memory trying to allocate 18.85GiB (rounded to 20240685056)requested by op \nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \nCurrent allocation summary follows.\nCurrent allocation summary follows.\n2025-08-10 15:19:52.193583: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] **************************************************__________________________________________________\nE0810 15:19:52.194245 1599072 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20240684848 bytes. [tf-allocator-allocation-error='']\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
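The repeated failure is a single 20240684848-byte (about 18.85 GiB) allocation that no device can satisfy, and the allocator's own hint names TF_GPU_ALLOCATOR=cuda_malloc_async for the fragmentation case. A minimal sketch of the standard JAX/XLA GPU memory knobs (real environment variables, illustrative values; they must be set before jax is imported):

import os

# Grow GPU memory on demand instead of preallocating most of it up front.
os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"
# Or cap the fraction of device memory the XLA client may claim.
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.85"
# The allocator's hint above: an async CUDA allocator can help
# when the failure is due to fragmentation rather than raw size.
os.environ["TF_GPU_ALLOCATOR"] = "cuda_malloc_async"
# Restore JAX-internal frames in tracebacks, as the log itself suggests.
os.environ["JAX_TRACEBACK_FILTERING"] = "off"

import jax  # must come after the environment is configured
print(jax.devices())

If the request is simply too large rather than fragmented, the usual fix is a smaller per-device batch (the sbatch script logged further below runs --batch_size=256 across 8 nodes x 4 GPUs).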
[... further identical RESOURCE_EXHAUSTED traceback blocks from the remaining ranks elided ...]\nFiltering out episode with length 11, which is shorter than the requested sequence length 16.\nFiltering out episode with length 3, which is shorter than the requested sequence length 16.\nFiltering out episode with length 6, which is shorter than the requested sequence length 16.\n[... many more Filtering out episode messages (lengths 1 through 15), interleaved with the same OOM tracebacks, elided ...]
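The interleaved Filtering out episode messages come from the data loader dropping episodes shorter than the requested sequence length of 16. A minimal sketch of such a filter, with hypothetical names (episodes, seq_len) since the loader's actual code is not shown in this log:

from typing import Iterable, Iterator, Sequence

def filter_short_episodes(episodes: Iterable[Sequence],
                          seq_len: int = 16) -> Iterator[Sequence]:
    # Yield only episodes long enough to cut one training sequence from.
    for ep in episodes:
        if len(ep) < seq_len:
            print(f"Filtering out episode with length {len(ep)}, which is "
                  f"shorter than the requested sequence length {seq_len}.")
            continue
        yield ep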
[... final identical RESOURCE_EXHAUSTED traceback elided ...]\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\nFiltering out episode with length 13, which is shorter than the requested sequence length 16.\nwandb: \nwandb: 🚀 View run dynamics-causal-8-node-3412349 at: https://wandb.ai/instant-uv/jafar/runs/3412349\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/wandb/run-20250810_151635-3412349/logs\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\n warnings.warn('resource_tracker: There appear to be %d '
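The resource_tracker warnings at shutdown mean multiprocessing.shared_memory segments were created but never unlinked, which is expected when data-loader workers die mid-flight in an OOM crash. A generic sketch of the create/close/unlink discipline that keeps the tracker quiet on a clean shutdown (names illustrative, not taken from the training code):

from multiprocessing import shared_memory

shm = shared_memory.SharedMemory(create=True, size=1024)  # named segment
try:
    shm.buf[:5] = b"batch"  # producer fills the buffer here
finally:
    shm.close()   # every process that attached must close its handle
    shm.unlink()  # exactly one process must unlink, else the tracker warns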
[... the same resource_tracker UserWarning repeats roughly thirty more times (10-11 leaked shared_memory objects per worker); verbatim duplicates elided ...]\nsrun: error: hkn0418: task 29: Exited with exit code 1\nsrun: error: hkn0413: task 17: Exited with exit code 1\nsrun: error: hkn0415: tasks 21-22: Exited with exit code 1\nsrun: error: hkn0405: task 1: Exited with exit code 1\nsrun: error: hkn0417: tasks 24-25: Exited with exit code 1\nsrun: error: hkn0406: tasks 4-6: Exited with exit code 1\nsrun: error: hkn0413: tasks 16,18: Exited with exit code 1\nsrun: error: hkn0418: tasks 28,30: Exited with exit code 1\nsrun: error: hkn0410: tasks 13-14: Exited with exit code 1\nsrun: error: hkn0407: tasks 8-10: Exited with exit code 1\nsrun: error: hkn0405: tasks 2-3: Exited with exit code 1\nsrun: error: hkn0418: task 31: Exited with exit code 1\nsrun: 
error: hkn0413: task 19: Exited with exit code 1\nsrun: error: hkn0415: task 23: Exited with exit code 1\nsrun: error: hkn0407: task 11: Exited with exit code 1\nsrun: error: hkn0415: task 20: Exited with exit code 1\nsrun: error: hkn0406: task 7: Exited with exit code 1\nsrun: error: hkn0417: task 27: Exited with exit code 1\nsrun: error: hkn0417: task 26: Exited with exit code 1\nsrun: error: hkn0410: tasks 12,15: Exited with exit code 1\nsrun: error: hkn0405: task 0: Exited with exit code 1\n\n============================= JOB FEEDBACK =============================\n\nJob ID: 3412349\nCluster: hk\nUser/Group: tum_cte0515/hk-project-p0023960\nAccount: hk-project-p0023960\nState: FAILED (exit code 1)\nPartition: accelerated\nNodes: 8\nCores per node: 24\nNodelist: hkn[0405-0407,0410,0413,0415,0417-0418]\nCPU Utilized: 01:51:30\nCPU Efficiency: 12.67% of 14:40:00 core-walltime\nJob Wall-clock time: 00:04:35\nStarttime: Sun Aug 10 15:15:25 2025\nEndtime: Sun Aug 10 15:20:00 2025\nMemory Utilized: 252.69 GB (estimated maximum)\nMemory Efficiency: 0.00% of 0.00 MB (0.00 MB/node)\nEnergy Consumed: 1169233 Joule / 324.786944444444 Watthours\nAverage node power draw: 4251.75636363636 Watt\n",log,content +752,1368367,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412349.log",100410,0,"",log,selection_mouse +753,1370238,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412349.log",100689,0,"",log,selection_command +754,1371928,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +755,1372458,"TERMINAL",0,0,"bash",,terminal_focus +756,1374060,"TERMINAL",0,0,"runner",,terminal_command +757,1376880,"TERMINAL",0,0,"sync-runner",,terminal_command +758,1376890,"TERMINAL",0,0,"]633;E;2025-08-10 15:21:08 sync-runner;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;Csending incremental file list\r\n",,terminal_output +759,1377236,"TERMINAL",0,0,"[?25lqu[?25h",,terminal_output +760,1377295,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +761,1377371,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch\r\nslurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch\r\n",,terminal_output +762,1377413,"TERMINAL",0,0,"\r\nsent 31,357 bytes received 187 bytes 21,029.33 bytes/sec\r\ntotal size is 220,519,197 speedup is 6,990.84\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +763,1378545,"TERMINAL",0,0,"queue",,terminal_command +764,1378635,"TERMINAL",0,0,"]633;E;2025-08-10 15:21:10 queue;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:21:10 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3404607 accelerat train_to tum_cte0 R 1-21:51:40\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]3412350 accelerat train_dy tum_cte0 R\t5:45\t 8 hkn[0601-0605,0607-0608,0610]",,terminal_output +765,1379951,"TERMINAL",0,0,"116",,terminal_output +766,1380762,"TERMINAL",0,0,"227",,terminal_output +767,1381402,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +768,1387204,"TERMINAL",0,0,"scancel 3412350",,terminal_command +769,1387233,"TERMINAL",0,0,"]633;E;2025-08-10 15:21:19 scancel 3412350;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output 
+770,1388562,"TERMINAL",0,0,"queue",,terminal_command +771,1388614,"TERMINAL",0,0,"]633;E;2025-08-10 15:21:20 queue;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C",,terminal_output +772,1388702,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:21:20 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3412350 accelerat train_dy tum_cte0 CG\t5:54\t 8 hkn[0601-0605,0607-0608,0610]3404607 accelerat train_to tum_cte0 R 1-21:51:50\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]",,terminal_output +773,1389877,"TERMINAL",0,0,"11",,terminal_output +774,1390515,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +775,1396171,"TERMINAL",0,0,"sbatch quslurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",,terminal_command +776,1396215,"TERMINAL",0,0,"]633;E;2025-08-10 15:21:28 sbatch quslurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;Csbatch: error: Unable to open file quslurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;1",,terminal_output +777,1406426,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",,terminal_command +778,1406468,"TERMINAL",0,0,"]633;E;2025-08-10 15:21:38 sbatch slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;CSubmitted batch job 3412354\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +779,1411680,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",,terminal_command +780,1411721,"TERMINAL",0,0,"]633;E;2025-08-10 15:21:43 sbatch slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;CSubmitted batch job 3412356\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +781,1412999,"TERMINAL",0,0,"queue",,terminal_command +782,1413087,"TERMINAL",0,0,"]633;E;2025-08-10 15:21:45 queue;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:21:45 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3412350 accelerat train_dy tum_cte0 CG\t5:54\t 8 hkn[0601-0605,0607-0608,0610]3412356 accelerat train_dy tum_cte0 PD\t0:00\t 8 (Priority)3412354 accelerat train_dy tum_cte0 PD\t0:00\t 8 (None)3404607 accelerat train_to tum_cte0 R 1-21:52:15\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]",,terminal_output +783,1414126,"TERMINAL",0,0,"66",,terminal_output +784,1415353,"TERMINAL",0,0,"77",,terminal_output +785,1415509,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +786,1647495,"TERMINAL",0,0,"cd $ws_dir",,terminal_command +787,1648046,"TERMINAL",0,0,"ls",,terminal_command +788,1648130,"TERMINAL",0,0,"]633;E;2025-08-10 15:25:40 ls;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;Ccheckpoints count_items.sh data data_new huggingface logs possibly_corrupt_files_in_this_workspace.txt scripts\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared]633;D;0",,terminal_output +789,1650412,"TERMINAL",0,0,"cd logs/",,terminal_command +790,1650931,"TERMINAL",0,0,"ls",,terminal_command +791,1652766,"TERMINAL",0,0,"cd logs_mihir/",,terminal_command +792,1652802,"TERMINAL",0,0,"]633;E;2025-08-10 15:25:44 cd 
logs_mihir/;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir]633;D;0",,terminal_output +793,1653105,"TERMINAL",0,0,"ls",,terminal_command +794,1656187,"TERMINAL",0,0,"cd causal/dynamics-cotraining/",,terminal_command +795,1656498,"TERMINAL",0,0,"ls",,terminal_command +796,1656546,"TERMINAL",0,0,"]633;E;2025-08-10 15:25:48 ls;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;Ctrain_dynamics_causal_2_node_3373407.log train_dynamics_causal_8_node_3373408.log train_dynamics_causal_8_node_3393066.log\r\ntrain_dynamics_causal_2_node_3373407.log_bak train_dynamics_causal_8_node_3388140.log train_dynamics_causal_8_node_3412343.log\r\ntrain_dynamics_causal_2_node_3388135.log train_dynamics_causal_8_node_3389928.log train_dynamics_causal_8_node_3412349.log\r\ntrain_dynamics_causal_2_node_3388147.log train_dynamics_causal_8_node_3390458.log train_dynamics_causal_8_node_3412356.log\r\ntrain_dynamics_causal_2_node_3389801.log train_dynamics_causal_8_node_3393060.log\r\ntrain_dynamics_causal_2_node_3393065.log train_dynamics_causal_8_node_3393061.log\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining]633;D;0",,terminal_output +797,1658970,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412356.log",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=256 \\n --init_lr=0 \\n --dyna_type=causal \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n 
--log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n/var/spool/slurmd/job3412356/slurm_script: line 42: .venv/bin/activate: No such file or directory\nSLURM_JOB_USER=tum_cte0515\nSLURM_TASKS_PER_NODE=4(x8)\nSLURM_JOB_UID=999226\nSLURM_TASK_PID=980352\nSLURM_JOB_GPUS=0,1,2,3\nSLURM_LOCALID=0\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs\nSLURMD_NODENAME=hkn0404\nSLURM_JOB_START_TIME=1754832109\nSLURM_CLUSTER_NAME=hk\nSLURM_JOB_END_TIME=1755004909\nSLURM_CPUS_ON_NODE=24\nSLURM_JOB_CPUS_PER_NODE=24(x8)\nSLURM_GPUS_ON_NODE=4\nSLURM_GTIDS=0\nSLURM_JOB_PARTITION=accelerated\nSLURM_TRES_PER_TASK=cpu=5\nSLURM_OOM_KILL_STEP=0\nSLURM_JOB_NUM_NODES=8\nSLURM_JOBID=3412356\nSLURM_JOB_QOS=normal\nSLURM_PROCID=0\nSLURM_CPUS_PER_TASK=5\nSLURM_NTASKS=32\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e11.hkn0404\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\nSLURM_SCRIPT_CONTEXT=prolog_task\nSLURM_NODELIST=hkn[0404,0521-0527]\nSLURM_JOB_ACCOUNT=hk-project-p0023960\nSLURM_PRIO_PROCESS=0\nSLURM_NPROCS=32\nSLURM_NNODES=8\nSLURM_SUBMIT_HOST=hkn1990.localdomain\nSLURM_JOB_ID=3412356\nSLURM_NODEID=0\nSLURM_CONF=/etc/slurm/slurm.conf\nSLURM_JOB_NAME=train_dynamics_causal_8_node\nSLURM_NTASKS_PER_NODE=4\nSLURM_JOB_GID=502226\nSLURM_JOB_NODELIST=hkn[0404,0521-0527]\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nwandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. 
Use `wandb login --relogin` to force relogin\nwandb: Tracking run with wandb version 0.19.11\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/wandb/run-20250810_152243-3412356\nwandb: Run `wandb offline` to turn off syncing.\nwandb: Syncing run dynamics-causal-8-node-3412356\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/3412356\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\n[previous line repeated once per srun task (32 tasks in total)]
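Job 3412356, whose log is captured above, failed to activate its virtualenv (`source .venv/bin/activate`: "No such file or directory") yet kept running, presumably because the relative path does not exist under the job's working directory and the script does not `set -e`. Independent of that, the sbatch script shows a reusable SLURM pattern: `#SBATCH --signal=b:usr1@300` asks SLURM to deliver SIGUSR1 to the batch shell five minutes before the time limit, a trap requeues the job, and the job's restart counter decides whether to restore a checkpoint. A minimal self-contained sketch of that pattern under the same flags; `train.py` is a placeholder for the real entry point:

```bash
#!/usr/bin/env bash
#SBATCH --requeue
#SBATCH --signal=b:usr1@300   # SIGUSR1 to the batch shell 5 min before timeout

# Requeue this job when SLURM warns that the walltime is nearly exhausted.
requeue_job() {
    echo "[$(date)] caught SIGUSR1 (timeout warning), requeueing job $SLURM_JOB_ID"
    scontrol requeue "$SLURM_JOB_ID"
    exit 0
}
trap requeue_job SIGUSR1

# Restarts=0 on the first run; any requeued run restores from checkpoint.
restart_count=$(scontrol show job "$SLURM_JOB_ID" | grep -o 'Restarts=[0-9]*' | cut -d= -f2)
if [ "${restart_count:-0}" -eq 0 ]; then
    restore_flag="--no-restore-ckpt"
else
    restore_flag="--restore-ckpt"
fi

# Run the workload in the background and wait on it: bash defers trap
# handlers while a foreground child runs, but executes them during wait.
srun python train.py $restore_flag &   # placeholder entry point
wait $!
```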
WARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\n[equivalent WARNING:absl:Missing metrics / ERROR:absl:File ... not found pairs repeat from every rank for tokenizer checkpoint steps 20000, 40000, 60000, 80000, 100000, 120000, 139000, 140000 and 141000]\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not 
found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
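The repeated orbax warning above says the restore call provided no sharding information, so orbax falls back to the sharding file. As a hedged, minimal sketch (not code from this repository; the checkpoint-manager usage and the `model_fn`/`abstract_state` construction are assumptions), one way to avoid the warning is to restore against an abstract pytree that already carries the target shardings:

    # Hedged sketch: restore with explicit sharding info so orbax does not
    # have to read it back from the sharding file.
    import jax
    import orbax.checkpoint as ocp

    def restore_with_sharding(ckpt_dir, step, model_fn):
        # Abstract target: shapes/dtypes without allocating device memory;
        # attach jax.sharding specs to its leaves for the current topology.
        abstract_state = jax.eval_shape(model_fn)
        mngr = ocp.CheckpointManager(ckpt_dir)
        return mngr.restore(step, args=ocp.args.StandardRestore(abstract_state))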
Running on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\nStarting training from step 0...\n[the same startup banner is printed once per worker process, 32 times in total]\n
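The per-component counts in the banner sum exactly to the reported total (271932416 + 35115232 + 33750256 = 340797904). A minimal sketch of how such counts can be derived from parameter pytrees (the `components` mapping is a hypothetical stand-in for the model state in train_dynamics.py):

    import jax

    def param_counts(components):
        # Sum array sizes over each component's parameter pytree.
        counts = {name: sum(int(leaf.size) for leaf in jax.tree_util.tree_leaves(tree))
                  for name, tree in components.items()}
        counts["total"] = sum(counts.values())
        return counts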
2025-08-10 15:24:42.551849: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n[the same autotuning warning repeats five more times between 15:24:43 and 15:24:50]\n2025-08-10 15:25:50.683723: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_1_bfc) ran out of memory trying to allocate 16.04GiB (rounded to 17224249088)requested by op \nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \nCurrent allocation summary follows.\n2025-08-10 15:25:50.688554: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] **************************************************__________________________________________________\nE0810 15:25:50.689309 1675360 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 17224248880 bytes. [tf-allocator-allocation-error='']\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 365, in \n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 17224248880 bytes.\n",log,tab
+798,1659584,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412356.log",844,0,"",log,selection_mouse
+799,1659585,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412356.log",843,0,"",log,selection_command
+800,1660174,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412356.log",92631,0,"",log,selection_command
+801,1692590,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412356.log",92631,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\n2025-08-10 15:25:51.077766: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_1_bfc) ran out of memory trying to allocate 16.04GiB (rounded to 17224249088)requested by op \nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \nCurrent allocation summary follows.\n2025-08-10 15:25:51.083044: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] **************************************************__________________________________________________\nE0810 15:25:51.083756 2894266 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 17224248880 bytes. [tf-allocator-allocation-error='']\n
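The allocator hint above points at fragmentation; whether any of the following helps is workload-dependent. A hedged sketch of the usual JAX/XLA allocator knobs, set before the first JAX import (none of this is taken from the job script itself):

    import os

    # Suggested by the log for fragmentation-related OOMs:
    os.environ["TF_GPU_ALLOCATOR"] = "cuda_malloc_async"
    # Either allocate on demand instead of preallocating the whole pool...
    os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"
    # ...or keep preallocation and cap its fraction (alternative to the above):
    # os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.85"

    import jax  # import only after the environment is configured

If the 16.04 GiB request is simply larger than the card, the per-device batch size or sequence length has to come down instead.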
jax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 365, in \n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 17224248880 bytes.\n[the same allocator warning and traceback repeat for the GPU_0_bfc, GPU_1_bfc and GPU_2_bfc allocators across the remaining worker processes]\nFiltering out episode with length 14, which is shorter than the requested sequence length 16.\n[further episodes with lengths 1, 4, 8, 9, 10, 12, 14 and 15 are filtered out for the same reason]\n2025-08-10 15:25:51.928669: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_2_bfc) ran out of memory trying to allocate 16.04GiB (rounded to 17224249088)requested by op \nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \nCurrent allocation summary follows.\n2025-08-10 15:25:51.933969: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] **************************************************__________________________________________________\nE0810 15:25:51.934606 634192 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 17224248880 bytes. [tf-allocator-allocation-error='']\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 365, in \n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 17224248880 bytes.\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\nFiltering out episode with length 14, which is shorter than the requested sequence length 16.\n2025-08-10 15:25:51.969201: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_2_bfc) ran out of memory trying to allocate 16.04GiB (rounded to 17224249088)requested by op \nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \nCurrent allocation summary follows.\nCurrent allocation summary follows.\n2025-08-10 15:25:51.974909: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] **************************************************__________________________________________________\nE0810 15:25:51.975612 2894267 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 17224248880 bytes. [tf-allocator-allocation-error='']\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 365, in \n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 17224248880 bytes.\nFiltering out episode with length 12, which is shorter than the requested sequence length 16.\n2025-08-10 15:25:52.058653: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_2_bfc) ran out of memory trying to allocate 16.04GiB (rounded to 17224249088)requested by op \nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \nCurrent allocation summary follows.\nCurrent allocation summary follows.\n2025-08-10 15:25:52.064168: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] **************************************************__________________________________________________\nE0810 15:25:52.064814 486007 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 17224248880 bytes. 
[tf-allocator-allocation-error='']\n2025-08-10 15:25:52.069997: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_1_bfc) ran out of memory trying to allocate 16.04GiB (rounded to 17224249088)requested by op \nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \nCurrent allocation summary follows.\nCurrent allocation summary follows.\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 365, in \n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 17224248880 bytes.\n2025-08-10 15:25:52.075172: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] **************************************************__________________________________________________\nE0810 15:25:52.075840 370068 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 17224248880 bytes. [tf-allocator-allocation-error='']\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 365, in \n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 17224248880 bytes.\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\nFiltering out episode with length 12, which is shorter than the requested sequence length 16.\nFiltering out episode with length 6, which is shorter than the requested sequence length 16.\n2025-08-10 15:25:52.164898: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_0_bfc) ran out of memory trying to allocate 16.04GiB (rounded to 17224249088)requested by op \nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \nCurrent allocation summary follows.\nCurrent allocation summary follows.\n2025-08-10 15:25:52.169712: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] **************************************************__________________________________________________\nE0810 15:25:52.170418 634190 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 17224248880 bytes. 
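The failed allocation of 17224248880 bytes is the 16.04 GiB per device reported by the allocator, which itself suggests TF_GPU_ALLOCATOR=cuda_malloc_async for fragmentation. A minimal sketch of the usual mitigations, assuming a stock JAX setup (the values below are illustrative, not taken from this job):

    import os

    # The fragmentation hint printed by the allocator in the log above.
    os.environ["TF_GPU_ALLOCATOR"] = "cuda_malloc_async"
    # Standard XLA client knobs: allocate on demand instead of preallocating,
    # and cap JAX at 80% of each GPU's memory.
    os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = ".80"

    import jax  # must come after the environment variables are set

    # Sanity check against the log: the rejected request is ~16.04 GiB.
    print(f"{17224248880 / 2**30:.2f} GiB")  # -> 16.04 GiB

If none of that helps, the remaining lever is shrinking the per-device workload, e.g. a smaller --batch_size than the 256 used by the maskgit script later in this log.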
[... repetition continues until all replicas have failed ...]\nwandb: \nwandb: 🚀 View run dynamics-causal-8-node-3412356 at: https://wandb.ai/instant-uv/jafar/runs/3412356\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/wandb/run-20250810_152243-3412356/logs\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\n warnings.warn('resource_tracker: There appear to be %d '\n[... the same UserWarning, reporting 10 or 11 leaked shared_memory objects, is emitted once per worker process ...]
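The "Filtering out episode ..." messages above come from the data loader dropping episodes too short to supply a full training window. A hedged sketch of that filter, with illustrative names (episodes, seq_len) rather than jafar's actual API:

    from typing import Iterable, Iterator, Sequence

    def filter_short_episodes(
        episodes: Iterable[Sequence], seq_len: int = 16
    ) -> Iterator[Sequence]:
        """Yield only episodes long enough to cut a seq_len-frame window from."""
        for episode in episodes:
            if len(episode) < seq_len:
                print(
                    f"Filtering out episode with length {len(episode)}, which is "
                    f"shorter than the requested sequence length {seq_len}."
                )
                continue
            yield episode

    # A 10-frame episode is dropped; a 20-frame episode passes through.
    kept = list(filter_short_episodes([[0] * 10, [0] * 20]))
    assert len(kept) == 1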
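The leaked shared_memory objects flagged by resource_tracker mean worker processes exited without releasing their blocks. A minimal sketch of the usual discipline with Python's multiprocessing.shared_memory (a standalone example, not jafar code): every attached process calls close(), and exactly one owner calls unlink().

    from multiprocessing import shared_memory

    # Owner creates a block; workers would attach via SharedMemory(name=...).
    shm = shared_memory.SharedMemory(create=True, size=1024)
    try:
        shm.buf[:5] = b"hello"  # write through the memoryview
    finally:
        shm.close()   # every process that opened the block must close it
        shm.unlink()  # owner only; skipping this is what leaks the segment

Without the unlink(), the interpreter's resource_tracker prints a warning like the ones above at shutdown.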
srun: error: hkn0527: task 29: Exited with exit code 1\nsrun: error: hkn0522: task 10: Exited with exit code 1\nsrun: error: hkn0523: task 13: Exited with exit code 1\nsrun: error: hkn0404: tasks 1-2: Exited with exit code 1\nsrun: error: hkn0524: tasks 17-18: Exited with exit code 1\nsrun: error: hkn0525: tasks 20,22: Exited with exit code 1\nsrun: error: hkn0527: tasks 28,30: Exited with exit code 1\nsrun: error: hkn0522: tasks 8-9: Exited with exit code 1\nsrun: error: hkn0523: tasks 14-15: Exited with exit code 1\nsrun: error: hkn0521: tasks 4-6: Exited with exit code 1\nsrun: error: hkn0526: tasks 24-26: Exited with exit code 1\nsrun: error: hkn0527: task 31: Exited with exit code 1\nsrun: error: hkn0522: task 11: Exited with exit code 1\nsrun: error: hkn0521: task 7: Exited with exit code 1\nsrun: error: hkn0404: task 3: Exited with exit code 1\nsrun: error: hkn0524: task 19: Exited with exit code 1\nsrun: error: hkn0525: tasks 21,23: Exited with exit code 1\nsrun: error: hkn0523: task 12: Exited with exit code 1\nsrun: error: hkn0524: task 16: Exited with exit code 1\nsrun: error: hkn0404: task 0: Exited with exit code 1\nsrun: error: hkn0526: task 27: Exited with exit code 1\n\n============================= JOB FEEDBACK =============================\n\nJob ID: 3412356\nCluster: hk\nUser/Group: 
tum_cte0515/hk-project-p0023960\nAccount: hk-project-p0023960\nState: FAILED (exit code 1)\nPartition: accelerated\nNodes: 8\nCores per node: 24\nNodelist: hkn[0404,0521-0527]\nCPU Utilized: 01:45:59\nCPU Efficiency: 13.30% of 13:16:48 core-walltime\nJob Wall-clock time: 00:04:09\nStarttime: Sun Aug 10 15:21:49 2025\nEndtime: Sun Aug 10 15:25:58 2025\nMemory Utilized: 234.69 GB (estimated maximum)\nMemory Efficiency: 0.00% of 0.00 MB (0.00 MB/node)\nEnergy Consumed: 13898475 Joule / 3860.6875 Watthours\nAverage node power draw: 55817.1686746988 Watt\n",log,content +802,1698221,"TERMINAL",0,0,"cd ..",,terminal_command +803,1698258,"TERMINAL",0,0,"]633;E;2025-08-10 15:26:30 cd ..;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal]633;D;0",,terminal_output +804,1699866,"TERMINAL",0,0,"cd ..",,terminal_command +805,1701300,"TERMINAL",0,0,"cd maskgit",,terminal_command +806,1701334,"TERMINAL",0,0,"]633;E;2025-08-10 15:26:33 cd maskgit;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit]633;D;0",,terminal_output +807,1702285,"TERMINAL",0,0,"ls",,terminal_command +808,1702327,"TERMINAL",0,0,"]633;E;2025-08-10 15:26:34 ls;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;Cdynamics-cotraining\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit]633;D;0",,terminal_output +809,1703449,"TERMINAL",0,0,"cd dynamics-cotraining/",,terminal_command +810,1703677,"TERMINAL",0,0,"ls",,terminal_command +811,1703728,"TERMINAL",0,0,"]633;E;2025-08-10 15:26:35 ls;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;Ctrain_dynamics_maskgit_8_node_3412350.log train_dynamics_maskgit_8_node_3412354.log\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining]633;D;0",,terminal_output +812,1705634,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/train_dynamics_maskgit_8_node_3412354.log",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_maskgit_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource 
.venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/maskgit/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=256 \\n --init_lr=0 \\n --dyna_type=maskgit \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-maskgit-8-node-$slurm_job_id \\n --tags dynamics maskgit 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n/var/spool/slurmd/job3412354/slurm_script: line 42: .venv/bin/activate: No such file or directory\nSLURM_JOB_USER=tum_cte0515\nSLURM_TASKS_PER_NODE=4(x8)\nSLURM_JOB_UID=999226\nSLURM_TASK_PID=1609109\nSLURM_JOB_GPUS=0,1,2,3\nSLURM_LOCALID=0\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs\nSLURMD_NODENAME=hkn0405\nSLURM_JOB_START_TIME=1754832109\nSLURM_CLUSTER_NAME=hk\nSLURM_JOB_END_TIME=1755004909\nSLURM_CPUS_ON_NODE=24\nSLURM_JOB_CPUS_PER_NODE=24(x8)\nSLURM_GPUS_ON_NODE=4\nSLURM_GTIDS=0\nSLURM_JOB_PARTITION=accelerated\nSLURM_TRES_PER_TASK=cpu=5\nSLURM_OOM_KILL_STEP=0\nSLURM_JOB_NUM_NODES=8\nSLURM_JOBID=3412354\nSLURM_JOB_QOS=normal\nSLURM_PROCID=0\nSLURM_CPUS_PER_TASK=5\nSLURM_NTASKS=32\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e12.hkn0405\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\nSLURM_SCRIPT_CONTEXT=prolog_task\nSLURM_NODELIST=hkn[0405-0407,0410,0413,0415,0417-0418]\nSLURM_JOB_ACCOUNT=hk-project-p0023960\nSLURM_PRIO_PROCESS=0\nSLURM_NPROCS=32\nSLURM_NNODES=8\nSLURM_SUBMIT_HOST=hkn1990.localdomain\nSLURM_JOB_ID=3412354\nSLURM_NODEID=0\nSLURM_CONF=/etc/slurm/slurm.conf\nSLURM_JOB_NAME=train_dynamics_maskgit_8_node\nSLURM_NTASKS_PER_NODE=4\nSLURM_JOB_GID=502226\nSLURM_JOB_NODELIST=hkn[0405-0407,0410,0413,0415,0417-0418]\nGpuFreq=control_disabled\n[... line repeated 8 times, once per node ...]\nwandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin
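Two details of this script stand out in its own output: it parses the job's restart count out of scontrol to decide whether to pass --restore-ckpt, and the relative source .venv/bin/activate fails ("No such file or directory"), presumably because it resolves against the job's working directory rather than the project checkout. A hedged Python equivalent of the restart-count branch, using the SLURM_RESTART_COUNT variable that Slurm exports to requeued batch jobs (flag names copied from the script; everything else is an illustrative assumption):

    import os

    # 0 on the first launch; incremented each time the job is requeued.
    restart_count = int(os.environ.get("SLURM_RESTART_COUNT", "0"))
    restore_ckpt_flag = "--restore-ckpt" if restart_count > 0 else "--no-restore-ckpt"
    print(restore_ckpt_flag)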
wandb: Tracking run with wandb version 0.19.11\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/wandb/run-20250810_152240-3412354\nwandb: Run `wandb offline` to turn off syncing.\nwandb: Syncing run dynamics-maskgit-8-node-3412354\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/3412354\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\n[... warning repeated once by each of the 32 tasks ...]
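The dropped-example count is just the remainder of an even split: 89394 examples over 32 shards leaves 18 that cannot be distributed equally, so the loader discards them to keep every shard the same length. A one-line check:

    examples, shards = 89394, 32
    per_shard, dropped = divmod(examples, shards)
    assert (per_shard, dropped) == (2793, 18)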
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 
139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 
40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not 
found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not 
found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for 
step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File 
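The Missing metrics / File not found messages above appear to come from the checkpoint library scanning each saved step of the tokenizer run for a per-step metrics file that was never written; with SLURM_NTASKS=32, every task seems to repeat the scan, which multiplies the output. A diagnostic sketch (not part of the recorded session) to list the affected steps once:

    # Report tokenizer checkpoint steps that are missing their metrics file.
    ckpt=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607
    for step_dir in "$ckpt"/*/; do
        [ -e "${step_dir}metrics/metrics" ] || echo "missing metrics: ${step_dir}"
    done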
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter 
counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter 
counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\n2025-08-10 15:24:41.611234: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-08-10 15:24:41.696464: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-08-10 15:24:41.938504: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-08-10 15:24:41.976109: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-08-10 15:24:41.976157: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-08-10 15:24:45.696880: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n",log,tab +813,1705995,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/train_dynamics_maskgit_8_node_3412354.log",834,0,"",log,selection_mouse +814,1705997,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/train_dynamics_maskgit_8_node_3412354.log",833,0,"",log,selection_command +815,1706554,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/train_dynamics_maskgit_8_node_3412354.log",91068,0,"",log,selection_command +816,1796831,"TERMINAL",0,0,"queue",,terminal_command +817,1796884,"TERMINAL",0,0,"]633;E;2025-08-10 15:28:08 queue;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:28:08 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3404607 accelerat train_to tum_cte0 R 1-21:58:38\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]3412354 accelerat train_dy tum_cte0 R\t6:19\t 8 hkn[0405-0407,0410,0413,0415,0417-0418]",,terminal_output +818,1797927,"TERMINAL",0,0,"9920",,terminal_output +819,1798979,"TERMINAL",0,0,"10412",,terminal_output +820,1799527,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining]633;D;0",,terminal_output +821,1804374,"TERMINAL",0,0,"scancel 3412354",,terminal_command +822,1804411,"TERMINAL",0,0,"]633;E;2025-08-10 15:28:16 scancel 3412354;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining]633;D;0",,terminal_output +823,1805338,"TERMINAL",0,0,"ls",,terminal_command +824,1805381,"TERMINAL",0,0,"]633;E;2025-08-10 15:28:17 ls;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;Ctrain_dynamics_maskgit_8_node_3412350.log train_dynamics_maskgit_8_node_3412354.log\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining]633;D;0",,terminal_output +825,1807699,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/train_dynamics_maskgit_8_node_3412354.log",0,0,"",log,tab +826,1807945,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/train_dynamics_maskgit_8_node_3412354.log",91068,0,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun: got SIGCONT\nslurmstepd: error: *** JOB 3412354 ON hkn0405 CANCELLED AT 2025-08-10T15:28:16 ***\nsrun: forcing job termination\nslurmstepd: error: *** STEP 3412354.0 ON hkn0405 CANCELLED AT 2025-08-10T15:28:16 ***\n",log,content +827,1824208,"TERMINAL",0,0,"queue",,terminal_command +828,1824282,"TERMINAL",0,0,"]633;E;2025-08-10 15:28:36 queue;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:28:36 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3412354 accelerat train_dy tum_cte0 CG\t6:27\t 8 hkn[0405-0407,0410,0413,0415,0417-0418]3404607 accelerat train_to tum_cte0 R 1-21:59:06\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]",,terminal_output +829,1825574,"TERMINAL",0,0,"77",,terminal_output +830,1826531,"TERMINAL",0,0,"88",,terminal_output +831,1827655,"TERMINAL",0,0,"99",,terminal_output 
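The tokenizer log captured above is dominated by one orbax warning: restore was called without target shardings, so orbax falls back to the sharding file written at save time. A minimal sketch of silencing it by restoring against abstract arrays that carry explicit shardings, following the documented StandardCheckpointer pattern; the `state` pytree and checkpoint path below are hypothetical stand-ins for the run's real objects:

```python
# Sketch only: avoids the "Sharding info not provided when restoring" UserWarning
# by giving orbax abstract arrays that already carry shape, dtype and sharding.
import jax
import jax.numpy as jnp
import numpy as np
import orbax.checkpoint as ocp

state = {"w": jnp.zeros((4, 4))}  # stand-in for the real train state
path = ocp.test_utils.erase_and_create_empty("/tmp/ckpt_demo")  # hypothetical dir

ckptr = ocp.StandardCheckpointer()
ckptr.save(path / "1", args=ocp.args.StandardSave(state))
ckptr.wait_until_finished()

# Replicated sharding over a 1-axis mesh; the real run would use its own mesh/spec.
mesh = jax.sharding.Mesh(np.array(jax.devices()), ("data",))
sharding = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec())
abstract = jax.tree_util.tree_map(
    lambda x: jax.ShapeDtypeStruct(x.shape, x.dtype, sharding=sharding), state
)
restored = ckptr.restore(path / "1", args=ocp.args.StandardRestore(abstract))
```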
+832,1828578,"TERMINAL",0,0,"4010",,terminal_output +833,1829602,"TERMINAL",0,0,"11",,terminal_output +834,1830965,"TERMINAL",0,0,"22",,terminal_output +835,1831749,"TERMINAL",0,0,"33",,terminal_output +836,1832669,"TERMINAL",0,0,"44",,terminal_output +837,1834072,"TERMINAL",0,0,"55",,terminal_output +838,1834879,"TERMINAL",0,0,"66",,terminal_output +839,1835883,"TERMINAL",0,0,"77",,terminal_output +840,1837002,"TERMINAL",0,0,"88",,terminal_output +841,1837926,"TERMINAL",0,0,"99",,terminal_output +842,1838936,"TERMINAL",0,0,"5020",,terminal_output +843,1840073,"TERMINAL",0,0,"22",,terminal_output +844,1841063,"TERMINAL",0,0,"33",,terminal_output +845,1842126,"TERMINAL",0,0,"44",,terminal_output +846,1843211,"TERMINAL",0,0,"55",,terminal_output +847,1844169,"TERMINAL",0,0,"66",,terminal_output +848,1845260,"TERMINAL",0,0,"77",,terminal_output +849,1846282,"TERMINAL",0,0,"88",,terminal_output +850,1847345,"TERMINAL",0,0,"99",,terminal_output +851,1848637,"TERMINAL",0,0,"9:0030",,terminal_output +852,1849416,"TERMINAL",0,0,"11",,terminal_output +853,1850455,"TERMINAL",0,0,"\r204607to R 1-21:59:32802,0804-0806,0808,0810,0813-0814",,terminal_output +854,1851503,"TERMINAL",0,0,"33",,terminal_output +855,1852557,"TERMINAL",0,0,"44",,terminal_output +856,1853650,"TERMINAL",0,0,"55",,terminal_output +857,1854648,"TERMINAL",0,0,"66",,terminal_output +858,1856114,"TERMINAL",0,0,"77",,terminal_output +859,1856890,"TERMINAL",0,0,"88",,terminal_output +860,1857792,"TERMINAL",0,0,"99",,terminal_output +861,1858871,"TERMINAL",0,0,"1040",,terminal_output +862,1859877,"TERMINAL",0,0,"11",,terminal_output +863,1860924,"TERMINAL",0,0,"22",,terminal_output +864,1861968,"TERMINAL",0,0,"34",,terminal_output +865,1863013,"TERMINAL",0,0,"55",,terminal_output +866,1864065,"TERMINAL",0,0,"66",,terminal_output +867,1865109,"TERMINAL",0,0,"77",,terminal_output +868,1866245,"TERMINAL",0,0,"88",,terminal_output +869,1867264,"TERMINAL",0,0,"99",,terminal_output +870,1868593,"TERMINAL",0,0,"2050",,terminal_output +871,1869411,"TERMINAL",0,0,"11",,terminal_output +872,1870549,"TERMINAL",0,0,"22",,terminal_output +873,1871662,"TERMINAL",0,0,"33",,terminal_output +874,1872441,"TERMINAL",0,0,"44",,terminal_output +875,1873490,"TERMINAL",0,0,"55",,terminal_output +876,1874593,"TERMINAL",0,0,"66",,terminal_output +877,1875754,"TERMINAL",0,0,"77",,terminal_output +878,1876611,"TERMINAL",0,0,"88",,terminal_output +879,1877698,"TERMINAL",0,0,"99",,terminal_output +880,1878720,"TERMINAL",0,0,"302:00:00",,terminal_output +881,1879888,"TERMINAL",0,0,"11",,terminal_output +882,1880802,"TERMINAL",0,0,"22",,terminal_output +883,1881891,"TERMINAL",0,0,"33",,terminal_output +884,1882920,"TERMINAL",0,0,"44",,terminal_output +885,1883950,"TERMINAL",0,0,"56",,terminal_output +886,1884993,"TERMINAL",0,0,"77",,terminal_output +887,1886092,"TERMINAL",0,0,"88",,terminal_output +888,1887078,"TERMINAL",0,0,"99",,terminal_output +889,1888240,"TERMINAL",0,0,"4010",,terminal_output +890,1889178,"TERMINAL",0,0,"11",,terminal_output +891,1890231,"TERMINAL",0,0,"22",,terminal_output +892,1891422,"TERMINAL",0,0,"33",,terminal_output +893,1892380,"TERMINAL",0,0,"44",,terminal_output +894,1893371,"TERMINAL",0,0,"55",,terminal_output +895,1894489,"TERMINAL",0,0,"66",,terminal_output +896,1895620,"TERMINAL",0,0,"77",,terminal_output +897,1896644,"TERMINAL",0,0,"88",,terminal_output +898,1897559,"TERMINAL",0,0,"99",,terminal_output +899,1898681,"TERMINAL",0,0,"5020",,terminal_output +900,1899806,"TERMINAL",0,0,"11",,terminal_output 
+901,1900708,"TERMINAL",0,0,"22",,terminal_output +902,1902030,"TERMINAL",0,0,"33",,terminal_output +903,1903078,"TERMINAL",0,0,"44",,terminal_output +904,1903899,"TERMINAL",0,0,"55",,terminal_output +905,1904928,"TERMINAL",0,0,"66",,terminal_output +906,1906168,"TERMINAL",0,0,"78",,terminal_output +907,1907033,"TERMINAL",0,0,"99",,terminal_output +908,1908093,"TERMINAL",0,0,"30:0030",,terminal_output +909,1909079,"TERMINAL",0,0,"11",,terminal_output +910,1910164,"TERMINAL",0,0,"22",,terminal_output +911,1911183,"TERMINAL",0,0,"33",,terminal_output +912,1912287,"TERMINAL",0,0,"44",,terminal_output +913,1913621,"TERMINAL",0,0,"55",,terminal_output +914,1914353,"TERMINAL",0,0,"66",,terminal_output +915,1915397,"TERMINAL",0,0,"77",,terminal_output +916,1916689,"TERMINAL",0,0,"88",,terminal_output +917,1917508,"TERMINAL",0,0,"99",,terminal_output +918,1918531,"TERMINAL",0,0,"1040",,terminal_output +919,1919907,"TERMINAL",0,0,"11",,terminal_output +920,1920632,"TERMINAL",0,0,"22",,terminal_output +921,1921806,"TERMINAL",0,0,"33",,terminal_output +922,1923134,"TERMINAL",0,0,"44",,terminal_output +923,1924044,"TERMINAL",0,0,"55",,terminal_output +924,1924877,"TERMINAL",0,0,"66",,terminal_output +925,1925900,"TERMINAL",0,0,"77",,terminal_output +926,1926918,"TERMINAL",0,0,"88",,terminal_output +927,1927967,"TERMINAL",0,0,"950",,terminal_output +928,1929043,"TERMINAL",0,0,"211",,terminal_output +929,1930057,"TERMINAL",0,0,"22",,terminal_output +930,1931117,"TERMINAL",0,0,"33",,terminal_output +931,1932253,"TERMINAL",0,0,"44",,terminal_output +932,1933272,"TERMINAL",0,0,"55",,terminal_output +933,1934293,"TERMINAL",0,0,"66",,terminal_output +934,1935323,"TERMINAL",0,0,"77",,terminal_output +935,1936416,"TERMINAL",0,0,"88",,terminal_output +936,1937393,"TERMINAL",0,0,"99",,terminal_output +937,1938486,"TERMINAL",0,0,"301:00",,terminal_output +938,1939817,"TERMINAL",0,0,"11",,terminal_output +939,1940636,"TERMINAL",0,0,"22",,terminal_output +940,1941966,"TERMINAL",0,0,"33",,terminal_output +941,1942683,"TERMINAL",0,0,"44",,terminal_output +942,1943810,"TERMINAL",0,0,"55",,terminal_output +943,1944730,"TERMINAL",0,0,"66",,terminal_output +944,1945800,"TERMINAL",0,0,"77",,terminal_output +945,1946918,"TERMINAL",0,0,"88",,terminal_output +946,1947860,"TERMINAL",0,0,"99",,terminal_output +947,1949041,"TERMINAL",0,0,"4010",,terminal_output +948,1949954,"TERMINAL",0,0,"12",,terminal_output +949,1951001,"TERMINAL",0,0,"33",,terminal_output +950,1952303,"TERMINAL",0,0,"44",,terminal_output +951,1953099,"TERMINAL",0,0,"55",,terminal_output +952,1954146,"TERMINAL",0,0,"66",,terminal_output +953,1955269,"TERMINAL",0,0,"77",,terminal_output +954,1956293,"TERMINAL",0,0,"88",,terminal_output +955,1957325,"TERMINAL",0,0,"99",,terminal_output +956,1958441,"TERMINAL",0,0,"5020",,terminal_output +957,1959466,"TERMINAL",0,0,"11",,terminal_output +958,1960423,"TERMINAL",0,0,"22",,terminal_output +959,1961517,"TERMINAL",0,0,"33",,terminal_output +960,1962536,"TERMINAL",0,0,"44",,terminal_output +961,1963662,"TERMINAL",0,0,"55",,terminal_output +962,1964616,"TERMINAL",0,0,"66",,terminal_output +963,1965819,"TERMINAL",0,0,"77",,terminal_output +964,1966740,"TERMINAL",0,0,"88",,terminal_output +965,1967858,"TERMINAL",0,0,"99",,terminal_output +966,1968989,"TERMINAL",0,0,"1:0030",,terminal_output +967,1969944,"TERMINAL",0,0,"11",,terminal_output +968,1970944,"TERMINAL",0,0,"22",,terminal_output +969,1971956,"TERMINAL",0,0,"34",,terminal_output +970,1973000,"TERMINAL",0,0,"55",,terminal_output 
+971,1974047,"TERMINAL",0,0,"66",,terminal_output +972,1975328,"TERMINAL",0,0,"77",,terminal_output +973,1976249,"TERMINAL",0,0,"88",,terminal_output +974,1977188,"TERMINAL",0,0,"99",,terminal_output +975,1978233,"TERMINAL",0,0,"1040",,terminal_output +976,1979289,"TERMINAL",0,0,"11",,terminal_output +977,1980650,"TERMINAL",0,0,"22",,terminal_output +978,1981376,"TERMINAL",0,0,"33",,terminal_output +979,1982745,"TERMINAL",0,0,"44",,terminal_output +980,1983720,"TERMINAL",0,0,"55",,terminal_output +981,1984589,"TERMINAL",0,0,"66",,terminal_output +982,1985668,"TERMINAL",0,0,"77",,terminal_output +983,1986686,"TERMINAL",0,0,"88",,terminal_output +984,1987678,"TERMINAL",0,0,"99",,terminal_output +985,1988724,"TERMINAL",0,0,"2050",,terminal_output +986,1989865,"TERMINAL",0,0,"11",,terminal_output +987,1990814,"TERMINAL",0,0,"22",,terminal_output +988,1991870,"TERMINAL",0,0,"33",,terminal_output +989,1992947,"TERMINAL",0,0,"44",,terminal_output +990,1993127,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining]633;D;0",,terminal_output +991,1998847,"TERMINAL",0,0,"dev",,terminal_command +992,2001360,"TERMINAL",0,0,"git branch",,terminal_command +993,2001418,"TERMINAL",0,0,"]633;E;2025-08-10 15:31:33 git branch;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C[?1h=\r add-wandb-name-and-tags\r\n before-nnx\r\n causal-spatiotemporal-kv-cache\r\n causal-st-transformer\r\n causal-transformer-dynamics-model\r\n causal-transformer-nnx-no-kv-cache\r\n convert-to-jax-array-in-iter\r\n correct-batched-sampling\r\n dev\r\n dont-let-tf-see-gpu\r\n feat/explicit-image-dims\r\n fix-action-padding-lam-future-information-access\r\n fix-sampling\r\n fix-transformer-forwardpass\r\n fix/spatiotemporal-pe-once-in-STTransformer\r\n grad-norm-log-and-clip\r\n grain-dataloader\r\n logging-variants\r\n lr-schedules\r\n* main\r\n maskgit-different-maskprob-per-sample\r\n maskgit-sampling-iterative-unmasking-fix\r\n metrics-logging-for-dynamics-model\r\n monkey-patch\r\n new-arch-sampling\r\n preprocess_video\r\n refactor-tmp\r\n revised-dataloader\r\n runner\r\n runner-grain\r\n sample-ali-branch\r\n:",,terminal_output +994,2001747,"TERMINAL",0,0,"\r\r:",,terminal_output +995,2001845,"TERMINAL",0,0,"\r:",,terminal_output +996,2001948,"TERMINAL",0,0,"\rNo next tag (press RETURN)",,terminal_output +997,2002017,"TERMINAL",0,0,"\r:",,terminal_output +998,2002481,"TERMINAL",0,0,"\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +999,2011865,"TERMINAL",0,0,"git checkout -b ""causal-mem-reduce""",,terminal_command +1000,2011912,"TERMINAL",0,0,"]633;E;2025-08-10 15:31:43 git checkout -b ""causal-mem-reduce"";90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C",,terminal_output +1001,2012099,"TERMINAL",0,0,"Switched to a new branch 'causal-mem-reduce'\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1002,2015393,"",0,0,"Switched from branch 'main' to 'causal-mem-reduce'",,git_branch_checkout +1003,2021005,"utils/nn.py",0,0,"import math\nfrom typing import Tuple, Callable, List\n\nfrom flax import nnx\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\nclass PositionalEncoding(nnx.Module):\n """"""https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/JAX/tutorial6/Transformers_and_MHAttention.html""""""\n\n def __init__(self, d_model: int, max_len: int = 5000):\n self.d_model = d_model\n self.max_len = max_len\n\n pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, 
self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n pe = pe.at[:, 0::2].set(jnp.sin(position * div_term))\n pe = pe.at[:, 1::2].set(jnp.cos(position * div_term))\n self.pe = nnx.Variable(pe)\n\n def __call__(self, x: jax.Array) -> jax.Array:\n x = x + self.pe[: x.shape[2]]\n return x\n\n\nclass STBlock(nnx.Module):\n def __init__(\n self,\n dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.dim = dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.spatial_pos_enc = PositionalEncoding(self.dim)\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=False\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.temporal_pos_enc = PositionalEncoding(self.dim)\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n @nnx.remat\n def __call__(self, x_BTNM: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z_BTNM = self.spatial_pos_enc(x_BTNM)\n z_BTNM = self.spatial_norm(z_BTNM)\n z_BTNM = self.spatial_attention(z_BTNM)\n x_BTNM = x_BTNM + z_BTNM\n\n # --- Temporal attention ---\n x_BNTM = x_BTNM.swapaxes(1, 2)\n z_BNTM = self.temporal_pos_enc(x_BNTM)\n z_BNTM = self.temporal_norm(z_BNTM)\n z_BNTM = self.temporal_attention(z_BNTM)\n x_BNTM = x_BNTM + z_BNTM\n x_BTNM = x_BNTM.swapaxes(1, 2)\n\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM\n\n\nclass STTransformer(nnx.Module):\n """"""\n Dimension keys:\n B: batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n O: number of output features\n """"""\n def __init__(\n self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n 
self.out_dim = out_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n self.blocks = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n STBlock(\n dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n )\n\n self.output_dense = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(self, x_BTNI: jax.Array) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n\n for block in self.blocks:\n x_BTNM = block(x_BTNM)\n\n x_BTNO = self.output_dense(x_BTNM)\n return x_BTNO\n\nclass TransformerBlock(nnx.Module):\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.decode = decode\n\n self.temporal_pos_enc = PositionalEncoding(self.model_dim)\n self.spatial_pos_enc = PositionalEncoding(self.model_dim)\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n @nnx.remat\n def __call__(self, x_BTNM: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None) -> jax.Array:\n 
# --- Spatial attention ---\n B, T, N, M = x_BTNM.shape\n x_BTNM = self.spatial_pos_enc(x_BTNM)\n z_FNM = einops.rearrange(x_BTNM, ""b t n m -> (b t) n m"")\n z_FNM = self.spatial_norm(z_FNM)\n z_FNM = self.spatial_attention(z_FNM)\n z_BTNM = einops.rearrange(z_FNM, ""(b t) n m -> b t n m"", t=T)\n x_BTNM = x_BTNM + z_BTNM\n # --- Temporal attention ---\n x_BTNM = self.temporal_pos_enc(x_BTNM)\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> (b n) t m"")\n z_PTM = self.temporal_norm(z_PTM)\n z_PTM = self.temporal_attention(z_PTM)\n z_BTNM = einops.rearrange(z_PTM, ""(b n) t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM\n\nclass Transformer(nnx.Module):\n """"""\n Dimension keys:\n B: batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n O: number of output features\n F: number of frames in batch\n P: number of patch positions in batch\n """"""\n def __init__(\n self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.out_dim = out_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n self.blocks: List[TransformerBlock] = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n TransformerBlock(\n model_dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=decode,\n rngs=rngs,\n )\n )\n self.output_dense = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(self, x_BTNI: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n\n for block in self.blocks:\n x_BTNM = block(x_BTNM, pos_index)\n\n x_BTNV = self.output_dense(x_BTNM)\n return x_BTNV\n\ndef normalize(x: jax.Array) -> jax.Array:\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nnx.Module):\n """"""\n Dimension keys:\n D: B * T * N\n K: number of latents\n L: latent dimension\n """"""\n def __init__(\n self, latent_dim: int, num_latents: int, dropout: float, rngs: nnx.Rngs\n ):\n self.latent_dim = latent_dim\n self.num_latents = num_latents\n self.dropout = dropout\n\n self.codebook = nnx.Param(\n normalize(\n nnx.initializers.lecun_uniform()(\n rngs.params(), 
(self.num_latents, self.latent_dim)\n )\n )\n )\n self.drop = nnx.Dropout(self.dropout, rngs=rngs)\n\n def __call__(\n self, x_DL: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x_DL = normalize(x_DL)\n normalized_codebook_KL = normalize(self.codebook.value)\n distance_DK = -jnp.matmul(x_DL, normalized_codebook_KL.T)\n if training:\n distance_DK = self.drop(distance_DK)\n\n # --- Get indices and embeddings ---\n indices_D = jnp.argmin(distance_DK, axis=-1)\n z_DL = self.codebook[indices_D]\n\n # --- Straight through estimator ---\n z_q_DL = x_DL + jax.lax.stop_gradient(z_DL - x_DL)\n return z_q_DL, z_DL, x_DL, indices_D\n\n def get_codes(self, indices_E: jax.Array) -> jax.Array:\n return self.codebook[indices_E]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool) -> Callable:\n """"""\n Create an attention function that uses flash attention if enabled.\n\n flax.nnx.MultiHeadAttention provides tensors with shape (batch..., length, num_heads, head_dim),\n but jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim). We reshape to\n ensure compatibility. cuDNN's flash attention additionally requires a sequence length that\n is a multiple of 4. We pad the sequence length to the nearest multiple of 4 and mask\n accordingly. Note that cuDNN requires the mask to be broadcast before calling the attention\n function due to strict shape checking.\n """"""\n\n def attention_fn(query_BTHD, key_BSHD, value_BSHD, bias=None, mask_B111=None, **kwargs):\n implementation = ""cudnn"" if use_flash_attention else None\n\n def _merge_batch_dims(x):\n return einops.rearrange(x, ""... l h k -> (...) l h k"")\n\n def _pad(x, pad_size):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n\n original_shape = query_BTHD.shape\n T = query_BTHD.shape[-3]\n S = key_BSHD.shape[-3]\n\n # Pad to nearest multiple of 4\n Q = ((T + 3) // 4) * 4\n pad_size_Q = Q - T\n K = ((S + 3) // 4) * 4\n pad_size_K = K - S\n\n query_BQHD = _pad(_merge_batch_dims(query_BTHD), pad_size_Q)\n key_BKHD = _pad(_merge_batch_dims(key_BSHD), pad_size_K)\n value_BKHD = _pad(_merge_batch_dims(value_BSHD), pad_size_K)\n\n attention_mask = jnp.ones((Q, K), dtype=jnp.bool_)\n attention_mask = attention_mask.at[T:, :].set(False)\n attention_mask = attention_mask.at[:, S:].set(False)\n\n mask_11TS = attention_mask[jnp.newaxis, jnp.newaxis, :, :]\n\n bias_4d = jnp.pad(_merge_batch_dims(bias), ((0, 0), (0, 0), (0, pad_size_Q), (0, pad_size_K))) if bias is not None else None\n\n # NOTE: jax.nn.dot_product_attention does not support dropout\n output_4d = jax.nn.dot_product_attention(\n query=query_BQHD,\n key=key_BKHD,\n value=value_BKHD,\n bias=bias_4d,\n mask=mask_11TS,\n implementation=implementation,\n is_causal=is_causal,\n )\n return output_4d[..., :T, :, :].reshape(original_shape)\n\n return attention_fn\n",python,tab +1004,2039260,"utils/nn.py",4274,0,"",python,selection_mouse +1005,2039262,"utils/nn.py",4273,0,"",python,selection_command +1006,2039451,"utils/nn.py",4273,1,"M",python,selection_mouse +1007,2039454,"utils/nn.py",4274,0,"",python,selection_command +1008,2039465,"utils/nn.py",4252,22,"\n return x_BTNM",python,selection_mouse +1009,2039483,"utils/nn.py",4192,82," = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM",python,selection_mouse +1010,2039498,"utils/nn.py",4068,206," z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = 
self.ffn_dense2(z_BTND)\n        x_BTNM = x_BTNM + z_BTNM\n\n        return x_BTNM",python,selection_mouse 
[rows 1011-1029 omitted: further selection_mouse/selection_command events whose Text fields reproduce ever-larger verbatim spans of the STBlock.__call__ body already captured in full in row 1003]
jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z_BTNM = self.spatial_pos_enc(x_BTNM)\n z_BTNM = self.spatial_norm(z_BTNM)\n z_BTNM = self.spatial_attention(z_BTNM)\n x_BTNM = x_BTNM + z_BTNM\n\n # --- Temporal attention ---\n x_BNTM = x_BTNM.swapaxes(1, 2)\n z_BNTM = self.temporal_pos_enc(x_BNTM)\n z_BNTM = self.temporal_norm(z_BNTM)\n z_BNTM = self.temporal_attention(z_BNTM)\n x_BNTM = x_BNTM + z_BNTM\n x_BTNM = x_BNTM.swapaxes(1, 2)\n\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM",python,selection_mouse +1029,2041530,"utils/nn.py",3535,739," # --- Spatial attention ---\n z_BTNM = self.spatial_pos_enc(x_BTNM)\n z_BTNM = self.spatial_norm(z_BTNM)\n z_BTNM = self.spatial_attention(z_BTNM)\n x_BTNM = x_BTNM + z_BTNM\n\n # --- Temporal attention ---\n x_BNTM = x_BTNM.swapaxes(1, 2)\n z_BNTM = self.temporal_pos_enc(x_BNTM)\n z_BNTM = self.temporal_norm(z_BNTM)\n z_BNTM = self.temporal_attention(z_BNTM)\n x_BNTM = x_BNTM + z_BNTM\n x_BTNM = x_BNTM.swapaxes(1, 2)\n\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM",python,selection_mouse +1030,2056811,"TERMINAL",0,0,"git pull",,terminal_command +1031,2056891,"TERMINAL",0,0,"]633;E;2025-08-10 15:32:28 git pull;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C",,terminal_output +1032,2058672,"TERMINAL",0,0,"remote: Enumerating objects: 11, done.\r\nremote: Counting objects: 9% (1/11)\rremote: Counting objects: 18% (2/11)\rremote: Counting objects: 27% (3/11)\rremote: Counting objects: 36% (4/11)\rremote: Counting objects: 45% (5/11)\rremote: Counting objects: 54% (6/11)\rremote: Counting objects: 63% (7/11)\rremote: Counting objects: 72% (8/11)\rremote: Counting objects: 81% (9/11)\rremote: Counting objects: 90% (10/11)\rremote: Counting objects: 100% (11/11)\rremote: Counting objects: 100% (11/11), done.\r\nremote: Compressing objects: 9% (1/11)\rremote: Compressing objects: 18% (2/11)\rremote: Compressing objects: 27% (3/11)\rremote: Compressing objects: 36% (4/11)\rremote: Compressing objects: 45% (5/11)\rremote: Compressing objects: 54% (6/11)\rremote: Compressing objects: 63% (7/11)\rremote: Compressing objects: 72% (8/11)\rremote: Compressing objects: 81% (9/11)\rremote: Compressing objects: 90% (10/11)\rremote: Compressing objects: 100% (11/11)\rremote: Compressing objects: 100% (11/11), done.\r\n",,terminal_output +1033,2058762,"TERMINAL",0,0,"remote: Total 11 (delta 4), reused 0 (delta 0), pack-reused 0 (from 0)\r\n",,terminal_output +1034,2058909,"TERMINAL",0,0,"Unpacking objects: 9% (1/11)\rUnpacking objects: 18% (2/11)\rUnpacking objects: 27% (3/11)\r",,terminal_output +1035,2059023,"TERMINAL",0,0,"Unpacking objects: 36% (4/11)\rUnpacking objects: 45% (5/11)\rUnpacking objects: 54% (6/11)\r",,terminal_output +1036,2059117,"TERMINAL",0,0,"Unpacking objects: 63% (7/11)\rUnpacking objects: 72% (8/11)\rUnpacking objects: 81% (9/11)\rUnpacking objects: 90% (10/11)\rUnpacking objects: 100% (11/11)\rUnpacking objects: 100% (11/11), 8.47 KiB | 24.00 KiB/s, done.\r\n",,terminal_output +1037,2059852,"TERMINAL",0,0,"From github.com:p-doom/jafar\r\n 8004889..b1558ee main -> origin/main\r\n * [new branch] typo-readme-bibtex -> origin/typo-readme-bibtex\r\nThere is no tracking information for the current branch.\r\nPlease specify which branch 
you want to merge with.\r\nSee git-pull(1) for details.\r\n\r\n git pull \r\n\r\nIf you wish to set tracking information for this branch you can do so with:\r\n\r\n git branch --set-upstream-to=origin/ causal-mem-reduce\r\n\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;1",,terminal_output +1038,2062255,"TERMINAL",0,0,"git checkout main",,terminal_command +1039,2062297,"TERMINAL",0,0,"]633;E;2025-08-10 15:32:34 git checkout main;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;CSwitched to branch 'main'\r\nYour branch is behind 'origin/main' by 19 commits, and can be fast-forwarded.\r\n (use ""git pull"" to update your local branch)\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1040,2063674,"TERMINAL",0,0,"git pull",,terminal_command +1041,2063719,"TERMINAL",0,0,"]633;E;2025-08-10 15:32:35 git pull;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C",,terminal_output +1042,2065316,"TERMINAL",0,0,"Updating 8004889..b1558ee\r\n",,terminal_output +1043,2065395,"",0,0,"Switched from branch 'causal-mem-reduce' to 'main'",,git_branch_checkout +1044,2065412,"TERMINAL",0,0,"Fast-forward\r\n",,terminal_output +1045,2065464,"TERMINAL",0,0," README.md | 62 ++++++++++++++++++++++++++++++++++++++++++++++++++------------\r\n genie.py | 17 +++++++++++------\r\n input_pipeline/download/download_array_records.sh | 2 +-\r\n requirements.txt | 10 ++++++++--\r\n utils/nn.py | 50 ++++++++++++++++++++++++++++++++++----------------\r\n 5 files changed, 104 insertions(+), 37 deletions(-)\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1046,2070230,"utils/nn.py",0,0,"import math\nfrom typing import Tuple, Callable, List\n\nfrom flax import nnx\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\nclass SpatioTemporalPositionalEncoding(nnx.Module):\n """"""\n Applies separate sinusoidal positional encodings to the temporal and spatial dimensions.\n """"""\n def __init__(self, d_model: int, max_len: int = 5000):\n self.d_model = d_model\n self.max_len = max_len\n\n pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n pe = pe.at[:, 0::2].set(jnp.sin(position * div_term))\n pe = pe.at[:, 1::2].set(jnp.cos(position * div_term))\n self.pe = nnx.Variable(pe)\n\n def __call__(self, x: jax.Array) -> jax.Array:\n """"""\n Args:\n x: The input tensor of shape (Batch, Time, Space, Dimension).\n\n Returns:\n The input tensor with positional encodings added.\n """"""\n assert x.ndim == 4, f""Input must be 4-dimensional, but got shape {x.shape}""\n\n num_timesteps = x.shape[1]\n num_spatial_patches = x.shape[2]\n\n # Temporal positional encoding: (1, T, 1, D)\n temporal_pe = self.pe.value[None, :num_timesteps, None, :]\n x = x + temporal_pe\n\n # Spatial positional encoding: (1, 1, S, D)\n spatial_pe = self.pe.value[None, None, :num_spatial_patches, :]\n x = x + spatial_pe\n\n return x\n\n\nclass STBlock(nnx.Module):\n def __init__(\n self,\n dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.dim = dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n 
self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=False\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n @nnx.remat\n def __call__(self, x_BTNM: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z_BTNM = self.spatial_norm(x_BTNM)\n z_BTNM = self.spatial_attention(z_BTNM)\n x_BTNM = x_BTNM + z_BTNM\n\n # --- Temporal attention ---\n x_BNTM = x_BTNM.swapaxes(1, 2)\n z_BNTM = self.temporal_norm(x_BNTM)\n z_BNTM = self.temporal_attention(z_BNTM)\n x_BNTM = x_BNTM + z_BNTM\n x_BTNM = x_BNTM.swapaxes(1, 2)\n\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM\n\n\nclass STTransformer(nnx.Module):\n """"""\n Dimension keys:\n B: batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n O: number of output features\n """"""\n def __init__(\n self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n max_len: int = 5000,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.out_dim = out_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n self.pos_enc = SpatioTemporalPositionalEncoding(self.model_dim, max_len=max_len)\n\n self.blocks = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n STBlock(\n dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n )\n\n self.output_dense = nnx.Linear(\n 
in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(self, x_BTNI: jax.Array) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n x_BTNM = self.pos_enc(x_BTNM)\n for block in self.blocks:\n x_BTNM = block(x_BTNM)\n\n x_BTNO = self.output_dense(x_BTNM)\n return x_BTNO\n\nclass TransformerBlock(nnx.Module):\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.decode = decode\n\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n @nnx.remat\n def __call__(self, x_BTNM: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None) -> jax.Array:\n # --- Spatial attention ---\n B, T, N, M = x_BTNM.shape\n z_FNM = einops.rearrange(x_BTNM, ""b t n m -> (b t) n m"")\n z_FNM = self.spatial_norm(z_FNM)\n z_FNM = self.spatial_attention(z_FNM)\n z_BTNM = einops.rearrange(z_FNM, ""(b t) n m -> b t n m"", t=T)\n x_BTNM = x_BTNM + z_BTNM\n # --- Temporal attention ---\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> (b n) t m"")\n z_PTM = self.temporal_norm(z_PTM)\n z_PTM = self.temporal_attention(z_PTM)\n z_BTNM = einops.rearrange(z_PTM, ""(b n) t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM\n\nclass Transformer(nnx.Module):\n """"""\n Dimension keys:\n B: batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n O: number of output features\n F: number of frames in batch\n P: number of patch positions in batch\n """"""\n def __init__(\n 
self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n max_len: int = 5000,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.out_dim = out_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n self.pos_enc = SpatioTemporalPositionalEncoding(self.model_dim, max_len=max_len)\n\n self.blocks: List[TransformerBlock] = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n TransformerBlock(\n model_dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=decode,\n rngs=rngs,\n )\n )\n self.output_dense = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(self, x_BTNI: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n x_BTNM = self.pos_enc(x_BTNM)\n for block in self.blocks:\n x_BTNM = block(x_BTNM, pos_index)\n\n x_BTNV = self.output_dense(x_BTNM)\n return x_BTNV\n\ndef normalize(x: jax.Array) -> jax.Array:\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nnx.Module):\n """"""\n Dimension keys:\n D: B * T * N\n K: number of latents\n L: latent dimension\n """"""\n def __init__(\n self, latent_dim: int, num_latents: int, dropout: float, rngs: nnx.Rngs\n ):\n self.latent_dim = latent_dim\n self.num_latents = num_latents\n self.dropout = dropout\n\n self.codebook = nnx.Param(\n normalize(\n nnx.initializers.lecun_uniform()(\n rngs.params(), (self.num_latents, self.latent_dim)\n )\n )\n )\n self.drop = nnx.Dropout(self.dropout, rngs=rngs)\n\n def __call__(\n self, x_DL: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x_DL = normalize(x_DL)\n normalized_codebook_KL = normalize(self.codebook.value)\n distance_DK = -jnp.matmul(x_DL, normalized_codebook_KL.T)\n if training:\n distance_DK = self.drop(distance_DK)\n\n # --- Get indices and embeddings ---\n indices_D = jnp.argmin(distance_DK, axis=-1)\n z_DL = self.codebook[indices_D]\n\n # --- Straight through estimator ---\n z_q_DL = x_DL + jax.lax.stop_gradient(z_DL - x_DL)\n return z_q_DL, z_DL, x_DL, indices_D\n\n def get_codes(self, indices_E: jax.Array) -> jax.Array:\n return self.codebook[indices_E]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool) -> Callable:\n """"""\n Create an attention function that uses flash attention if enabled.\n\n flax.nnx.MultiHeadAttention provides tensors with shape (batch..., length, 
num_heads, head_dim),\n but jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim). We reshape to\n ensure compatibility. cuDNN's flash attention additionally requires a sequence length that\n is a multiple of 4. We pad the sequence length to the nearest multiple of 4 and mask\n accordingly. Note that cuDNN requires the mask to be broadcast before calling the attention\n function due to strict shape checking.\n """"""\n\n def attention_fn(query_BTHD, key_BSHD, value_BSHD, bias=None, mask_B111=None, **kwargs):\n implementation = ""cudnn"" if use_flash_attention else None\n\n def _merge_batch_dims(x):\n return einops.rearrange(x, ""... l h k -> (...) l h k"")\n\n def _pad(x, pad_size):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n\n original_shape = query_BTHD.shape\n T = query_BTHD.shape[-3]\n S = key_BSHD.shape[-3]\n\n # Pad to nearest multiple of 4\n Q = ((T + 3) // 4) * 4\n pad_size_Q = Q - T\n K = ((S + 3) // 4) * 4\n pad_size_K = K - S\n\n query_BQHD = _pad(_merge_batch_dims(query_BTHD), pad_size_Q)\n key_BKHD = _pad(_merge_batch_dims(key_BSHD), pad_size_K)\n value_BKHD = _pad(_merge_batch_dims(value_BSHD), pad_size_K)\n\n attention_mask = jnp.ones((Q, K), dtype=jnp.bool_)\n attention_mask = attention_mask.at[T:, :].set(False)\n attention_mask = attention_mask.at[:, S:].set(False)\n\n mask_11TS = attention_mask[jnp.newaxis, jnp.newaxis, :, :]\n\n bias_4d = jnp.pad(_merge_batch_dims(bias), ((0, 0), (0, 0), (0, pad_size_Q), (0, pad_size_K))) if bias is not None else None\n\n # NOTE: jax.nn.dot_product_attention does not support dropout\n output_4d = jax.nn.dot_product_attention(\n query=query_BQHD,\n key=key_BKHD,\n value=value_BKHD,\n bias=bias_4d,\n mask=mask_11TS,\n implementation=implementation,\n is_causal=is_causal,\n )\n return output_4d[..., :T, :, :].reshape(original_shape)\n\n return attention_fn\n",python,tab +1047,2077015,"TERMINAL",0,0,"runner",,terminal_command +1048,2085567,"TERMINAL",0,0,"sync-runner",,terminal_command +1049,2085618,"TERMINAL",0,0,"]633;E;2025-08-10 15:32:57 sync-runner;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;Csending incremental file list\r\n",,terminal_output +1050,2085816,"TERMINAL",0,0,"./\r\nREADME.md\r\ngenie.py\r\nrequirements.txt\r\n",,terminal_output +1051,2086291,"TERMINAL",0,0,"input_pipeline/download/\r\ninput_pipeline/download/download_array_records.sh\r\nutils/\r\nutils/nn.py\r\n",,terminal_output +1052,2086470,"TERMINAL",0,0,"\r\nsent 77,148 bytes received 261 bytes 51,606.00 bytes/sec\r\ntotal size is 220,521,654 speedup is 2,848.79\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +1053,2100570,"TERMINAL",0,0,"sbatch quslurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",,terminal_command +1054,2100577,"TERMINAL",0,0,"]633;E;2025-08-10 15:33:12 sbatch quslurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;Csbatch: error: Unable to open file quslurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;1",,terminal_output +1055,2106782,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",,terminal_command +1056,2106864,"TERMINAL",0,0,"]633;E;2025-08-10 15:33:18 sbatch slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;CSubmitted batch job 3412397\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output 
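The `STBlock.__call__` selected above factorizes attention over a (batch, time, patches, dim) tensor: spatial attention mixes the N patches within each frame, while temporal attention first calls `swapaxes(1, 2)` so that causal attention runs over the T frames at each fixed patch position (the `TransformerBlock` later in the file performs the same folding explicitly with `einops.rearrange`). Below is a minimal sketch of this axis-folding pattern, assuming only `jax`; `factorized_attention_shapes` is an illustrative helper, not part of the repo.

import jax
import jax.numpy as jnp

def factorized_attention_shapes(x_BTNM: jax.Array):
    """Show how the ST block folds axes for spatial vs. temporal attention."""
    B, T, N, M = x_BTNM.shape
    # Spatial attention: (B, T) folds into the batch, the sequence axis is N.
    spatial = x_BTNM.reshape(B * T, N, M)
    # Temporal attention: swap time and patch axes, fold (B, N) into the
    # batch, and run (causal) attention over the T frames.
    temporal = x_BTNM.swapaxes(1, 2).reshape(B * N, T, M)
    return spatial.shape, temporal.shape

print(factorized_attention_shapes(jnp.zeros((2, 4, 16, 8))))
# -> ((8, 16, 8), (32, 4, 8))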
+1057,2107697,"TERMINAL",0,0,"queue",,terminal_command +1058,2107798,"TERMINAL",0,0,"]633;E;2025-08-10 15:33:19 queue;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:33:19 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3412397 accelerat train_dy tum_cte0 PD\t0:00\t 8 (None)3404607 accelerat train_to tum_cte0 R 1-22:03:49\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]",,terminal_output +1059,2108833,"TERMINAL",0,0,"2050",,terminal_output +1060,2109897,"TERMINAL",0,0,"11",,terminal_output +1061,2111129,"TERMINAL",0,0,"22",,terminal_output +1062,2112019,"TERMINAL",0,0,"34",,terminal_output +1063,2113016,"TERMINAL",0,0,"50460to R 1-22:03:55hkn[0802,0804-0806,0808,0810,0813-0814]1239dy 0:00602-0605,0607-0608,0610-0611]",,terminal_output +1064,2114082,"TERMINAL",0,0,"661",,terminal_output +1065,2115326,"TERMINAL",0,0,"772",,terminal_output +1066,2116153,"TERMINAL",0,0,"883",,terminal_output +1067,2117197,"TERMINAL",0,0,"994",,terminal_output +1068,2118293,"TERMINAL",0,0,"304:005",,terminal_output +1069,2119312,"TERMINAL",0,0,"116",,terminal_output +1070,2120441,"TERMINAL",0,0,"227",,terminal_output +1071,2121464,"TERMINAL",0,0,"338",,terminal_output +1072,2122432,"TERMINAL",0,0,"449",,terminal_output +1073,2124129,"TERMINAL",0,0,"5510",,terminal_output +1074,2124590,"TERMINAL",0,0,"661",,terminal_output +1075,2125864,"TERMINAL",0,0,"772",,terminal_output +1076,2126637,"TERMINAL",0,0,"883",,terminal_output +1077,2127701,"TERMINAL",0,0,"994",,terminal_output +1078,2128833,"TERMINAL",0,0,"40105",,terminal_output +1079,2129857,"TERMINAL",0,0,"116",,terminal_output +1080,2130875,"TERMINAL",0,0,"227",,terminal_output +1081,2131954,"TERMINAL",0,0,"338",,terminal_output +1082,2132921,"TERMINAL",0,0,"449",,terminal_output +1083,2134087,"TERMINAL",0,0,"5621",,terminal_output +1084,2135276,"TERMINAL",0,0,"772",,terminal_output +1085,2136411,"TERMINAL",0,0,"883",,terminal_output +1086,2137219,"TERMINAL",0,0,"994",,terminal_output +1087,2138150,"TERMINAL",0,0,"50205",,terminal_output +1088,2139217,"TERMINAL",0,0,"116",,terminal_output +1089,2140275,"TERMINAL",0,0,"227",,terminal_output +1090,2141516,"TERMINAL",0,0,"338",,terminal_output +1091,2142363,"TERMINAL",0,0,"449",,terminal_output +1092,2143401,"TERMINAL",0,0,"5530",,terminal_output +1093,2144592,"TERMINAL",0,0,"661",,terminal_output +1094,2145497,"TERMINAL",0,0,"772",,terminal_output +1095,2146543,"TERMINAL",0,0,"883",,terminal_output +1096,2147962,"TERMINAL",0,0,"994",,terminal_output +1097,2148782,"TERMINAL",0,0,"4:00305",,terminal_output +1098,2149807,"TERMINAL",0,0,"116",,terminal_output +1099,2150806,"TERMINAL",0,0,"227",,terminal_output +1100,2151783,"TERMINAL",0,0,"338",,terminal_output +1101,2152888,"TERMINAL",0,0,"449",,terminal_output +1102,2154013,"TERMINAL",0,0,"5540",,terminal_output +1103,2154930,"TERMINAL",0,0,"661",,terminal_output +1104,2156309,"TERMINAL",0,0,"783",,terminal_output +1105,2157084,"TERMINAL",0,0,"994",,terminal_output +1106,2158069,"TERMINAL",0,0,"10405",,terminal_output +1107,2159169,"TERMINAL",0,0,"116",,terminal_output +1108,2160160,"TERMINAL",0,0,"227",,terminal_output +1109,2161211,"TERMINAL",0,0,"338",,terminal_output +1110,2162256,"TERMINAL",0,0,"449",,terminal_output +1111,2163313,"TERMINAL",0,0,"5550",,terminal_output +1112,2164439,"TERMINAL",0,0,"661",,terminal_output +1113,2165391,"TERMINAL",0,0,"772",,terminal_output +1114,2166486,"TERMINAL",0,0,"883",,terminal_output +1115,2167487,"TERMINAL",0,0,"994",,terminal_output 
+1116,2168595,"TERMINAL",0,0,"20505",,terminal_output +1117,2169580,"TERMINAL",0,0,"116",,terminal_output +1118,2170629,"TERMINAL",0,0,"227",,terminal_output +1119,2171710,"TERMINAL",0,0,"338",,terminal_output +1120,2172729,"TERMINAL",0,0,"449",,terminal_output +1121,2173870,"TERMINAL",0,0,"551:00",,terminal_output +1122,2174823,"TERMINAL",0,0,"661",,terminal_output +1123,2176008,"TERMINAL",0,0,"772",,terminal_output +1124,2176924,"TERMINAL",0,0,"883",,terminal_output +1125,2178363,"TERMINAL",0,0,"95:005",,terminal_output +1126,2179040,"TERMINAL",0,0,"3116",,terminal_output +1127,2180046,"TERMINAL",0,0,"227",,terminal_output +1128,2181095,"TERMINAL",0,0,"338",,terminal_output +1129,2182141,"TERMINAL",0,0,"449",,terminal_output +1130,2183274,"TERMINAL",0,0,"5510",,terminal_output +1131,2184236,"TERMINAL",0,0,"661",,terminal_output +1132,2185281,"TERMINAL",0,0,"772",,terminal_output +1133,2186449,"TERMINAL",0,0,"883",,terminal_output +1134,2187387,"TERMINAL",0,0,"994",,terminal_output +1135,2188432,"TERMINAL",0,0,"40105",,terminal_output +1136,2189510,"TERMINAL",0,0,"116",,terminal_output +1137,2190636,"TERMINAL",0,0,"227",,terminal_output +1138,2191660,"TERMINAL",0,0,"338",,terminal_output +1139,2192685,"TERMINAL",0,0,"449",,terminal_output +1140,2193707,"TERMINAL",0,0,"5520",,terminal_output +1141,2194711,"TERMINAL",0,0,"661",,terminal_output +1142,2195958,"TERMINAL",0,0,"772",,terminal_output +1143,2196884,"TERMINAL",0,0,"883",,terminal_output +1144,2197902,"TERMINAL",0,0,"994",,terminal_output +1145,2198878,"TERMINAL",0,0,"50205",,terminal_output +1146,2199964,"TERMINAL",0,0,"116",,terminal_output +1147,2200978,"TERMINAL",0,0,"238",,terminal_output +1148,2202047,"TERMINAL",0,0,"449",,terminal_output +1149,2203121,"TERMINAL",0,0,"5530",,terminal_output +1150,2204089,"TERMINAL",0,0,"661",,terminal_output +1151,2205139,"TERMINAL",0,0,"772",,terminal_output +1152,2206172,"TERMINAL",0,0,"883",,terminal_output +1153,2207221,"TERMINAL",0,0,"994",,terminal_output +1154,2208345,"TERMINAL",0,0,"5:00305",,terminal_output +1155,2209289,"TERMINAL",0,0,"116",,terminal_output +1156,2210334,"TERMINAL",0,0,"227",,terminal_output +1157,2211363,"TERMINAL",0,0,"338",,terminal_output +1158,2212440,"TERMINAL",0,0,"449",,terminal_output +1159,2213466,"TERMINAL",0,0,"5540",,terminal_output +1160,2214497,"TERMINAL",0,0,"661",,terminal_output +1161,2215607,"TERMINAL",0,0,"772",,terminal_output +1162,2216566,"TERMINAL",0,0,"883",,terminal_output +1163,2217652,"TERMINAL",0,0,"994",,terminal_output +1164,2218676,"TERMINAL",0,0,"10405",,terminal_output +1165,2219802,"TERMINAL",0,0,"116",,terminal_output +1166,2220824,"TERMINAL",0,0,"227",,terminal_output +1167,2221859,"TERMINAL",0,0,"338",,terminal_output +1168,2222869,"TERMINAL",0,0,"449",,terminal_output +1169,2223920,"TERMINAL",0,0,"5550",,terminal_output +1170,2224918,"TERMINAL",0,0,"661",,terminal_output +1171,2225950,"TERMINAL",0,0,"783",,terminal_output +1172,2227050,"TERMINAL",0,0,"994",,terminal_output +1173,2228091,"TERMINAL",0,0,"20505",,terminal_output +1174,2229085,"TERMINAL",0,0,"116",,terminal_output +1175,2230117,"TERMINAL",0,0,"227",,terminal_output +1176,2231162,"TERMINAL",0,0,"338",,terminal_output +1177,2232200,"TERMINAL",0,0,"449",,terminal_output +1178,2233248,"TERMINAL",0,0,"552:00",,terminal_output +1179,2234338,"TERMINAL",0,0,"661",,terminal_output +1180,2235363,"TERMINAL",0,0,"772",,terminal_output +1181,2236386,"TERMINAL",0,0,"883",,terminal_output +1182,2237449,"TERMINAL",0,0,"994",,terminal_output 
+1183,2238480,"TERMINAL",0,0,"306:005",,terminal_output +1184,2239522,"TERMINAL",0,0,"116",,terminal_output +1185,2240694,"TERMINAL",0,0,"227",,terminal_output +1186,2241615,"TERMINAL",0,0,"338",,terminal_output +1187,2242735,"TERMINAL",0,0,"449",,terminal_output +1188,2243864,"TERMINAL",0,0,"5510",,terminal_output +1189,2244773,"TERMINAL",0,0,"661",,terminal_output +1190,2245800,"TERMINAL",0,0,"772",,terminal_output +1191,2247030,"TERMINAL",0,0,"883",,terminal_output +1192,2247901,"TERMINAL",0,0,"994",,terminal_output +1193,2248944,"TERMINAL",0,0,"40116",,terminal_output +1194,2250296,"TERMINAL",0,0,"227",,terminal_output +1195,2251118,"TERMINAL",0,0,"338",,terminal_output +1196,2252128,"TERMINAL",0,0,"449",,terminal_output +1197,2253165,"TERMINAL",0,0,"5520",,terminal_output +1198,2254241,"TERMINAL",0,0,"661",,terminal_output +1199,2255414,"TERMINAL",0,0,"772",,terminal_output +1200,2255906,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/train_dynamics_maskgit_8_node_3412354.log",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_maskgit_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/maskgit/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=256 \\n --init_lr=0 \\n --dyna_type=maskgit \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-maskgit-8-node-$slurm_job_id \\n --tags dynamics maskgit 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait 
$child_pid\n/var/spool/slurmd/job3412354/slurm_script: line 42: .venv/bin/activate: No such file or directory\nSLURM_JOB_USER=tum_cte0515\nSLURM_TASKS_PER_NODE=4(x8)\nSLURM_JOB_UID=999226\nSLURM_TASK_PID=1609109\nSLURM_JOB_GPUS=0,1,2,3\nSLURM_LOCALID=0\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs\nSLURMD_NODENAME=hkn0405\nSLURM_JOB_START_TIME=1754832109\nSLURM_CLUSTER_NAME=hk\nSLURM_JOB_END_TIME=1755004909\nSLURM_CPUS_ON_NODE=24\nSLURM_JOB_CPUS_PER_NODE=24(x8)\nSLURM_GPUS_ON_NODE=4\nSLURM_GTIDS=0\nSLURM_JOB_PARTITION=accelerated\nSLURM_TRES_PER_TASK=cpu=5\nSLURM_OOM_KILL_STEP=0\nSLURM_JOB_NUM_NODES=8\nSLURM_JOBID=3412354\nSLURM_JOB_QOS=normal\nSLURM_PROCID=0\nSLURM_CPUS_PER_TASK=5\nSLURM_NTASKS=32\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e12.hkn0405\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\nSLURM_SCRIPT_CONTEXT=prolog_task\nSLURM_NODELIST=hkn[0405-0407,0410,0413,0415,0417-0418]\nSLURM_JOB_ACCOUNT=hk-project-p0023960\nSLURM_PRIO_PROCESS=0\nSLURM_NPROCS=32\nSLURM_NNODES=8\nSLURM_SUBMIT_HOST=hkn1990.localdomain\nSLURM_JOB_ID=3412354\nSLURM_NODEID=0\nSLURM_CONF=/etc/slurm/slurm.conf\nSLURM_JOB_NAME=train_dynamics_maskgit_8_node\nSLURM_NTASKS_PER_NODE=4\nSLURM_JOB_GID=502226\nSLURM_JOB_NODELIST=hkn[0405-0407,0410,0413,0415,0417-0418]\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nwandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\nwandb: Tracking run with wandb version 0.19.11\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/wandb/run-20250810_152240-3412354\nwandb: Run `wandb offline` to turn off syncing.\nwandb: Syncing run dynamics-maskgit-8-node-3412354\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/3412354\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples 
(shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not 
found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing 
metrics for step 80000\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not 
found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 
140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 
20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 
20000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 139000\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 139000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/139000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. 
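The flood above reduces to one fact: every retained checkpoint step under .../train_tokenizer_1e-4/3404607 lacks its metrics/metrics file, so the checkpoint library logs a WARNING/ERROR pair per step in every process. A minimal diagnostic sketch for confirming that from a login node (hypothetical helper using only pathlib; the base path and directory layout are taken from the log):

    from pathlib import Path

    BASE = Path("/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared"
                "/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607")

    def steps_missing_metrics(base: Path = BASE) -> list[int]:
        # Checkpoint steps are zero-padded directories (e.g. 020000); the
        # library expects a metrics file at <step>/metrics/metrics.
        return sorted(
            int(p.name)
            for p in base.iterdir()
            if p.is_dir() and p.name.isdigit()
            and not (p / "metrics" / "metrics").is_file()
        )

    # Expected here: [20000, 40000, 60000, 80000, 100000, 120000, 139000, 140000, 141000]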
[The following UserWarning from orbax is emitted many times during checkpoint restore; one instance is kept.]
/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(
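The warning means no per-array sharding was passed to the restore call, so orbax falls back to the sharding file saved with the checkpoint, which is only safe on the same device topology. A minimal sketch of silencing it by passing explicit restore args (hypothetical path, tree, and shapes; exact keyword names vary across orbax-checkpoint versions, so treat this as an illustration rather than the project's actual restore code):

    import jax
    import numpy as np
    import orbax.checkpoint as ocp
    from jax.sharding import Mesh, NamedSharding, PartitionSpec as P

    # Hypothetical checkpoint path and abstract tree; shapes/dtypes illustrative.
    ckpt_path = "/path/to/checkpoints/run/140000/default"
    abstract = {"w": jax.ShapeDtypeStruct((1024, 1024), np.float32)}

    # A sharding built for the *current* topology (fully replicated here).
    mesh = Mesh(np.array(jax.devices()), axis_names=("data",))
    replicated = NamedSharding(mesh, P())

    # Per-leaf restore args with explicit sharding, so orbax need not read the
    # sharding file saved with the checkpoint (the source of the warning above).
    restore_args = jax.tree_util.tree_map(
        lambda a: ocp.ArrayRestoreArgs(
            sharding=replicated, global_shape=a.shape, dtype=a.dtype
        ),
        abstract,
    )
    state = ocp.PyTreeCheckpointer().restore(
        ckpt_path, item=abstract, restore_args=restore_args
    )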
'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 
'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271933440, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340798928}\nStarting training from step 0...\n2025-08-10 15:24:41.611234: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-08-10 15:24:41.696464: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-08-10 15:24:41.938504: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-08-10 15:24:41.976109: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-08-10 15:24:41.976157: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-08-10 15:24:45.696880: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\nsrun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun: got SIGCONT\nslurmstepd: error: *** JOB 3412354 ON hkn0405 CANCELLED AT 2025-08-10T15:28:16 ***\nsrun: forcing job termination\nslurmstepd: error: *** STEP 3412354.0 ON hkn0405 CANCELLED AT 2025-08-10T15:28:16 ***\n",log,tab +1201,2256173,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/train_dynamics_maskgit_8_node_3412354.log",91358,0,"\n============================= JOB FEEDBACK =============================\n\nJob ID: 3412354\nCluster: hk\nUser/Group: tum_cte0515/hk-project-p0023960\nAccount: hk-project-p0023960\nState: CANCELLED (exit code 0)\nPartition: accelerated\nNodes: 8\nCores per node: 24\nNodelist: hkn[0405-0407,0410,0413,0415,0417-0418]\nCPU Utilized: 02:26:46\nCPU Efficiency: 11.85% of 20:38:24 core-walltime\nJob Wall-clock time: 00:06:27\nStarttime: Sun Aug 10 15:21:49 2025\nEndtime: Sun Aug 10 15:28:16 2025\nMemory Utilized: 233.83 GB (estimated maximum)\nMemory Efficiency: 0.00% of 0.00 MB (0.00 MB/node)\nEnergy Consumed: 2572902 Joule / 714.695 Watthours\nAverage node power draw: 6648.32558139535 Watt\n",log,content +1209,2262896,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +1210,2266622,"TERMINAL",0,0,"queue",,terminal_command +1211,2266719,"TERMINAL",0,0,"]633;E;2025-08-10 15:35:58 queue;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:35:58 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3404607 accelerat train_to tum_cte0 R 1-22:06:28\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]3412397 accelerat train_dy tum_cte0 R\t2:33\t 8 hkn[0602-0605,0607-0608,0610-0611]",,terminal_output +1212,2267671,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +1213,2271126,"TERMINAL",0,0,"cd $ws_dir",,terminal_command +1214,2271638,"TERMINAL",0,0,"ls",,terminal_command +1215,2271667,"TERMINAL",0,0,"]633;E;2025-08-10 15:36:03 ls;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;Ccheckpoints count_items.sh data data_new huggingface logs possibly_corrupt_files_in_this_workspace.txt scripts\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared]633;D;0",,terminal_output +1216,2278527,"TERMINAL",0,0,"cd logs/logs_mihir/causal/",,terminal_command +1217,2278855,"TERMINAL",0,0,"ls",,terminal_command +1218,2278896,"TERMINAL",0,0,"]633;E;2025-08-10 15:36:10 ls;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;Cdynamics-cotraining\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal]633;D;0",,terminal_output +1219,2281160,"TERMINAL",0,0,"cd dynamics-cotraining/",,terminal_command +1220,2281488,"TERMINAL",0,0,"ls",,terminal_command +1221,2281526,"TERMINAL",0,0,"]633;E;2025-08-10 15:36:13 ls;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;Ctrain_dynamics_causal_2_node_3373407.log train_dynamics_causal_8_node_3373408.log
train_dynamics_causal_8_node_3393066.log\r\ntrain_dynamics_causal_2_node_3373407.log_bak train_dynamics_causal_8_node_3388140.log train_dynamics_causal_8_node_3412343.log\r\ntrain_dynamics_causal_2_node_3388135.log train_dynamics_causal_8_node_3389928.log train_dynamics_causal_8_node_3412349.log\r\ntrain_dynamics_causal_2_node_3388147.log train_dynamics_causal_8_node_3390458.log train_dynamics_causal_8_node_3412356.log\r\ntrain_dynamics_causal_2_node_3389801.log train_dynamics_causal_8_node_3393060.log train_dynamics_causal_8_node_3412397.log\r\ntrain_dynamics_causal_2_node_3393065.log train_dynamics_causal_8_node_3393061.log\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining]633;D;0",,terminal_output +1222,2284904,"TERMINAL",0,0,"queue",,terminal_command +1223,2284986,"TERMINAL",0,0,"]633;E;2025-08-10 15:36:16 queue;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:36:16 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3404607 accelerat train_to tum_cte0 R 1-22:06:47\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]3412397 accelerat train_dy tum_cte0 R\t2:52\t 8 hkn[0602-0605,0607-0608,0610-0611]",,terminal_output +1224,2286074,"TERMINAL",0,0,"883",,terminal_output +1225,2286123,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining]633;D;0",,terminal_output +1226,2293273,"TERMINAL",0,0,"tail -f train_dynamics_causal_8_node_3412397.log",,terminal_command +1227,2293316,"TERMINAL",0,0,"]633;E;2025-08-10 15:36:25 tail -f train_dynamics_causal_8_node_3412397.log;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C num_devices = jax.device_count()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 907, in device_count\r\n return int(get_backend(backend).device_count())\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 876, in get_backend\r\n return _get_backend_uncached(platform)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 855, in _get_backend_uncached\r\n bs = backends()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 758, in backends\r\n raise RuntimeError(err_msg)\r\nRuntimeError: Unable to initialize backend 'cuda': INTERNAL: Getting local topologies failed: Error 1: GetKeyValue() timed out with key: cuda:local_topology/cuda/19 and duration: 2m (you may need to uninstall the failing plugin package, or set JAX_PLATFORMS=cpu to skip this backend.)\r\n",,terminal_output +1228,2301914,"TERMINAL",0,0,"^C\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining]633;D;130",,terminal_output +1229,2306921,"TERMINAL",0,0,"runner",,terminal_command +1230,2310493,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",,terminal_command +1231,2310543,"TERMINAL",0,0,"]633;E;2025-08-10 15:36:42 sbatch slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;CSubmitted batch job 
3412399\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +1232,2312080,"TERMINAL",0,0,"queue",,terminal_command +1233,2312165,"TERMINAL",0,0,"]633;E;2025-08-10 15:36:44 queue;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:36:44 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3412399 accelerat train_dy tum_cte0 PD\t0:00\t 8 (None)3404607 accelerat train_to tum_cte0 R 1-22:07:14\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]3412397 accelerat train_dy tum_cte0 R\t3:19\t 8 hkn[0602-0605,0607-0608,0610-0611]",,terminal_output +1237,2315766,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +1238,2318952,"TERMINAL",0,0,"scancel 3412397",,terminal_command +1239,2318993,"TERMINAL",0,0,"]633;E;2025-08-10 15:36:51 scancel 3412397;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs",,terminal_output +1240,2320869,"TERMINAL",0,0,"queue",,terminal_command +1241,2320923,"TERMINAL",0,0,"]633;E;2025-08-10 15:36:52 queue;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:36:52 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3412397 accelerat train_dy tum_cte0 CG\t3:26\t 8 hkn[0602-0605,0607-0608,0610-0611]3412399 accelerat train_dy tum_cte0 PD\t0:00\t 8 (None)3404607 accelerat train_to tum_cte0 R 1-22:07:22\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]",,terminal_output
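The RuntimeError tailed from train_dynamics_causal_8_node_3412397.log above ("GetKeyValue() timed out with key: cuda:local_topology/cuda/19") is JAX's multi-process runtime timing out while exchanging GPU topology, which typically means at least one of the 32 srun tasks never reached the coordinator. A minimal sketch of the initialization every task must complete, assuming JAX's built-in Slurm auto-detection (illustrative only, not the repo's train_dynamics.py):

import jax

# Under Slurm, the coordinator address, process count, and process id are
# auto-detected from the SLURM_* environment; this call blocks until all
# processes (here 32) have connected or the timeout expires.
jax.distributed.initialize()

print(f"process {jax.process_index()}: {jax.local_device_count()} local / {jax.device_count()} global devices")

If any task crashes or stalls before this call, the surviving tasks fail with exactly the topology-exchange timeout seen in the log.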
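The orbax UserWarnings earlier in this section ("Sharding info not provided when restoring") come from restoring checkpoints without target shardings, so orbax falls back to reading the sharding file from disk. A minimal sketch of a restore that supplies shardings explicitly, under assumed names (a single-axis mesh, a hypothetical "w" entry, and a placeholder checkpoint path; not the repo's checkpoint code):

import jax
import jax.numpy as jnp
import numpy as np
import orbax.checkpoint as ocp

# Replicate everything across one mesh axis; real code would use the
# per-parameter shardings the model was trained with.
mesh = jax.sharding.Mesh(np.array(jax.devices()), ("data",))
replicated = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec())

# Abstract pytree: shapes/dtypes of the saved state plus the sharding to
# place each array with, which avoids the sharding-file fallback.
abstract_state = {"w": jax.ShapeDtypeStruct((1024, 1024), jnp.float32, sharding=replicated)}

ckptr = ocp.StandardCheckpointer()
state = ckptr.restore("/path/to/step_dir", args=ocp.args.StandardRestore(abstract_state))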
+1373,2459123,"TERMINAL",0,0,"bash",,terminal_focus +1379,2463889,"TERMINAL",0,0,"ls",,terminal_command +1380,2463929,"TERMINAL",0,0,"]633;E;2025-08-10 15:39:15 ls;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;Ctrain_dynamics_maskgit_8_node_3412350.log train_dynamics_maskgit_8_node_3412354.log\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining]633;D;0",,terminal_output +1384,2466816,"TERMINAL",0,0,"cd ..",,terminal_command +1385,2466860,"TERMINAL",0,0,"]633;E;2025-08-10 15:39:18 cd ..;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit]633;D;0",,terminal_output +1389,2470009,"TERMINAL",0,0,"cd ..",,terminal_command +1390,2470494,"TERMINAL",0,0,"ls",,terminal_command +1391,2470535,"TERMINAL",0,0,"]633;E;2025-08-10 15:39:22 ls;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C",,terminal_output +1392,2470760,"TERMINAL",0,0,"big_run train_lam_action_space_scaling_6_3318549.log\r\nbig-runs train_lam_action_space_scaling_6_3320178.log\r\ncausal train_lam_action_space_scaling_6_3321528.log\r\nmaskgit train_lam_action_space_scaling_6_3329790.log\r\nmaskgit-maskprob-fix
train_lam_action_space_scaling_6_3329805.log\r\ntrain_dyn_causal_180M_3372931.log train_lam_action_space_scaling_6_3331287.log\r\ntrain_dyn_causal_180M_3372963.log train_lam_action_space_scaling_8_3318550.log\r\ntrain_dyn_causal_180M_3372969.log train_lam_action_space_scaling_8_3329791.log\r\ntrain_dyn_causal_180M_3373107.log train_lam_action_space_scaling_8_3329806.log\r\ntrain_dyn_causal_255M_3372932.log train_lam_action_space_scaling_8_3331288.log\r\ntrain_dyn_causal_255M_3372970.log train_lam_minecraft_overfit_sample_3309655.log\r\ntrain_dyn_causal_255M_3373108.log train_lam_model_size_scaling_38M_3317098.log\r\ntrain_dyn_causal_356M_3372934.log train_lam_model_size_scaling_38M_3317115.log\r\ntrain_dyn_causal_356M_3372971.log train_lam_model_size_scaling_38M_3317231.log\r\ntrain_dyn_causal_356M_3373109.log train_tokenizer_batch_size_scaling_16_node_3321526.log\r\ntrain_dyn_causal_500M_3372936.log train_tokenizer_batch_size_scaling_1_node_3318551.log\r\ntrain_dyn_causal_500M_3372972.log train_tokenizer_batch_size_scaling_2_node_3318552.log\r\ntrain_dyn_causal_500M_3373110.log train_tokenizer_batch_size_scaling_2_node_3330806.log\r\ntrain_dyn_new_arch-bugfixed-spatial-shift_3359343.log train_tokenizer_batch_size_scaling_2_node_3330848.log\r\ntrain_dyn_new_arch-bugfixed-temporal-shift_3359349.log train_tokenizer_batch_size_scaling_2_node_3331282.log\r\ntrain_dyn_yolorun_3333026.log train_tokenizer_batch_size_scaling_4_node_3318553.log\r\ntrain_dyn_yolorun_3333448.log train_tokenizer_batch_size_scaling_4_node_3320175.log\r\ntrain_dyn_yolorun_3335345.log train_tokenizer_batch_size_scaling_4_node_3321524.log\r\ntrain_dyn_yolorun_3335362.log train_tokenizer_batch_size_scaling_8_node_3320176.log\r\ntrain_dyn_yolorun_3348592.log train_tokenizer_batch_size_scaling_8_node_3321525.log\r\ntrain_dyn_yolorun_new_arch_3351743.log train_tokenizer_minecraft_overfit_sample_3309656.log\r\ntrain_dyn_yolorun_new_arch_3352103.log train_tokenizer_model_size_scaling_127M_3317233.log\r\ntrain_dyn_yolorun_new_arch_3352115.log train_tokenizer_model_size_scaling_127M_3318554.log\r\ntrain_dyn_yolorun_new_arch_3358457.log train_tokenizer_model_size_scaling_140M_3313562.log\r\ntrain_lam_action_space_scaling_10_3320179.log train_tokenizer_model_size_scaling_140M_3316019.log\r\ntrain_lam_action_space_scaling_10_3321529.log train_tokenizer_model_size_scaling_200M_3313563.log\r\ntrain_lam_action_space_scaling_10_3329786.log train_tokenizer_model_size_scaling_200M_3316020.log\r\ntrain_lam_action_space_scaling_10_3329801.log train_tokenizer_model_size_scaling_227M_3317234.log\r\ntrain_lam_action_space_scaling_10_3331283.log train_tokenizer_model_size_scaling_227M_3318555.log\r\ntrain_lam_action_space_scaling_12_3318546.log train_tokenizer_model_size_scaling_227M_3320173.log\r\ntrain_lam_action_space_scaling_12_3320177.log train_tokenizer_model_size_scaling_227M_3321523.log\r\ntrain_lam_action_space_scaling_12_3321527.log train_tokenizer_model_size_scaling_37M_3313565.log\r\ntrain_lam_action_space_scaling_12_3329787.log train_tokenizer_model_size_scaling_37M_3316022.log\r\ntrain_lam_action_space_scaling_12_3329802.log train_tokenizer_model_size_scaling_37M_3317232.log\r\ntrain_lam_action_space_scaling_12_3331284.log train_tokenizer_model_size_scaling_37M_3317239.log\r\ntrain_lam_action_space_scaling_20_3318547.log train_tokenizer_model_size_scaling_37M_3318556.log\r\ntrain_lam_action_space_scaling_20_3329788.log train_tokenizer_model_size_scaling_74M_3318557.log\r\ntrain_lam_action_space_scaling_20_3329803.log 
train_tokenizer_model_size_scaling_74M_3320174.log\r\ntrain_lam_action_space_scaling_20_3331285.log train_tokenizer_model_size_scaling_74M_3321522.log\r\ntrain_lam_action_space_scaling_50_3320180.log train_tokenizer_model_size_scaling_80M_3313564.log\r\ntrain_lam_action_space_scaling_50_3329789.log train_tokenizer_model_size_scaling_80M_3316026.log\r\ntrain_lam_action_space_scaling_50_3329804.log yoloruns\r\ntrain_lam_action_space_scaling_50_3331286.log\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir]633;D;0",,terminal_output +1395,2472059,"TERMINAL",0,0,"cd causal/",,terminal_command +1396,2472404,"TERMINAL",0,0,"ls",,terminal_command +1397,2472444,"TERMINAL",0,0,"]633;E;2025-08-10 15:39:24 ls;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;Cdynamics-cotraining\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal]633;D;0",,terminal_output +1403,2477691,"TERMINAL",0,0,"cd dynamics-cotraining/",,terminal_command +1404,2478067,"TERMINAL",0,0,"ls",,terminal_command +1405,2478175,"TERMINAL",0,0,"]633;E;2025-08-10 15:39:30 ls;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;Ctrain_dynamics_causal_2_node_3373407.log train_dynamics_causal_8_node_3373408.log train_dynamics_causal_8_node_3393066.log\r\ntrain_dynamics_causal_2_node_3373407.log_bak train_dynamics_causal_8_node_3388140.log train_dynamics_causal_8_node_3412343.log\r\ntrain_dynamics_causal_2_node_3388135.log train_dynamics_causal_8_node_3389928.log train_dynamics_causal_8_node_3412349.log\r\ntrain_dynamics_causal_2_node_3388147.log train_dynamics_causal_8_node_3390458.log train_dynamics_causal_8_node_3412356.log\r\ntrain_dynamics_causal_2_node_3389801.log train_dynamics_causal_8_node_3393060.log train_dynamics_causal_8_node_3412397.log\r\ntrain_dynamics_causal_2_node_3393065.log train_dynamics_causal_8_node_3393061.log train_dynamics_causal_8_node_3412399.log\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining]633;D;0",,terminal_output +1410,2482408,"TERMINAL",0,0,"queue",,terminal_command +1412,2482499,"TERMINAL",0,0,"]633;E;2025-08-10 15:39:34 queue;2eb5e437-e1f9-471c-899b-55bd0b9abc8b]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:39:34 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3404607 accelerat train_to tum_cte0 R 1-22:10:04\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]3412399 accelerat train_dy tum_cte0 R\t2:09\t 8 hkn[0404,0521-0527]",,terminal_output +1415,2483949,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining]633;D;0",,terminal_output
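The row that follows captures the full sbatch script for job 3412399. It asks Slurm for a SIGUSR1 five minutes before timeout (--signal=b:usr1@300) and requeues itself from the trap; the script's own comment leaves checkpoint saving at that point as an option. A minimal Python-side sketch of what the training process could do if that signal were forwarded to it (hypothetical handler and save_checkpoint helper, not train_dynamics.py):

import signal

stop_requested = False

def _on_usr1(signum, frame):
    # Only flip a flag here; do the slow checkpoint work in the training loop.
    global stop_requested
    stop_requested = True

signal.signal(signal.SIGUSR1, _on_usr1)

# In the training loop, once per step:
#   if stop_requested:
#       save_checkpoint(state, step)  # hypothetical helper
#       break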
+1417,2485193,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412399.log",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=256 \\n --init_lr=0 \\n --dyna_type=causal \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n/var/spool/slurmd/job3412399/slurm_script: line 42: .venv/bin/activate: No such file or 
directory\nSLURM_JOB_USER=tum_cte0515\nSLURM_TASKS_PER_NODE=4(x8)\nSLURM_JOB_UID=999226\nSLURM_TASK_PID=992577\nSLURM_JOB_GPUS=0,1,2,3\nSLURM_LOCALID=0\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs\nSLURMD_NODENAME=hkn0404\nSLURM_JOB_START_TIME=1754833045\nSLURM_CLUSTER_NAME=hk\nSLURM_JOB_END_TIME=1755005845\nSLURM_CPUS_ON_NODE=24\nSLURM_JOB_CPUS_PER_NODE=24(x8)\nSLURM_GPUS_ON_NODE=4\nSLURM_GTIDS=0\nSLURM_JOB_PARTITION=accelerated\nSLURM_TRES_PER_TASK=cpu=5\nSLURM_OOM_KILL_STEP=0\nSLURM_JOB_NUM_NODES=8\nSLURM_JOBID=3412399\nSLURM_JOB_QOS=normal\nSLURM_PROCID=0\nSLURM_CPUS_PER_TASK=5\nSLURM_NTASKS=32\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e11.hkn0404\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\nSLURM_SCRIPT_CONTEXT=prolog_task\nSLURM_NODELIST=hkn[0404,0521-0527]\nSLURM_JOB_ACCOUNT=hk-project-p0023960\nSLURM_PRIO_PROCESS=0\nSLURM_NPROCS=32\nSLURM_NNODES=8\nSLURM_SUBMIT_HOST=hkn1990.localdomain\nSLURM_JOB_ID=3412399\nSLURM_NODEID=0\nSLURM_CONF=/etc/slurm/slurm.conf\nSLURM_JOB_NAME=train_dynamics_causal_8_node\nSLURM_NTASKS_PER_NODE=4\nSLURM_JOB_GID=502226\nSLURM_JOB_NODELIST=hkn[0404,0521-0527]\nGpuFreq=control_disabled\nwandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\nwandb: Tracking run with wandb version 0.19.11\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/wandb/run-20250810_153813-3412399\nwandb: Run `wandb offline` to turn off syncing.\nwandb: Syncing run dynamics-causal-8-node-3412399\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/3412399\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 40000\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 60000\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not 
found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 141000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 
100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/100000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nWARNING:absl:Missing metrics for step 142000\nWARNING:absl:Missing metrics for step 142000\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nWARNING:absl:Missing metrics for step 141000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/141000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 142000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/142000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 140000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/140000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 60000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/060000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 80000\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/040000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/080000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 120000\nWARNING:absl:Missing metrics for step 120000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607/120000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 100000\nWARNING:absl:Missing metrics for step 100000\nERROR:absl:File 
Running on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\nTraceback (most recent call last):\n  File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 347, in <module>\n    optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n  File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 473, in restore_genie_components\n    restored_tokenizer = tokenizer_checkpoint_manager.restore(\n  File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\n    restored = self._checkpointer.restore(restore_directory, args=args)\n  File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\n    return super().restore(directory, *args, **kwargs)\n  File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\n    restored = self._restore(directory, args=ckpt_args)\n  File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\n    return self._handler.restore(directory, args=args)\n  File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\nRunning
on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\n restored = self._checkpointer.restore(restore_directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 347, in \nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\n return super().restore(directory, *args, **kwargs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 347, in \n restored = self._restore(directory, args=ckpt_args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 473, in restore_genie_components\n return self._handler.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 473, in restore_genie_components\n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 473, in restore_genie_components\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 347, in \n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 473, in restore_genie_components\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\n restored = self._checkpointer.restore(restore_directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\n return super().restore(directory, *args, **kwargs)\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\n restored = self._restore(directory, args=ckpt_args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\n return self._handler.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\n restored = self._checkpointer.restore(restore_directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 347, in \n restored = self._checkpointer.restore(restore_directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 473, in restore_genie_components\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 347, in \n restored = self._checkpointer.restore(restore_directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 473, in restore_genie_components\n restored_tokenizer = 
tokenizer_checkpoint_manager.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\n restored = self._checkpointer.restore(restore_directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\n restored = self._checkpointer.restore(restore_directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 347, in \n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 473, in restore_genie_components\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n return super().restore(directory, *args, **kwargs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\n return super().restore(directory, *args, **kwargs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\n restored = self._restore(directory, args=ckpt_args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\n restored = self._restore(directory, args=ckpt_args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\n restored = self._checkpointer.restore(restore_directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\n return self._handler.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\n return self._handler.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\n return super().restore(directory, *args, **kwargs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\n restored = self._restore(directory, args=ckpt_args)\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\n return super().restore(directory, *args, **kwargs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\n return self._handler.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\n return super().restore(directory, *args, **kwargs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\n return super().restore(directory, *args, **kwargs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\n restored = self._restore(directory, args=ckpt_args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\n restored = self._restore(directory, args=ckpt_args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\n return self._handler.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\n restored = self._restore(directory, args=ckpt_args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\n return self._handler.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\n return self._handler.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 347, in \n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 473, in restore_genie_components\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\n restored = self._checkpointer.restore(restore_directory, args=args)\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\n return super().restore(directory, *args, **kwargs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\n restored = self._restore(directory, args=ckpt_args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\n return self._handler.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 347, in \n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 473, in restore_genie_components\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 347, in \nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 347, in \nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\n restored = self._checkpointer.restore(restore_directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 473, in restore_genie_components\n restored_tokenizer = 
tokenizer_checkpoint_manager.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 347, in \n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 473, in restore_genie_components\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\n return super().restore(directory, *args, **kwargs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\n restored = self._checkpointer.restore(restore_directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\n restored = self._checkpointer.restore(restore_directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 473, in restore_genie_components\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\n restored = self._restore(directory, args=ckpt_args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\n return self._handler.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\n return super().restore(directory, *args, **kwargs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\n restored = self._checkpointer.restore(restore_directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\n return super().restore(directory, *args, **kwargs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\n restored = self._restore(directory, args=ckpt_args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\n restored = self._restore(directory, args=ckpt_args)\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\n return self._handler.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\n return super().restore(directory, *args, **kwargs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\n return self._handler.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 347, in \n restored = self._restore(directory, args=ckpt_args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 347, in \n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n return self._handler.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 473, in restore_genie_components\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\n restored = self._checkpointer.restore(restore_directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 473, in restore_genie_components\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\n return super().restore(directory, *args, **kwargs)\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\n restored = self._restore(directory, args=ckpt_args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\nRunning on 32 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\n return self._handler.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\n restored = self._checkpointer.restore(restore_directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 347, in \n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 473, in restore_genie_components\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n return super().restore(directory, *args, **kwargs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\n restored = self._restore(directory, args=ckpt_args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\n return self._handler.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\n restored = self._checkpointer.restore(restore_directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\n return super().restore(directory, *args, **kwargs)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\n restored = self._restore(directory, args=ckpt_args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\n return self._handler.restore(directory, args=args)\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, 
args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in 
ValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'model': {'decoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})},
 '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})},
 '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})},
 '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}},
 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)},
 'encoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5 raise ValueError(\n raise ValueError(\n write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}, 'encoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\n000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\n000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\n000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\nValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'model': {'decoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False,ValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'model': {'decoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': 
{'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False,000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\n raise ValueError(\n raise ValueError(\n raise ValueError(\n raise ValueError(\n write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}, 'encoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5 write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}, 'encoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5 File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/genie.py"", line 473, in restore_genie_components\n restored_tokenizer = 
tokenizer_checkpoint_manager.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1608, in restore\n restored = self._checkpointer.restore(restore_directory, args=args)\nValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'model': {'decoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False,ValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'model': {'decoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False,ValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'model': {'decoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False,ValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'model': {'decoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False,000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\n000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\n raise ValueError(\n write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}, 'encoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5 write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}, 'encoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5 write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}, 'encoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5 write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}, 'encoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5 raise ValueError(\n raise ValueError(\nValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'model': {'decoder': 
{'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False,000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\n000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\n000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': 
Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\nValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'model': {'decoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}, 'encoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5 raise ValueError(\n raise ValueError(\n raise ValueError(\n000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\nValueError: User-provided restore item and on-disk value metadata tree structures do not match: 
{'model': {'decoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False,ValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'model': {'decoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, raise ValueError(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 558, in restore\n return super().restore(directory, *args, **kwargs)\n000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\n write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 
'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}, 'encoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5 write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}, 'encoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5ValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'model': {'decoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False,ValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'model': {'decoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, raise ValueError(\n raise ValueError(\n raise ValueError(\n000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\n000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\n write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}, 'encoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5 write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': 
ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}, 'encoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5ValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'model': {'decoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}, 'encoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5ValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'model': {'decoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, 
write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False,ValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'model': {'decoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, raise ValueError(\n raise ValueError(\n000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\n000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\n write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 
'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}, 'encoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\n restored = self._restore(directory, args=ckpt_args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\n return self._handler.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 837, in restore\n restored[item_name] = handler.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 796, in restore\n return self._handler_impl.restore(directory, args=args)\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 737, in restore\n raise ValueError(\nValueError: User-provided restore item and on-disk value metadata tree structures do not match: {'model': {'decoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}, 'encoder': {'blocks': {'0': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '1': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '2': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}, '3': {'spatial_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}}), 'temporal_pos_enc': Diff(lhs=None, rhs={'pe': {'value': ValueMetadataEntry(value_type='jax.Array', skip_deserialize=False, write_shape=(5000, 16))}})}}, 'pos_enc': Diff(lhs={'pe': {'value': ShapeDtypeStruct(shape=(5000, 512), dtype=float32, sharding=NamedSharding(mesh=Mesh('data': 32, axis_types=(Auto,)), spec=PartitionSpec(), memory_kind=device))}}, rhs=None)}}}\n",log,tab +1418,2485577,"TERMINAL",0,0,"772",,terminal_output +1419,2486622,"TERMINAL",0,0,"883",,terminal_output +1420,2487681,"TERMINAL",0,0,"994",,terminal_output
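Editor's note on the log record above: the ValueError was printed once per process of the multi-node run, so the raw capture interleaved many identical copies of the same traceback; it is kept here once, deduplicated. The message itself says the restore target handed to Orbax does not match the checkpoint on disk: the on-disk tree has per-block spatial_pos_enc/temporal_pos_enc tables with write_shape (5000, 16), while the restore item instead carries a single top-level pos_enc of shape (5000, 512), i.e. the model definition used for restoring differs from the one that wrote the checkpoint. A minimal sketch of how one might inspect such a mismatch, assuming a plain Orbax PyTreeCheckpointer and a hypothetical ckpt_dir; this is not the project's actual restore code:

    # hypothetical inspection sketch, not taken from the recording
    import jax
    import orbax.checkpoint as ocp

    ckptr = ocp.PyTreeCheckpointer()
    # metadata() returns the on-disk tree structure without loading arrays,
    # which can be diffed against the current model's parameter tree
    meta = ckptr.metadata(ckpt_dir)
    print(jax.tree_util.tree_structure(meta))
    # restoring without a target item yields the raw on-disk tree, which can
    # then be remapped onto the current model definition by hand
    raw = ckptr.restore(ckpt_dir)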
+1421,2488067,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412399.log",27537,0,"",log,selection_mouse +1422,2488105,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412399.log",27536,0,"",log,selection_command +1423,2488712,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/train_dynamics_causal_8_node_3412399.log",237876,0,"",log,selection_command +1424,2488830,"TERMINAL",0,0,"4012399dyCG 2:14404,0521-0527]04607to1-22:10:10802,0804-0806,0808,0810,0813-0814]",,terminal_output +1425,2489971,"TERMINAL",0,0,"11",,terminal_output +1426,2490846,"TERMINAL",0,0,"22",,terminal_output +1427,2492017,"TERMINAL",0,0,"33",,terminal_output +1428,2492969,"TERMINAL",0,0,"45",,terminal_output +1429,2494017,"TERMINAL",0,0,"66",,terminal_output +1430,2495284,"TERMINAL",0,0,"77",,terminal_output +1431,2496257,"TERMINAL",0,0,"88",,terminal_output +1432,2497331,"TERMINAL",0,0,"99",,terminal_output +1433,2498261,"TERMINAL",0,0,"501 hkn040420",,terminal_output +1434,2499276,"TERMINAL",0,0,"\r104607to R 1-22:10:21\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]",,terminal_output +1435,2500303,"TERMINAL",0,0,"22",,terminal_output +1436,2501343,"TERMINAL",0,0,"33",,terminal_output +1437,2502396,"TERMINAL",0,0,"44",,terminal_output +1438,2503448,"TERMINAL",0,0,"55",,terminal_output +1439,2504480,"TERMINAL",0,0,"66",,terminal_output +1440,2505598,"TERMINAL",0,0,"77",,terminal_output +1441,2506577,"TERMINAL",0,0,"88",,terminal_output +1442,2507671,"TERMINAL",0,0,"99",,terminal_output +1443,2508669,"TERMINAL",0,0,"40:0030",,terminal_output +1444,2509723,"TERMINAL",0,0,"11",,terminal_output +1445,2510812,"TERMINAL",0,0,"22",,terminal_output +1446,2511868,"TERMINAL",0,0,"33",,terminal_output +1447,2512872,"TERMINAL",0,0,"44",,terminal_output +1448,2513916,"TERMINAL",0,0,"55",,terminal_output +1449,2514963,"TERMINAL",0,0,"67",,terminal_output +1450,2516013,"TERMINAL",0,0,"88",,terminal_output +1451,2517093,"TERMINAL",0,0,"99",,terminal_output +1452,2518251,"TERMINAL",0,0,"1040",,terminal_output +1453,2518387,"utils/nn.py",0,0,"",python,tab +1454,2519158,"TERMINAL",0,0,"11",,terminal_output +1455,2520463,"TERMINAL",0,0,"22",,terminal_output +1456,2521260,"TERMINAL",0,0,"33",,terminal_output +1457,2522331,"TERMINAL",0,0,"44",,terminal_output +1458,2523433,"TERMINAL",0,0,"55",,terminal_output +1459,2524413,"TERMINAL",0,0,"66",,terminal_output +1460,2525480,"TERMINAL",0,0,"77",,terminal_output +1461,2526491,"TERMINAL",0,0,"88",,terminal_output +1462,2527535,"TERMINAL",0,0,"99",,terminal_output +1463,2528652,"TERMINAL",0,0,"2050",,terminal_output +1464,2529982,"TERMINAL",0,0,"11",,terminal_output +1465,2530686,"TERMINAL",0,0,"22",,terminal_output +1466,2531822,"TERMINAL",0,0,"33",,terminal_output +1467,2532779,"TERMINAL",0,0,"44",,terminal_output +1468,2533848,"TERMINAL",0,0,"55",,terminal_output +1469,2534895,"TERMINAL",0,0,"66",,terminal_output +1470,2536343,"TERMINAL",0,0,"77",,terminal_output +1471,2537146,"TERMINAL",0,0,"89",,terminal_output +1472,2538169,"TERMINAL",0,0,"301:00",,terminal_output +1473,2539082,"TERMINAL",0,0,"11",,terminal_output +1474,2540132,"TERMINAL",0,0,"22",,terminal_output +1475,2541172,"TERMINAL",0,0,"33",,terminal_output +1476,2542464,"TERMINAL",0,0,"44",,terminal_output +1477,2543287,"TERMINAL",0,0,"55",,terminal_output +1478,2544311,"TERMINAL",0,0,"66",,terminal_output 
+1479,2545359,"TERMINAL",0,0,"77",,terminal_output +1480,2546099,"TERMINAL",0,0,"watch",,terminal_focus +1481,2546408,"TERMINAL",0,0,"88",,terminal_output +1482,2547454,"TERMINAL",0,0,"99",,terminal_output +1483,2548509,"TERMINAL",0,0,"4010",,terminal_output +1484,2549635,"TERMINAL",0,0,"11",,terminal_output +1485,2550622,"TERMINAL",0,0,"22",,terminal_output +1486,2551676,"TERMINAL",0,0,"33",,terminal_output +1487,2551866,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +1488,2553938,"TERMINAL",0,0,"scancel 3404607",,terminal_command +1489,2553976,"TERMINAL",0,0,"]633;E;2025-08-10 15:40:46 scancel 3404607;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +1490,2555148,"TERMINAL",0,0,"queue",,terminal_command +1491,2555162,"TERMINAL",0,0,"]633;E;2025-08-10 15:40:47 queue;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C",,terminal_output +1492,2555227,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:40:47 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3404607 accelerat train_to tum_cte0 CG 1-22:11:16\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]",,terminal_output +1493,2556333,"TERMINAL",0,0,"8[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +1494,2558695,"TERMINAL",0,0,"runner",,terminal_command +1495,2560493,"TERMINAL",0,0,"sync-runner",,terminal_command +1496,2560507,"TERMINAL",0,0,"]633;E;2025-08-10 15:40:52 sync-runner;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;Csending incremental file list\r\n",,terminal_output +1497,2565239,"TERMINAL",0,0,"\r\nsent 26,323 bytes received 143 bytes 4,812.00 bytes/sec\r\ntotal size is 220,521,654 speedup is 8,332.26\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +1498,2574761,"TERMINAL",0,0,"dev",,terminal_command +1499,2576066,"TERMINAL",0,0,"git pull",,terminal_command +1500,2576157,"TERMINAL",0,0,"]633;E;2025-08-10 15:41:08 git pull;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C",,terminal_output +1501,2578115,"TERMINAL",0,0,"Already up to date.\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1502,2580957,"TERMINAL",0,0,"runner",,terminal_command +1503,2583160,"TERMINAL",0,0,"sync-runner",,terminal_command +1504,2583225,"TERMINAL",0,0,"]633;E;2025-08-10 15:41:15 sync-runner;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;Csending incremental file list\r\n",,terminal_output +1505,2583334,"TERMINAL",0,0,"\r\nsent 26,323 bytes received 143 bytes 52,932.00 bytes/sec\r\ntotal size is 220,521,654 speedup is 8,332.26\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +1506,2587631,"slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/big_run/tokenizer/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/big_run/tokenizer/%x_%j.log\n#SBATCH --job-name=train_tokenizer_1e-4\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch 
$checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=384 \\n --init_lr=0 \\n --max_lr=1e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=1000 \\n --log \\n --name=tokenizer-8-nodes-$slurm_job_id \\n --tags tokenizer big-run 1e-4 \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid",shellscript,tab +1507,2592868,"slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch",1734,0,"",shellscript,selection_mouse +1508,2596033,"slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch",1733,0,"",shellscript,selection_command +1509,2597117,"slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch",1734,0,"",shellscript,selection_command +1510,2597512,"slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch",1733,1,"",shellscript,content +1511,2597631,"slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch",1732,1,"",shellscript,content +1512,2597925,"slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch",1731,1,"",shellscript,content +1513,2600005,"slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch",1731,0,"2",shellscript,content +1514,2600006,"slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch",1732,0,"",shellscript,selection_keyboard +1515,2600089,"slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch",1732,0,"5",shellscript,content +1516,2600090,"slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch",1733,0,"",shellscript,selection_keyboard +1517,2600244,"slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch",1733,0,"6",shellscript,content +1518,2600245,"slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch",1734,0,"",shellscript,selection_keyboard +1519,2600658,"slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch",1733,0,"",shellscript,selection_command +1520,2613907,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch",,terminal_command +1521,2613925,"TERMINAL",0,0,"]633;E;2025-08-10 15:41:45 sbatch slurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;CSubmitted batch job 3412401\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +1522,2615053,"TERMINAL",0,0,"queue",,terminal_command 
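Editor's note on the sbatch script recorded above: `--signal=b:usr1@300` asks Slurm to deliver SIGUSR1 to the batch shell (the batch-shell form of `--signal`) roughly 300 s before the job's time limit. Because bash only runs traps between foreground commands, the training process is launched in the background and the script then `wait`s on `$child_pid`; when the signal arrives, the `requeue_job` trap fires and calls `scontrol requeue $SLURM_JOB_ID`. On the requeued run `Restarts=` is nonzero, so the script switches from `--no-restore-ckpt` to `--restore-ckpt` and training resumes from the last checkpoint. The in-file edits that follow (offsets 1731-1733) change the `--batch_size` value from 384 to 256 before the job is submitted as 3412401.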
+1523,2615132,"TERMINAL",0,0,"]633;E;2025-08-10 15:41:47 queue;90368b59-bfc9-4fc2-a75c-40b23bd904f0]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Sun Aug 10 15:41:47 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3412401 accelerat train_to tum_cte0 PD\t0:00\t 8 (None)",,terminal_output +1524,2616556,"TERMINAL",0,0,"8",,terminal_output +1525,2617481,"TERMINAL",0,0,"9",,terminal_output +1526,2618331,"TERMINAL",0,0,"50 Rhkn[0802,0804-0806,0808,0810,0813-0814]",,terminal_output +1527,2619321,"TERMINAL",0,0,"11",,terminal_output +1528,2620450,"TERMINAL",0,0,"22",,terminal_output +1529,2621414,"TERMINAL",0,0,"33",,terminal_output +1530,2622474,"TERMINAL",0,0,"44",,terminal_output +1531,2623515,"TERMINAL",0,0,"55",,terminal_output +1532,2624563,"TERMINAL",0,0,"66",,terminal_output +1533,2625662,"TERMINAL",0,0,"77",,terminal_output +1534,2626664,"TERMINAL",0,0,"88",,terminal_output +1535,2627708,"TERMINAL",0,0,"99",,terminal_output +1536,2628836,"TERMINAL",0,0,"2:0010",,terminal_output +1537,2630161,"TERMINAL",0,0,"11",,terminal_output +1538,2630847,"TERMINAL",0,0,"22",,terminal_output +1539,2631900,"TERMINAL",0,0,"33",,terminal_output +1540,2633031,"TERMINAL",0,0,"45",,terminal_output +1541,2633998,"TERMINAL",0,0,"66",,terminal_output +1542,2635052,"TERMINAL",0,0,"77",,terminal_output +1543,2636202,"TERMINAL",0,0,"88",,terminal_output +1544,2637206,"TERMINAL",0,0,"99",,terminal_output +1545,2638196,"TERMINAL",0,0,"1020",,terminal_output +1546,2639243,"TERMINAL",0,0,"11",,terminal_output +1547,2640343,"TERMINAL",0,0,"22",,terminal_output +1548,2641350,"TERMINAL",0,0,"33",,terminal_output +1549,2642395,"TERMINAL",0,0,"44",,terminal_output +1550,2643458,"TERMINAL",0,0,"55",,terminal_output +1551,2644500,"TERMINAL",0,0,"66",,terminal_output +1552,2645545,"TERMINAL",0,0,"77",,terminal_output +1553,2646591,"TERMINAL",0,0,"88",,terminal_output +1554,2647702,"TERMINAL",0,0,"99",,terminal_output +1555,2648747,"TERMINAL",0,0,"2030",,terminal_output +1556,2649795,"TERMINAL",0,0,"11",,terminal_output +1557,2651054,"TERMINAL",0,0,"22",,terminal_output +1558,2651895,"TERMINAL",0,0,"33",,terminal_output +1559,2652941,"TERMINAL",0,0,"45",,terminal_output +1560,2654012,"TERMINAL",0,0,"66",,terminal_output +1561,2655039,"TERMINAL",0,0,"77",,terminal_output +1562,2656157,"TERMINAL",0,0,"88",,terminal_output +1563,2657182,"TERMINAL",0,0,"99",,terminal_output +1564,2658196,"TERMINAL",0,0,"3040",,terminal_output +1565,2659250,"TERMINAL",0,0,"11",,terminal_output +1566,2660290,"TERMINAL",0,0,"22",,terminal_output +1567,2661343,"TERMINAL",0,0,"33",,terminal_output +1568,2662392,"TERMINAL",0,0,"44",,terminal_output +1569,2663451,"TERMINAL",0,0,"55",,terminal_output +1570,2664496,"TERMINAL",0,0,"66",,terminal_output +1571,2665630,"TERMINAL",0,0,"77",,terminal_output +1572,2666591,"TERMINAL",0,0,"88",,terminal_output +1573,2667648,"TERMINAL",0,0,"99",,terminal_output +1574,2668692,"TERMINAL",0,0,"4050",,terminal_output +1575,2670071,"TERMINAL",0,0,"11",,terminal_output +1576,2671098,"TERMINAL",0,0,"22",,terminal_output +1577,2672121,"TERMINAL",0,0,"33",,terminal_output +1578,2672945,"TERMINAL",0,0,"44",,terminal_output +1579,2674040,"TERMINAL",0,0,"56",,terminal_output +1580,2675414,"TERMINAL",0,0,"77",,terminal_output +1581,2676114,"TERMINAL",0,0,"88",,terminal_output +1582,2677135,"TERMINAL",0,0,"99",,terminal_output +1583,2678159,"TERMINAL",0,0,"501:00",,terminal_output +1584,2679202,"TERMINAL",0,0,"11",,terminal_output +1585,2680423,"TERMINAL",0,0,"22",,terminal_output 
+1586,2681391,"TERMINAL",0,0,"33",,terminal_output +1587,2682394,"TERMINAL",0,0,"44",,terminal_output +1588,2683482,"TERMINAL",0,0,"55",,terminal_output +1589,2684419,"TERMINAL",0,0,"66",,terminal_output +1590,2685468,"TERMINAL",0,0,"77",,terminal_output +1591,2686525,"TERMINAL",0,0,"88",,terminal_output +1592,2687679,"TERMINAL",0,0,"99",,terminal_output +1593,2688696,"TERMINAL",0,0,"3:0010",,terminal_output +1594,2689670,"TERMINAL",0,0,"11",,terminal_output +1595,2690712,"TERMINAL",0,0,"22",,terminal_output +1596,2691900,"TERMINAL",0,0,"33",,terminal_output +1597,2692811,"TERMINAL",0,0,"44",,terminal_output +1598,2693860,"TERMINAL",0,0,"55",,terminal_output +1599,2694956,"TERMINAL",0,0,"66",,terminal_output +1600,2695966,"TERMINAL",0,0,"78",,terminal_output +1601,2697094,"TERMINAL",0,0,"99",,terminal_output +1602,2698139,"TERMINAL",0,0,"1020",,terminal_output +1603,2699104,"TERMINAL",0,0,"11",,terminal_output +1604,2700571,"TERMINAL",0,0,"22",,terminal_output +1605,2701212,"TERMINAL",0,0,"33",,terminal_output +1606,2702262,"TERMINAL",0,0,"44",,terminal_output +1607,2703640,"TERMINAL",0,0,"55",,terminal_output +1608,2704459,"TERMINAL",0,0,"66",,terminal_output +1609,2705419,"TERMINAL",0,0,"77",,terminal_output +1610,2706711,"TERMINAL",0,0,"88",,terminal_output +1611,2707577,"TERMINAL",0,0,"99",,terminal_output +1612,2708551,"TERMINAL",0,0,"2030",,terminal_output +1613,2709604,"TERMINAL",0,0,"11",,terminal_output +1614,2710657,"TERMINAL",0,0,"22",,terminal_output +1615,2711705,"TERMINAL",0,0,"33",,terminal_output +1616,2712751,"TERMINAL",0,0,"44",,terminal_output +1617,2713876,"TERMINAL",0,0,"55",,terminal_output +1618,2714898,"TERMINAL",0,0,"66",,terminal_output +1619,2715901,"TERMINAL",0,0,"77",,terminal_output +1620,2717046,"TERMINAL",0,0,"89",,terminal_output +1621,2718072,"TERMINAL",0,0,"3040",,terminal_output +1622,2719065,"TERMINAL",0,0,"11",,terminal_output +1623,2720122,"TERMINAL",0,0,"22",,terminal_output +1624,2721166,"TERMINAL",0,0,"33",,terminal_output +1625,2722196,"TERMINAL",0,0,"44",,terminal_output +1626,2723493,"TERMINAL",0,0,"55",,terminal_output +1627,2724312,"TERMINAL",0,0,"66",,terminal_output +1628,2725381,"TERMINAL",0,0,"77",,terminal_output +1629,2726463,"TERMINAL",0,0,"88",,terminal_output +1630,2727449,"TERMINAL",0,0,"99",,terminal_output +1631,2728525,"TERMINAL",0,0,"4050",,terminal_output +1632,2729548,"TERMINAL",0,0,"11",,terminal_output +1633,2730597,"TERMINAL",0,0,"22",,terminal_output +1634,2731650,"TERMINAL",0,0,"33",,terminal_output +1635,2732808,"TERMINAL",0,0,"44",,terminal_output +1636,2733751,"TERMINAL",0,0,"55",,terminal_output +1637,2734853,"TERMINAL",0,0,"66",,terminal_output +1638,2735862,"TERMINAL",0,0,"77",,terminal_output +1639,2737000,"TERMINAL",0,0,"88",,terminal_output +1640,2738235,"TERMINAL",0,0,"92:00",,terminal_output +1641,2738994,"TERMINAL",0,0,"511",,terminal_output +1642,2740071,"TERMINAL",0,0,"22",,terminal_output +1643,2741265,"TERMINAL",0,0,"33",,terminal_output +1644,2742145,"TERMINAL",0,0,"44",,terminal_output +1645,2743550,"TERMINAL",0,0,"55",,terminal_output +1646,2744249,"TERMINAL",0,0,"66",,terminal_output +1647,2745296,"TERMINAL",0,0,"77",,terminal_output +1648,2746387,"TERMINAL",0,0,"88",,terminal_output +1649,2747396,"TERMINAL",0,0,"99",,terminal_output +1650,2748438,"TERMINAL",0,0,"4:0010",,terminal_output +1651,2749493,"TERMINAL",0,0,"11",,terminal_output +1652,2750538,"TERMINAL",0,0,"22",,terminal_output +1653,2751635,"TERMINAL",0,0,"33",,terminal_output +1654,2752760,"TERMINAL",0,0,"44",,terminal_output 
+1655,2753681,"TERMINAL",0,0,"55",,terminal_output +1656,2754732,"TERMINAL",0,0,"66",,terminal_output +1657,2755830,"TERMINAL",0,0,"77",,terminal_output +1658,2757051,"TERMINAL",0,0,"88",,terminal_output +1659,2757881,"TERMINAL",0,0,"99",,terminal_output +1660,2759004,"TERMINAL",0,0,"1020",,terminal_output +1661,2759981,"TERMINAL",0,0,"12",,terminal_output +1662,2761046,"TERMINAL",0,0,"33",,terminal_output +1663,2762182,"TERMINAL",0,0,"44",,terminal_output +1664,2763200,"TERMINAL",0,0,"55",,terminal_output +1665,2764168,"TERMINAL",0,0,"66",,terminal_output +1666,2765246,"TERMINAL",0,0,"77",,terminal_output +1667,2766372,"TERMINAL",0,0,"88",,terminal_output +1668,2767316,"TERMINAL",0,0,"99",,terminal_output +1669,2768403,"TERMINAL",0,0,"2030",,terminal_output +1670,2769421,"TERMINAL",0,0,"11",,terminal_output +1671,2770674,"TERMINAL",0,0,"22",,terminal_output +1672,2771694,"TERMINAL",0,0,"33",,terminal_output +1673,2772560,"TERMINAL",0,0,"44",,terminal_output +1674,2773608,"TERMINAL",0,0,"55",,terminal_output +1675,2774663,"TERMINAL",0,0,"66",,terminal_output +1676,2775717,"TERMINAL",0,0,"77",,terminal_output +1677,2776752,"TERMINAL",0,0,"88",,terminal_output +1678,2777936,"TERMINAL",0,0,"99",,terminal_output +1679,2778851,"TERMINAL",0,0,"3040",,terminal_output +1680,2779902,"TERMINAL",0,0,"11",,terminal_output +1681,2781109,"TERMINAL",0,0,"23",,terminal_output +1682,2782134,"TERMINAL",0,0,"44",,terminal_output +1683,2783042,"TERMINAL",0,0,"55",,terminal_output +1684,2784098,"TERMINAL",0,0,"66",,terminal_output +1685,2785145,"TERMINAL",0,0,"77",,terminal_output +1686,2786544,"TERMINAL",0,0,"88",,terminal_output +1687,2787394,"TERMINAL",0,0,"99",,terminal_output +1688,2788416,"TERMINAL",0,0,"4050",,terminal_output +1689,2789392,"TERMINAL",0,0,"11",,terminal_output +1690,2790524,"TERMINAL",0,0,"22",,terminal_output +1691,2791544,"TERMINAL",0,0,"33",,terminal_output +1692,2792483,"TERMINAL",0,0,"44",,terminal_output +1693,2793534,"TERMINAL",0,0,"55",,terminal_output +1694,2794587,"TERMINAL",0,0,"66",,terminal_output +1695,2795747,"TERMINAL",0,0,"77",,terminal_output +1696,2796683,"TERMINAL",0,0,"88",,terminal_output +1697,2797787,"TERMINAL",0,0,"99",,terminal_output +1698,2798811,"TERMINAL",0,0,"503:00",,terminal_output +1699,2799856,"TERMINAL",0,0,"11",,terminal_output +1700,2800959,"TERMINAL",0,0,"22",,terminal_output +1701,2801984,"TERMINAL",0,0,"33",,terminal_output +1702,2803112,"TERMINAL",0,0,"45",,terminal_output +1703,2804024,"TERMINAL",0,0,"66",,terminal_output +1704,2805078,"TERMINAL",0,0,"77",,terminal_output +1705,2806485,"TERMINAL",0,0,"88",,terminal_output +1706,2807201,"TERMINAL",0,0,"99",,terminal_output +1707,2808635,"TERMINAL",0,0,"5:0010",,terminal_output +1708,2809395,"TERMINAL",0,0,"11",,terminal_output +1709,2810374,"TERMINAL",0,0,"22",,terminal_output +1710,2811415,"TERMINAL",0,0,"33",,terminal_output +1711,2812427,"TERMINAL",0,0,"44",,terminal_output +1712,2813473,"TERMINAL",0,0,"55",,terminal_output +1713,2814522,"TERMINAL",0,0,"66",,terminal_output +1714,2815799,"TERMINAL",0,0,"77",,terminal_output +1715,2816619,"TERMINAL",0,0,"88",,terminal_output +1716,2817741,"TERMINAL",0,0,"99",,terminal_output +1717,2818766,"TERMINAL",0,0,"1020",,terminal_output +1718,2819791,"TERMINAL",0,0,"11",,terminal_output +1719,2820919,"TERMINAL",0,0,"22",,terminal_output +1720,2822147,"TERMINAL",0,0,"33",,terminal_output +1721,2822957,"TERMINAL",0,0,"44",,terminal_output +1722,2824041,"TERMINAL",0,0,"56",,terminal_output +1723,2825013,"TERMINAL",0,0,"77",,terminal_output 
+1724,2826068,"TERMINAL",0,0,"88",,terminal_output +1725,2827107,"TERMINAL",0,0,"99",,terminal_output +1726,2828176,"TERMINAL",0,0,"2030",,terminal_output +1727,2829206,"TERMINAL",0,0,"11",,terminal_output +1728,2830560,"TERMINAL",0,0,"22",,terminal_output +1729,2831353,"TERMINAL",0,0,"33",,terminal_output +1730,2832399,"TERMINAL",0,0,"44",,terminal_output +1731,2833413,"TERMINAL",0,0,"55",,terminal_output +1732,2834460,"TERMINAL",0,0,"66",,terminal_output +1733,2835513,"TERMINAL",0,0,"77",,terminal_output +1734,2836560,"TERMINAL",0,0,"88",,terminal_output +1735,2837698,"TERMINAL",0,0,"99",,terminal_output +1736,2838825,"TERMINAL",0,0,"3040",,terminal_output +1737,2839745,"TERMINAL",0,0,"11",,terminal_output +1738,2841076,"TERMINAL",0,0,"22",,terminal_output +1739,2842049,"TERMINAL",0,0,"33",,terminal_output +1740,2842850,"TERMINAL",0,0,"44",,terminal_output +1741,2843906,"TERMINAL",0,0,"55",,terminal_output +1742,2844962,"TERMINAL",0,0,"67",,terminal_output +1743,2846201,"TERMINAL",0,0,"88",,terminal_output +1744,2847113,"TERMINAL",0,0,"99",,terminal_output +1745,2848135,"TERMINAL",0,0,"4050",,terminal_output +1746,2849158,"TERMINAL",0,0,"11",,terminal_output +1747,2850284,"TERMINAL",0,0,"22",,terminal_output +1748,2851517,"TERMINAL",0,0,"33",,terminal_output +1749,2852331,"TERMINAL",0,0,"44",,terminal_output +1750,2853501,"TERMINAL",0,0,"55",,terminal_output +1751,2854593,"TERMINAL",0,0,"66",,terminal_output +1752,2855436,"TERMINAL",0,0,"77",,terminal_output +1753,2856734,"TERMINAL",0,0,"88",,terminal_output +1754,2857552,"TERMINAL",0,0,"99",,terminal_output +1755,2858675,"TERMINAL",0,0,"504:00",,terminal_output +1756,2859641,"TERMINAL",0,0,"11",,terminal_output +1757,2860723,"TERMINAL",0,0,"22",,terminal_output +1758,2861746,"TERMINAL",0,0,"33",,terminal_output +1759,2862793,"TERMINAL",0,0,"44",,terminal_output +1760,2863837,"TERMINAL",0,0,"55",,terminal_output +1761,2864918,"TERMINAL",0,0,"66",,terminal_output +1762,2866044,"TERMINAL",0,0,"77",,terminal_output +1763,2867173,"TERMINAL",0,0,"99",,terminal_output +1764,2868091,"TERMINAL",0,0,"6:0010",,terminal_output +1765,2869088,"TERMINAL",0,0,"11",,terminal_output +1766,2870142,"TERMINAL",0,0,"22",,terminal_output +1767,2871190,"TERMINAL",0,0,"33",,terminal_output +1768,2872285,"TERMINAL",0,0,"44",,terminal_output +1769,2873288,"TERMINAL",0,0,"55",,terminal_output +1770,2874435,"TERMINAL",0,0,"66",,terminal_output +1771,2875462,"TERMINAL",0,0,"77",,terminal_output +1772,2876689,"TERMINAL",0,0,"88",,terminal_output +1773,2877487,"TERMINAL",0,0,"99",,terminal_output +1774,2878536,"TERMINAL",0,0,"1020",,terminal_output +1775,2879582,"TERMINAL",0,0,"11",,terminal_output +1776,2880674,"TERMINAL",0,0,"22",,terminal_output +1777,2881689,"TERMINAL",0,0,"33",,terminal_output +1778,2882723,"TERMINAL",0,0,"44",,terminal_output +1779,2883850,"TERMINAL",0,0,"55",,terminal_output +1780,2884874,"TERMINAL",0,0,"66",,terminal_output +1781,2885863,"TERMINAL",0,0,"77",,terminal_output +1782,2887022,"TERMINAL",0,0,"88",,terminal_output +1783,2888147,"TERMINAL",0,0,"930",,terminal_output +1784,2889040,"TERMINAL",0,0,"211",,terminal_output +1785,2890092,"TERMINAL",0,0,"22",,terminal_output +1786,2891526,"TERMINAL",0,0,"33",,terminal_output +1787,2892250,"TERMINAL",0,0,"44",,terminal_output +1788,2893266,"TERMINAL",0,0,"55",,terminal_output +1789,2894305,"TERMINAL",0,0,"66",,terminal_output +1790,2895317,"TERMINAL",0,0,"77",,terminal_output +1791,2896512,"TERMINAL",0,0,"88",,terminal_output +1792,2897371,"TERMINAL",0,0,"99",,terminal_output 
+1793,2898687,"TERMINAL",0,0,"3040",,terminal_output +1794,2899473,"TERMINAL",0,0,"11",,terminal_output +1795,2900632,"TERMINAL",0,0,"22",,terminal_output +1796,2901555,"TERMINAL",0,0,"33",,terminal_output +1797,2902679,"TERMINAL",0,0,"44",,terminal_output +1798,2903645,"TERMINAL",0,0,"55",,terminal_output +1799,2904691,"TERMINAL",0,0,"66",,terminal_output +1800,2905750,"TERMINAL",0,0,"77",,terminal_output +1801,2906795,"TERMINAL",0,0,"88",,terminal_output +1802,2907853,"TERMINAL",0,0,"99",,terminal_output +1803,2908888,"TERMINAL",0,0,"4050",,terminal_output +1804,2910254,"TERMINAL",0,0,"12",,terminal_output +1805,2911176,"TERMINAL",0,0,"33",,terminal_output +1806,2912197,"TERMINAL",0,0,"44",,terminal_output +1807,2913222,"TERMINAL",0,0,"55",,terminal_output +1808,2914122,"TERMINAL",0,0,"66",,terminal_output +1809,2915368,"TERMINAL",0,0,"77",,terminal_output +1810,2916291,"TERMINAL",0,0,"88",,terminal_output +1811,2917313,"TERMINAL",0,0,"99",,terminal_output +1812,2918338,"TERMINAL",0,0,"505:00",,terminal_output +1813,2919462,"TERMINAL",0,0,"11",,terminal_output +1814,2920402,"TERMINAL",0,0,"22",,terminal_output +1815,2921508,"TERMINAL",0,0,"33",,terminal_output +1816,2922521,"TERMINAL",0,0,"44",,terminal_output +1817,2923567,"TERMINAL",0,0,"55",,terminal_output +1818,2924606,"TERMINAL",0,0,"66",,terminal_output +1819,2925659,"TERMINAL",0,0,"77",,terminal_output +1820,2926702,"TERMINAL",0,0,"88",,terminal_output +1821,2927751,"TERMINAL",0,0,"99",,terminal_output +1822,2928882,"TERMINAL",0,0,"7:0010",,terminal_output +1823,2929899,"TERMINAL",0,0,"11",,terminal_output +1824,2930893,"TERMINAL",0,0,"22",,terminal_output +1825,2931939,"TERMINAL",0,0,"34",,terminal_output +1826,2932995,"TERMINAL",0,0,"55",,terminal_output +1827,2934046,"TERMINAL",0,0,"66",,terminal_output +1828,2935324,"TERMINAL",0,0,"77",,terminal_output +1829,2936244,"TERMINAL",0,0,"88",,terminal_output +1830,2937272,"TERMINAL",0,0,"99",,terminal_output +1831,2938222,"TERMINAL",0,0,"1020",,terminal_output +1832,2939269,"TERMINAL",0,0,"11",,terminal_output +1833,2940344,"TERMINAL",0,0,"22",,terminal_output +1834,2941361,"TERMINAL",0,0,"33",,terminal_output +1835,2942691,"TERMINAL",0,0,"44",,terminal_output +1836,2943510,"TERMINAL",0,0,"55",,terminal_output +1837,2944646,"TERMINAL",0,0,"66",,terminal_output +1838,2945569,"TERMINAL",0,0,"77",,terminal_output +1839,2946619,"TERMINAL",0,0,"88",,terminal_output +1840,2947661,"TERMINAL",0,0,"99",,terminal_output +1841,2948708,"TERMINAL",0,0,"2030",,terminal_output +1842,2949755,"TERMINAL",0,0,"11",,terminal_output +1843,2950813,"TERMINAL",0,0,"22",,terminal_output +1844,2951856,"TERMINAL",0,0,"33",,terminal_output +1845,2952908,"TERMINAL",0,0,"44",,terminal_output +1846,2953962,"TERMINAL",0,0,"56",,terminal_output +1847,2955059,"TERMINAL",0,0,"77",,terminal_output +1848,2956062,"TERMINAL",0,0,"88",,terminal_output +1849,2957105,"TERMINAL",0,0,"99",,terminal_output +1850,2958165,"TERMINAL",0,0,"3040",,terminal_output +1851,2959199,"TERMINAL",0,0,"11",,terminal_output +1852,2960497,"TERMINAL",0,0,"22",,terminal_output +1853,2961300,"TERMINAL",0,0,"33",,terminal_output +1854,2962386,"TERMINAL",0,0,"44",,terminal_output +1855,2963480,"TERMINAL",0,0,"55",,terminal_output +1856,2964484,"TERMINAL",0,0,"66",,terminal_output +1857,2965623,"TERMINAL",0,0,"77",,terminal_output +1858,2966541,"TERMINAL",0,0,"88",,terminal_output +1859,2967586,"TERMINAL",0,0,"99",,terminal_output +1860,2968624,"TERMINAL",0,0,"4050",,terminal_output +1861,2969681,"TERMINAL",0,0,"11",,terminal_output 
+1862,2970834,"TERMINAL",0,0,"22",,terminal_output +1863,2971856,"TERMINAL",0,0,"33",,terminal_output +1864,2972880,"TERMINAL",0,0,"44",,terminal_output +1865,2973905,"TERMINAL",0,0,"55",,terminal_output +1866,2975029,"TERMINAL",0,0,"66",,terminal_output +1867,2975975,"TERMINAL",0,0,"78",,terminal_output +1868,2977076,"TERMINAL",0,0,"99",,terminal_output +1869,2978303,"TERMINAL",0,0,"506:00",,terminal_output +1870,2979124,"TERMINAL",0,0,"11",,terminal_output +1871,2980248,"TERMINAL",0,0,"22",,terminal_output +1872,2981356,"TERMINAL",0,0,"33",,terminal_output +1873,2982453,"TERMINAL",0,0,"44",,terminal_output +1874,2983531,"TERMINAL",0,0,"55",,terminal_output +1875,2984501,"TERMINAL",0,0,"66",,terminal_output +1876,2985398,"TERMINAL",0,0,"77",,terminal_output +1877,2986469,"TERMINAL",0,0,"88",,terminal_output +1878,2987516,"TERMINAL",0,0,"99",,terminal_output +1879,2988844,"TERMINAL",0,0,"8:0010",,terminal_output +1880,2989868,"TERMINAL",0,0,"11",,terminal_output +1881,2990692,"TERMINAL",0,0,"22",,terminal_output +1882,2991710,"TERMINAL",0,0,"33",,terminal_output +1883,2992836,"TERMINAL",0,0,"44",,terminal_output +1884,2993807,"TERMINAL",0,0,"55",,terminal_output +1885,2994887,"TERMINAL",0,0,"66",,terminal_output +1886,2996324,"TERMINAL",0,0,"77",,terminal_output +1887,2997238,"TERMINAL",0,0,"89",,terminal_output +1888,2998057,"TERMINAL",0,0,"1020",,terminal_output +1889,2999055,"TERMINAL",0,0,"11",,terminal_output +1890,3000113,"TERMINAL",0,0,"22",,terminal_output +1891,3001228,"TERMINAL",0,0,"33",,terminal_output +1892,3002354,"TERMINAL",0,0,"44",,terminal_output +1893,3003279,"TERMINAL",0,0,"55",,terminal_output +1894,3004399,"TERMINAL",0,0,"66",,terminal_output +1895,3005336,"TERMINAL",0,0,"77",,terminal_output +1896,3006491,"TERMINAL",0,0,"88",,terminal_output +1897,3007580,"TERMINAL",0,0,"99",,terminal_output +1898,3008605,"TERMINAL",0,0,"2030",,terminal_output +1899,3009523,"TERMINAL",0,0,"11",,terminal_output +1900,3010847,"TERMINAL",0,0,"22",,terminal_output +1901,3011779,"TERMINAL",0,0,"33",,terminal_output +1902,3012662,"TERMINAL",0,0,"44",,terminal_output +1903,3013718,"TERMINAL",0,0,"55",,terminal_output +1904,3014838,"TERMINAL",0,0,"66",,terminal_output +1905,3015863,"TERMINAL",0,0,"77",,terminal_output +1906,3016861,"TERMINAL",0,0,"88",,terminal_output +1907,3017909,"TERMINAL",0,0,"99",,terminal_output +1908,3019033,"TERMINAL",0,0,"3041",,terminal_output +1909,3020007,"TERMINAL",0,0,"22",,terminal_output +1910,3021052,"TERMINAL",0,0,"33",,terminal_output +1911,3022204,"TERMINAL",0,0,"44",,terminal_output +1912,3023229,"TERMINAL",0,0,"55",,terminal_output +1913,3024194,"TERMINAL",0,0,"66",,terminal_output +1914,3025275,"TERMINAL",0,0,"77",,terminal_output +1915,3026297,"TERMINAL",0,0,"88",,terminal_output +1916,3027325,"TERMINAL",0,0,"99",,terminal_output +1917,3028565,"TERMINAL",0,0,"4050",,terminal_output +1918,3029483,"TERMINAL",0,0,"11",,terminal_output +1919,3030801,"TERMINAL",0,0,"22",,terminal_output +1920,3031620,"TERMINAL",0,0,"33",,terminal_output +1921,3032644,"TERMINAL",0,0,"44",,terminal_output +1922,3033769,"TERMINAL",0,0,"55",,terminal_output +1923,3034660,"TERMINAL",0,0,"66",,terminal_output +1924,3035713,"TERMINAL",0,0,"77",,terminal_output +1925,3036839,"TERMINAL",0,0,"88",,terminal_output +1926,3037799,"TERMINAL",0,0,"99",,terminal_output +1927,3038842,"TERMINAL",0,0,"507:00",,terminal_output +1928,3039885,"TERMINAL",0,0,"11",,terminal_output +1929,3040939,"TERMINAL",0,0,"22",,terminal_output +1930,3041986,"TERMINAL",0,0,"34",,terminal_output 
+1931,3043085,"TERMINAL",0,0,"55",,terminal_output +1932,3044069,"TERMINAL",0,0,"66",,terminal_output +1933,3045222,"TERMINAL",0,0,"77",,terminal_output +1934,3046363,"TERMINAL",0,0,"88",,terminal_output +1935,3047214,"TERMINAL",0,0,"99",,terminal_output +1936,3048303,"TERMINAL",0,0,"9:0010",,terminal_output +1937,3049344,"TERMINAL",0,0,"11",,terminal_output +1938,3050449,"TERMINAL",0,0,"22",,terminal_output +1939,3051472,"TERMINAL",0,0,"33",,terminal_output +1940,3052446,"TERMINAL",0,0,"44",,terminal_output +1941,3053521,"TERMINAL",0,0,"55",,terminal_output +1942,3054571,"TERMINAL",0,0,"66",,terminal_output +1943,3055584,"TERMINAL",0,0,"77",,terminal_output +1944,3056901,"TERMINAL",0,0,"88",,terminal_output +1945,3057717,"TERMINAL",0,0,"99",,terminal_output +1946,3058734,"TERMINAL",0,0,"1020",,terminal_output +1947,3059783,"TERMINAL",0,0,"11",,terminal_output +1948,3060887,"TERMINAL",0,0,"22",,terminal_output +1949,3061912,"TERMINAL",0,0,"33",,terminal_output +1950,3063039,"TERMINAL",0,0,"44",,terminal_output +1951,3063968,"TERMINAL",0,0,"56",,terminal_output +1952,3065392,"TERMINAL",0,0,"77",,terminal_output +1953,3066076,"TERMINAL",0,0,"88",,terminal_output +1954,3067234,"TERMINAL",0,0,"99",,terminal_output +1955,3068259,"TERMINAL",0,0,"2030",,terminal_output +1956,3069231,"TERMINAL",0,0,"11",,terminal_output +1957,3070302,"TERMINAL",0,0,"22",,terminal_output +1958,3071428,"TERMINAL",0,0,"33",,terminal_output +1959,3072455,"TERMINAL",0,0,"44",,terminal_output +1960,3073424,"TERMINAL",0,0,"55",,terminal_output +1961,3074469,"TERMINAL",0,0,"66",,terminal_output +1962,3075736,"TERMINAL",0,0,"77",,terminal_output +1963,3076555,"TERMINAL",0,0,"88",,terminal_output +1964,3077613,"TERMINAL",0,0,"99",,terminal_output +1965,3078649,"TERMINAL",0,0,"3040",,terminal_output +1966,3079820,"TERMINAL",0,0,"11",,terminal_output +1967,3080773,"TERMINAL",0,0,"22",,terminal_output +1968,3081804,"TERMINAL",0,0,"33",,terminal_output +1969,3082869,"TERMINAL",0,0,"44",,terminal_output +1970,3084044,"TERMINAL",0,0,"55",,terminal_output +1971,3085084,"TERMINAL",0,0,"67",,terminal_output +1972,3086063,"TERMINAL",0,0,"88",,terminal_output +1973,3087087,"TERMINAL",0,0,"99",,terminal_output +1974,3088108,"TERMINAL",0,0,"4050",,terminal_output +1975,3089177,"TERMINAL",0,0,"11",,terminal_output +1976,3090256,"TERMINAL",0,0,"22",,terminal_output +1977,3091280,"TERMINAL",0,0,"33",,terminal_output +1978,3092554,"TERMINAL",0,0,"44",,terminal_output +1979,3093331,"TERMINAL",0,0,"55",,terminal_output +1980,3094350,"TERMINAL",0,0,"66",,terminal_output +1981,3095680,"TERMINAL",0,0,"77",,terminal_output +1982,3096514,"TERMINAL",0,0,"88",,terminal_output +1983,3097663,"TERMINAL",0,0,"99",,terminal_output +1984,3098540,"TERMINAL",0,0,"508:00",,terminal_output +1985,3099775,"TERMINAL",0,0,"11",,terminal_output +1986,3100632,"TERMINAL",0,0,"22",,terminal_output +1987,3101683,"TERMINAL",0,0,"33",,terminal_output +1988,3102716,"TERMINAL",0,0,"44",,terminal_output +1989,3103765,"TERMINAL",0,0,"55",,terminal_output +1990,3104813,"TERMINAL",0,0,"66",,terminal_output +1991,3105857,"TERMINAL",0,0,"77",,terminal_output +1992,3106892,"TERMINAL",0,0,"88",,terminal_output +1993,3107943,"TERMINAL",0,0,"910",,terminal_output +1994,3109024,"TERMINAL",0,0,"50:011",,terminal_output +1995,3110109,"TERMINAL",0,0,"22",,terminal_output +1996,3111239,"TERMINAL",0,0,"33",,terminal_output +1997,3112258,"TERMINAL",0,0,"44",,terminal_output +1998,3113600,"TERMINAL",0,0,"55",,terminal_output +1999,3114284,"TERMINAL",0,0,"66",,terminal_output 
+2000,3115636,"TERMINAL",0,0,"77",,terminal_output +2001,3116457,"TERMINAL",0,0,"88",,terminal_output +2002,3117482,"TERMINAL",0,0,"99",,terminal_output +2003,3118481,"TERMINAL",0,0,"1020",,terminal_output +2004,3119663,"TERMINAL",0,0,"11",,terminal_output +2005,3120711,"TERMINAL",0,0,"22",,terminal_output +2006,3121611,"TERMINAL",0,0,"33",,terminal_output +2007,3122667,"TERMINAL",0,0,"44",,terminal_output +2008,3123712,"TERMINAL",0,0,"55",,terminal_output +2009,3124846,"TERMINAL",0,0,"66",,terminal_output +2010,3125870,"TERMINAL",0,0,"77",,terminal_output +2011,3126864,"TERMINAL",0,0,"88",,terminal_output +2012,3127916,"TERMINAL",0,0,"99",,terminal_output +2013,3128944,"TERMINAL",0,0,"2031",,terminal_output +2014,3130371,"TERMINAL",0,0,"22",,terminal_output +2015,3131088,"TERMINAL",0,0,"33",,terminal_output +2016,3132419,"TERMINAL",0,0,"44",,terminal_output +2017,3133239,"TERMINAL",0,0,"55",,terminal_output +2018,3134188,"TERMINAL",0,0,"66",,terminal_output +2019,3135238,"TERMINAL",0,0,"77",,terminal_output +2020,3136291,"TERMINAL",0,0,"88",,terminal_output +2021,3137334,"TERMINAL",0,0,"99",,terminal_output +2022,3138455,"TERMINAL",0,0,"3040",,terminal_output +2023,3139479,"TERMINAL",0,0,"11",,terminal_output +2024,3140483,"TERMINAL",0,0,"22",,terminal_output +2025,3141534,"TERMINAL",0,0,"33",,terminal_output +2026,3142654,"TERMINAL",0,0,"44",,terminal_output +2027,3143627,"TERMINAL",0,0,"55",,terminal_output +2028,3144675,"TERMINAL",0,0,"66",,terminal_output +2029,3145728,"TERMINAL",0,0,"77",,terminal_output +2030,3146818,"TERMINAL",0,0,"88",,terminal_output +2031,3147840,"TERMINAL",0,0,"99",,terminal_output +2032,3148998,"TERMINAL",0,0,"4050",,terminal_output +2033,3150028,"TERMINAL",0,0,"11",,terminal_output +2034,3151045,"TERMINAL",0,0,"23",,terminal_output +2035,3152024,"TERMINAL",0,0,"44",,terminal_output +2036,3153297,"TERMINAL",0,0,"55",,terminal_output +2037,3154118,"TERMINAL",0,0,"66",,terminal_output +2038,3155344,"TERMINAL",0,0,"77",,terminal_output +2039,3156216,"TERMINAL",0,0,"88",,terminal_output +2040,3157262,"TERMINAL",0,0,"99",,terminal_output +2041,3158416,"TERMINAL",0,0,"509:00",,terminal_output +2042,3159437,"TERMINAL",0,0,"11",,terminal_output +2043,3160670,"TERMINAL",0,0,"22",,terminal_output +2044,3161461,"TERMINAL",0,0,"33",,terminal_output +2045,3162593,"TERMINAL",0,0,"44",,terminal_output +2046,3163734,"TERMINAL",0,0,"55",,terminal_output +2047,3164623,"TERMINAL",0,0,"66",,terminal_output +2048,3165986,"TERMINAL",0,0,"77",,terminal_output +2049,3166803,"TERMINAL",0,0,"88",,terminal_output +2050,3167771,"TERMINAL",0,0,"99",,terminal_output +2051,3168869,"TERMINAL",0,0,"1:0010",,terminal_output +2052,3169860,"TERMINAL",0,0,"11",,terminal_output +2053,3170909,"TERMINAL",0,0,"22",,terminal_output +2054,3171952,"TERMINAL",0,0,"34",,terminal_output +2055,3173045,"TERMINAL",0,0,"55",,terminal_output +2056,3174046,"TERMINAL",0,0,"66",,terminal_output +2057,3175298,"TERMINAL",0,0,"77",,terminal_output +2058,3176423,"TERMINAL",0,0,"88",,terminal_output +2059,3177239,"TERMINAL",0,0,"99",,terminal_output +2060,3178265,"TERMINAL",0,0,"1020",,terminal_output +2061,3179275,"TERMINAL",0,0,"11",,terminal_output +2062,3180516,"TERMINAL",0,0,"22",,terminal_output +2063,3181437,"TERMINAL",0,0,"33",,terminal_output +2064,3182767,"TERMINAL",0,0,"44",,terminal_output +2065,3183484,"TERMINAL",0,0,"55",,terminal_output +2066,3184517,"TERMINAL",0,0,"66",,terminal_output +2067,3185603,"TERMINAL",0,0,"77",,terminal_output +2068,3187005,"TERMINAL",0,0,"88",,terminal_output 
+2069,3187659,"TERMINAL",0,0,"99",,terminal_output +2070,3188705,"TERMINAL",0,0,"2030",,terminal_output +2071,3189829,"TERMINAL",0,0,"11",,terminal_output +2072,3190851,"TERMINAL",0,0,"22",,terminal_output +2073,3191874,"TERMINAL",0,0,"33",,terminal_output +2074,3193006,"TERMINAL",0,0,"44",,terminal_output +2075,3194026,"TERMINAL",0,0,"56",,terminal_output +2076,3195355,"TERMINAL",0,0,"77",,terminal_output +2077,3196482,"TERMINAL",0,0,"88",,terminal_output +2078,3197300,"TERMINAL",0,0,"99",,terminal_output +2079,3198324,"TERMINAL",0,0,"3040",,terminal_output +2080,3199242,"TERMINAL",0,0,"11",,terminal_output +2081,3200372,"TERMINAL",0,0,"22",,terminal_output +2082,3201482,"TERMINAL",0,0,"33",,terminal_output +2083,3202419,"TERMINAL",0,0,"44",,terminal_output +2084,3203439,"TERMINAL",0,0,"55",,terminal_output +2085,3204472,"TERMINAL",0,0,"66",,terminal_output +2086,3205735,"TERMINAL",0,0,"77",,terminal_output +2087,3206611,"TERMINAL",0,0,"88",,terminal_output +2088,3207635,"TERMINAL",0,0,"99",,terminal_output +2089,3208669,"TERMINAL",0,0,"4050",,terminal_output +2090,3209992,"TERMINAL",0,0,"11",,terminal_output +2091,3210811,"TERMINAL",0,0,"22",,terminal_output +2092,3211831,"TERMINAL",0,0,"33",,terminal_output +2093,3212961,"TERMINAL",0,0,"44",,terminal_output +2094,3213910,"TERMINAL",0,0,"55",,terminal_output +2095,3215005,"TERMINAL",0,0,"67",,terminal_output +2096,3216016,"TERMINAL",0,0,"88",,terminal_output +2097,3217152,"TERMINAL",0,0,"99",,terminal_output +2098,3218176,"TERMINAL",0,0,"5010:00",,terminal_output +2099,3219171,"TERMINAL",0,0,"11",,terminal_output +2100,3220326,"TERMINAL",0,0,"22",,terminal_output +2101,3221453,"TERMINAL",0,0,"33",,terminal_output +2102,3222476,"TERMINAL",0,0,"44",,terminal_output +2103,3223330,"TERMINAL",0,0,"55",,terminal_output +2104,3224420,"TERMINAL",0,0,"66",,terminal_output +2105,3225445,"TERMINAL",0,0,"77",,terminal_output +2106,3226506,"TERMINAL",0,0,"88",,terminal_output +2107,3227592,"TERMINAL",0,0,"99",,terminal_output +2108,3228926,"TERMINAL",0,0,"2:0010",,terminal_output +2109,3229842,"TERMINAL",0,0,"11",,terminal_output +2110,3230711,"TERMINAL",0,0,"22",,terminal_output +2111,3231760,"TERMINAL",0,0,"33",,terminal_output +2112,3232834,"TERMINAL",0,0,"44",,terminal_output +2113,3233935,"TERMINAL",0,0,"55",,terminal_output +2114,3235245,"TERMINAL",0,0,"66",,terminal_output +2115,3236085,"TERMINAL",0,0,"78",,terminal_output +2116,3237029,"TERMINAL",0,0,"99",,terminal_output +2117,3238133,"TERMINAL",0,0,"1020",,terminal_output +2118,3239123,"TERMINAL",0,0,"11",,terminal_output +2119,3240192,"TERMINAL",0,0,"22",,terminal_output +2120,3241514,"TERMINAL",0,0,"33",,terminal_output +2121,3242335,"TERMINAL",0,0,"44",,terminal_output +2122,3243453,"TERMINAL",0,0,"55",,terminal_output +2123,3244474,"TERMINAL",0,0,"66",,terminal_output +2124,3245625,"TERMINAL",0,0,"77",,terminal_output +2125,3246834,"TERMINAL",0,0,"88",,terminal_output +2126,3247529,"TERMINAL",0,0,"99",,terminal_output +2127,3248778,"TERMINAL",0,0,"2030",,terminal_output +2128,3249618,"TERMINAL",0,0,"11",,terminal_output +2129,3250673,"TERMINAL",0,0,"22",,terminal_output +2130,3251717,"TERMINAL",0,0,"33",,terminal_output +2131,3252768,"TERMINAL",0,0,"44",,terminal_output +2132,3253815,"TERMINAL",0,0,"55",,terminal_output +2133,3254918,"TERMINAL",0,0,"66",,terminal_output +2134,3255941,"TERMINAL",0,0,"77",,terminal_output +2135,3256961,"TERMINAL",0,0,"89",,terminal_output +2136,3258099,"TERMINAL",0,0,"3040",,terminal_output +2137,3259038,"TERMINAL",0,0,"11",,terminal_output 
+2138,3260148,"TERMINAL",0,0,"22",,terminal_output +2139,3261171,"TERMINAL",0,0,"33",,terminal_output +2140,3262321,"TERMINAL",0,0,"44",,terminal_output +2141,3263323,"TERMINAL",0,0,"55",,terminal_output +2142,3264259,"TERMINAL",0,0,"66",,terminal_output +2143,3265382,"TERMINAL",0,0,"77",,terminal_output +2144,3266383,"TERMINAL",0,0,"88",,terminal_output +2145,3267407,"TERMINAL",0,0,"99",,terminal_output +2146,3268533,"TERMINAL",0,0,"4050",,terminal_output +2147,3269458,"TERMINAL",0,0,"11",,terminal_output +2148,3270922,"TERMINAL",0,0,"22",,terminal_output +2149,3271695,"TERMINAL",0,0,"33",,terminal_output +2150,3272593,"TERMINAL",0,0,"44",,terminal_output +2151,3273956,"TERMINAL",0,0,"55",,terminal_output +2152,3274674,"TERMINAL",0,0,"66",,terminal_output +2153,3275788,"TERMINAL",0,0,"77",,terminal_output +2154,3276793,"TERMINAL",0,0,"88",,terminal_output +2155,3277939,"TERMINAL",0,0,"99",,terminal_output +2156,3278884,"TERMINAL",0,0,"501:00",,terminal_output +2157,3279927,"TERMINAL",0,0,"11",,terminal_output +2158,3281008,"TERMINAL",0,0,"23",,terminal_output +2159,3282016,"TERMINAL",0,0,"44",,terminal_output +2160,3283362,"TERMINAL",0,0,"55",,terminal_output +2161,3284106,"TERMINAL",0,0,"66",,terminal_output +2162,3285346,"TERMINAL",0,0,"77",,terminal_output +2163,3286388,"TERMINAL",0,0,"88",,terminal_output +2164,3287262,"TERMINAL",0,0,"99",,terminal_output +2165,3288377,"TERMINAL",0,0,"3:0010",,terminal_output +2166,3289399,"TERMINAL",0,0,"11",,terminal_output +2167,3290423,"TERMINAL",0,0,"22",,terminal_output +2168,3291433,"TERMINAL",0,0,"33",,terminal_output +2169,3292675,"TERMINAL",0,0,"44",,terminal_output +2170,3293521,"TERMINAL",0,0,"55",,terminal_output +2171,3294632,"TERMINAL",0,0,"66",,terminal_output +2172,3295620,"TERMINAL",0,0,"77",,terminal_output +2173,3296710,"TERMINAL",0,0,"88",,terminal_output +2174,3297720,"TERMINAL",0,0,"99",,terminal_output +2175,3298750,"TERMINAL",0,0,"1020",,terminal_output +2176,3299839,"TERMINAL",0,0,"11",,terminal_output +2177,3300865,"TERMINAL",0,0,"22",,terminal_output +2178,3301987,"TERMINAL",0,0,"33",,terminal_output +2179,3302931,"TERMINAL",0,0,"44",,terminal_output +2180,3304042,"TERMINAL",0,0,"56",,terminal_output +2181,3305043,"TERMINAL",0,0,"77",,terminal_output +2182,3306084,"TERMINAL",0,0,"88",,terminal_output +2183,3307207,"TERMINAL",0,0,"99",,terminal_output +2184,3308231,"TERMINAL",0,0,"2030",,terminal_output +2185,3309210,"TERMINAL",0,0,"11",,terminal_output +2186,3310623,"TERMINAL",0,0,"22",,terminal_output +2187,3311404,"TERMINAL",0,0,"33",,terminal_output +2188,3312445,"TERMINAL",0,0,"44",,terminal_output +2189,3313451,"TERMINAL",0,0,"55",,terminal_output +2190,3314476,"TERMINAL",0,0,"66",,terminal_output +2191,3315484,"TERMINAL",0,0,"77",,terminal_output +2192,3316621,"TERMINAL",0,0,"88",,terminal_output +2193,3317616,"TERMINAL",0,0,"99",,terminal_output +2194,3318625,"TERMINAL",0,0,"3040",,terminal_output +2195,3319794,"TERMINAL",0,0,"11",,terminal_output +2196,3320921,"TERMINAL",0,0,"22",,terminal_output +2197,3322045,"TERMINAL",0,0,"33",,terminal_output +2198,3323179,"TERMINAL",0,0,"44",,terminal_output +2199,3324301,"TERMINAL",0,0,"55",,terminal_output +2200,3325118,"TERMINAL",0,0,"66",,terminal_output +2201,3326053,"TERMINAL",0,0,"78",,terminal_output +2202,3327060,"TERMINAL",0,0,"99",,terminal_output +2203,3328050,"TERMINAL",0,0,"4050",,terminal_output +2204,3329095,"TERMINAL",0,0,"11",,terminal_output +2205,3330538,"TERMINAL",0,0,"22",,terminal_output +2206,3331356,"TERMINAL",0,0,"33",,terminal_output 
+2207,3332235,"TERMINAL",0,0,"44",,terminal_output +2208,3333281,"TERMINAL",0,0,"55",,terminal_output +2209,3334427,"TERMINAL",0,0,"66",,terminal_output +2210,3335377,"TERMINAL",0,0,"77",,terminal_output +2211,3336474,"TERMINAL",0,0,"88",,terminal_output +2212,3337470,"TERMINAL",0,0,"99",,terminal_output +2213,3338546,"TERMINAL",0,0,"502:00",,terminal_output +2214,3339623,"TERMINAL",0,0,"11",,terminal_output +2215,3340671,"TERMINAL",0,0,"22",,terminal_output +2216,3341676,"TERMINAL",0,0,"33",,terminal_output +2217,3342723,"TERMINAL",0,0,"44",,terminal_output +2218,3343823,"TERMINAL",0,0,"55",,terminal_output +2219,3344866,"TERMINAL",0,0,"66",,terminal_output +2220,3345889,"TERMINAL",0,0,"77",,terminal_output +2221,3347320,"TERMINAL",0,0,"88",,terminal_output +2222,3348037,"TERMINAL",0,0,"910",,terminal_output +2223,3349007,"TERMINAL",0,0,"4:011",,terminal_output +2224,3350088,"TERMINAL",0,0,"22",,terminal_output +2225,3351095,"TERMINAL",0,0,"33",,terminal_output +2226,3352149,"TERMINAL",0,0,"44",,terminal_output +2227,3353241,"TERMINAL",0,0,"55",,terminal_output +2228,3354236,"TERMINAL",0,0,"66",,terminal_output +2229,3355511,"TERMINAL",0,0,"77",,terminal_output +2230,3356428,"TERMINAL",0,0,"88",,terminal_output +2231,3357759,"TERMINAL",0,0,"99",,terminal_output +2232,3358426,"TERMINAL",0,0,"1020",,terminal_output +2233,3359492,"TERMINAL",0,0,"11",,terminal_output +2234,3360829,"TERMINAL",0,0,"22",,terminal_output +2235,3361632,"TERMINAL",0,0,"33",,terminal_output +2236,3362774,"TERMINAL",0,0,"44",,terminal_output +2237,3364042,"TERMINAL",0,0,"55",,terminal_output +2238,3364839,"TERMINAL",0,0,"66",,terminal_output +2239,3365758,"TERMINAL",0,0,"77",,terminal_output +2240,3366802,"TERMINAL",0,0,"88",,terminal_output +2241,3367891,"TERMINAL",0,0,"99",,terminal_output +2242,3368913,"TERMINAL",0,0,"2030",,terminal_output +2243,3369944,"TERMINAL",0,0,"12",,terminal_output +2244,3371063,"TERMINAL",0,0,"33",,terminal_output +2245,3372086,"TERMINAL",0,0,"44",,terminal_output +2246,3373088,"TERMINAL",0,0,"55",,terminal_output +2247,3374130,"TERMINAL",0,0,"66",,terminal_output +2248,3375173,"TERMINAL",0,0,"77",,terminal_output +2249,3376226,"TERMINAL",0,0,"88",,terminal_output +2250,3377306,"TERMINAL",0,0,"99",,terminal_output +2251,3378534,"TERMINAL",0,0,"3040",,terminal_output +2252,3379763,"TERMINAL",0,0,"11",,terminal_output +2253,3380588,"TERMINAL",0,0,"22",,terminal_output +2254,3381511,"TERMINAL",0,0,"33",,terminal_output +2255,3382592,"TERMINAL",0,0,"44",,terminal_output +2256,3383652,"TERMINAL",0,0,"55",,terminal_output +2257,3384643,"TERMINAL",0,0,"66",,terminal_output +2258,3385697,"TERMINAL",0,0,"77",,terminal_output +2259,3386690,"TERMINAL",0,0,"88",,terminal_output +2260,3387747,"TERMINAL",0,0,"99",,terminal_output +2261,3388869,"TERMINAL",0,0,"4050",,terminal_output +2262,3390000,"TERMINAL",0,0,"11",,terminal_output +2263,3390915,"TERMINAL",0,0,"22",,terminal_output +2264,3391943,"TERMINAL",0,0,"33",,terminal_output +2265,3392964,"TERMINAL",0,0,"45",,terminal_output +2266,3394075,"TERMINAL",0,0,"66",,terminal_output +2267,3395325,"TERMINAL",0,0,"77",,terminal_output +2268,3396133,"TERMINAL",0,0,"88",,terminal_output +2269,3397259,"TERMINAL",0,0,"99",,terminal_output +2270,3398485,"TERMINAL",0,0,"503:00",,terminal_output +2271,3399242,"TERMINAL",0,0,"11",,terminal_output +2272,3400289,"TERMINAL",0,0,"22",,terminal_output +2273,3401563,"TERMINAL",0,0,"33",,terminal_output +2274,3402476,"TERMINAL",0,0,"44",,terminal_output +2275,3403499,"TERMINAL",0,0,"55",,terminal_output 
+2276,3404585,"TERMINAL",0,0,"66",,terminal_output +2277,3405553,"TERMINAL",0,0,"77",,terminal_output +2278,3406673,"TERMINAL",0,0,"88",,terminal_output +2279,3407699,"TERMINAL",0,0,"99",,terminal_output +2280,3408923,"TERMINAL",0,0,"5:0010",,terminal_output +2281,3409736,"TERMINAL",0,0,"11",,terminal_output +2282,3410793,"TERMINAL",0,0,"22",,terminal_output +2283,3411832,"TERMINAL",0,0,"33",,terminal_output +2284,3412874,"TERMINAL",0,0,"44",,terminal_output +2285,3413952,"TERMINAL",0,0,"55",,terminal_output +2286,3414970,"TERMINAL",0,0,"67",,terminal_output +2287,3416092,"TERMINAL",0,0,"88",,terminal_output +2288,3417064,"TERMINAL",0,0,"99",,terminal_output +2289,3418105,"TERMINAL",0,0,"1020",,terminal_output +2290,3419312,"TERMINAL",0,0,"11",,terminal_output +2291,3420202,"TERMINAL",0,0,"22",,terminal_output +2292,3421308,"TERMINAL",0,0,"33",,terminal_output +2293,3422331,"TERMINAL",0,0,"44",,terminal_output +2294,3423565,"TERMINAL",0,0,"55",,terminal_output +2295,3424487,"TERMINAL",0,0,"66",,terminal_output +2296,3425505,"TERMINAL",0,0,"77",,terminal_output +2297,3426529,"TERMINAL",0,0,"88",,terminal_output +2298,3427532,"TERMINAL",0,0,"99",,terminal_output +2299,3428922,"TERMINAL",0,0,"2030",,terminal_output +2300,3429687,"TERMINAL",0,0,"11",,terminal_output +2301,3430710,"TERMINAL",0,0,"22",,terminal_output +2302,3431756,"TERMINAL",0,0,"33",,terminal_output +2303,3432769,"TERMINAL",0,0,"44",,terminal_output +2304,3433816,"TERMINAL",0,0,"55",,terminal_output +2305,3434946,"TERMINAL",0,0,"66",,terminal_output +2306,3435918,"TERMINAL",0,0,"77",,terminal_output +2307,3436986,"TERMINAL",0,0,"89",,terminal_output +2308,3437994,"TERMINAL",0,0,"3040",,terminal_output +2309,3439069,"TERMINAL",0,0,"11",,terminal_output +2310,3440095,"TERMINAL",0,0,"22",,terminal_output +2311,3441415,"TERMINAL",0,0,"33",,terminal_output +2312,3442189,"TERMINAL",0,0,"44",,terminal_output +2313,3443515,"TERMINAL",0,0,"55",,terminal_output +2314,3444285,"TERMINAL",0,0,"66",,terminal_output +2315,3445354,"TERMINAL",0,0,"77",,terminal_output +2316,3446394,"TERMINAL",0,0,"88",,terminal_output +2317,3447613,"TERMINAL",0,0,"99",,terminal_output +2318,3448760,"TERMINAL",0,0,"4050",,terminal_output +2319,3449554,"TERMINAL",0,0,"11",,terminal_output +2320,3450702,"TERMINAL",0,0,"22",,terminal_output +2321,3451679,"TERMINAL",0,0,"33",,terminal_output +2322,3452684,"TERMINAL",0,0,"44",,terminal_output +2323,3453758,"TERMINAL",0,0,"55",,terminal_output +2324,3454784,"TERMINAL",0,0,"66",,terminal_output +2325,3455824,"TERMINAL",0,0,"77",,terminal_output +2326,3456930,"TERMINAL",0,0,"88",,terminal_output +2327,3457984,"TERMINAL",0,0,"99",,terminal_output +2328,3459056,"TERMINAL",0,0,"504:01",,terminal_output +2329,3460400,"TERMINAL",0,0,"22",,terminal_output +2330,3461115,"TERMINAL",0,0,"33",,terminal_output +2331,3462125,"TERMINAL",0,0,"44",,terminal_output +2332,3463572,"TERMINAL",0,0,"55",,terminal_output +2333,3464204,"TERMINAL",0,0,"66",,terminal_output +2334,3465312,"TERMINAL",0,0,"77",,terminal_output +2335,3466336,"TERMINAL",0,0,"88",,terminal_output +2336,3467566,"TERMINAL",0,0,"99",,terminal_output +2337,3468590,"TERMINAL",0,0,"6:0010",,terminal_output +2338,3469783,"TERMINAL",0,0,"11",,terminal_output +2339,3470674,"TERMINAL",0,0,"22",,terminal_output +2340,3471662,"TERMINAL",0,0,"33",,terminal_output +2341,3472787,"TERMINAL",0,0,"44",,terminal_output +2342,3473753,"TERMINAL",0,0,"55",,terminal_output +2343,3474791,"TERMINAL",0,0,"66",,terminal_output +2344,3475841,"TERMINAL",0,0,"77",,terminal_output 
+2345,3476980,"TERMINAL",0,0,"88",,terminal_output +2346,3477926,"TERMINAL",0,0,"99",,terminal_output +2347,3479026,"TERMINAL",0,0,"1021",,terminal_output +2348,3480016,"TERMINAL",0,0,"22",,terminal_output +2349,3481071,"TERMINAL",0,0,"33",,terminal_output +2350,3482402,"TERMINAL",0,0,"44",,terminal_output +2351,3483426,"TERMINAL",0,0,"55",,terminal_output +2352,3484215,"TERMINAL",0,0,"66",,terminal_output +2353,3485257,"TERMINAL",0,0,"77",,terminal_output +2354,3486308,"TERMINAL",0,0,"88",,terminal_output +2355,3487350,"TERMINAL",0,0,"99",,terminal_output +2356,3488395,"TERMINAL",0,0,"2030",,terminal_output +2357,3489468,"TERMINAL",0,0,"11",,terminal_output +2358,3490488,"TERMINAL",0,0,"22",,terminal_output +2359,3491612,"TERMINAL",0,0,"33",,terminal_output +2360,3492590,"TERMINAL",0,0,"44",,terminal_output +2361,3493674,"TERMINAL",0,0,"55",,terminal_output +2362,3494729,"TERMINAL",0,0,"66",,terminal_output +2363,3495726,"TERMINAL",0,0,"77",,terminal_output +2364,3496778,"TERMINAL",0,0,"88",,terminal_output +2365,3497824,"TERMINAL",0,0,"99",,terminal_output +2366,3498985,"TERMINAL",0,0,"3040",,terminal_output +2367,3500007,"TERMINAL",0,0,"11",,terminal_output +2368,3500972,"TERMINAL",0,0,"23",,terminal_output +2369,3502052,"TERMINAL",0,0,"44",,terminal_output +2370,3503381,"TERMINAL",0,0,"55",,terminal_output +2371,3504102,"TERMINAL",0,0,"66",,terminal_output +2372,3505508,"TERMINAL",0,0,"77",,terminal_output +2373,3506208,"TERMINAL",0,0,"88",,terminal_output +2374,3507371,"TERMINAL",0,0,"99",,terminal_output +2375,3508380,"TERMINAL",0,0,"4050",,terminal_output +2376,3509408,"TERMINAL",0,0,"11",,terminal_output +2377,3510392,"TERMINAL",0,0,"22",,terminal_output +2378,3511436,"TERMINAL",0,0,"33",,terminal_output +2379,3512524,"TERMINAL",0,0,"44",,terminal_output +2380,3513623,"TERMINAL",0,0,"55",,terminal_output +2381,3514591,"TERMINAL",0,0,"66",,terminal_output +2382,3515680,"TERMINAL",0,0,"77",,terminal_output +2383,3516697,"TERMINAL",0,0,"88",,terminal_output +2384,3517683,"TERMINAL",0,0,"99",,terminal_output +2385,3518739,"TERMINAL",0,0,"505:00",,terminal_output +2386,3519881,"TERMINAL",0,0,"11",,terminal_output +2387,3520815,"TERMINAL",0,0,"22",,terminal_output +2388,3521950,"TERMINAL",0,0,"33",,terminal_output +2389,3522914,"TERMINAL",0,0,"44",,terminal_output +2390,3523974,"TERMINAL",0,0,"55",,terminal_output +2391,3525089,"TERMINAL",0,0,"77",,terminal_output +2392,3526415,"TERMINAL",0,0,"88",,terminal_output +2393,3527108,"TERMINAL",0,0,"99",,terminal_output +2394,3528472,"TERMINAL",0,0,"7:0010",,terminal_output +2395,3529179,"TERMINAL",0,0,"11",,terminal_output +2396,3530298,"TERMINAL",0,0,"22",,terminal_output +2397,3531324,"TERMINAL",0,0,"33",,terminal_output +2398,3532284,"TERMINAL",0,0,"44",,terminal_output +2399,3533389,"TERMINAL",0,0,"55",,terminal_output +2400,3534407,"TERMINAL",0,0,"66",,terminal_output +2401,3535414,"TERMINAL",0,0,"77",,terminal_output +2402,3536474,"TERMINAL",0,0,"88",,terminal_output +2403,3537506,"TERMINAL",0,0,"99",,terminal_output +2404,3538624,"TERMINAL",0,0,"1020",,terminal_output +2405,3539741,"TERMINAL",0,0,"11",,terminal_output +2406,3540807,"TERMINAL",0,0,"22",,terminal_output +2407,3541659,"TERMINAL",0,0,"33",,terminal_output +2408,3542890,"TERMINAL",0,0,"44",,terminal_output +2409,3543815,"TERMINAL",0,0,"55",,terminal_output +2410,3544797,"TERMINAL",0,0,"66",,terminal_output +2411,3545830,"TERMINAL",0,0,"77",,terminal_output +2412,3546881,"TERMINAL",0,0,"88",,terminal_output +2413,3547917,"TERMINAL",0,0,"99",,terminal_output 
+2414,3548965,"TERMINAL",0,0,"2031",,terminal_output +2415,3550020,"TERMINAL",0,0,"22",,terminal_output +2416,3551098,"TERMINAL",0,0,"33",,terminal_output +2417,3552120,"TERMINAL",0,0,"44",,terminal_output +2418,3553160,"TERMINAL",0,0,"55",,terminal_output +2419,3554183,"TERMINAL",0,0,"66",,terminal_output +2420,3555269,"TERMINAL",0,0,"77",,terminal_output +2421,3556260,"TERMINAL",0,0,"88",,terminal_output +2422,3557589,"TERMINAL",0,0,"99",,terminal_output +2423,3558750,"TERMINAL",0,0,"3040",,terminal_output +2424,3559468,"TERMINAL",0,0,"11",,terminal_output +2425,3560539,"TERMINAL",0,0,"22",,terminal_output +2426,3561515,"TERMINAL",0,0,"33",,terminal_output +2427,3562782,"TERMINAL",0,0,"44",,terminal_output +2428,3563794,"TERMINAL",0,0,"55",,terminal_output +2429,3564622,"TERMINAL",0,0,"66",,terminal_output +2430,3565657,"TERMINAL",0,0,"77",,terminal_output +2431,3566808,"TERMINAL",0,0,"88",,terminal_output +2432,3567757,"TERMINAL",0,0,"99",,terminal_output +2433,3568830,"TERMINAL",0,0,"4050",,terminal_output +2434,3570040,"TERMINAL",0,0,"11",,terminal_output +2435,3570906,"TERMINAL",0,0,"22",,terminal_output +2436,3571957,"TERMINAL",0,0,"33",,terminal_output +2437,3572943,"TERMINAL",0,0,"45",,terminal_output +2438,3574042,"TERMINAL",0,0,"66",,terminal_output +2439,3575424,"TERMINAL",0,0,"77",,terminal_output +2440,3576077,"TERMINAL",0,0,"88",,terminal_output +2441,3577108,"TERMINAL",0,0,"99",,terminal_output +2442,3578206,"TERMINAL",0,0,"506:00",,terminal_output +2443,3579220,"TERMINAL",0,0,"11",,terminal_output +2444,3580339,"TERMINAL",0,0,"22",,terminal_output +2445,3581474,"TERMINAL",0,0,"33",,terminal_output +2446,3582399,"TERMINAL",0,0,"44",,terminal_output +2447,3583517,"TERMINAL",0,0,"55",,terminal_output +2448,3584540,"TERMINAL",0,0,"66",,terminal_output +2449,3585591,"TERMINAL",0,0,"77",,terminal_output +2450,3586521,"TERMINAL",0,0,"88",,terminal_output +2451,3587607,"TERMINAL",0,0,"99",,terminal_output +2452,3588706,"TERMINAL",0,0,"8:0010",,terminal_output +2453,3589982,"TERMINAL",0,0,"11",,terminal_output +2454,3591024,"TERMINAL",0,0,"22",,terminal_output +2455,3591741,"TERMINAL",0,0,"33",,terminal_output +2456,3593137,"TERMINAL",0,0,"44",,terminal_output +2457,3593865,"TERMINAL",0,0,"55",,terminal_output +2458,3594889,"TERMINAL",0,0,"66",,terminal_output +2459,3595948,"TERMINAL",0,0,"77",,terminal_output +2460,3596994,"TERMINAL",0,0,"89",,terminal_output +2461,3598030,"TERMINAL",0,0,"1020",,terminal_output +2462,3599097,"TERMINAL",0,0,"11",,terminal_output +2463,3600131,"TERMINAL",0,0,"22",,terminal_output +2464,3601212,"TERMINAL",0,0,"33",,terminal_output +2465,3602300,"TERMINAL",0,0,"44",,terminal_output +2466,3603463,"TERMINAL",0,0,"55",,terminal_output +2467,3604540,"TERMINAL",0,0,"66",,terminal_output +2468,3605407,"TERMINAL",0,0,"77",,terminal_output +2469,3606533,"TERMINAL",0,0,"88",,terminal_output +2470,3607488,"TERMINAL",0,0,"99",,terminal_output +2471,3608581,"TERMINAL",0,0,"2030",,terminal_output +2472,3609824,"TERMINAL",0,0,"11",,terminal_output +2473,3610656,"TERMINAL",0,0,"22",,terminal_output +2474,3612060,"TERMINAL",0,0,"33",,terminal_output +2475,3612746,"TERMINAL",0,0,"44",,terminal_output +2476,3613753,"TERMINAL",0,0,"55",,terminal_output +2477,3614804,"TERMINAL",0,0,"66",,terminal_output +2478,3616052,"TERMINAL",0,0,"77",,terminal_output +2479,3616898,"TERMINAL",0,0,"88",,terminal_output +2480,3617957,"TERMINAL",0,0,"940",,terminal_output +2481,3618992,"TERMINAL",0,0,"311",,terminal_output +2482,3620033,"TERMINAL",0,0,"22",,terminal_output 
+2483,3621172,"TERMINAL",0,0,"33",,terminal_output +2484,3622199,"TERMINAL",0,0,"44",,terminal_output +2485,3623214,"TERMINAL",0,0,"55",,terminal_output +2486,3624216,"TERMINAL",0,0,"66",,terminal_output +2487,3625272,"TERMINAL",0,0,"77",,terminal_output +2488,3626632,"TERMINAL",0,0,"88",,terminal_output +2489,3627520,"TERMINAL",0,0,"99",,terminal_output +2490,3628646,"TERMINAL",0,0,"4050",,terminal_output +2491,3629472,"TERMINAL",0,0,"11",,terminal_output +2492,3630598,"TERMINAL",0,0,"22",,terminal_output +2493,3631612,"TERMINAL",0,0,"33",,terminal_output +2494,3632749,"TERMINAL",0,0,"44",,terminal_output +2495,3633651,"TERMINAL",0,0,"55",,terminal_output +2496,3634674,"TERMINAL",0,0,"66",,terminal_output +2497,3636124,"TERMINAL",0,0,"77",,terminal_output +2498,3636947,"TERMINAL",0,0,"88",,terminal_output +2499,3637854,"TERMINAL",0,0,"99",,terminal_output +2500,3638996,"TERMINAL",0,0,"507:00",,terminal_output +2501,3639902,"TERMINAL",0,0,"11",,terminal_output +2502,3640956,"TERMINAL",0,0,"23",,terminal_output +2503,3642015,"TERMINAL",0,0,"44",,terminal_output +2504,3643039,"TERMINAL",0,0,"55",,terminal_output +2505,3644224,"TERMINAL",0,0,"66",,terminal_output +2506,3647406,"TERMINAL",0,0,"79",,terminal_output +2507,3648504,"TERMINAL",0,0,"9:0010",,terminal_output +2508,3649383,"TERMINAL",0,0,"11",,terminal_output +2509,3650555,"TERMINAL",0,0,"22",,terminal_output +2510,3651464,"TERMINAL",0,0,"33",,terminal_output +2511,3652840,"TERMINAL",0,0,"44",,terminal_output +2512,3653510,"TERMINAL",0,0,"55",,terminal_output +2513,3654645,"TERMINAL",0,0,"66",,terminal_output +2514,3655584,"TERMINAL",0,0,"77",,terminal_output +2515,3656884,"TERMINAL",0,0,"88",,terminal_output +2516,3657718,"TERMINAL",0,0,"99",,terminal_output +2517,3658829,"TERMINAL",0,0,"1020",,terminal_output +2518,3659897,"TERMINAL",0,0,"11",,terminal_output +2519,3660846,"TERMINAL",0,0,"22",,terminal_output +2520,3661874,"TERMINAL",0,0,"33",,terminal_output +2521,3662916,"TERMINAL",0,0,"44",,terminal_output +2522,3663952,"TERMINAL",0,0,"56",,terminal_output +2523,3665086,"TERMINAL",0,0,"77",,terminal_output +2524,3666054,"TERMINAL",0,0,"88",,terminal_output +2525,3667097,"TERMINAL",0,0,"99",,terminal_output +2526,3668357,"TERMINAL",0,0,"2030",,terminal_output +2527,3669195,"TERMINAL",0,0,"11",,terminal_output +2528,3670593,"TERMINAL",0,0,"22",,terminal_output +2529,3671414,"TERMINAL",0,0,"33",,terminal_output +2530,3672463,"TERMINAL",0,0,"44",,terminal_output +2531,3673353,"TERMINAL",0,0,"55",,terminal_output +2532,3674393,"TERMINAL",0,0,"66",,terminal_output +2533,3675507,"TERMINAL",0,0,"77",,terminal_output +2534,3676491,"TERMINAL",0,0,"88",,terminal_output +2535,3677533,"TERMINAL",0,0,"99",,terminal_output +2536,3678678,"TERMINAL",0,0,"3040",,terminal_output +2537,3679633,"TERMINAL",0,0,"11",,terminal_output +2538,3680734,"TERMINAL",0,0,"22",,terminal_output +2539,3681757,"TERMINAL",0,0,"33",,terminal_output +2540,3682781,"TERMINAL",0,0,"44",,terminal_output +2541,3684045,"TERMINAL",0,0,"55",,terminal_output +2542,3684841,"TERMINAL",0,0,"66",,terminal_output +2543,3685887,"TERMINAL",0,0,"77",,terminal_output +2544,3686938,"TERMINAL",0,0,"88",,terminal_output +2545,3687974,"TERMINAL",0,0,"950",,terminal_output +2546,3689047,"TERMINAL",0,0,"411",,terminal_output +2547,3690144,"TERMINAL",0,0,"22",,terminal_output +2548,3691166,"TERMINAL",0,0,"33",,terminal_output +2549,3692204,"TERMINAL",0,0,"44",,terminal_output +2550,3693518,"TERMINAL",0,0,"55",,terminal_output +2551,3694249,"TERMINAL",0,0,"66",,terminal_output 
+2552,3695463,"TERMINAL",0,0,"77",,terminal_output +2553,3696690,"TERMINAL",0,0,"88",,terminal_output +2554,3697410,"TERMINAL",0,0,"99",,terminal_output +2555,3698979,"TERMINAL",0,0,"508:01",,terminal_output +2556,3700024,"TERMINAL",0,0,"22",,terminal_output +2557,3701192,"TERMINAL",0,0,"33",,terminal_output +2558,3702216,"TERMINAL",0,0,"44",,terminal_output +2559,3703165,"TERMINAL",0,0,"55",,terminal_output +2560,3704214,"TERMINAL",0,0,"66",,terminal_output +2561,3705593,"TERMINAL",0,0,"77",,terminal_output +2562,3706513,"TERMINAL",0,0,"88",,terminal_output +2563,3707538,"TERMINAL",0,0,"99",,terminal_output +2564,3708388,"TERMINAL",0,0,"6:00:0010",,terminal_output +2565,3709482,"TERMINAL",0,0,"11",,terminal_output +2566,3710829,"TERMINAL",0,0,"22",,terminal_output +2567,3711528,"TERMINAL",0,0,"33",,terminal_output +2568,3712899,"TERMINAL",0,0,"44",,terminal_output +2569,3713679,"TERMINAL",0,0,"55",,terminal_output +2570,3714671,"TERMINAL",0,0,"66",,terminal_output +2571,3716034,"TERMINAL",0,0,"77",,terminal_output +2572,3716900,"TERMINAL",0,0,"88",,terminal_output +2573,3717796,"TERMINAL",0,0,"99",,terminal_output +2574,3718826,"TERMINAL",0,0,"1020",,terminal_output +2575,3720035,"TERMINAL",0,0,"11",,terminal_output +2576,3720914,"TERMINAL",0,0,"22",,terminal_output +2577,3721956,"TERMINAL",0,0,"34",,terminal_output +2578,3723014,"TERMINAL",0,0,"55",,terminal_output +2579,3724082,"TERMINAL",0,0,"66",,terminal_output +2580,3725167,"TERMINAL",0,0,"77",,terminal_output +2581,3726194,"TERMINAL",0,0,"88",,terminal_output +2582,3727305,"TERMINAL",0,0,"99",,terminal_output +2583,3728629,"TERMINAL",0,0,"2030",,terminal_output +2584,3729276,"TERMINAL",0,0,"11",,terminal_output +2585,3730664,"TERMINAL",0,0,"22",,terminal_output +2586,3731588,"TERMINAL",0,0,"33",,terminal_output +2587,3732445,"TERMINAL",0,0,"44",,terminal_output +2588,3733459,"TERMINAL",0,0,"55",,terminal_output +2589,3734593,"TERMINAL",0,0,"66",,terminal_output +2590,3735554,"TERMINAL",0,0,"77",,terminal_output +2591,3736811,"TERMINAL",0,0,"88",,terminal_output +2592,3737635,"TERMINAL",0,0,"99",,terminal_output +2593,3738952,"TERMINAL",0,0,"3040",,terminal_output +2594,3739773,"TERMINAL",0,0,"11",,terminal_output +2595,3741106,"TERMINAL",0,0,"22",,terminal_output +2596,3741873,"TERMINAL",0,0,"33",,terminal_output +2597,3742848,"TERMINAL",0,0,"44",,terminal_output +2598,3744051,"TERMINAL",0,0,"55",,terminal_output +2599,3744917,"TERMINAL",0,0,"66",,terminal_output +2600,3746118,"TERMINAL",0,0,"78",,terminal_output +2601,3747001,"TERMINAL",0,0,"99",,terminal_output +2602,3748065,"TERMINAL",0,0,"4050",,terminal_output +2603,3749088,"TERMINAL",0,0,"11",,terminal_output +2604,3750237,"TERMINAL",0,0,"22",,terminal_output +2605,3751176,"TERMINAL",0,0,"33",,terminal_output +2606,3752258,"TERMINAL",0,0,"44",,terminal_output +2607,3753589,"TERMINAL",0,0,"55",,terminal_output +2608,3754281,"TERMINAL",0,0,"66",,terminal_output +2609,3755532,"TERMINAL",0,0,"77",,terminal_output +2610,3756586,"TERMINAL",0,0,"88",,terminal_output +2611,3757514,"TERMINAL",0,0,"99",,terminal_output +2612,3758499,"TERMINAL",0,0,"509:00",,terminal_output +2613,3759523,"TERMINAL",0,0,"11",,terminal_output +2614,3760648,"TERMINAL",0,0,"22",,terminal_output +2615,3761672,"TERMINAL",0,0,"33",,terminal_output +2616,3762633,"TERMINAL",0,0,"44",,terminal_output +2617,3763718,"TERMINAL",0,0,"55",,terminal_output +2618,3764696,"TERMINAL",0,0,"66",,terminal_output +2619,3765763,"TERMINAL",0,0,"77",,terminal_output +2620,3767095,"TERMINAL",0,0,"88",,terminal_output 
+2621,3767808,"TERMINAL",0,0,"99",,terminal_output +2622,3769045,"TERMINAL",0,0,"1:0010",,terminal_output +2623,3769891,"TERMINAL",0,0,"11",,terminal_output +2624,3770923,"TERMINAL",0,0,"22",,terminal_output +2625,3771968,"TERMINAL",0,0,"34",,terminal_output +2626,3773002,"TERMINAL",0,0,"55",,terminal_output +2627,3774045,"TERMINAL",0,0,"66",,terminal_output +2628,3775180,"TERMINAL",0,0,"77",,terminal_output +2629,3776414,"TERMINAL",0,0,"88",,terminal_output +2630,3777161,"TERMINAL",0,0,"99",,terminal_output +2631,3778234,"TERMINAL",0,0,"1020",,terminal_output +2632,3779236,"TERMINAL",0,0,"11",,terminal_output +2633,3780401,"TERMINAL",0,0,"22",,terminal_output +2634,3781630,"TERMINAL",0,0,"33",,terminal_output +2635,3782456,"TERMINAL",0,0,"44",,terminal_output +2636,3783476,"TERMINAL",0,0,"55",,terminal_output +2637,3784530,"TERMINAL",0,0,"66",,terminal_output +2638,3785633,"TERMINAL",0,0,"77",,terminal_output +2639,3786865,"TERMINAL",0,0,"88",,terminal_output +2640,3787576,"TERMINAL",0,0,"99",,terminal_output +2641,3789117,"TERMINAL",0,0,"2030",,terminal_output +2642,3789739,"TERMINAL",0,0,"11",,terminal_output +2643,3790744,"TERMINAL",0,0,"22",,terminal_output +2644,3791834,"TERMINAL",0,0,"33",,terminal_output +2645,3792796,"TERMINAL",0,0,"44",,terminal_output +2646,3793811,"TERMINAL",0,0,"55",,terminal_output +2647,3794828,"TERMINAL",0,0,"66",,terminal_output +2648,3795958,"TERMINAL",0,0,"77",,terminal_output +2649,3796908,"TERMINAL",0,0,"88",,terminal_output +2650,3797943,"TERMINAL",0,0,"940",,terminal_output +2651,3799024,"TERMINAL",0,0,"311",,terminal_output +2652,3800019,"TERMINAL",0,0,"22",,terminal_output +2653,3801071,"TERMINAL",0,0,"33",,terminal_output +2654,3802196,"TERMINAL",0,0,"44",,terminal_output +2655,3803140,"TERMINAL",0,0,"55",,terminal_output +2656,3804195,"TERMINAL",0,0,"66",,terminal_output +2657,3805274,"TERMINAL",0,0,"77",,terminal_output +2658,3806288,"TERMINAL",0,0,"88",,terminal_output +2659,3807421,"TERMINAL",0,0,"99",,terminal_output +2660,3808564,"TERMINAL",0,0,"4050",,terminal_output +2661,3809465,"TERMINAL",0,0,"11",,terminal_output +2662,3810614,"TERMINAL",0,0,"22",,terminal_output +2663,3811457,"TERMINAL",0,0,"33",,terminal_output +2664,3812532,"TERMINAL",0,0,"44",,terminal_output +2665,3813606,"TERMINAL",0,0,"55",,terminal_output +2666,3814598,"TERMINAL",0,0,"66",,terminal_output +2667,3816024,"TERMINAL",0,0,"77",,terminal_output +2668,3817163,"TERMINAL",0,0,"88",,terminal_output +2669,3817688,"TERMINAL",0,0,"99",,terminal_output +2670,3819100,"TERMINAL",0,0,"5020:00",,terminal_output +2671,3819800,"TERMINAL",0,0,"11",,terminal_output +2672,3820930,"TERMINAL",0,0,"22",,terminal_output +2673,3821882,"TERMINAL",0,0,"33",,terminal_output +2674,3822910,"TERMINAL",0,0,"44",,terminal_output +2675,3824056,"TERMINAL",0,0,"56",,terminal_output +2676,3824983,"TERMINAL",0,0,"77",,terminal_output +2677,3826021,"TERMINAL",0,0,"88",,terminal_output +2678,3827166,"TERMINAL",0,0,"99",,terminal_output +2679,3828098,"TERMINAL",0,0,"2:0010",,terminal_output +2680,3829143,"TERMINAL",0,0,"11",,terminal_output +2681,3830186,"TERMINAL",0,0,"22",,terminal_output +2682,3831358,"TERMINAL",0,0,"33",,terminal_output +2683,3832248,"TERMINAL",0,0,"44",,terminal_output +2684,3833612,"TERMINAL",0,0,"55",,terminal_output +2685,3834431,"TERMINAL",0,0,"66",,terminal_output +2686,3835555,"TERMINAL",0,0,"77",,terminal_output +2687,3836546,"TERMINAL",0,0,"88",,terminal_output +2688,3837602,"TERMINAL",0,0,"99",,terminal_output +2689,3838582,"TERMINAL",0,0,"1020",,terminal_output 
+2690,3839648,"TERMINAL",0,0,"11",,terminal_output +2691,3840675,"TERMINAL",0,0,"22",,terminal_output +2692,3841701,"TERMINAL",0,0,"33",,terminal_output +2693,3842673,"TERMINAL",0,0,"44",,terminal_output +2694,3843745,"TERMINAL",0,0,"55",,terminal_output +2695,3844720,"TERMINAL",0,0,"66",,terminal_output +2696,3845895,"TERMINAL",0,0,"77",,terminal_output +2697,3846815,"TERMINAL",0,0,"88",,terminal_output +2698,3847840,"TERMINAL",0,0,"99",,terminal_output +2699,3849045,"TERMINAL",0,0,"2030",,terminal_output +2700,3849909,"TERMINAL",0,0,"11",,terminal_output +2701,3851086,"TERMINAL",0,0,"23",,terminal_output +2702,3851999,"TERMINAL",0,0,"44",,terminal_output +2703,3853032,"TERMINAL",0,0,"55",,terminal_output +2704,3854073,"TERMINAL",0,0,"66",,terminal_output +2705,3855206,"TERMINAL",0,0,"77",,terminal_output +2706,3856153,"TERMINAL",0,0,"88",,terminal_output +2707,3857251,"TERMINAL",0,0,"99",,terminal_output +2708,3858221,"TERMINAL",0,0,"3040",,terminal_output +2709,3859267,"TERMINAL",0,0,"11",,terminal_output +2710,3860323,"TERMINAL",0,0,"22",,terminal_output +2711,3861345,"TERMINAL",0,0,"33",,terminal_output +2712,3862780,"TERMINAL",0,0,"44",,terminal_output +2713,3863704,"TERMINAL",0,0,"55",,terminal_output +2714,3864555,"TERMINAL",0,0,"66",,terminal_output +2715,3865850,"TERMINAL",0,0,"77",,terminal_output +2716,3866871,"TERMINAL",0,0,"88",,terminal_output +2717,3867767,"TERMINAL",0,0,"99",,terminal_output +2718,3868715,"TERMINAL",0,0,"4050",,terminal_output +2719,3869744,"TERMINAL",0,0,"11",,terminal_output +2720,3870966,"TERMINAL",0,0,"22",,terminal_output +2721,3871994,"TERMINAL",0,0,"33",,terminal_output +2722,3872825,"TERMINAL",0,0,"44",,terminal_output +2723,3873855,"TERMINAL",0,0,"55",,terminal_output +2724,3874855,"TERMINAL",0,0,"66",,terminal_output +2725,3875882,"TERMINAL",0,0,"77",,terminal_output +2726,3877003,"TERMINAL",0,0,"88",,terminal_output +2727,3877953,"TERMINAL",0,0,"91:00",,terminal_output +2728,3878997,"TERMINAL",0,0,"511",,terminal_output +2729,3880031,"TERMINAL",0,0,"22",,terminal_output +2730,3881095,"TERMINAL",0,0,"33",,terminal_output +2731,3882103,"TERMINAL",0,0,"44",,terminal_output +2732,3883141,"TERMINAL",0,0,"55",,terminal_output +2733,3884190,"TERMINAL",0,0,"66",,terminal_output +2734,3885219,"TERMINAL",0,0,"77",,terminal_output +2735,3886258,"TERMINAL",0,0,"88",,terminal_output +2736,3887299,"TERMINAL",0,0,"99",,terminal_output +2737,3888568,"TERMINAL",0,0,"3:0010",,terminal_output +2738,3889486,"TERMINAL",0,0,"11",,terminal_output +2739,3890511,"TERMINAL",0,0,"22",,terminal_output +2740,3891460,"TERMINAL",0,0,"33",,terminal_output +2741,3892565,"TERMINAL",0,0,"44",,terminal_output +2742,3893580,"TERMINAL",0,0,"55",,terminal_output +2743,3894561,"TERMINAL",0,0,"66",,terminal_output +2744,3895602,"TERMINAL",0,0,"77",,terminal_output +2745,3896751,"TERMINAL",0,0,"88",,terminal_output +2746,3897679,"TERMINAL",0,0,"99",,terminal_output +2747,3898840,"TERMINAL",0,0,"1020",,terminal_output +2748,3899761,"TERMINAL",0,0,"11",,terminal_output +2749,3900847,"TERMINAL",0,0,"22",,terminal_output +2750,3901870,"TERMINAL",0,0,"33",,terminal_output +2751,3902894,"TERMINAL",0,0,"44",,terminal_output +2752,3903923,"TERMINAL",0,0,"55",,terminal_output +2753,3904963,"TERMINAL",0,0,"67",,terminal_output +2754,3906003,"TERMINAL",0,0,"88",,terminal_output +2755,3907037,"TERMINAL",0,0,"99",,terminal_output +2756,3908115,"TERMINAL",0,0,"2030",,terminal_output +2757,3909124,"TERMINAL",0,0,"11",,terminal_output +2758,3910261,"TERMINAL",0,0,"22",,terminal_output 
+2759,3911199,"TERMINAL",0,0,"33",,terminal_output +2760,3912445,"TERMINAL",0,0,"44",,terminal_output +2761,3913283,"TERMINAL",0,0,"55",,terminal_output +2762,3914355,"TERMINAL",0,0,"66",,terminal_output +2763,3915685,"TERMINAL",0,0,"77",,terminal_output +2764,3916511,"TERMINAL",0,0,"88",,terminal_output +2765,3917637,"TERMINAL",0,0,"99",,terminal_output +2766,3918755,"TERMINAL",0,0,"3040",,terminal_output +2767,3919572,"TERMINAL",0,0,"11",,terminal_output +2768,3920699,"TERMINAL",0,0,"22",,terminal_output +2769,3921999,"TERMINAL",0,0,"33",,terminal_output +2770,3922848,"TERMINAL",0,0,"44",,terminal_output +2771,3923768,"TERMINAL",0,0,"55",,terminal_output +2772,3924752,"TERMINAL",0,0,"66",,terminal_output +2773,3925857,"TERMINAL",0,0,"77",,terminal_output +2774,3926941,"TERMINAL",0,0,"88",,terminal_output +2775,3927879,"TERMINAL",0,0,"99",,terminal_output +2776,3928999,"TERMINAL",0,0,"4050",,terminal_output +2777,3929973,"TERMINAL",0,0,"12",,terminal_output +2778,3931044,"TERMINAL",0,0,"33",,terminal_output +2779,3932069,"TERMINAL",0,0,"44",,terminal_output +2780,3933185,"TERMINAL",0,0,"55",,terminal_output +2781,3934161,"TERMINAL",0,0,"66",,terminal_output +2782,3935232,"TERMINAL",0,0,"77",,terminal_output +2783,3936359,"TERMINAL",0,0,"88",,terminal_output +2784,3937483,"TERMINAL",0,0,"99",,terminal_output +2785,3938610,"TERMINAL",0,0,"502:00",,terminal_output +2786,3939432,"TERMINAL",0,0,"11",,terminal_output +2787,3940420,"TERMINAL",0,0,"22",,terminal_output +2788,3941785,"TERMINAL",0,0,"33",,terminal_output +2789,3942810,"TERMINAL",0,0,"44",,terminal_output +2790,3943623,"TERMINAL",0,0,"55",,terminal_output +2791,3944619,"TERMINAL",0,0,"66",,terminal_output +2792,3945771,"TERMINAL",0,0,"77",,terminal_output +2793,3946819,"TERMINAL",0,0,"88",,terminal_output +2794,3947857,"TERMINAL",0,0,"99",,terminal_output +2795,3948943,"TERMINAL",0,0,"4:0010",,terminal_output +2796,3949976,"TERMINAL",0,0,"12",,terminal_output +2797,3951023,"TERMINAL",0,0,"33",,terminal_output +2798,3952115,"TERMINAL",0,0,"44",,terminal_output +2799,3953190,"TERMINAL",0,0,"55",,terminal_output +2800,3955039,"TERMINAL",0,0,"67",,terminal_output +2801,3956113,"TERMINAL",0,0,"88",,terminal_output +2802,3957131,"TERMINAL",0,0,"99",,terminal_output +2803,3958180,"TERMINAL",0,0,"1020",,terminal_output +2804,3959267,"TERMINAL",0,0,"11",,terminal_output +2805,3960269,"TERMINAL",0,0,"22",,terminal_output +2806,3961327,"TERMINAL",0,0,"33",,terminal_output +2807,3962758,"TERMINAL",0,0,"44",,terminal_output +2808,3963681,"TERMINAL",0,0,"55",,terminal_output +2809,3964508,"TERMINAL",0,0,"66",,terminal_output +2810,3965624,"TERMINAL",0,0,"77",,terminal_output +2811,3966647,"TERMINAL",0,0,"88",,terminal_output +2812,3967610,"TERMINAL",0,0,"99",,terminal_output +2813,3968796,"TERMINAL",0,0,"2030",,terminal_output +2814,3969716,"TERMINAL",0,0,"11",,terminal_output +2815,3970845,"TERMINAL",0,0,"22",,terminal_output +2816,3971970,"TERMINAL",0,0,"33",,terminal_output +2817,3972890,"TERMINAL",0,0,"44",,terminal_output +2818,3973889,"TERMINAL",0,0,"55",,terminal_output +2819,3974939,"TERMINAL",0,0,"67",,terminal_output +2820,3975986,"TERMINAL",0,0,"88",,terminal_output +2821,3977029,"TERMINAL",0,0,"99",,terminal_output +2822,3978108,"TERMINAL",0,0,"3040",,terminal_output +2823,3979126,"TERMINAL",0,0,"11",,terminal_output +2824,3980176,"TERMINAL",0,0,"22",,terminal_output +2825,3981236,"TERMINAL",0,0,"33",,terminal_output +2826,3982303,"TERMINAL",0,0,"44",,terminal_output +2827,3983431,"TERMINAL",0,0,"55",,terminal_output 
+2828,3984592,"TERMINAL",0,0,"66",,terminal_output +2829,3985477,"TERMINAL",0,0,"77",,terminal_output +2830,3986453,"TERMINAL",0,0,"88",,terminal_output +2831,3987523,"TERMINAL",0,0,"99",,terminal_output +2832,3988649,"TERMINAL",0,0,"4050",,terminal_output +2833,3989594,"TERMINAL",0,0,"11",,terminal_output +2834,3990696,"TERMINAL",0,0,"22",,terminal_output +2835,3991718,"TERMINAL",0,0,"33",,terminal_output +2836,3992890,"TERMINAL",0,0,"44",,terminal_output +2837,3993914,"TERMINAL",0,0,"55",,terminal_output +2838,3994891,"TERMINAL",0,0,"66",,terminal_output +2839,3995933,"TERMINAL",0,0,"77",,terminal_output +2840,3996964,"TERMINAL",0,0,"88",,terminal_output +2841,3998203,"TERMINAL",0,0,"93:00",,terminal_output +2842,3999086,"TERMINAL",0,0,"511",,terminal_output +2843,4000117,"TERMINAL",0,0,"22",,terminal_output +2844,4001140,"TERMINAL",0,0,"33",,terminal_output +2845,4002174,"TERMINAL",0,0,"44",,terminal_output +2846,4003388,"TERMINAL",0,0,"55",,terminal_output +2847,4004305,"TERMINAL",0,0,"66",,terminal_output +2848,4005351,"TERMINAL",0,0,"77",,terminal_output +2849,4006774,"TERMINAL",0,0,"88",,terminal_output +2850,4007401,"TERMINAL",0,0,"99",,terminal_output +2851,4008519,"TERMINAL",0,0,"5:0010",,terminal_output +2852,4009551,"TERMINAL",0,0,"11",,terminal_output +2853,4010551,"TERMINAL",0,0,"22",,terminal_output +2854,4011599,"TERMINAL",0,0,"33",,terminal_output +2855,4012651,"TERMINAL",0,0,"44",,terminal_output +2856,4013810,"TERMINAL",0,0,"55",,terminal_output +2857,4014727,"TERMINAL",0,0,"66",,terminal_output +2858,4016075,"TERMINAL",0,0,"77",,terminal_output +2859,4016896,"TERMINAL",0,0,"88",,terminal_output +2860,4017904,"TERMINAL",0,0,"99",,terminal_output +2861,4018940,"TERMINAL",0,0,"1020",,terminal_output +2862,4020270,"TERMINAL",0,0,"12",,terminal_output +2863,4021091,"TERMINAL",0,0,"33",,terminal_output +2864,4022077,"TERMINAL",0,0,"44",,terminal_output +2865,4023344,"TERMINAL",0,0,"55",,terminal_output +2866,4024149,"TERMINAL",0,0,"66",,terminal_output +2867,4025335,"TERMINAL",0,0,"77",,terminal_output +2868,4026234,"TERMINAL",0,0,"88",,terminal_output +2869,4027336,"TERMINAL",0,0,"99",,terminal_output +2870,4028664,"TERMINAL",0,0,"2030",,terminal_output +2871,4029494,"TERMINAL",0,0,"11",,terminal_output +2872,4030429,"TERMINAL",0,0,"22",,terminal_output +2873,4031572,"TERMINAL",0,0,"33",,terminal_output +2874,4032522,"TERMINAL",0,0,"44",,terminal_output +2875,4033588,"TERMINAL",0,0,"55",,terminal_output +2876,4034609,"TERMINAL",0,0,"66",,terminal_output +2877,4035943,"TERMINAL",0,0,"77",,terminal_output +2878,4036753,"TERMINAL",0,0,"88",,terminal_output +2879,4037924,"TERMINAL",0,0,"99",,terminal_output +2880,4038797,"TERMINAL",0,0,"3040",,terminal_output +2881,4039913,"TERMINAL",0,0,"11",,terminal_output +2882,4040948,"TERMINAL",0,0,"22",,terminal_output +2883,4041942,"TERMINAL",0,0,"33",,terminal_output +2884,4043281,"TERMINAL",0,0,"45",,terminal_output +2885,4044021,"TERMINAL",0,0,"66",,terminal_output +2886,4045069,"TERMINAL",0,0,"77",,terminal_output +2887,4046115,"TERMINAL",0,0,"88",,terminal_output +2888,4047216,"TERMINAL",0,0,"99",,terminal_output +2889,4048319,"TERMINAL",0,0,"4050",,terminal_output +2890,4049307,"TERMINAL",0,0,"11",,terminal_output +2891,4050355,"TERMINAL",0,0,"22",,terminal_output +2892,4051385,"TERMINAL",0,0,"33",,terminal_output +2893,4052479,"TERMINAL",0,0,"44",,terminal_output +2894,4053436,"TERMINAL",0,0,"55",,terminal_output +2895,4054592,"TERMINAL",0,0,"66",,terminal_output +2896,4055782,"TERMINAL",0,0,"77",,terminal_output 
+2897,4056600,"TERMINAL",0,0,"88",,terminal_output +2898,4057622,"TERMINAL",0,0,"99",,terminal_output +2899,4058952,"TERMINAL",0,0,"504:00",,terminal_output +2900,4059976,"TERMINAL",0,0,"11",,terminal_output +2901,4060795,"TERMINAL",0,0,"22",,terminal_output +2902,4062106,"TERMINAL",0,0,"33",,terminal_output +2903,4063250,"TERMINAL",0,0,"44",,terminal_output +2904,4064004,"TERMINAL",0,0,"55",,terminal_output +2905,4065093,"TERMINAL",0,0,"67",,terminal_output +2906,4066012,"TERMINAL",0,0,"88",,terminal_output +2907,4067038,"TERMINAL",0,0,"99",,terminal_output +2908,4068162,"TERMINAL",0,0,"6:0010",,terminal_output +2909,4069134,"TERMINAL",0,0,"11",,terminal_output +2910,4070181,"TERMINAL",0,0,"22",,terminal_output +2911,4071227,"TERMINAL",0,0,"33",,terminal_output +2912,4072360,"TERMINAL",0,0,"44",,terminal_output +2913,4073353,"TERMINAL",0,0,"55",,terminal_output +2914,4074410,"TERMINAL",0,0,"66",,terminal_output +2915,4075555,"TERMINAL",0,0,"77",,terminal_output +2916,4076887,"TERMINAL",0,0,"88",,terminal_output +2917,4077639,"TERMINAL",0,0,"99",,terminal_output +2918,4078604,"TERMINAL",0,0,"1020",,terminal_output +2919,4079649,"TERMINAL",0,0,"11",,terminal_output +2920,4080756,"TERMINAL",0,0,"22",,terminal_output +2921,4081967,"TERMINAL",0,0,"33",,terminal_output +2922,4082895,"TERMINAL",0,0,"44",,terminal_output +2923,4083896,"TERMINAL",0,0,"55",,terminal_output +2924,4084951,"TERMINAL",0,0,"66",,terminal_output +2925,4086243,"TERMINAL",0,0,"77",,terminal_output +2926,4087101,"TERMINAL",0,0,"88",,terminal_output +2927,4088044,"TERMINAL",0,0,"2030",,terminal_output +2928,4089046,"TERMINAL",0,0,"11",,terminal_output +2929,4090092,"TERMINAL",0,0,"22",,terminal_output +2930,4091157,"TERMINAL",0,0,"33",,terminal_output +2931,4092207,"TERMINAL",0,0,"44",,terminal_output +2932,4093270,"TERMINAL",0,0,"55",,terminal_output +2933,4094397,"TERMINAL",0,0,"66",,terminal_output +2934,4095389,"TERMINAL",0,0,"77",,terminal_output +2935,4096372,"TERMINAL",0,0,"88",,terminal_output +2936,4097413,"TERMINAL",0,0,"99",,terminal_output +2937,4098559,"TERMINAL",0,0,"3040",,terminal_output +2938,4099833,"TERMINAL",0,0,"11",,terminal_output +2939,4100631,"TERMINAL",0,0,"22",,terminal_output +2940,4101627,"TERMINAL",0,0,"33",,terminal_output +2941,4102649,"TERMINAL",0,0,"44",,terminal_output +2942,4103777,"TERMINAL",0,0,"55",,terminal_output +2943,4104720,"TERMINAL",0,0,"66",,terminal_output +2944,4105820,"TERMINAL",0,0,"77",,terminal_output +2945,4106845,"TERMINAL",0,0,"88",,terminal_output +2946,4107853,"TERMINAL",0,0,"99",,terminal_output +2947,4108904,"TERMINAL",0,0,"4050",,terminal_output +2948,4110222,"TERMINAL",0,0,"12",,terminal_output +2949,4111042,"TERMINAL",0,0,"33",,terminal_output +2950,4112167,"TERMINAL",0,0,"44",,terminal_output +2951,4113197,"TERMINAL",0,0,"55",,terminal_output +2952,4114143,"TERMINAL",0,0,"66",,terminal_output +2953,4115358,"TERMINAL",0,0,"77",,terminal_output +2954,4116261,"TERMINAL",0,0,"88",,terminal_output +2955,4117386,"TERMINAL",0,0,"99",,terminal_output +2956,4118314,"TERMINAL",0,0,"505:00",,terminal_output +2957,4119433,"TERMINAL",0,0,"11",,terminal_output +2958,4120429,"TERMINAL",0,0,"22",,terminal_output +2959,4121582,"TERMINAL",0,0,"33",,terminal_output +2960,4122503,"TERMINAL",0,0,"44",,terminal_output +2961,4123628,"TERMINAL",0,0,"55",,terminal_output +2962,4124665,"TERMINAL",0,0,"66",,terminal_output +2963,4125675,"TERMINAL",0,0,"77",,terminal_output +2964,4126690,"TERMINAL",0,0,"88",,terminal_output +2965,4128129,"TERMINAL",0,0,"99",,terminal_output 
+2966,4128949,"TERMINAL",0,0,"7:0010",,terminal_output +2967,4129872,"TERMINAL",0,0,"11",,terminal_output +2968,4130877,"TERMINAL",0,0,"22",,terminal_output +2969,4131958,"TERMINAL",0,0,"33",,terminal_output +2970,4133359,"TERMINAL",0,0,"45",,terminal_output +2971,4134021,"TERMINAL",0,0,"66",,terminal_output +2972,4135063,"TERMINAL",0,0,"77",,terminal_output +2973,4136106,"TERMINAL",0,0,"88",,terminal_output +2974,4137240,"TERMINAL",0,0,"99",,terminal_output +2975,4138208,"TERMINAL",0,0,"1020",,terminal_output +2976,4139271,"TERMINAL",0,0,"11",,terminal_output +2977,4140310,"TERMINAL",0,0,"22",,terminal_output +2978,4141433,"TERMINAL",0,0,"33",,terminal_output +2979,4142457,"TERMINAL",0,0,"44",,terminal_output +2980,4143792,"TERMINAL",0,0,"55",,terminal_output +2981,4144486,"TERMINAL",0,0,"66",,terminal_output +2982,4145647,"TERMINAL",0,0,"77",,terminal_output +2983,4146769,"TERMINAL",0,0,"88",,terminal_output +2984,4147682,"TERMINAL",0,0,"99",,terminal_output +2985,4148707,"TERMINAL",0,0,"2030",,terminal_output +2986,4149826,"TERMINAL",0,0,"11",,terminal_output +2987,4151104,"TERMINAL",0,0,"22",,terminal_output +2988,4151810,"TERMINAL",0,0,"33",,terminal_output +2989,4152854,"TERMINAL",0,0,"44",,terminal_output +2990,4153956,"TERMINAL",0,0,"55",,terminal_output +2991,4155421,"TERMINAL",0,0,"67",,terminal_output +2992,4156021,"TERMINAL",0,0,"88",,terminal_output +2993,4157130,"TERMINAL",0,0,"99",,terminal_output +2994,4158131,"TERMINAL",0,0,"3040",,terminal_output +2995,4159163,"TERMINAL",0,0,"11",,terminal_output +2996,4160211,"TERMINAL",0,0,"22",,terminal_output +2997,4161275,"TERMINAL",0,0,"33",,terminal_output +2998,4162284,"TERMINAL",0,0,"44",,terminal_output +2999,4163445,"TERMINAL",0,0,"55",,terminal_output +3000,4164461,"TERMINAL",0,0,"66",,terminal_output +3001,4165488,"TERMINAL",0,0,"77",,terminal_output +3002,4166510,"TERMINAL",0,0,"88",,terminal_output +3003,4167769,"TERMINAL",0,0,"99",,terminal_output +3004,4168675,"TERMINAL",0,0,"4050",,terminal_output +3005,4169882,"TERMINAL",0,0,"11",,terminal_output +3006,4170702,"TERMINAL",0,0,"22",,terminal_output +3007,4171828,"TERMINAL",0,0,"33",,terminal_output +3008,4173157,"TERMINAL",0,0,"44",,terminal_output +3009,4173978,"TERMINAL",0,0,"55",,terminal_output +3010,4174897,"TERMINAL",0,0,"66",,terminal_output +3011,4176068,"TERMINAL",0,0,"77",,terminal_output +3012,4177353,"TERMINAL",0,0,"89",,terminal_output +3013,4178173,"TERMINAL",0,0,"506:00",,terminal_output +3014,4179071,"TERMINAL",0,0,"11",,terminal_output +3015,4180322,"TERMINAL",0,0,"22",,terminal_output +3016,4181345,"TERMINAL",0,0,"33",,terminal_output +3017,4182247,"TERMINAL",0,0,"44",,terminal_output +3018,4183265,"TERMINAL",0,0,"55",,terminal_output +3019,4184323,"TERMINAL",0,0,"66",,terminal_output +3020,4185365,"TERMINAL",0,0,"77",,terminal_output +3021,4186414,"TERMINAL",0,0,"88",,terminal_output +3022,4187456,"TERMINAL",0,0,"99",,terminal_output +3023,4188521,"TERMINAL",0,0,"8:0010",,terminal_output +3024,4189842,"TERMINAL",0,0,"11",,terminal_output +3025,4190755,"TERMINAL",0,0,"22",,terminal_output +3026,4191658,"TERMINAL",0,0,"33",,terminal_output +3027,4193051,"TERMINAL",0,0,"44",,terminal_output +3028,4194140,"TERMINAL",0,0,"55",,terminal_output +3029,4194909,"TERMINAL",0,0,"66",,terminal_output +3030,4195887,"TERMINAL",0,0,"77",,terminal_output +3031,4197239,"TERMINAL",0,0,"88",,terminal_output +3032,4198030,"TERMINAL",0,0,"99",,terminal_output +3033,4199009,"TERMINAL",0,0,"1021",,terminal_output +3034,4200093,"TERMINAL",0,0,"22",,terminal_output 
+3035,4201071,"TERMINAL",0,0,"33",,terminal_output +3036,4202137,"TERMINAL",0,0,"44",,terminal_output +3037,4203231,"TERMINAL",0,0,"55",,terminal_output +3038,4204262,"TERMINAL",0,0,"66",,terminal_output +3039,4205278,"TERMINAL",0,0,"77",,terminal_output +3040,4206324,"TERMINAL",0,0,"88",,terminal_output +3041,4207374,"TERMINAL",0,0,"99",,terminal_output +3042,4208792,"TERMINAL",0,0,"2030",,terminal_output +3043,4209521,"TERMINAL",0,0,"11",,terminal_output +3044,4210933,"TERMINAL",0,0,"22",,terminal_output +3045,4211648,"TERMINAL",0,0,"33",,terminal_output +3046,4212594,"TERMINAL",0,0,"44",,terminal_output +3047,4213668,"TERMINAL",0,0,"55",,terminal_output +3048,4214705,"TERMINAL",0,0,"66",,terminal_output +3049,4216039,"TERMINAL",0,0,"77",,terminal_output +3050,4216798,"TERMINAL",0,0,"88",,terminal_output +3051,4217978,"TERMINAL",0,0,"99",,terminal_output +3052,4218930,"TERMINAL",0,0,"3040",,terminal_output +3053,4220059,"TERMINAL",0,0,"11",,terminal_output +3054,4221075,"TERMINAL",0,0,"23",,terminal_output +3055,4222261,"TERMINAL",0,0,"44",,terminal_output +3056,4223064,"TERMINAL",0,0,"55",,terminal_output +3057,4224159,"TERMINAL",0,0,"66",,terminal_output +3058,4225176,"TERMINAL",0,0,"77",,terminal_output +3059,4226303,"TERMINAL",0,0,"88",,terminal_output +3060,4227290,"TERMINAL",0,0,"99",,terminal_output +3061,4228321,"TERMINAL",0,0,"4050",,terminal_output +3062,4229748,"TERMINAL",0,0,"11",,terminal_output +3063,4230772,"TERMINAL",0,0,"22",,terminal_output +3064,4231795,"TERMINAL",0,0,"33",,terminal_output +3065,4232717,"TERMINAL",0,0,"44",,terminal_output +3066,4233738,"TERMINAL",0,0,"55",,terminal_output +3067,4234593,"TERMINAL",0,0,"66",,terminal_output +3068,4235684,"TERMINAL",0,0,"77",,terminal_output +3069,4236810,"TERMINAL",0,0,"88",,terminal_output +3070,4237833,"TERMINAL",0,0,"99",,terminal_output +3071,4238857,"TERMINAL",0,0,"507:00",,terminal_output +3072,4239879,"TERMINAL",0,0,"11",,terminal_output +3073,4240906,"TERMINAL",0,0,"22",,terminal_output +3074,4241929,"TERMINAL",0,0,"33",,terminal_output +3075,4243051,"TERMINAL",0,0,"45",,terminal_output +3076,4244047,"TERMINAL",0,0,"66",,terminal_output +3077,4245310,"TERMINAL",0,0,"77",,terminal_output +3078,4246110,"TERMINAL",0,0,"88",,terminal_output +3079,4247160,"TERMINAL",0,0,"99",,terminal_output +3080,4248274,"TERMINAL",0,0,"9:0010",,terminal_output +3081,4249254,"TERMINAL",0,0,"11",,terminal_output +3082,4250322,"TERMINAL",0,0,"22",,terminal_output +3083,4251620,"TERMINAL",0,0,"33",,terminal_output +3084,4252389,"TERMINAL",0,0,"44",,terminal_output +3085,4253454,"TERMINAL",0,0,"55",,terminal_output +3086,4254518,"TERMINAL",0,0,"66",,terminal_output +3087,4255559,"TERMINAL",0,0,"77",,terminal_output +3088,4256667,"TERMINAL",0,0,"88",,terminal_output +3089,4257774,"TERMINAL",0,0,"99",,terminal_output +3090,4258697,"TERMINAL",0,0,"1020",,terminal_output +3091,4259835,"TERMINAL",0,0,"11",,terminal_output +3092,4261002,"TERMINAL",0,0,"22",,terminal_output +3093,4261883,"TERMINAL",0,0,"33",,terminal_output +3094,4262908,"TERMINAL",0,0,"44",,terminal_output +3095,4264047,"TERMINAL",0,0,"55",,terminal_output +3096,4265020,"TERMINAL",0,0,"67",,terminal_output +3097,4266078,"TERMINAL",0,0,"88",,terminal_output +3098,4267306,"TERMINAL",0,0,"99",,terminal_output +3099,4268129,"TERMINAL",0,0,"2030",,terminal_output +3100,4269161,"TERMINAL",0,0,"11",,terminal_output +3101,4270209,"TERMINAL",0,0,"22",,terminal_output +3102,4271259,"TERMINAL",0,0,"33",,terminal_output +3103,4272319,"TERMINAL",0,0,"44",,terminal_output 
+3104,4273445,"TERMINAL",0,0,"55",,terminal_output +3105,4274468,"TERMINAL",0,0,"66",,terminal_output +3106,4275491,"TERMINAL",0,0,"77",,terminal_output +3107,4276515,"TERMINAL",0,0,"88",,terminal_output +3108,4277549,"TERMINAL",0,0,"99",,terminal_output +3109,4278592,"TERMINAL",0,0,"3040",,terminal_output +3110,4279892,"TERMINAL",0,0,"11",,terminal_output +3111,4280711,"TERMINAL",0,0,"22",,terminal_output +3112,4282171,"TERMINAL",0,0,"33",,terminal_output +3113,4282860,"TERMINAL",0,0,"44",,terminal_output +3114,4284030,"TERMINAL",0,0,"55",,terminal_output +3115,4284903,"TERMINAL",0,0,"66",,terminal_output +3116,4286048,"TERMINAL",0,0,"77",,terminal_output +3117,4287060,"TERMINAL",0,0,"89",,terminal_output +3118,4288014,"TERMINAL",0,0,"4050",,terminal_output +3119,4289071,"TERMINAL",0,0,"11",,terminal_output +3120,4290132,"TERMINAL",0,0,"22",,terminal_output +3121,4291221,"TERMINAL",0,0,"33",,terminal_output +3122,4292274,"TERMINAL",0,0,"44",,terminal_output +3123,4293392,"TERMINAL",0,0,"55",,terminal_output +3124,4294309,"TERMINAL",0,0,"66",,terminal_output +3125,4295663,"TERMINAL",0,0,"77",,terminal_output +3126,4296480,"TERMINAL",0,0,"88",,terminal_output +3127,4297453,"TERMINAL",0,0,"99",,terminal_output +3128,4298534,"TERMINAL",0,0,"508:00",,terminal_output +3129,4299724,"TERMINAL",0,0,"11",,terminal_output +3130,4300749,"TERMINAL",0,0,"22",,terminal_output +3131,4301638,"TERMINAL",0,0,"33",,terminal_output +3132,4302706,"TERMINAL",0,0,"44",,terminal_output +3133,4303850,"TERMINAL",0,0,"55",,terminal_output +3134,4304828,"TERMINAL",0,0,"66",,terminal_output +3135,4305913,"TERMINAL",0,0,"77",,terminal_output +3136,4306912,"TERMINAL",0,0,"88",,terminal_output +3137,4307934,"TERMINAL",0,0,"99",,terminal_output +3138,4309085,"TERMINAL",0,0,"10:0011",,terminal_output +3139,4310251,"TERMINAL",0,0,"22",,terminal_output +3140,4311223,"TERMINAL",0,0,"33",,terminal_output +3141,4312121,"TERMINAL",0,0,"44",,terminal_output +3142,4313151,"TERMINAL",0,0,"55",,terminal_output +3143,4314207,"TERMINAL",0,0,"66",,terminal_output +3144,4315247,"TERMINAL",0,0,"77",,terminal_output +3145,4316323,"TERMINAL",0,0,"88",,terminal_output +3146,4317464,"TERMINAL",0,0,"99",,terminal_output +3147,4318474,"TERMINAL",0,0,"1020",,terminal_output +3148,4319498,"TERMINAL",0,0,"11",,terminal_output +3149,4320826,"TERMINAL",0,0,"22",,terminal_output +3150,4321749,"TERMINAL",0,0,"33",,terminal_output +3151,4322616,"TERMINAL",0,0,"44",,terminal_output +3152,4323693,"TERMINAL",0,0,"55",,terminal_output +3153,4324677,"TERMINAL",0,0,"66",,terminal_output +3154,4325841,"TERMINAL",0,0,"77",,terminal_output +3155,4326863,"TERMINAL",0,0,"88",,terminal_output +3156,4327835,"TERMINAL",0,0,"99",,terminal_output +3157,4328946,"TERMINAL",0,0,"2030",,terminal_output +3158,4329937,"TERMINAL",0,0,"11",,terminal_output +3159,4331059,"TERMINAL",0,0,"23",,terminal_output +3160,4332423,"TERMINAL",0,0,"44",,terminal_output +3161,4333106,"TERMINAL",0,0,"55",,terminal_output +3162,4334107,"TERMINAL",0,0,"66",,terminal_output +3163,4335358,"TERMINAL",0,0,"77",,terminal_output +3164,4336204,"TERMINAL",0,0,"88",,terminal_output +3165,4337247,"TERMINAL",0,0,"99",,terminal_output +3166,4338339,"TERMINAL",0,0,"3040",,terminal_output +3167,4339348,"TERMINAL",0,0,"11",,terminal_output +3168,4340392,"TERMINAL",0,0,"22",,terminal_output +3169,4341499,"TERMINAL",0,0,"33",,terminal_output +3170,4342490,"TERMINAL",0,0,"44",,terminal_output +3171,4343524,"TERMINAL",0,0,"55",,terminal_output +3172,4344592,"TERMINAL",0,0,"66",,terminal_output 
+3173,4345795,"TERMINAL",0,0,"77",,terminal_output +3174,4346723,"TERMINAL",0,0,"88",,terminal_output +3175,4347805,"TERMINAL",0,0,"99",,terminal_output +3176,4349050,"TERMINAL",0,0,"4050",,terminal_output +3177,4349989,"TERMINAL",0,0,"11",,terminal_output +3178,4351024,"TERMINAL",0,0,"22",,terminal_output +3179,4352139,"TERMINAL",0,0,"33",,terminal_output +3180,4353265,"TERMINAL",0,0,"45",,terminal_output +3181,4354044,"TERMINAL",0,0,"66",,terminal_output +3182,4355109,"TERMINAL",0,0,"77",,terminal_output +3183,4356099,"TERMINAL",0,0,"88",,terminal_output +3184,4357147,"TERMINAL",0,0,"99",,terminal_output +3185,4358300,"TERMINAL",0,0,"509:00",,terminal_output +3186,4359266,"TERMINAL",0,0,"11",,terminal_output +3187,4360330,"TERMINAL",0,0,"22",,terminal_output +3188,4361390,"TERMINAL",0,0,"33",,terminal_output +3189,4362381,"TERMINAL",0,0,"44",,terminal_output +3190,4363499,"TERMINAL",0,0,"55",,terminal_output +3191,4364523,"TERMINAL",0,0,"66",,terminal_output +3192,4365646,"TERMINAL",0,0,"77",,terminal_output +3193,4366671,"TERMINAL",0,0,"88",,terminal_output +3194,4367797,"TERMINAL",0,0,"99",,terminal_output +3195,4368821,"TERMINAL",0,0,"1:0010",,terminal_output +3196,4370152,"TERMINAL",0,0,"11",,terminal_output +3197,4370793,"TERMINAL",0,0,"22",,terminal_output +3198,4371834,"TERMINAL",0,0,"33",,terminal_output +3199,4372879,"TERMINAL",0,0,"44",,terminal_output +3200,4373936,"TERMINAL",0,0,"55",,terminal_output +3201,4374997,"TERMINAL",0,0,"67",,terminal_output +3202,4376070,"TERMINAL",0,0,"88",,terminal_output +3203,4377416,"TERMINAL",0,0,"99",,terminal_output +3204,4378117,"TERMINAL",0,0,"1020",,terminal_output +3205,4379165,"TERMINAL",0,0,"11",,terminal_output +3206,4380214,"TERMINAL",0,0,"22",,terminal_output +3207,4381305,"TERMINAL",0,0,"33",,terminal_output +3208,4382363,"TERMINAL",0,0,"44",,terminal_output +3209,4383359,"TERMINAL",0,0,"55",,terminal_output +3210,4384531,"TERMINAL",0,0,"66",,terminal_output +3211,4385534,"TERMINAL",0,0,"77",,terminal_output +3212,4386536,"TERMINAL",0,0,"88",,terminal_output +3213,4387592,"TERMINAL",0,0,"99",,terminal_output +3214,4388601,"TERMINAL",0,0,"2030",,terminal_output +3215,4389800,"TERMINAL",0,0,"11",,terminal_output +3216,4390924,"TERMINAL",0,0,"22",,terminal_output +3217,4392015,"TERMINAL",0,0,"33",,terminal_output +3218,4392872,"TERMINAL",0,0,"44",,terminal_output +3219,4393841,"TERMINAL",0,0,"55",,terminal_output +3220,4394915,"TERMINAL",0,0,"66",,terminal_output +3221,4396147,"TERMINAL",0,0,"77",,terminal_output +3222,4397064,"TERMINAL",0,0,"99",,terminal_output +3223,4398092,"TERMINAL",0,0,"3040",,terminal_output +3224,4399083,"TERMINAL",0,0,"11",,terminal_output +3225,4400443,"TERMINAL",0,0,"22",,terminal_output +3226,4401261,"TERMINAL",0,0,"33",,terminal_output +3227,4402223,"TERMINAL",0,0,"44",,terminal_output +3228,4403274,"TERMINAL",0,0,"55",,terminal_output +3229,4404318,"TERMINAL",0,0,"66",,terminal_output +3230,4405370,"TERMINAL",0,0,"77",,terminal_output +3231,4406540,"TERMINAL",0,0,"88",,terminal_output +3232,4407608,"TERMINAL",0,0,"99",,terminal_output +3233,4408536,"TERMINAL",0,0,"4050",,terminal_output +3234,4409654,"TERMINAL",0,0,"11",,terminal_output +3235,4410608,"TERMINAL",0,0,"22",,terminal_output +3236,4411658,"TERMINAL",0,0,"33",,terminal_output +3237,4412829,"TERMINAL",0,0,"44",,terminal_output +3238,4413753,"TERMINAL",0,0,"55",,terminal_output +3239,4414871,"TERMINAL",0,0,"66",,terminal_output +3240,4415895,"TERMINAL",0,0,"77",,terminal_output +3241,4417122,"TERMINAL",0,0,"88",,terminal_output 
+3242,4418248,"TERMINAL",0,0,"930:00",,terminal_output +3243,4419061,"TERMINAL",0,0,"511",,terminal_output +3244,4420139,"TERMINAL",0,0,"22",,terminal_output +3245,4421423,"TERMINAL",0,0,"33",,terminal_output +3246,4422145,"TERMINAL",0,0,"44",,terminal_output +3247,4423195,"TERMINAL",0,0,"55",,terminal_output +3248,4424235,"TERMINAL",0,0,"66",,terminal_output +3249,4425413,"TERMINAL",0,0,"77",,terminal_output +3250,4426333,"TERMINAL",0,0,"88",,terminal_output +3251,4427458,"TERMINAL",0,0,"99",,terminal_output +3252,4428484,"TERMINAL",0,0,"2:0010",,terminal_output +3253,4429504,"TERMINAL",0,0,"11",,terminal_output +3254,4430530,"TERMINAL",0,0,"22",,terminal_output +3255,4431655,"TERMINAL",0,0,"33",,terminal_output +3256,4432779,"TERMINAL",0,0,"44",,terminal_output +3257,4433905,"TERMINAL",0,0,"55",,terminal_output +3258,4434691,"TERMINAL",0,0,"66",,terminal_output +3259,4435847,"TERMINAL",0,0,"77",,terminal_output +3260,4436784,"TERMINAL",0,0,"88",,terminal_output +3261,4437824,"TERMINAL",0,0,"99",,terminal_output +3262,4438863,"TERMINAL",0,0,"1020",,terminal_output +3263,4439944,"TERMINAL",0,0,"11",,terminal_output +3264,4441296,"TERMINAL",0,0,"22",,terminal_output +3265,4442194,"TERMINAL",0,0,"44",,terminal_output +3266,4443025,"TERMINAL",0,0,"55",,terminal_output +3267,4444071,"TERMINAL",0,0,"66",,terminal_output +3268,4445137,"TERMINAL",0,0,"77",,terminal_output +3269,4446406,"TERMINAL",0,0,"88",,terminal_output +3270,4447207,"TERMINAL",0,0,"99",,terminal_output +3271,4448438,"TERMINAL",0,0,"2030",,terminal_output +3272,4449275,"TERMINAL",0,0,"11",,terminal_output +3273,4450363,"TERMINAL",0,0,"22",,terminal_output +3274,4451357,"TERMINAL",0,0,"33",,terminal_output +3275,4452440,"TERMINAL",0,0,"44",,terminal_output +3276,4453438,"TERMINAL",0,0,"55",,terminal_output +3277,4454478,"TERMINAL",0,0,"66",,terminal_output +3278,4455601,"TERMINAL",0,0,"77",,terminal_output +3279,4456931,"TERMINAL",0,0,"88",,terminal_output +3280,4457852,"TERMINAL",0,0,"99",,terminal_output +3281,4458670,"TERMINAL",0,0,"3040",,terminal_output +3282,4459738,"TERMINAL",0,0,"11",,terminal_output +3283,4461125,"TERMINAL",0,0,"22",,terminal_output +3284,4461872,"TERMINAL",0,0,"33",,terminal_output +3285,4462888,"TERMINAL",0,0,"44",,terminal_output +3286,4463995,"TERMINAL",0,0,"55",,terminal_output +3287,4465332,"TERMINAL",0,0,"67",,terminal_output +3288,4466038,"TERMINAL",0,0,"88",,terminal_output +3289,4467368,"TERMINAL",0,0,"99",,terminal_output +3290,4468292,"TERMINAL",0,0,"4050",,terminal_output +3291,4469155,"TERMINAL",0,0,"11",,terminal_output +3292,4470182,"TERMINAL",0,0,"22",,terminal_output +3293,4471232,"TERMINAL",0,0,"33",,terminal_output +3294,4472279,"TERMINAL",0,0,"44",,terminal_output +3295,4473326,"TERMINAL",0,0,"55",,terminal_output +3296,4474431,"TERMINAL",0,0,"66",,terminal_output +3297,4475426,"TERMINAL",0,0,"77",,terminal_output +3298,4476477,"TERMINAL",0,0,"88",,terminal_output +3299,4477604,"TERMINAL",0,0,"99",,terminal_output +3300,4478593,"TERMINAL",0,0,"501:00",,terminal_output +3301,4479958,"TERMINAL",0,0,"11",,terminal_output +3302,4480773,"TERMINAL",0,0,"22",,terminal_output +3303,4481899,"TERMINAL",0,0,"33",,terminal_output +3304,4482833,"TERMINAL",0,0,"44",,terminal_output +3305,4483844,"TERMINAL",0,0,"55",,terminal_output +3306,4485277,"TERMINAL",0,0,"66",,terminal_output +3307,4486141,"TERMINAL",0,0,"77",,terminal_output +3308,4487016,"TERMINAL",0,0,"89",,terminal_output +3309,4488246,"TERMINAL",0,0,"3:0010",,terminal_output +3310,4489089,"TERMINAL",0,0,"11",,terminal_output 
+3311,4490442,"TERMINAL",0,0,"22",,terminal_output +3312,4491175,"TERMINAL",0,0,"33",,terminal_output +3313,4492208,"TERMINAL",0,0,"44",,terminal_output +3314,4493261,"TERMINAL",0,0,"55",,terminal_output +3315,4494305,"TERMINAL",0,0,"66",,terminal_output +3316,4495400,"TERMINAL",0,0,"77",,terminal_output +3317,4496431,"TERMINAL",0,0,"88",,terminal_output +3318,4497778,"TERMINAL",0,0,"99",,terminal_output +3319,4498590,"TERMINAL",0,0,"1020",,terminal_output +3320,4499913,"TERMINAL",0,0,"11",,terminal_output +3321,4500627,"TERMINAL",0,0,"22",,terminal_output +3322,4501748,"TERMINAL",0,0,"33",,terminal_output +3323,4502778,"TERMINAL",0,0,"44",,terminal_output +3324,4503901,"TERMINAL",0,0,"55",,terminal_output +3325,4504822,"TERMINAL",0,0,"66",,terminal_output +3326,4506171,"TERMINAL",0,0,"77",,terminal_output +3327,4506873,"TERMINAL",0,0,"88",,terminal_output +3328,4507934,"TERMINAL",0,0,"99",,terminal_output +3329,4509044,"TERMINAL",0,0,"2031",,terminal_output +3330,4510041,"TERMINAL",0,0,"22",,terminal_output +3331,4511271,"TERMINAL",0,0,"33",,terminal_output +3332,4512301,"TERMINAL",0,0,"44",,terminal_output +3333,4513217,"TERMINAL",0,0,"55",,terminal_output +3334,4514211,"TERMINAL",0,0,"66",,terminal_output +3335,4515365,"TERMINAL",0,0,"77",,terminal_output +3336,4516289,"TERMINAL",0,0,"88",,terminal_output +3337,4517413,"TERMINAL",0,0,"99",,terminal_output +3338,4518404,"TERMINAL",0,0,"3040",,terminal_output +3339,4519456,"TERMINAL",0,0,"11",,terminal_output +3340,4520890,"TERMINAL",0,0,"22",,terminal_output +3341,4521913,"TERMINAL",0,0,"33",,terminal_output +3342,4522598,"TERMINAL",0,0,"44",,terminal_output +3343,4523856,"TERMINAL",0,0,"55",,terminal_output +3344,4524677,"TERMINAL",0,0,"66",,terminal_output +3345,4525801,"TERMINAL",0,0,"77",,terminal_output +3346,4526927,"TERMINAL",0,0,"88",,terminal_output +3347,4527832,"TERMINAL",0,0,"99",,terminal_output +3348,4528975,"TERMINAL",0,0,"4050",,terminal_output +3349,4530001,"TERMINAL",0,0,"11",,terminal_output +3350,4531020,"TERMINAL",0,0,"23",,terminal_output +3351,4532043,"TERMINAL",0,0,"44",,terminal_output +3352,4533167,"TERMINAL",0,0,"55",,terminal_output +3353,4534121,"TERMINAL",0,0,"66",,terminal_output +3354,4535217,"TERMINAL",0,0,"77",,terminal_output +3355,4536202,"TERMINAL",0,0,"88",,terminal_output +3356,4537301,"TERMINAL",0,0,"99",,terminal_output +3357,4538300,"TERMINAL",0,0,"502:00",,terminal_output +3358,4539410,"TERMINAL",0,0,"11",,terminal_output +3359,4540490,"TERMINAL",0,0,"22",,terminal_output +3360,4541771,"TERMINAL",0,0,"33",,terminal_output +3361,4542547,"TERMINAL",0,0,"44",,terminal_output +3362,4543609,"TERMINAL",0,0,"55",,terminal_output +3363,4544593,"TERMINAL",0,0,"66",,terminal_output +3364,4545653,"TERMINAL",0,0,"77",,terminal_output +3365,4546780,"TERMINAL",0,0,"88",,terminal_output +3366,4547803,"TERMINAL",0,0,"99",,terminal_output +3367,4548940,"TERMINAL",0,0,"4:0010",,terminal_output +3368,4549866,"TERMINAL",0,0,"11",,terminal_output +3369,4550880,"TERMINAL",0,0,"22",,terminal_output +3370,4551998,"TERMINAL",0,0,"33",,terminal_output +3371,4553046,"TERMINAL",0,0,"45",,terminal_output +3372,4554065,"TERMINAL",0,0,"66",,terminal_output +3373,4555274,"TERMINAL",0,0,"77",,terminal_output +3374,4556161,"TERMINAL",0,0,"88",,terminal_output +3375,4557181,"TERMINAL",0,0,"99",,terminal_output +3376,4558244,"TERMINAL",0,0,"1020",,terminal_output +3377,4559248,"TERMINAL",0,0,"11",,terminal_output +3378,4560317,"TERMINAL",0,0,"22",,terminal_output +3379,4561359,"TERMINAL",0,0,"33",,terminal_output 
+3380,4562512,"TERMINAL",0,0,"44",,terminal_output +3381,4563563,"TERMINAL",0,0,"55",,terminal_output +3382,4564594,"TERMINAL",0,0,"66",,terminal_output +3383,4565613,"TERMINAL",0,0,"77",,terminal_output +3384,4566589,"TERMINAL",0,0,"88",,terminal_output +3385,4567659,"TERMINAL",0,0,"99",,terminal_output +3386,4568787,"TERMINAL",0,0,"2030",,terminal_output +3387,4569914,"TERMINAL",0,0,"11",,terminal_output +3388,4571039,"TERMINAL",0,0,"22",,terminal_output +3389,4571852,"TERMINAL",0,0,"33",,terminal_output +3390,4572867,"TERMINAL",0,0,"44",,terminal_output +3391,4574001,"TERMINAL",0,0,"55",,terminal_output +3392,4575332,"TERMINAL",0,0,"67",,terminal_output +3393,4576149,"TERMINAL",0,0,"88",,terminal_output +3394,4577071,"TERMINAL",0,0,"99",,terminal_output +3395,4578291,"TERMINAL",0,0,"3040",,terminal_output +3396,4579146,"TERMINAL",0,0,"11",,terminal_output +3397,4580448,"TERMINAL",0,0,"22",,terminal_output +3398,4581267,"TERMINAL",0,0,"33",,terminal_output +3399,4582432,"TERMINAL",0,0,"44",,terminal_output +3400,4583341,"TERMINAL",0,0,"55",,terminal_output +3401,4584438,"TERMINAL",0,0,"66",,terminal_output +3402,4585444,"TERMINAL",0,0,"77",,terminal_output +3403,4586896,"TERMINAL",0,0,"88",,terminal_output +3404,4587611,"TERMINAL",0,0,"99",,terminal_output +3405,4588634,"TERMINAL",0,0,"4050",,terminal_output +3406,4589660,"TERMINAL",0,0,"11",,terminal_output +3407,4590926,"TERMINAL",0,0,"22",,terminal_output +3408,4591915,"TERMINAL",0,0,"33",,terminal_output +3409,4592775,"TERMINAL",0,0,"44",,terminal_output +3410,4593956,"TERMINAL",0,0,"55",,terminal_output +3411,4594899,"TERMINAL",0,0,"66",,terminal_output +3412,4596115,"TERMINAL",0,0,"77",,terminal_output +3413,4597392,"TERMINAL",0,0,"89",,terminal_output +3414,4598058,"TERMINAL",0,0,"503:00",,terminal_output +3415,4599047,"TERMINAL",0,0,"11",,terminal_output +3416,4600200,"TERMINAL",0,0,"22",,terminal_output +3417,4601222,"TERMINAL",0,0,"33",,terminal_output +3418,4602249,"TERMINAL",0,0,"44",,terminal_output +3419,4603481,"TERMINAL",0,0,"55",,terminal_output +3420,4604279,"TERMINAL",0,0,"66",,terminal_output +3421,4605426,"TERMINAL",0,0,"77",,terminal_output +3422,4606370,"TERMINAL",0,0,"88",,terminal_output +3423,4607421,"TERMINAL",0,0,"99",,terminal_output +3424,4608500,"TERMINAL",0,0,"5:0010",,terminal_output +3425,4609523,"TERMINAL",0,0,"11",,terminal_output +3426,4610574,"TERMINAL",0,0,"22",,terminal_output +3427,4611629,"TERMINAL",0,0,"33",,terminal_output +3428,4612669,"TERMINAL",0,0,"44",,terminal_output +3429,4613705,"TERMINAL",0,0,"55",,terminal_output +3430,4614735,"TERMINAL",0,0,"66",,terminal_output +3431,4615991,"TERMINAL",0,0,"77",,terminal_output +3432,4617186,"TERMINAL",0,0,"88",,terminal_output +3433,4617908,"TERMINAL",0,0,"99",,terminal_output +3434,4619069,"TERMINAL",0,0,"1020",,terminal_output +3435,4620124,"TERMINAL",0,0,"12",,terminal_output +3436,4621049,"TERMINAL",0,0,"33",,terminal_output +3437,4622087,"TERMINAL",0,0,"44",,terminal_output +3438,4623227,"TERMINAL",0,0,"55",,terminal_output +3439,4624165,"TERMINAL",0,0,"66",,terminal_output +3440,4625270,"TERMINAL",0,0,"77",,terminal_output +3441,4626299,"TERMINAL",0,0,"88",,terminal_output +3442,4627419,"TERMINAL",0,0,"99",,terminal_output +3443,4628344,"TERMINAL",0,0,"2030",,terminal_output +3444,4629466,"TERMINAL",0,0,"11",,terminal_output +3445,4630437,"TERMINAL",0,0,"22",,terminal_output +3446,4631822,"TERMINAL",0,0,"33",,terminal_output +3447,4632544,"TERMINAL",0,0,"44",,terminal_output +3448,4633603,"TERMINAL",0,0,"55",,terminal_output 
+3449,4634630,"TERMINAL",0,0,"66",,terminal_output +3450,4635713,"TERMINAL",0,0,"77",,terminal_output +3451,4636944,"TERMINAL",0,0,"88",,terminal_output +3452,4637860,"TERMINAL",0,0,"99",,terminal_output +3453,4638984,"TERMINAL",0,0,"3040",,terminal_output +3454,4640011,"TERMINAL",0,0,"11",,terminal_output +3455,4640927,"TERMINAL",0,0,"22",,terminal_output +3456,4642067,"TERMINAL",0,0,"34",,terminal_output +3457,4643074,"TERMINAL",0,0,"55",,terminal_output +3458,4644046,"TERMINAL",0,0,"66",,terminal_output +3459,4645197,"TERMINAL",0,0,"77",,terminal_output +3460,4646165,"TERMINAL",0,0,"88",,terminal_output +3461,4647277,"TERMINAL",0,0,"99",,terminal_output +3462,4648502,"TERMINAL",0,0,"4050",,terminal_output +3463,4649305,"TERMINAL",0,0,"11",,terminal_output +3464,4650550,"TERMINAL",0,0,"22",,terminal_output +3465,4651395,"TERMINAL",0,0,"33",,terminal_output +3466,4652441,"TERMINAL",0,0,"44",,terminal_output +3467,4653519,"TERMINAL",0,0,"55",,terminal_output +3468,4654611,"TERMINAL",0,0,"66",,terminal_output +3469,4655664,"TERMINAL",0,0,"77",,terminal_output +3470,4656688,"TERMINAL",0,0,"88",,terminal_output +3471,4657760,"TERMINAL",0,0,"99",,terminal_output +3472,4658716,"TERMINAL",0,0,"504:00",,terminal_output +3473,4659875,"TERMINAL",0,0,"11",,terminal_output +3474,4660984,"TERMINAL",0,0,"22",,terminal_output +3475,4662009,"TERMINAL",0,0,"33",,terminal_output +3476,4662897,"TERMINAL",0,0,"44",,terminal_output +3477,4664045,"TERMINAL",0,0,"56",,terminal_output +3478,4665295,"TERMINAL",0,0,"77",,terminal_output +3479,4666172,"TERMINAL",0,0,"88",,terminal_output +3480,4667329,"TERMINAL",0,0,"99",,terminal_output +3481,4668250,"TERMINAL",0,0,"6:0010",,terminal_output +3482,4669194,"TERMINAL",0,0,"11",,terminal_output +3483,4670266,"TERMINAL",0,0,"22",,terminal_output +3484,4671290,"TERMINAL",0,0,"33",,terminal_output +3485,4672344,"TERMINAL",0,0,"44",,terminal_output +3486,4673394,"TERMINAL",0,0,"55",,terminal_output +3487,4674435,"TERMINAL",0,0,"66",,terminal_output +3488,4675516,"TERMINAL",0,0,"77",,terminal_output +3489,4676539,"TERMINAL",0,0,"88",,terminal_output +3490,4677573,"TERMINAL",0,0,"99",,terminal_output +3491,4678653,"TERMINAL",0,0,"1020",,terminal_output +3492,4679917,"TERMINAL",0,0,"11",,terminal_output +3493,4680941,"TERMINAL",0,0,"22",,terminal_output +3494,4681963,"TERMINAL",0,0,"33",,terminal_output +3495,4682885,"TERMINAL",0,0,"44",,terminal_output +3496,4683945,"TERMINAL",0,0,"55",,terminal_output +3497,4684883,"TERMINAL",0,0,"66",,terminal_output +3498,4685964,"TERMINAL",0,0,"78",,terminal_output +3499,4687427,"TERMINAL",0,0,"99",,terminal_output +3500,4688252,"TERMINAL",0,0,"2030",,terminal_output +3501,4689079,"TERMINAL",0,0,"11",,terminal_output +3502,4690134,"TERMINAL",0,0,"22",,terminal_output +3503,4691178,"TERMINAL",0,0,"33",,terminal_output +3504,4692302,"TERMINAL",0,0,"44",,terminal_output +3505,4693316,"TERMINAL",0,0,"55",,terminal_output +3506,4694311,"TERMINAL",0,0,"66",,terminal_output +3507,4695476,"TERMINAL",0,0,"77",,terminal_output +3508,4696406,"TERMINAL",0,0,"88",,terminal_output +3509,4697448,"TERMINAL",0,0,"99",,terminal_output +3510,4698562,"TERMINAL",0,0,"3040",,terminal_output +3511,4699566,"TERMINAL",0,0,"11",,terminal_output +3512,4700589,"TERMINAL",0,0,"22",,terminal_output +3513,4701727,"TERMINAL",0,0,"33",,terminal_output +3514,4702685,"TERMINAL",0,0,"44",,terminal_output +3515,4703762,"TERMINAL",0,0,"55",,terminal_output +3516,4704886,"TERMINAL",0,0,"66",,terminal_output +3517,4706013,"TERMINAL",0,0,"77",,terminal_output 
+3518,4707103,"TERMINAL",0,0,"88",,terminal_output +3519,4707957,"TERMINAL",0,0,"99",,terminal_output +3520,4709049,"TERMINAL",0,0,"4051",,terminal_output +3521,4710107,"TERMINAL",0,0,"22",,terminal_output +3522,4711128,"TERMINAL",0,0,"33",,terminal_output +3523,4712482,"TERMINAL",0,0,"44",,terminal_output +3524,4713234,"TERMINAL",0,0,"55",,terminal_output +3525,4714234,"TERMINAL",0,0,"66",,terminal_output +3526,4715324,"TERMINAL",0,0,"77",,terminal_output +3527,4716554,"TERMINAL",0,0,"88",,terminal_output +3528,4717883,"TERMINAL",0,0,"99",,terminal_output +3529,4718380,"TERMINAL",0,0,"505:00",,terminal_output +3530,4719520,"TERMINAL",0,0,"11",,terminal_output +3531,4720583,"TERMINAL",0,0,"22",,terminal_output +3532,4721636,"TERMINAL",0,0,"33",,terminal_output +3533,4722557,"TERMINAL",0,0,"44",,terminal_output +3534,4723615,"TERMINAL",0,0,"55",,terminal_output +3535,4724648,"TERMINAL",0,0,"66",,terminal_output +3536,4725694,"TERMINAL",0,0,"77",,terminal_output +3537,4726787,"TERMINAL",0,0,"88",,terminal_output +3538,4727813,"TERMINAL",0,0,"99",,terminal_output +3539,4728838,"TERMINAL",0,0,"7:0010",,terminal_output +3540,4729959,"TERMINAL",0,0,"11",,terminal_output +3541,4730938,"TERMINAL",0,0,"22",,terminal_output +3542,4732004,"TERMINAL",0,0,"34",,terminal_output +3543,4733346,"TERMINAL",0,0,"55",,terminal_output +3544,4734076,"TERMINAL",0,0,"66",,terminal_output +3545,4735179,"TERMINAL",0,0,"77",,terminal_output +3546,4736421,"TERMINAL",0,0,"88",,terminal_output +3547,4737428,"TERMINAL",0,0,"99",,terminal_output +3548,4738259,"TERMINAL",0,0,"1020",,terminal_output +3549,4739303,"TERMINAL",0,0,"11",,terminal_output +3550,4740347,"TERMINAL",0,0,"22",,terminal_output +3551,4741625,"TERMINAL",0,0,"33",,terminal_output +3552,4742443,"TERMINAL",0,0,"44",,terminal_output +3553,4743570,"TERMINAL",0,0,"55",,terminal_output +3554,4744592,"TERMINAL",0,0,"66",,terminal_output +3555,4745615,"TERMINAL",0,0,"77",,terminal_output +3556,4746648,"TERMINAL",0,0,"88",,terminal_output +3557,4747763,"TERMINAL",0,0,"99",,terminal_output +3558,4748788,"TERMINAL",0,0,"2030",,terminal_output +3559,4749772,"TERMINAL",0,0,"11",,terminal_output +3560,4750841,"TERMINAL",0,0,"22",,terminal_output +3561,4751965,"TERMINAL",0,0,"33",,terminal_output +3562,4752902,"TERMINAL",0,0,"44",,terminal_output +3563,4754015,"TERMINAL",0,0,"56",,terminal_output +3564,4755349,"TERMINAL",0,0,"77",,terminal_output +3565,4756052,"TERMINAL",0,0,"88",,terminal_output +3566,4757283,"TERMINAL",0,0,"99",,terminal_output +3567,4758412,"TERMINAL",0,0,"3040",,terminal_output +3568,4759205,"TERMINAL",0,0,"11",,terminal_output +3569,4760595,"TERMINAL",0,0,"22",,terminal_output +3570,4761706,"TERMINAL",0,0,"33",,terminal_output +3571,4762462,"TERMINAL",0,0,"44",,terminal_output +3572,4764772,"TERMINAL",0,0,"55",,terminal_output +3573,4764819,"TERMINAL",0,0,"66",,terminal_output +3574,4765812,"TERMINAL",0,0,"77",,terminal_output +3575,4766534,"TERMINAL",0,0,"88",,terminal_output +3576,4767622,"TERMINAL",0,0,"99",,terminal_output +3577,4768659,"TERMINAL",0,0,"4050",,terminal_output +3578,4769674,"TERMINAL",0,0,"11",,terminal_output +3579,4771159,"TERMINAL",0,0,"22",,terminal_output +3580,4771848,"TERMINAL",0,0,"33",,terminal_output +3581,4772836,"TERMINAL",0,0,"44",,terminal_output +3582,4773828,"TERMINAL",0,0,"55",,terminal_output +3583,4775292,"TERMINAL",0,0,"66",,terminal_output +3584,4776012,"TERMINAL",0,0,"77",,terminal_output +3585,4777144,"TERMINAL",0,0,"89",,terminal_output +3586,4778085,"TERMINAL",0,0,"506:00",,terminal_output 
+3587,4779053,"TERMINAL",0,0,"11",,terminal_output +3588,4780520,"TERMINAL",0,0,"22",,terminal_output +3589,4781335,"TERMINAL",0,0,"33",,terminal_output +3590,4782310,"TERMINAL",0,0,"44",,terminal_output +3591,4783463,"TERMINAL",0,0,"55",,terminal_output +3592,4784303,"TERMINAL",0,0,"66",,terminal_output +3593,4785713,"TERMINAL",0,0,"77",,terminal_output +3594,4786455,"TERMINAL",0,0,"88",,terminal_output +3595,4787442,"TERMINAL",0,0,"99",,terminal_output +3596,4788721,"TERMINAL",0,0,"8:0010",,terminal_output +3597,4789622,"TERMINAL",0,0,"11",,terminal_output +3598,4790590,"TERMINAL",0,0,"22",,terminal_output +3599,4791666,"TERMINAL",0,0,"33",,terminal_output +3600,4792675,"TERMINAL",0,0,"44",,terminal_output +3601,4793724,"TERMINAL",0,0,"55",,terminal_output +3602,4794865,"TERMINAL",0,0,"66",,terminal_output +3603,4795867,"TERMINAL",0,0,"77",,terminal_output +3604,4797149,"TERMINAL",0,0,"88",,terminal_output +3605,4798604,"TERMINAL",0,0,"99",,terminal_output +3606,4799049,"TERMINAL",0,0,"1021",,terminal_output +3607,4800088,"TERMINAL",0,0,"22",,terminal_output +3608,4801049,"TERMINAL",0,0,"33",,terminal_output +3609,4802104,"TERMINAL",0,0,"44",,terminal_output +3610,4803345,"TERMINAL",0,0,"55",,terminal_output +3611,4804242,"TERMINAL",0,0,"66",,terminal_output +3612,4805292,"TERMINAL",0,0,"77",,terminal_output +3613,4806303,"TERMINAL",0,0,"88",,terminal_output +3614,4807631,"TERMINAL",0,0,"99",,terminal_output +3615,4808387,"TERMINAL",0,0,"2030",,terminal_output +3616,4809422,"TERMINAL",0,0,"11",,terminal_output +3617,4810495,"TERMINAL",0,0,"22",,terminal_output +3618,4811622,"TERMINAL",0,0,"33",,terminal_output +3619,4812562,"TERMINAL",0,0,"44",,terminal_output +3620,4813670,"TERMINAL",0,0,"55",,terminal_output +3621,4814664,"TERMINAL",0,0,"66",,terminal_output +3622,4815744,"TERMINAL",0,0,"77",,terminal_output +3623,4816789,"TERMINAL",0,0,"88",,terminal_output +3624,4817800,"TERMINAL",0,0,"99",,terminal_output +3625,4818895,"TERMINAL",0,0,"3040",,terminal_output +3626,4819940,"TERMINAL",0,0,"11",,terminal_output +3627,4821135,"TERMINAL",0,0,"22",,terminal_output +3628,4822109,"TERMINAL",0,0,"34",,terminal_output +3629,4823123,"TERMINAL",0,0,"55",,terminal_output +3630,4824157,"TERMINAL",0,0,"66",,terminal_output +3631,4825441,"TERMINAL",0,0,"77",,terminal_output +3632,4826491,"TERMINAL",0,0,"88",,terminal_output +3633,4827396,"TERMINAL",0,0,"99",,terminal_output +3634,4828649,"TERMINAL",0,0,"4050",,terminal_output +3635,4829352,"TERMINAL",0,0,"11",,terminal_output +3636,4830426,"TERMINAL",0,0,"22",,terminal_output +3637,4831465,"TERMINAL",0,0,"33",,terminal_output +3638,4832512,"TERMINAL",0,0,"44",,terminal_output +3639,4833496,"TERMINAL",0,0,"55",,terminal_output +3640,4834574,"TERMINAL",0,0,"66",,terminal_output +3641,4835776,"TERMINAL",0,0,"77",,terminal_output +3642,4836621,"TERMINAL",0,0,"88",,terminal_output +3643,4837660,"TERMINAL",0,0,"99",,terminal_output +3644,4838706,"TERMINAL",0,0,"507:00",,terminal_output +3645,4839786,"TERMINAL",0,0,"11",,terminal_output +3646,4841092,"TERMINAL",0,0,"22",,terminal_output +3647,4842015,"TERMINAL",0,0,"33",,terminal_output +3648,4842935,"TERMINAL",0,0,"44",,terminal_output +3649,4844044,"TERMINAL",0,0,"55",,terminal_output +3650,4845289,"TERMINAL",0,0,"67",,terminal_output +3651,4846312,"TERMINAL",0,0,"88",,terminal_output +3652,4847232,"TERMINAL",0,0,"99",,terminal_output +3653,4848143,"TERMINAL",0,0,"9:0010",,terminal_output +3654,4849185,"TERMINAL",0,0,"11",,terminal_output +3655,4850348,"TERMINAL",0,0,"22",,terminal_output 
+3656,4851310,"TERMINAL",0,0,"33",,terminal_output +3657,4852349,"TERMINAL",0,0,"44",,terminal_output +3658,4853394,"TERMINAL",0,0,"55",,terminal_output +3659,4854431,"TERMINAL",0,0,"66",,terminal_output +3660,4855448,"TERMINAL",0,0,"77",,terminal_output +3661,4856492,"TERMINAL",0,0,"88",,terminal_output +3662,4857678,"TERMINAL",0,0,"99",,terminal_output +3663,4858582,"TERMINAL",0,0,"1020",,terminal_output +3664,4860071,"TERMINAL",0,0,"11",,terminal_output +3665,4860808,"TERMINAL",0,0,"22",,terminal_output +3666,4861727,"TERMINAL",0,0,"33",,terminal_output +3667,4862791,"TERMINAL",0,0,"44",,terminal_output +3668,4863916,"TERMINAL",0,0,"55",,terminal_output +3669,4864949,"TERMINAL",0,0,"66",,terminal_output +3670,4866061,"TERMINAL",0,0,"77",,terminal_output +3671,4866940,"TERMINAL",0,0,"89",,terminal_output +3672,4868313,"TERMINAL",0,0,"2030",,terminal_output +3673,4869028,"TERMINAL",0,0,"11",,terminal_output +3674,4870361,"TERMINAL",0,0,"22",,terminal_output +3675,4871178,"TERMINAL",0,0,"33",,terminal_output +3676,4872304,"TERMINAL",0,0,"44",,terminal_output +3677,4873189,"TERMINAL",0,0,"55",,terminal_output +3678,4874350,"TERMINAL",0,0,"66",,terminal_output +3679,4875379,"TERMINAL",0,0,"77",,terminal_output +3680,4876317,"TERMINAL",0,0,"88",,terminal_output +3681,4877367,"TERMINAL",0,0,"99",,terminal_output +3682,4878430,"TERMINAL",0,0,"3040",,terminal_output +3683,4879437,"TERMINAL",0,0,"11",,terminal_output +3684,4880468,"TERMINAL",0,0,"22",,terminal_output +3685,4881508,"TERMINAL",0,0,"33",,terminal_output +3686,4882584,"TERMINAL",0,0,"44",,terminal_output +3687,4883668,"TERMINAL",0,0,"55",,terminal_output +3688,4884619,"TERMINAL",0,0,"66",,terminal_output +3689,4885659,"TERMINAL",0,0,"77",,terminal_output +3690,4887041,"TERMINAL",0,0,"88",,terminal_output +3691,4887780,"TERMINAL",0,0,"99",,terminal_output +3692,4888882,"TERMINAL",0,0,"4050",,terminal_output +3693,4889911,"TERMINAL",0,0,"11",,terminal_output +3694,4891085,"TERMINAL",0,0,"22",,terminal_output +3695,4892205,"TERMINAL",0,0,"33",,terminal_output +3696,4893094,"TERMINAL",0,0,"44",,terminal_output +3697,4894049,"TERMINAL",0,0,"56",,terminal_output +3698,4895147,"TERMINAL",0,0,"77",,terminal_output +3699,4896266,"TERMINAL",0,0,"88",,terminal_output +3700,4897172,"TERMINAL",0,0,"99",,terminal_output +3701,4898161,"TERMINAL",0,0,"508:00",,terminal_output +3702,4899258,"TERMINAL",0,0,"11",,terminal_output +3703,4900245,"TERMINAL",0,0,"22",,terminal_output +3704,4901328,"TERMINAL",0,0,"33",,terminal_output +3705,4902649,"TERMINAL",0,0,"44",,terminal_output +3706,4903418,"TERMINAL",0,0,"55",,terminal_output +3707,4904355,"TERMINAL",0,0,"66",,terminal_output +3708,4905392,"TERMINAL",0,0,"77",,terminal_output +3709,4906470,"TERMINAL",0,0,"88",,terminal_output +3710,4907471,"TERMINAL",0,0,"99",,terminal_output +3711,4908535,"TERMINAL",0,0,"20:0010",,terminal_output +3712,4909688,"TERMINAL",0,0,"11",,terminal_output +3713,4910601,"TERMINAL",0,0,"22",,terminal_output +3714,4911702,"TERMINAL",0,0,"33",,terminal_output +3715,4912693,"TERMINAL",0,0,"44",,terminal_output +3716,4913728,"TERMINAL",0,0,"55",,terminal_output +3717,4914771,"TERMINAL",0,0,"66",,terminal_output +3718,4915969,"TERMINAL",0,0,"77",,terminal_output +3719,4917126,"TERMINAL",0,0,"88",,terminal_output +3720,4917882,"TERMINAL",0,0,"99",,terminal_output +3721,4918970,"TERMINAL",0,0,"1020",,terminal_output +3722,4920094,"TERMINAL",0,0,"12",,terminal_output +3723,4921326,"TERMINAL",0,0,"33",,terminal_output +3724,4922142,"TERMINAL",0,0,"44",,terminal_output 
+3725,4923370,"TERMINAL",0,0,"55",,terminal_output +3726,4924112,"TERMINAL",0,0,"66",,terminal_output +3727,4925213,"TERMINAL",0,0,"77",,terminal_output +3728,4926228,"TERMINAL",0,0,"88",,terminal_output +3729,4927565,"TERMINAL",0,0,"99",,terminal_output +3730,4928387,"TERMINAL",0,0,"2030",,terminal_output +3731,4929407,"TERMINAL",0,0,"11",,terminal_output +3732,4930343,"TERMINAL",0,0,"22",,terminal_output +3733,4931386,"TERMINAL",0,0,"33",,terminal_output +3734,4932446,"TERMINAL",0,0,"44",,terminal_output +3735,4933469,"TERMINAL",0,0,"55",,terminal_output +3736,4934524,"TERMINAL",0,0,"66",,terminal_output +3737,4935649,"TERMINAL",0,0,"77",,terminal_output +3738,4936672,"TERMINAL",0,0,"88",,terminal_output +3739,4937635,"TERMINAL",0,0,"99",,terminal_output +3740,4938719,"TERMINAL",0,0,"3040",,terminal_output +3741,4939757,"TERMINAL",0,0,"11",,terminal_output +3742,4940744,"TERMINAL",0,0,"22",,terminal_output +3743,4941908,"TERMINAL",0,0,"33",,terminal_output +3744,4942830,"TERMINAL",0,0,"44",,terminal_output +3745,4943941,"TERMINAL",0,0,"55",,terminal_output +3746,4944937,"TERMINAL",0,0,"66",,terminal_output +3747,4946090,"TERMINAL",0,0,"77",,terminal_output +3748,4947420,"TERMINAL",0,0,"99",,terminal_output +3749,4948099,"TERMINAL",0,0,"4050",,terminal_output +3750,4949048,"TERMINAL",0,0,"11",,terminal_output +3751,4950195,"TERMINAL",0,0,"22",,terminal_output +3752,4951208,"TERMINAL",0,0,"33",,terminal_output +3753,4952227,"TERMINAL",0,0,"44",,terminal_output +3754,4953353,"TERMINAL",0,0,"55",,terminal_output +3755,4954252,"TERMINAL",0,0,"66",,terminal_output +3756,4955300,"TERMINAL",0,0,"77",,terminal_output +3757,4956423,"TERMINAL",0,0,"88",,terminal_output +3758,4957655,"TERMINAL",0,0,"99",,terminal_output +3759,4958589,"TERMINAL",0,0,"509:00",,terminal_output +3760,4959435,"TERMINAL",0,0,"11",,terminal_output +3761,4960712,"TERMINAL",0,0,"22",,terminal_output +3762,4961513,"TERMINAL",0,0,"33",,terminal_output +3763,4962673,"TERMINAL",0,0,"44",,terminal_output +3764,4963689,"TERMINAL",0,0,"55",,terminal_output +3765,4964628,"TERMINAL",0,0,"66",,terminal_output +3766,4965664,"TERMINAL",0,0,"77",,terminal_output +3767,4966701,"TERMINAL",0,0,"88",,terminal_output +3768,4967749,"TERMINAL",0,0,"99",,terminal_output +3769,4969011,"TERMINAL",0,0,"1:0010",,terminal_output +3770,4970035,"TERMINAL",0,0,"11",,terminal_output +3771,4970859,"TERMINAL",0,0,"22",,terminal_output +3772,4971981,"TERMINAL",0,0,"33",,terminal_output +3773,4973206,"TERMINAL",0,0,"44",,terminal_output +3774,4974029,"TERMINAL",0,0,"56",,terminal_output +3775,4975049,"TERMINAL",0,0,"77",,terminal_output +3776,4976219,"TERMINAL",0,0,"88",,terminal_output +3777,4977117,"TERMINAL",0,0,"99",,terminal_output +3778,4978379,"TERMINAL",0,0,"1020",,terminal_output +3779,4979208,"TERMINAL",0,0,"11",,terminal_output +3780,4980216,"TERMINAL",0,0,"22",,terminal_output +3781,4981365,"TERMINAL",0,0,"33",,terminal_output +3782,4982426,"TERMINAL",0,0,"44",,terminal_output +3783,4983356,"TERMINAL",0,0,"55",,terminal_output +3784,4984374,"TERMINAL",0,0,"66",,terminal_output +3785,4985512,"TERMINAL",0,0,"77",,terminal_output +3786,4986509,"TERMINAL",0,0,"88",,terminal_output +3787,4987460,"TERMINAL",0,0,"99",,terminal_output +3788,4988791,"TERMINAL",0,0,"2030",,terminal_output +3789,4989550,"TERMINAL",0,0,"11",,terminal_output +3790,4990722,"TERMINAL",0,0,"22",,terminal_output +3791,4991735,"TERMINAL",0,0,"33",,terminal_output +3792,4992696,"TERMINAL",0,0,"44",,terminal_output +3793,4993877,"TERMINAL",0,0,"55",,terminal_output 
+3794,4994797,"TERMINAL",0,0,"66",,terminal_output +3795,4995904,"TERMINAL",0,0,"77",,terminal_output +3796,4996947,"TERMINAL",0,0,"88",,terminal_output +3797,4998077,"TERMINAL",0,0,"99",,terminal_output +3798,4999032,"TERMINAL",0,0,"3040",,terminal_output +3799,4999984,"TERMINAL",0,0,"12",,terminal_output +3800,5001451,"TERMINAL",0,0,"33",,terminal_output +3801,5002066,"TERMINAL",0,0,"44",,terminal_output +3802,5003497,"TERMINAL",0,0,"55",,terminal_output +3803,5004169,"TERMINAL",0,0,"66",,terminal_output +3804,5005376,"TERMINAL",0,0,"77",,terminal_output +3805,5006268,"TERMINAL",0,0,"88",,terminal_output +3806,5007487,"TERMINAL",0,0,"99",,terminal_output +3807,5008511,"TERMINAL",0,0,"4050",,terminal_output +3808,5009298,"TERMINAL",0,0,"11",,terminal_output +3809,5010360,"TERMINAL",0,0,"22",,terminal_output +3810,5011415,"TERMINAL",0,0,"33",,terminal_output +3811,5012439,"TERMINAL",0,0,"44",,terminal_output +3812,5013463,"TERMINAL",0,0,"55",,terminal_output +3813,5014504,"TERMINAL",0,0,"66",,terminal_output +3814,5015778,"TERMINAL",0,0,"77",,terminal_output +3815,5016596,"TERMINAL",0,0,"88",,terminal_output +3816,5017618,"TERMINAL",0,0,"99",,terminal_output +3817,5018744,"TERMINAL",0,0,"5040:00",,terminal_output +3818,5019783,"TERMINAL",0,0,"11",,terminal_output +3819,5020845,"TERMINAL",0,0,"22",,terminal_output +3820,5021794,"TERMINAL",0,0,"33",,terminal_output +3821,5022862,"TERMINAL",0,0,"44",,terminal_output +3822,5023943,"TERMINAL",0,0,"55",,terminal_output +3823,5024989,"TERMINAL",0,0,"66",,terminal_output +3824,5026014,"TERMINAL",0,0,"78",,terminal_output +3825,5027176,"TERMINAL",0,0,"99",,terminal_output +3826,5028284,"TERMINAL",0,0,"2:0010",,terminal_output +3827,5029081,"TERMINAL",0,0,"11",,terminal_output +3828,5030534,"TERMINAL",0,0,"22",,terminal_output +3829,5031158,"TERMINAL",0,0,"33",,terminal_output +3830,5032195,"TERMINAL",0,0,"44",,terminal_output +3831,5033293,"TERMINAL",0,0,"55",,terminal_output +3832,5034409,"TERMINAL",0,0,"66",,terminal_output +3833,5035427,"TERMINAL",0,0,"77",,terminal_output +3834,5036451,"TERMINAL",0,0,"88",,terminal_output +3835,5037541,"TERMINAL",0,0,"99",,terminal_output +3836,5038456,"TERMINAL",0,0,"1020",,terminal_output +3837,5039513,"TERMINAL",0,0,"11",,terminal_output +3838,5040560,"TERMINAL",0,0,"22",,terminal_output +3839,5041610,"TERMINAL",0,0,"33",,terminal_output +3840,5042616,"TERMINAL",0,0,"44",,terminal_output +3841,5043709,"TERMINAL",0,0,"55",,terminal_output +3842,5044721,"TERMINAL",0,0,"66",,terminal_output +3843,5045752,"TERMINAL",0,0,"77",,terminal_output +3844,5046888,"TERMINAL",0,0,"88",,terminal_output +3845,5047830,"TERMINAL",0,0,"99",,terminal_output +3846,5048944,"TERMINAL",0,0,"2030",,terminal_output +3847,5050061,"TERMINAL",0,0,"11",,terminal_output +3848,5051058,"TERMINAL",0,0,"22",,terminal_output +3849,5052122,"TERMINAL",0,0,"34",,terminal_output +3850,5053033,"TERMINAL",0,0,"55",,terminal_output +3851,5054043,"TERMINAL",0,0,"66",,terminal_output +3852,5055180,"TERMINAL",0,0,"77",,terminal_output +3853,5056298,"TERMINAL",0,0,"88",,terminal_output +3854,5057230,"TERMINAL",0,0,"99",,terminal_output +3855,5058245,"TERMINAL",0,0,"3040",,terminal_output +3856,5059263,"TERMINAL",0,0,"11",,terminal_output +3857,5060599,"TERMINAL",0,0,"22",,terminal_output +3858,5061418,"TERMINAL",0,0,"33",,terminal_output +3859,5062441,"TERMINAL",0,0,"44",,terminal_output +3860,5063465,"TERMINAL",0,0,"55",,terminal_output +3861,5064489,"TERMINAL",0,0,"66",,terminal_output +3862,5065820,"TERMINAL",0,0,"77",,terminal_output 
+3863,5066639,"TERMINAL",0,0,"88",,terminal_output +3864,5067557,"TERMINAL",0,0,"99",,terminal_output +3865,5068592,"TERMINAL",0,0,"4050",,terminal_output +3866,5069635,"TERMINAL",0,0,"11",,terminal_output +3867,5071139,"TERMINAL",0,0,"22",,terminal_output +3868,5071720,"TERMINAL",0,0,"33",,terminal_output +3869,5072757,"TERMINAL",0,0,"44",,terminal_output +3870,5073902,"TERMINAL",0,0,"55",,terminal_output +3871,5074925,"TERMINAL",0,0,"66",,terminal_output +3872,5075879,"TERMINAL",0,0,"77",,terminal_output +3873,5076971,"TERMINAL",0,0,"88",,terminal_output +3874,5077995,"TERMINAL",0,0,"91:00",,terminal_output +3875,5079045,"TERMINAL",0,0,"511",,terminal_output +3876,5080145,"TERMINAL",0,0,"22",,terminal_output +3877,5081168,"TERMINAL",0,0,"33",,terminal_output +3878,5082125,"TERMINAL",0,0,"44",,terminal_output +3879,5083216,"TERMINAL",0,0,"55",,terminal_output +3880,5084202,"TERMINAL",0,0,"66",,terminal_output +3881,5085261,"TERMINAL",0,0,"77",,terminal_output +3882,5086433,"TERMINAL",0,0,"88",,terminal_output +3883,5087617,"TERMINAL",0,0,"99",,terminal_output +3884,5088743,"TERMINAL",0,0,"3:0010",,terminal_output +3885,5089427,"TERMINAL",0,0,"11",,terminal_output +3886,5090475,"TERMINAL",0,0,"22",,terminal_output +3887,5091515,"TERMINAL",0,0,"33",,terminal_output +3888,5092834,"TERMINAL",0,0,"44",,terminal_output +3889,5093655,"TERMINAL",0,0,"55",,terminal_output +3890,5094637,"TERMINAL",0,0,"66",,terminal_output +3891,5095669,"TERMINAL",0,0,"77",,terminal_output +3892,5096723,"TERMINAL",0,0,"88",,terminal_output +3893,5097770,"TERMINAL",0,0,"99",,terminal_output +3894,5098883,"TERMINAL",0,0,"1020",,terminal_output +3895,5099919,"TERMINAL",0,0,"11",,terminal_output +3896,5100896,"TERMINAL",0,0,"22",,terminal_output +3897,5101946,"TERMINAL",0,0,"33",,terminal_output +3898,5102959,"TERMINAL",0,0,"44",,terminal_output +3899,5104052,"TERMINAL",0,0,"56",,terminal_output +3900,5105116,"TERMINAL",0,0,"77",,terminal_output +3901,5106242,"TERMINAL",0,0,"88",,terminal_output +3902,5107272,"TERMINAL",0,0,"99",,terminal_output +3903,5108190,"TERMINAL",0,0,"2030",,terminal_output +3904,5109163,"TERMINAL",0,0,"11",,terminal_output +3905,5110538,"TERMINAL",0,0,"22",,terminal_output +3906,5111263,"TERMINAL",0,0,"33",,terminal_output +3907,5112457,"TERMINAL",0,0,"44",,terminal_output +3908,5113416,"TERMINAL",0,0,"55",,terminal_output +3909,5114600,"TERMINAL",0,0,"66",,terminal_output +3910,5115450,"TERMINAL",0,0,"77",,terminal_output +3911,5116505,"TERMINAL",0,0,"88",,terminal_output +3912,5117703,"TERMINAL",0,0,"99",,terminal_output +3913,5118727,"TERMINAL",0,0,"3040",,terminal_output +3914,5119561,"TERMINAL",0,0,"11",,terminal_output +3915,5120670,"TERMINAL",0,0,"22",,terminal_output +3916,5121639,"TERMINAL",0,0,"33",,terminal_output +3917,5122714,"TERMINAL",0,0,"44",,terminal_output +3918,5124023,"TERMINAL",0,0,"55",,terminal_output +3919,5124877,"TERMINAL",0,0,"66",,terminal_output +3920,5125936,"TERMINAL",0,0,"77",,terminal_output +3921,5127024,"TERMINAL",0,0,"88",,terminal_output +3922,5127891,"TERMINAL",0,0,"99",,terminal_output +3923,5129055,"TERMINAL",0,0,"4050",,terminal_output +3924,5130296,"TERMINAL",0,0,"12",,terminal_output +3925,5131010,"TERMINAL",0,0,"33",,terminal_output +3926,5132027,"TERMINAL",0,0,"44",,terminal_output +3927,5133101,"TERMINAL",0,0,"55",,terminal_output +3928,5134128,"TERMINAL",0,0,"66",,terminal_output +3929,5135202,"TERMINAL",0,0,"77",,terminal_output +3930,5136534,"TERMINAL",0,0,"88",,terminal_output +3931,5137455,"TERMINAL",0,0,"99",,terminal_output 
+3932,5138475,"TERMINAL",0,0,"502:00",,terminal_output +3933,5141465,"TERMINAL",0,0,"13",,terminal_output +3934,5142468,"TERMINAL",0,0,"44",,terminal_output +3935,5143902,"TERMINAL",0,0,"55",,terminal_output +3936,5144449,"TERMINAL",0,0,"66",,terminal_output +3937,5145551,"TERMINAL",0,0,"77",,terminal_output +3938,5146692,"TERMINAL",0,0,"88",,terminal_output +3939,5147584,"TERMINAL",0,0,"99",,terminal_output +3940,5148718,"TERMINAL",0,0,"4:0010",,terminal_output +3941,5149675,"TERMINAL",0,0,"11",,terminal_output +3942,5150685,"TERMINAL",0,0,"22",,terminal_output +3943,5151725,"TERMINAL",0,0,"33",,terminal_output +3944,5152803,"TERMINAL",0,0,"44",,terminal_output +3945,5153826,"TERMINAL",0,0,"55",,terminal_output +3946,5154853,"TERMINAL",0,0,"66",,terminal_output +3947,5155876,"TERMINAL",0,0,"77",,terminal_output +3948,5156912,"TERMINAL",0,0,"88",,terminal_output +3949,5157950,"TERMINAL",0,0,"920",,terminal_output +3950,5159045,"TERMINAL",0,0,"111",,terminal_output +3951,5160272,"TERMINAL",0,0,"22",,terminal_output +3952,5161242,"TERMINAL",0,0,"33",,terminal_output +3953,5162116,"TERMINAL",0,0,"44",,terminal_output +3954,5163548,"TERMINAL",0,0,"55",,terminal_output +3955,5164266,"TERMINAL",0,0,"66",,terminal_output +3956,5165262,"TERMINAL",0,0,"77",,terminal_output +3957,5166459,"TERMINAL",0,0,"88",,terminal_output +3958,5167744,"TERMINAL",0,0,"99",,terminal_output +3959,5168562,"TERMINAL",0,0,"2030",,terminal_output +3960,5169483,"TERMINAL",0,0,"11",,terminal_output +3961,5170710,"TERMINAL",0,0,"22",,terminal_output +3962,5171475,"TERMINAL",0,0,"33",,terminal_output +3963,5172503,"TERMINAL",0,0,"44",,terminal_output +3964,5173552,"TERMINAL",0,0,"55",,terminal_output +3965,5174589,"TERMINAL",0,0,"66",,terminal_output +3966,5175634,"TERMINAL",0,0,"77",,terminal_output +3967,5176761,"TERMINAL",0,0,"88",,terminal_output +3968,5177704,"TERMINAL",0,0,"99",,terminal_output +3969,5178801,"TERMINAL",0,0,"3040",,terminal_output +3970,5179782,"TERMINAL",0,0,"11",,terminal_output +3971,5181047,"TERMINAL",0,0,"22",,terminal_output +3972,5182276,"TERMINAL",0,0,"33",,terminal_output +3973,5182909,"TERMINAL",0,0,"44",,terminal_output +3974,5184060,"TERMINAL",0,0,"56",,terminal_output +3975,5185042,"TERMINAL",0,0,"77",,terminal_output +3976,5186282,"TERMINAL",0,0,"88",,terminal_output +3977,5187396,"TERMINAL",0,0,"99",,terminal_output +3978,5188211,"TERMINAL",0,0,"4050",,terminal_output +3979,5189137,"TERMINAL",0,0,"11",,terminal_output +3980,5190184,"TERMINAL",0,0,"22",,terminal_output +3981,5191325,"TERMINAL",0,0,"33",,terminal_output +3982,5192407,"TERMINAL",0,0,"44",,terminal_output +3983,5193329,"TERMINAL",0,0,"55",,terminal_output +3984,5194354,"TERMINAL",0,0,"66",,terminal_output +3985,5195786,"TERMINAL",0,0,"77",,terminal_output +3986,5196605,"TERMINAL",0,0,"88",,terminal_output +3987,5197527,"TERMINAL",0,0,"99",,terminal_output +3988,5198855,"TERMINAL",0,0,"503:00",,terminal_output +3989,5199878,"TERMINAL",0,0,"11",,terminal_output +3990,5200696,"TERMINAL",0,0,"22",,terminal_output +3991,5201608,"TERMINAL",0,0,"33",,terminal_output +3992,5202842,"TERMINAL",0,0,"44",,terminal_output +3993,5203690,"TERMINAL",0,0,"55",,terminal_output +3994,5204730,"TERMINAL",0,0,"66",,terminal_output +3995,5205823,"TERMINAL",0,0,"77",,terminal_output diff --git a/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-50eefecf-af26-4b6a-b032-3302844830811752135934013-2025_07_10-10.26.13.898/source.csv 
b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-50eefecf-af26-4b6a-b032-3302844830811752135934013-2025_07_10-10.26.13.898/source.csv new file mode 100644 index 0000000000000000000000000000000000000000..56a284a186882b02ca937fd7547cd79463cf300c --- /dev/null +++ b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-50eefecf-af26-4b6a-b032-3302844830811752135934013-2025_07_10-10.26.13.898/source.csv @@ -0,0 +1,3365 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +2,77,"tasks",0,0,"",Log,tab +3,155,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"10:26:13 AM [info] Activating crowd-code\n10:26:13 AM [info] Recording started\n10:26:13 AM [info] Initializing git provider using file system watchers...\n",Log,tab +4,306,"extension-output-pdoom-org.crowd-code-#1-crowd-code",153,0,"10:26:14 AM [info] Git repository found\n10:26:14 AM [info] Git provider initialized successfully\n10:26:14 AM [info] Initial git state: [object Object]\n",Log,content +5,3267,"TERMINAL",0,0,"/bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt",,terminal_command +6,3295,"TERMINAL",0,0,"]633;E;2025-07-10 10:26:17 /bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt;4d5e031d-45a2-4c0b-bc81-2fc4b975dca2]633;C]0;tum_cte0515@hkn1990:/hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash]633;D;0",,terminal_output +7,17721,"TERMINAL",0,0,"srun --overlap --jobid=3333586 --pty /bin/bash",,terminal_command +8,17764,"TERMINAL",0,0,"]633;E;2025-07-10 10:26:31 srun --overlap --jobid=3333586 --pty /bin/bash;16c08be6-3885-420f-ac53-f5272aed6e54]633;C",,terminal_output +9,17941,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +10,18602,"TERMINAL",0,0,"]0;tum_cte0515@hkn0803:~/Projects/jafar[?2004h]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B",,terminal_output +11,19904,"TERMINAL",0,0,"s",,terminal_output +12,19970,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +13,20083,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +14,20145,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +15,20614,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +16,20791,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +17,20899,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +18,21007,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +19,21129,"TERMINAL",0,0,"",,terminal_output +20,21515,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +21,21724,"TERMINAL",0,0,"env/",,terminal_output +22,22428,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +23,22533,"TERMINAL",0,0,"in/",,terminal_output +24,22907,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +25,23065,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +26,23667,"TERMINAL",0,0,"tivate",,terminal_output +27,23961,"TERMINAL",0,0,"[?25l[?2004l\r]0;tum_cte0515@hkn0803:~/Projects/jafar[?2004h(jafar) ]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B[?25h",,terminal_output +28,24910,"TERMINAL",0,0,"\r(reverse-i-search)`': ",,terminal_output 
+29,25130,"TERMINAL",0,0,"s': source .venv/bin/activate\r",,terminal_output +30,25221,"TERMINAL",0,0,"[?25lsh': /bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt[?25h",,terminal_output +31,25325,"TERMINAL",0,0,"\r ': rm sync_runner-grain.sh \r\n\r",,terminal_output +32,26415,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0803:~/Projects/jafar[?2004h(jafar) ]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B",,terminal_output +33,44774,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"\n# Log the sbatch script\ncat $0\n\n# module unload mpi/openmpi/5.0\n# module unload devel/cuda/12.4\n# source .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_lam_action_space_scaling_20/3318547/lam_1751657975_200000/\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/train_tokenizer_batch_size_scaling_16_node/3321526/tokenizer_22000/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log \\n --num_latent_actions=20 \\n --log_checkpoint_interval=1000 \\n --name=dynamics-yolorun-tf-records-$slurm_job_id \\n --tags dynamics yolo-run tf_records \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $tf_records_dir \\n --lam_checkpoint=$lam_ckpt_dir\n ",shellscript,tab +34,48281,"TERMINAL",0,0,"[?25lsh[?25h",,terminal_output +35,48322,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +36,48419,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +37,48915,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",,terminal_output +38,49329,"TERMINAL",0,0,"[?25lslurm/jobs/mihir/horeka/yolo-runs/tester.sh\r\n[?2004l\r[?25h\r\n# Log the sbatch script\r\ncat $0\r\n\r\n# module unload mpi/openmpi/5.0\r\n# module unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\r\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\r\n\r\njob_name=""debug""\r\nslurm_job_id=""debug-mihir""\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_lam_action_space_scaling_20/3318547/lam_1751657975_200000/\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/train_tokenizer_batch_size_scaling_16_node/3321526/tokenizer_22000/\r\n\r\nenv | grep SLURM\r\n\r\nsrun python train_dynamics.py \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=96 \\r\n --min_lr=0 \\r\n --max_lr=1.4e-4 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --num_latent_actions=20 \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-yolorun-tf-records-$slurm_job_id \\r\n --tags dynamics yolo-run tf_records \\r\n 
--entity instant-uv \\r\n --project jafar \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $tf_records_dir \\r\n --lam_checkpoint=$lam_ckpt_dir\r\n SLURM_STEP_NUM_TASKS=2\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_STEP_GPUS=0,1,2,3\r\nSLURM_CPU_BIND=quiet,mask_cpu:0x00000000FC00000003F00000000FC00000003F\r\nSLURM_TASK_PID=3748328\r\nSLURM_LOCALID=0\r\nSLURM_CPU_BIND_VERBOSE=quiet\r\nSLURMD_NODENAME=hkn0803\r\nSLURM_JOB_START_TIME=1752092725\r\nSLURM_STEP_NODELIST=hkn[0803,0806]\r\nSLURM_JOB_END_TIME=1752179125\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_UMASK=0022\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_STEPID=0\r\nSLURM_CPU_BIND_LIST=0x00000000FC00000003F00000000FC00000003F\r\nSLURM_JOBID=3333586\r\nSLURM_PTY_PORT=35039\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=53\r\nSLURM_CPU_BIND_TYPE=mask_cpu:\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_NTASKS=2\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e27.hkn0803\r\nSLURM_DISTRIBUTION=cyclic\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=228\r\nSLURM_NODELIST=hkn[0803,0806]\r\nSLURM_SRUN_COMM_PORT=40795\r\nSLURM_STEP_ID=0\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=2\r\nSLURM_NNODES=2\r\nSLURM_JOB_ID=3333586\r\nSLURM_NODEID=0\r\nSLURMD_TRES_FREQ=gpu:high,memory=high\r\nSLURM_STEP_NUM_NODES=2\r\nSLURM_STEP_TASKS_PER_NODE=1(x2)\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=bash\r\nSLURM_STEP_LAUNCHER_PORT=40795\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0803,0806]\r\n",,terminal_output +39,49438,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +40,71167,"TERMINAL",0,0,"2025-07-10 10:27:24.988862: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:27:24.988698: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n",,terminal_output +41,71462,"TERMINAL",0,0,"WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136045.246926 3748573 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136045.246985 2681374 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1752136045.283371 2681374 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1752136045.283890 3748573 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\n",,terminal_output +42,71783,"TERMINAL",0,0,"W0000 00:00:1752136045.606016 3748573 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136045.606040 3748573 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136045.606042 3748573 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136045.606044 3748573 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136045.607750 2681374 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136045.607777 2681374 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136045.607779 2681374 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136045.607780 2681374 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +43,101518,"TERMINAL",0,0,"E0710 10:27:55.328830 3748573 cuda_dnn.cc:522] Loaded runtime CuDNN library: 9.5.1 but source was compiled with: 9.8.0. CuDNN library needs to have matching major version and equal or higher minor version. If using a binary install, upgrade your CuDNN library. If building from sources, make sure the library loaded at runtime is compatible with the version specified during compile configuration.\r\nE0710 10:27:55.329384 2681374 cuda_dnn.cc:522] Loaded runtime CuDNN library: 9.5.1 but source was compiled with: 9.8.0. CuDNN library needs to have matching major version and equal or higher minor version. If using a binary install, upgrade your CuDNN library. If building from sources, make sure the library loaded at runtime is compatible with the version specified during compile configuration.\r\nE0710 10:27:55.332548 3748573 cuda_dnn.cc:522] Loaded runtime CuDNN library: 9.5.1 but source was compiled with: 9.8.0. CuDNN library needs to have matching major version and equal or higher minor version. If using a binary install, upgrade your CuDNN library. If building from sources, make sure the library loaded at runtime is compatible with the version specified during compile configuration.\r\nE0710 10:27:55.334354 2681374 cuda_dnn.cc:522] Loaded runtime CuDNN library: 9.5.1 but source was compiled with: 9.8.0. CuDNN library needs to have matching major version and equal or higher minor version. If using a binary install, upgrade your CuDNN library. If building from sources, make sure the library loaded at runtime is compatible with the version specified during compile configuration.\r\n",,terminal_output +44,101634,"TERMINAL",0,0,"E0710 10:27:55.453247 3748573 cuda_dnn.cc:522] Loaded runtime CuDNN library: 9.5.1 but source was compiled with: 9.8.0. CuDNN library needs to have matching major version and equal or higher minor version. If using a binary install, upgrade your CuDNN library. If building from sources, make sure the library loaded at runtime is compatible with the version specified during compile configuration.\r\nE0710 10:27:55.455332 2681374 cuda_dnn.cc:522] Loaded runtime CuDNN library: 9.5.1 but source was compiled with: 9.8.0. 
CuDNN library needs to have matching major version and equal or higher minor version. If using a binary install, upgrade your CuDNN library. If building from sources, make sure the library loaded at runtime is compatible with the version specified during compile configuration.\r\nE0710 10:27:55.458474 3748573 cuda_dnn.cc:522] Loaded runtime CuDNN library: 9.5.1 but source was compiled with: 9.8.0. CuDNN library needs to have matching major version and equal or higher minor version. If using a binary install, upgrade your CuDNN library. If building from sources, make sure the library loaded at runtime is compatible with the version specified during compile configuration.\r\nE0710 10:27:55.460536 2681374 cuda_dnn.cc:522] Loaded runtime CuDNN library: 9.5.1 but source was compiled with: 9.8.0. CuDNN library needs to have matching major version and equal or higher minor version. If using a binary install, upgrade your CuDNN library. If building from sources, make sure the library loaded at runtime is compatible with the version specified during compile configuration.\r\n",,terminal_output +45,101767,"TERMINAL",0,0,"initializing jax distributed\r\njax distributed initialized\r\nRunning on 2 devices.\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 134, in <module>\r\n rng = jax.random.PRNGKey(args.seed)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 249, in PRNGKey\r\n return _return_prng_keys(True, _key('PRNGKey', seed, impl))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 201, in _key\r\ninitializing jax distributed\r\njax distributed initialized\r\nRunning on 2 devices.\r\n return prng.random_seed(seed, impl=impl)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/prng.py"", line 551, in random_seed\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 134, in <module>\r\n seeds_arr = jnp.asarray(np.int64(seeds))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/lax_numpy.py"", line 5732, in asarray\r\n rng = jax.random.PRNGKey(args.seed)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 249, in PRNGKey\r\n return _return_prng_keys(True, _key('PRNGKey', seed, impl))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/random.py"", line 201, in _key\r\n return array(a, dtype=dtype, copy=bool(copy), order=order, device=device)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/lax_numpy.py"", line 5553, in array\r\n return prng.random_seed(seed, impl=impl)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/prng.py"", line 551, in random_seed\r\n seeds_arr = jnp.asarray(np.int64(seeds))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/lax_numpy.py"", line 5732, in asarray\r\n return array(a, dtype=dtype, copy=bool(copy), order=order, device=device)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/lax_numpy.py"", line 5553, in array\r\n out_array: Array = lax_internal._convert_element_type(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 1727, in _convert_element_type\r\n return convert_element_type_p.bind(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 531, in bind\r\n return self._true_bind(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 551, in _true_bind\r\n out_array: Array = lax_internal._convert_element_type(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 1727, in _convert_element_type\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 4897, in _convert_element_type_bind_with_trace\r\n return convert_element_type_p.bind(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 531, in bind\r\n return self._true_bind(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 551, in _true_bind\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lax/lax.py"", line 4897, in _convert_element_type_bind_with_trace\r\n operand = 
core.Primitive.bind_with_trace(convert_element_type_p, trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 556, in bind_with_trace\r\n return trace.process_primitive(self, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1060, in process_primitive\r\n return primitive.impl(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 88, in apply_primitive\r\n operand = core.Primitive.bind_with_trace(convert_element_type_p, trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 556, in bind_with_trace\r\n outs = fun(*args)\r\njaxlib._jax.XlaRuntimeError: FAILED_PRECONDITION: DNN library initialization failed. Look at the errors above for more details.\r\n return trace.process_primitive(self, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1060, in process_primitive\r\n return primitive.impl(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 88, in apply_primitive\r\n outs = fun(*args)\r\njaxlib._jax.XlaRuntimeError: FAILED_PRECONDITION: DNN library initialization failed. Look at the errors above for more details.\r\n",,terminal_output +46,103513,"TERMINAL",0,0,"srun: error: hkn0806: task 1: Exited with exit code 1\r\nsrun: error: hkn0803: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0803:~/Projects/jafar[?2004h(jafar) ]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B",,terminal_output +47,186722,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=10:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_dyn_yolorun\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\n# array_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_lam_action_space_scaling_20/3318547/lam_1751657975_200000/\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/train_tokenizer_batch_size_scaling_16_node/3321526/tokenizer_22000/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log \\n --num_latent_actions=20 \\n --log_checkpoint_interval=1000 \\n 
--name=dynamics-yolorun-tf-records-$slurm_job_id \\n --tags dynamics yolo-run tf_records \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $tf_records_dir \\n --lam_checkpoint=$lam_ckpt_dir\n",shellscript,tab +48,187882,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"",shellscript,tab +49,189613,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",81,0,"",shellscript,selection_mouse +50,190482,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",64,0,"",shellscript,selection_mouse +51,190483,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",63,0,"",shellscript,selection_command +52,191255,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",33,31,"# module unload mpi/openmpi/5.0",shellscript,selection_command +53,191384,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",33,63,"# module unload mpi/openmpi/5.0\n# module unload devel/cuda/12.4",shellscript,selection_command +54,191513,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",33,91,"# module unload mpi/openmpi/5.0\n# module unload devel/cuda/12.4\n# source .venv/bin/activate",shellscript,selection_command +55,191808,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",33,0,"",shellscript,selection_command +56,192607,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",97,1,"",shellscript,content +57,192608,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",65,1,"",shellscript,content +58,192608,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",33,1,"",shellscript,content +59,192727,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",95,1,"",shellscript,content +60,192728,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",64,1,"",shellscript,content +61,192728,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",33,1,"",shellscript,content +62,193118,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",33,0,"",shellscript,selection_command +63,195117,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/yolo-runs/tester.sh",,terminal_output +64,195789,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\r\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\r\n\r\njob_name=""debug""\r\nslurm_job_id=""debug-mihir""\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_lam_action_space_scaling_20/3318547/lam_1751657975_200000/\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/train_tokenizer_batch_size_scaling_16_node/3321526/tokenizer_22000/\r\n\r\nenv | grep SLURM\r\n\r\nsrun python train_dynamics.py \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=96 \\r\n --min_lr=0 \\r\n --max_lr=1.4e-4 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --num_latent_actions=20 \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-yolorun-tf-records-$slurm_job_id \\r\n --tags dynamics yolo-run tf_records \\r\n --entity instant-uv \\r\n --project jafar \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $tf_records_dir \\r\n --lam_checkpoint=$lam_ckpt_dir\r\n ",,terminal_output 
+65,195969,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=2\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_STEP_GPUS=0,1,2,3\r\nSLURM_CPU_BIND=quiet,mask_cpu:0x00000000FC00000003F00000000FC00000003F\r\nSLURM_TASK_PID=3748328\r\nSLURM_LOCALID=0\r\nSLURM_CPU_BIND_VERBOSE=quiet\r\nSLURMD_NODENAME=hkn0803\r\nSLURM_JOB_START_TIME=1752092725\r\nSLURM_STEP_NODELIST=hkn[0803,0806]\r\nSLURM_JOB_END_TIME=1752179125\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_UMASK=0022\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_STEPID=0\r\nSLURM_CPU_BIND_LIST=0x00000000FC00000003F00000000FC00000003F\r\nSLURM_JOBID=3333586\r\nSLURM_PTY_PORT=35039\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=53\r\nSLURM_CPU_BIND_TYPE=mask_cpu:\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_NTASKS=2\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e27.hkn0803\r\nSLURM_DISTRIBUTION=cyclic\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=228\r\nSLURM_NODELIST=hkn[0803,0806]\r\nSLURM_SRUN_COMM_PORT=40795\r\nSLURM_STEP_ID=0\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=2\r\nSLURM_NNODES=2\r\nSLURM_JOB_ID=3333586\r\nSLURM_NODEID=0\r\nSLURMD_TRES_FREQ=gpu:high,memory=high\r\nSLURM_STEP_NUM_NODES=2\r\nSLURM_STEP_TASKS_PER_NODE=1(x2)\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=bash\r\nSLURM_STEP_LAUNCHER_PORT=40795\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0803,0806]\r\n",,terminal_output +66,196109,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +67,198322,"TERMINAL",0,0,"2025-07-10 10:29:32.131995: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136172.145185 3749266 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1752136172.149864 3749266 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\n",,terminal_output +68,198381,"TERMINAL",0,0,"W0000 00:00:1752136172.162913 3749266 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136172.162929 3749266 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136172.162931 3749266 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136172.162932 3749266 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\n2025-07-10 10:29:32.166508: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136172.179964 2681953 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1752136172.184309 2681953 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1752136172.196798 2681953 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136172.196814 2681953 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136172.196816 2681953 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136172.196817 2681953 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +69,202593,"TERMINAL",0,0,"W0000 00:00:1752136176.356846 3749266 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136176.365522 2681953 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. 
Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +70,209226,"TERMINAL",0,0,"2025-07-10 10:29:43.050470: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 9.05GiB (9718810541 bytes) by rematerialization; only reduced to 19.37GiB (20801126400 bytes), down from 19.37GiB (20801126400 bytes) originally\r\n2025-07-10 10:29:43.051258: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 9.05GiB (9718810541 bytes) by rematerialization; only reduced to 19.37GiB (20801126400 bytes), down from 19.37GiB (20801126400 bytes) originally\r\n2025-07-10 10:29:43.051356: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 9.05GiB (9718810541 bytes) by rematerialization; only reduced to 19.37GiB (20801126400 bytes), down from 19.37GiB (20801126400 bytes) originally\r\n2025-07-10 10:29:43.051508: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 9.05GiB (9718810541 bytes) by rematerialization; only reduced to 19.37GiB (20801126400 bytes), down from 19.37GiB (20801126400 bytes) originally\r\n2025-07-10 10:29:43.051564: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 9.05GiB (9718810541 bytes) by rematerialization; only reduced to 19.38GiB (20805320720 bytes), down from 19.38GiB (20805320720 bytes) originally\r\n",,terminal_output +71,209328,"TERMINAL",0,0,"2025-07-10 10:29:43.152990: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 9.05GiB (9718810541 bytes) by rematerialization; only reduced to 19.37GiB (20801126400 bytes), down from 19.37GiB (20801126400 bytes) originally\r\n2025-07-10 10:29:43.153123: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 9.05GiB (9718810541 bytes) by rematerialization; only reduced to 19.37GiB (20801126400 bytes), down from 19.37GiB (20801126400 bytes) originally\r\n2025-07-10 10:29:43.153254: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 9.05GiB (9718810541 bytes) by rematerialization; only reduced to 19.37GiB (20801126400 bytes), down from 19.37GiB (20801126400 bytes) originally\r\n2025-07-10 10:29:43.153458: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 9.05GiB (9718810541 bytes) by rematerialization; only reduced to 19.37GiB (20801126400 bytes), down from 19.37GiB (20801126400 bytes) originally\r\n2025-07-10 10:29:43.153515: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 9.05GiB (9718810541 bytes) by rematerialization; only reduced to 19.38GiB (20805320720 bytes), down from 19.38GiB (20805320720 bytes) originally\r\n",,terminal_output +72,218885,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"",shellscript,tab +73,219617,"TERMINAL",0,0,"2025-07-10 10:29:53.404991: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_0_bfc) ran out of memory trying to allocate 19.37GiB (rounded to 20801126400)requested by op \r\n2025-07-10 10:29:53.405351: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] 
***************____********************************************************************_\r\n2025-07-10 10:29:53.405426: W external/xla/xla/service/gpu/autotuning/gemm_fusion_autotuner.cc:1320] Autotuning candidate failed with out of memory error. Consider disabling correctness checking (i.e. --xla_gpu_autotune_level=3) to reduce autotuning memory usage.\r\n",,terminal_output +74,219679,"TERMINAL",0,0,"2025-07-10 10:29:53.506494: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_0_bfc) ran out of memory trying to allocate 19.37GiB (rounded to 20801126400)requested by op \r\n2025-07-10 10:29:53.506858: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] ***************____********************************************************************_\r\n2025-07-10 10:29:53.506932: W external/xla/xla/service/gpu/autotuning/gemm_fusion_autotuner.cc:1320] Autotuning candidate failed with out of memory error. Consider disabling correctness checking (i.e. --xla_gpu_autotune_level=3) to reduce autotuning memory usage.\r\n",,terminal_output +75,219750,"TERMINAL",0,0,"initializing jax distributed\r\njax distributed initialized\r\nRunning on 2 devices.\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 173, in <module>\r\n init_params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 75, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/tokenizer.py"", line 57, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 87, in __call__\r\n x = STBlock(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 41, in __call__\r\n z = nn.MultiHeadAttention(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/attention.py"", line 674, in __call__\r\n x = self.attention_fn(*attn_args, **attn_kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/attention.py"", line 266, in dot_product_attention\r\n attn_weights = dot_product_attention_weights(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/attention.py"", line 132, in dot_product_attention_weights\r\n attn_weights = einsum('...qhd,...khd->...hqk', query, key)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/einsum.py"", line 315, in einsum\r\n return jit_einsum(operand_arrays, contractions, precision,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/contextlib.py"", line 79, in inner\r\ninitializing jax distributed\r\njax distributed initialized\r\nRunning on 2 devices.\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 173, in <module>\r\n init_params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 75, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], 
training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/tokenizer.py"", line 57, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 87, in __call__\r\n x = STBlock(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 41, in __call__\r\n z = nn.MultiHeadAttention(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/attention.py"", line 674, in __call__\r\n x = self.attention_fn(*attn_args, **attn_kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/attention.py"", line 266, in dot_product_attention\r\n attn_weights = dot_product_attention_weights(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/attention.py"", line 132, in dot_product_attention_weights\r\n attn_weights = einsum('...qhd,...khd->...hqk', query, key)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/einsum.py"", line 315, in einsum\r\n return func(*args, **kwds)\r\njax._src.source_info_util.JaxStackTraceBeforeTransformation: jaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20801126400 bytes.\r\n\r\nThe preceding stack trace is the source of the JAX operation that, once transformed by JAX, triggered the following exception.\r\n\r\n--------------------\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 173, in <module>\r\n init_params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 75, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/tokenizer.py"", line 57, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 87, in __call__\r\n x = STBlock(\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20801126400 bytes.\r\n return jit_einsum(operand_arrays, contractions, precision,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/contextlib.py"", line 79, in inner\r\n return func(*args, **kwds)\r\njax._src.source_info_util.JaxStackTraceBeforeTransformation: jaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20801126400 bytes.\r\n\r\nThe preceding stack trace is the source of the JAX operation that, once transformed by JAX, triggered the following exception.\r\n\r\n--------------------\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 173, in <module>\r\n init_params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 75, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/tokenizer.py"", line 57, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 87, in __call__\r\n x = STBlock(\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 20801126400 bytes.\r\n",,terminal_output +76,220486,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",848,0,"",shellscript,selection_mouse +77,220488,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",847,0,"",shellscript,selection_command +78,221190,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",828,0,"",shellscript,selection_mouse +79,221396,"TERMINAL",0,0,"srun: error: hkn0806: task 1: Exited with exit code 1\r\n",,terminal_output +80,221504,"TERMINAL",0,0,"srun: error: hkn0803: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0803:~/Projects/jafar[?2004h(jafar) ]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B",,terminal_output +81,235909,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import 
create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom genie import Genie, restore_genie_components\nfrom models.tokenizer import TokenizerVQVAE\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n # Optimization\n batch_size: int = 36\n min_lr: float = 3e-6\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n print(""initializing jax distributed"")\n jax.distributed.initialize()\n print(""jax distributed initialized"")\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // 
num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=jnp.float32\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args,\n )\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Restore checkpoint ---\n train_state = restore_genie_components(\n train_state, replicated_sharding, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in dataloader) # type: ignore\n step = 0\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n 
comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""genie_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +82,236801,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"",shellscript,tab +83,238345,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",829,0,"",shellscript,selection_command +84,238770,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",828,1,"",shellscript,content +85,238869,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,1,"",shellscript,content +86,242968,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,0,"4",shellscript,content +87,242970,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",828,0,"",shellscript,selection_keyboard +88,243026,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",828,0,"8",shellscript,content +89,243026,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",829,0,"",shellscript,selection_keyboard +90,247262,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",828,1,"",shellscript,content +91,247358,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,1,"",shellscript,content +92,254339,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,0,"8",shellscript,content +93,254339,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",828,0,"",shellscript,selection_keyboard +94,254412,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",828,0,"2",shellscript,content +95,254413,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",829,0,"",shellscript,selection_keyboard +96,256756,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/yolo-runs/tester.sh",,terminal_output +97,257382,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\r\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\r\n\r\njob_name=""debug""\r\nslurm_job_id=""debug-mihir""\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_lam_action_space_scaling_20/3318547/lam_1751657975_200000/\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/train_tokenizer_batch_size_scaling_16_node/3321526/tokenizer_22000/\r\n\r\nenv | grep SLURM\r\n\r\nsrun python train_dynamics.py \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=82 \\r\n --min_lr=0 \\r\n --max_lr=1.4e-4 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --num_latent_actions=20 \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-yolorun-tf-records-$slurm_job_id \\r\n --tags dynamics yolo-run tf_records \\r\n --entity instant-uv \\r\n --project jafar \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $tf_records_dir \\r\n --lam_checkpoint=$lam_ckpt_dir\r\n ",,terminal_output 
+98,257501,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=2\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_STEP_GPUS=0,1,2,3\r\nSLURM_CPU_BIND=quiet,mask_cpu:0x00000000FC00000003F00000000FC00000003F\r\nSLURM_TASK_PID=3748328\r\nSLURM_LOCALID=0\r\nSLURM_CPU_BIND_VERBOSE=quiet\r\nSLURMD_NODENAME=hkn0803\r\nSLURM_JOB_START_TIME=1752092725\r\nSLURM_STEP_NODELIST=hkn[0803,0806]\r\nSLURM_JOB_END_TIME=1752179125\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_UMASK=0022\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_STEPID=0\r\nSLURM_CPU_BIND_LIST=0x00000000FC00000003F00000000FC00000003F\r\nSLURM_JOBID=3333586\r\nSLURM_PTY_PORT=35039\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=53\r\nSLURM_CPU_BIND_TYPE=mask_cpu:\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_NTASKS=2\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e27.hkn0803\r\nSLURM_DISTRIBUTION=cyclic\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=228\r\nSLURM_NODELIST=hkn[0803,0806]\r\nSLURM_SRUN_COMM_PORT=40795\r\nSLURM_STEP_ID=0\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=2\r\nSLURM_NNODES=2\r\nSLURM_JOB_ID=3333586\r\nSLURM_NODEID=0\r\nSLURMD_TRES_FREQ=gpu:high,memory=high\r\nSLURM_STEP_NUM_NODES=2\r\nSLURM_STEP_TASKS_PER_NODE=1(x2)\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=bash\r\nSLURM_STEP_LAUNCHER_PORT=40795\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0803,0806]\r\n",,terminal_output +99,257626,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +100,259888,"TERMINAL",0,0,"2025-07-10 10:30:33.649161: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:30:33.661614: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136233.661910 3749808 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1752136233.666042 3749808 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136233.675058 2682434 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nW0000 00:00:1752136233.679005 3749808 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136233.679020 3749808 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136233.679022 3749808 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136233.679023 3749808 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\nE0000 00:00:1752136233.679812 2682434 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1752136233.693282 2682434 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136233.693301 2682434 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136233.693302 2682434 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136233.693304 2682434 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +101,262753,"TERMINAL",0,0,"W0000 00:00:1752136236.580974 2682434 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +102,263181,"TERMINAL",0,0,"W0000 00:00:1752136237.010058 3749808 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +103,269944,"TERMINAL",0,0,"2025-07-10 10:30:43.725566: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 12.11GiB (13001582509 bytes) by rematerialization; only reduced to 16.55GiB (17767628800 bytes), down from 16.55GiB (17767628800 bytes) originally\r\n2025-07-10 10:30:43.725613: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 12.11GiB (13001582509 bytes) by rematerialization; only reduced to 16.55GiB (17767628800 bytes), down from 16.55GiB (17767628800 bytes) originally\r\n2025-07-10 10:30:43.725664: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 12.11GiB (13001582509 bytes) by rematerialization; only reduced to 16.55GiB (17767628800 bytes), down from 16.55GiB (17767628800 bytes) originally\r\n2025-07-10 10:30:43.726735: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 12.11GiB (13001582509 bytes) by rematerialization; only reduced to 16.55GiB (17767628800 bytes), down from 16.55GiB (17767628800 bytes) originally\r\n2025-07-10 10:30:43.727026: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 12.11GiB (13001582509 bytes) by rematerialization; only reduced to 16.55GiB (17771823120 bytes), down from 16.55GiB (17771823120 bytes) originally\r\n",,terminal_output +104,270077,"TERMINAL",0,0,"2025-07-10 10:30:43.852219: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 12.11GiB (13001582509 bytes) by rematerialization; only reduced to 16.55GiB (17767628800 
bytes), down from 16.55GiB (17767628800 bytes) originally\r\n2025-07-10 10:30:43.852392: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 12.11GiB (13001582509 bytes) by rematerialization; only reduced to 16.55GiB (17767628800 bytes), down from 16.55GiB (17767628800 bytes) originally\r\n2025-07-10 10:30:43.852555: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 12.11GiB (13001582509 bytes) by rematerialization; only reduced to 16.55GiB (17767628800 bytes), down from 16.55GiB (17767628800 bytes) originally\r\n2025-07-10 10:30:43.852606: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 12.11GiB (13001582509 bytes) by rematerialization; only reduced to 16.55GiB (17767628800 bytes), down from 16.55GiB (17767628800 bytes) originally\r\n2025-07-10 10:30:43.852678: W external/xla/xla/hlo/transforms/simplifiers/hlo_rematerialization.cc:3022] Can't reduce memory use below 12.11GiB (13001582509 bytes) by rematerialization; only reduced to 16.55GiB (17771823120 bytes), down from 16.55GiB (17771823120 bytes) originally\r\n",,terminal_output +105,270485,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab +106,270487,"extension-output-pdoom-org.crowd-code-#1-crowd-code",304,0,"",Log,selection_mouse +107,271419,"TERMINAL",0,0,"",,terminal_focus +108,275954,"TERMINAL",0,0,"srun",,terminal_focus +109,280242,"TERMINAL",0,0,"2025-07-10 10:30:54.069575: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_0_bfc) ran out of memory trying to allocate 16.55GiB (rounded to 17767628800)requested by op \r\n2025-07-10 10:30:54.069941: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] **************___********************************************************************_______________\r\n2025-07-10 10:30:54.069990: W external/xla/xla/service/gpu/autotuning/gemm_fusion_autotuner.cc:1320] Autotuning candidate failed with out of memory error. Consider disabling correctness checking (i.e. 
--xla_gpu_autotune_level=3) to reduce autotuning memory usage.\r\n",,terminal_output +110,280301,"TERMINAL",0,0,"initializing jax distributed\r\njax distributed initialized\r\nRunning on 2 devices.\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 173, in \r\n init_params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 75, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/tokenizer.py"", line 57, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 87, in __call__\r\n x = STBlock(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 41, in __call__\r\n z = nn.MultiHeadAttention(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/attention.py"", line 674, in __call__\r\n x = self.attention_fn(*attn_args, **attn_kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/attention.py"", line 266, in dot_product_attention\r\n attn_weights = dot_product_attention_weights(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/attention.py"", line 132, in dot_product_attention_weights\r\n attn_weights = einsum('...qhd,...khd->...hqk', query, key)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/einsum.py"", line 315, in einsum\r\n return jit_einsum(operand_arrays, contractions, precision,\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/contextlib.py"", line 79, in inner\r\n return func(*args, **kwds)\r\njax._src.source_info_util.JaxStackTraceBeforeTransformation: jaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 17767628800 bytes.\r\n\r\nThe preceding stack trace is the source of the JAX operation that, once transformed by JAX, triggered the following exception.\r\n\r\n--------------------\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 173, in \r\n init_params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 75, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/tokenizer.py"", line 57, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 87, in __call__\r\n x = STBlock(\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 17767628800 bytes.\r\n",,terminal_output +111,280423,"TERMINAL",0,0,"2025-07-10 10:30:54.198476: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_0_bfc) ran out of memory trying to allocate 16.55GiB (rounded to 17767628800)requested by op \r\n2025-07-10 10:30:54.198826: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] **************___********************************************************************_______________\r\n2025-07-10 10:30:54.198870: W external/xla/xla/service/gpu/autotuning/gemm_fusion_autotuner.cc:1320] Autotuning candidate failed with out of memory error. Consider disabling correctness checking (i.e. --xla_gpu_autotune_level=3) to reduce autotuning memory usage.\r\ninitializing jax distributed\r\njax distributed initialized\r\nRunning on 2 devices.\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 173, in \r\n init_params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 75, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/tokenizer.py"", line 57, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 87, in __call__\r\n x = STBlock(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 41, in __call__\r\n z = nn.MultiHeadAttention(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/attention.py"", line 674, in __call__\r\n x = self.attention_fn(*attn_args, **attn_kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/attention.py"", line 266, in dot_product_attention\r\n attn_weights = dot_product_attention_weights(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/attention.py"", line 132, in dot_product_attention_weights\r\n attn_weights = einsum('...qhd,...khd->...hqk', query, key)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/einsum.py"", line 315, in einsum\r\n return jit_einsum(operand_arrays, contractions, precision,\r\n File 
""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/contextlib.py"", line 79, in inner\r\n return func(*args, **kwds)\r\njax._src.source_info_util.JaxStackTraceBeforeTransformation: jaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 17767628800 bytes.\r\n\r\nThe preceding stack trace is the source of the JAX operation that, once transformed by JAX, triggered the following exception.\r\n\r\n--------------------\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 173, in \r\n init_params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 75, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/tokenizer.py"", line 57, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 87, in __call__\r\n x = STBlock(\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 17767628800 bytes.\r\n",,terminal_output +112,281998,"TERMINAL",0,0,"srun: error: hkn0806: task 1: Exited with exit code 1\r\nsrun: error: hkn0803: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0803:~/Projects/jafar[?2004h(jafar) ]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B",,terminal_output +113,285226,"TERMINAL",0,0,"s",,terminal_output +114,285298,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +115,285403,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +116,285641,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +117,287175,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: nvidia-smihkn0803.localdomain: Thu Jul 10 10:30:59 2025Thu Jul 10 10:30:59 2025\r+-----------------------------------------------------------------------------------------+\r| NVIDIA-SMI 570.133.20Driver Version: 570.133.20 CUDA Version: 12.8 |\r|-----------------------------------------+------------------------+----------------------+\r| GPU NamePersistence-M | Bus-IdDisp.A | Volatile Uncorr. ECC |\r| Fan Temp PerfPwr:Usage/Cap |Memory-Usage | GPU-Util Compute M. |\r|||MIG M. 
|\r|=========================================+========================+======================|\r| 0 NVIDIA A100-SXM4-40GBOn | 00000000:31:00.0 Off |0 |\r| N/A 42C P052W / 300W |\t 27MiB / 40960MiB |\t 0%\t Default |\r|||Disabled |\r+-----------------------------------------+------------------------+----------------------+\r| 1 NVIDIA A100-SXM4-40GBOn | 00000000:4B:00.0 Off |0 |\r| N/A 41C P051W / 300W |\t 27MiB / 40960MiB |\t 0%\t Default |\r|||Disabled |\r+-----------------------------------------+------------------------+----------------------+\r| 2 NVIDIA A100-SXM4-40GBOn | 00000000:CA:00.0 Off |0 |\r| N/A 42C P060W / 300W |\t 27MiB / 40960MiB |\t 0%\t Default |\r|||Disabled |\r+-----------------------------------------+------------------------+----------------------+\r| 3 NVIDIA A100-SXM4-40GBOn | 00000000:E3:00.0 Off |0 |\r| N/A 43C P061W / 300W |\t 27MiB / 40960MiB |\t 0%\t Default |\r|||Disabled |\r+-----------------------------------------+------------------------+----------------------+\r+-----------------------------------------------------------------------------------------+\r| Processes:|\r| GPU GI CIPID Type Process nameGPU Memory |\r|ID IDUsage\t |\r|=========================================================================================|\r| 0 N/A N/A2880G /usr/libexec/Xorg17MiB |\r| 1 N/A N/A2880G /usr/libexec/Xorg17MiB |\r| 2 N/A N/A2880G /usr/libexec/Xorg17MiB |\r| 3 N/A N/A2880G /usr/libexec/Xorg17MiB |\r+-----------------------------------------------------------------------------------------+",,terminal_output +118,288805,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0803:~/Projects/jafar[?2004h(jafar) ]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B",,terminal_output +119,319674,"TERMINAL",0,0,"[?2004l\r\r\nexit\r\n",,terminal_output +120,319759,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +121,323264,"TERMINAL",0,0,"queue",,terminal_command +122,323318,"TERMINAL",0,0,"]633;E;2025-07-10 10:31:37 queue;16c08be6-3885-420f-ac53-f5272aed6e54]633;C",,terminal_output +123,323393,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Thu Jul 10 10:31:37 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3333586 accelerat interact tum_cte0 R 12:06:12\t 2 hkn[0803,0806]3333584 accelerat interact tum_cte0 R 12:11:31\t 1 hkn05233331283 accelerat train_la tum_cte0 R 1-09:53:24\t 2 hkn[0533,0536]3331284 accelerat train_la tum_cte0 R 1-09:53:24\t 2 hkn[0601-0602]3331285 accelerat train_la tum_cte0 R 1-09:53:24\t 2 hkn[0701,0710]3331286 accelerat train_la tum_cte0 R 1-09:53:24\t 2 hkn[0425,0428]3331287 accelerat train_la tum_cte0 R 1-09:53:24\t 2 hkn[0430-0431]3331288 accelerat train_la tum_cte0 R 1-09:53:24\t 2 hkn[0809,0811]3331282 accelerat train_to tum_cte0 R 1-09:54:05\t 2 hkn[0513,0516]",,terminal_output +124,324423,"TERMINAL",0,0,"8325555556",,terminal_output +125,325500,"TERMINAL",0,0,"9436666667",,terminal_output +126,326503,"TERMINAL",0,0,"40547777778",,terminal_output +127,327117,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +128,332148,"TERMINAL",0,0,"srun --overlap --jobid=3333586 --pty /bin/bash",,terminal_command +129,332195,"TERMINAL",0,0,"]633;E;2025-07-10 10:31:45 srun --overlap --jobid=3333586 --pty /bin/bash;16c08be6-3885-420f-ac53-f5272aed6e54]633;C",,terminal_output +130,332323,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +131,332894,"TERMINAL",0,0,"]0;tum_cte0515@hkn0803:~/Projects/jafar[?2004h]633;Ajafar[tum_cte0515@hkn0803 
jafar]$ ]633;B",,terminal_output +132,338672,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"",shellscript,tab +133,339837,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",733,0,"",shellscript,selection_mouse +134,339963,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",733,4,"grep",shellscript,selection_mouse +135,340078,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",727,17,"env | grep SLURM\n",shellscript,selection_mouse +136,343276,"TERMINAL",0,0,"env | grep SLURM\r\n\r",,terminal_output +137,343720,"TERMINAL",0,0,"env | grep SLURM\r\n\r\r\n[?2004l\rSLURM_STEP_NUM_TASKS=2\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_STEP_GPUS=0,1,2,3\r\nSLURM_CPU_BIND=quiet,mask_cpu:0x00000000FC00000003F00000000FC00000003F\r\nSLURM_TASK_PID=3750439\r\nSLURM_LOCALID=0\r\nSLURM_CPU_BIND_VERBOSE=quiet\r\nSLURMD_NODENAME=hkn0803\r\nSLURM_JOB_START_TIME=1752092725\r\nSLURM_STEP_NODELIST=hkn[0803,0806]\r\nSLURM_JOB_END_TIME=1752179125\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_UMASK=0022\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_STEPID=4\r\nSLURM_CPU_BIND_LIST=0x00000000FC00000003F00000000FC00000003F\r\nSLURM_JOBID=3333586\r\nSLURM_PTY_PORT=42135\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=53\r\nSLURM_CPU_BIND_TYPE=mask_cpu:\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_NTASKS=2\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e27.hkn0803\r\nSLURM_DISTRIBUTION=cyclic\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=228\r\nSLURM_NODELIST=hkn[0803,0806]\r\nSLURM_SRUN_COMM_PORT=45957\r\nSLURM_STEP_ID=4\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=2\r\nSLURM_NNODES=2\r\nSLURM_JOB_ID=3333586\r\nSLURM_NODEID=0\r\nSLURMD_TRES_FREQ=gpu:high,memory=high\r\nSLURM_STEP_NUM_NODES=2\r\nSLURM_STEP_TASKS_PER_NODE=1(x2)\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=bash\r\nSLURM_STEP_LAUNCHER_PORT=45957\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0803,0806]\r\n]0;tum_cte0515@hkn0803:~/Projects/jafar[?2004h]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B",,terminal_output +138,363151,"TERMINAL",0,0,"bash",,terminal_focus +139,364887,"TERMINAL",0,0,"tmux -a",,terminal_command +140,364916,"TERMINAL",0,0,"]633;E;2025-07-10 10:32:18 tmux -a;46e99295-9803-4583-b36f-e400fb9e619d]633;Ctmux: unknown option -- a\r\nusage: tmux [-2CDlNuvV] [-c shell-command] [-f file] [-L socket-name]\r\n [-S socket-path] [-T features] [command [flags]]\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;1",,terminal_output +141,374853,"TERMINAL",0,0,"tmux a",,terminal_command +142,374887,"TERMINAL",0,0,"]633;E;2025-07-10 10:32:28 tmux a;46e99295-9803-4583-b36f-e400fb9e619d]633;C[?1049h[?1h=[?12l[?25h[?1000l[?1002l[?1003l[?1006l[?1005l(B[?12l[?25h[?1006l[?1000l[?1002l[?1003l[?2004l[>c[>q[?2004h[?25lshard-00045-of-00050.tfrecord\r\nshard-00046-of-00050.tfrecord\r\nshard-00047-of-00050.tfrecord\r\nshard-00048-of-00050.tfrecord\r\nshard-00049-of-00050.tfrecord\r\n\r\nsent 3,832,822,618 bytes received 909 bytes 15,300,692.72 bytes/sec\r\ntotal size is 75,584,447,445 speedup is 19.72\r\n[tum_cte0515@hkn1990 jafar]$ \r\n[0] 0:bash* ""hkn1990.localdomain"" 10:32 10-Jul-25(B[?12l[?25h\r[tum_cte0515@hkn1990 jafar]$ 
(B[?12l[?25h[?1006l[?1000l[?1002l[?1003l[?2004l[?2004h[?25lshard-00045-of-00050.tfrecord\r\nshard-00046-of-00050.tfrecord\r\nshard-00047-of-00050.tfrecord\r\nshard-00048-of-00050.tfrecord\r\nshard-00049-of-00050.tfrecord\r\n\r\nsent 3,832,822,618 bytes received 909 bytes 15,300,692.72 bytes/sec\r\ntotal size is 75,584,447,445 speedup is 19.72\r\n[tum_cte0515@hkn1990 jafar]$ \r\n[0] 0:bash* ""hkn1990.localdomain"" 10:32 10-Jul-25(B[?12l[?25h",,terminal_output +143,374948,"TERMINAL",0,0,"[?7727h",,terminal_output +144,379138,"TERMINAL",0,0,"\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B",,terminal_output +145,379148,"TERMINAL",0,0,"(B[?12l[?25h[?1006l[?1000l[?1002l[?1003l[?2004l(B[?12l[?25h[?1006l[?1000l[?1002l[?1003l[?2004l[?2004h[?25lshard-00040-of-00050.tfrecord\r\nshard-00041-of-00050.tfrecord\r\nshard-00042-of-00050.tfrecord\r\nshard-00043-of-00050.tfrecord\r\nshard-00044-of-00050.tfrecord\r\nshard-00045-of-00050.tfrecord\r\nshard-00046-of-00050.tfrecord\r\nshard-00047-of-00050.tfrecord\r\nshard-00048-of-00050.tfrecord\r\nshard-00049-of-00050.tfrecord\r\n\r\nsent 3,832,822,618 bytes received 909 bytes 15,300,692.72 bytes/sec\r\ntotal size is 75,584,447,445 speedup is 19.72\r\n[tum_cte0515@hkn1990 jafar]$ \r\n[0] 0:bash* ""hkn1990.localdomain"" 10:32 10-Jul-25(B[?12l[?25h\r[tum_cte0515@hkn1990 jafar]$ (B[?12l[?25h[?1006l[?1000l[?1002l[?1003l[?2004l(B[?12l[?25h[?1006l[?1000l[?1002l[?1003l[?2004l[?25lshard-00033-of-00050.tfrecord\r\nshard-00034-of-00050.tfrecord\r\nshard-00035-of-00050.tfrecord\r\nshard-00036-of-00050.tfrecord\r\nshard-00037-of-00050.tfrecord\r\nshard-00038-of-00050.tfrecord\r\nshard-00039-of-00050.tfrecord\r\nshard-00040-of-00050.tfrecord\r\nshard-00041-of-00050.tfrecord\r\nshard-00042-of-00050.tfrecord\r\nshard-00043-of-00050.tfrecord\r\nshard-00044-of-00050.tfrecord\r\nshard-00045-of-00050.tfrecord\r\nshard-00046-of-00050.tfrecord\r\nshard-00047-of-00050.tfrecord\r\nshard-00048-of-00050.tfrecord\r\nshard-00049-of-00050.tfrecord\r\n\r\nsent 3,832,822,618 bytes received 909 bytes 15,300,692.72 bytes/sec\r\ntotal size is 75,584,447,445 speedup is 19.72\r\n[tum_cte0515@hkn1990 jafar]$ \r\n[0] 0:bash* ""hkn1990.localdomain"" 10:32 10-Jul-25(B[?12l[?25h[?2004h(B[?12l[?25h[?1006l[?1000l[?1002l[?1003l[?2004l[?2004h[?25lshard-00023-of-00050.tfrecord\r\nshard-00024-of-00050.tfrecord\r\nshard-00025-of-00050.tfrecord\r\nshard-00026-of-00050.tfrecord\r\nshard-00027-of-00050.tfrecord\r\nshard-00028-of-00050.tfrecord\r\nshard-00029-of-00050.tfrecord\r\nshard-00030-of-00050.tfrecord\r\nshard-00031-of-00050.tfrecord\r\nshard-00032-of-00050.tfrecord\r\nshard-00033-of-00050.tfrecord\r\nshard-00034-of-00050.tfrecord\r\nshard-00035-of-00050.tfrecord\r\nshard-00036-of-00050.tfrecord\r\nshard-00037-of-00050.tfrecord\r\nshard-00038-of-00050.tfrecord\r\nshard-00039-of-00050.tfrecord\r\nshard-00040-of-00050.tfrecord\r\nshard-00041-of-00050.tfrecord\r\nshard-00042-of-00050.tfrecord\r\nshard-00043-of-00050.tfrecord\r\nshard-00044-of-00050.tfrecord\r\nshard-00045-of-00050.tfrecord\r\nshard-00046-of-00050.tfrecord\r\nshard-00047-of-00050.tfrecord\r\nshard-00048-of-00050.tfrecord\r\nshard-00049-of-00050.tfrecord\r\n\r\nsent 3,832,822,618 bytes received 909 bytes 15,300,692.72 bytes/sec\r\ntotal size is 
75,584,447,445 speedup is 19.72\r\n[tum_cte0515@hkn1990 jafar]$ \r\n[0] 0:bash* ""hkn1990.localdomain"" 10:32 10-Jul-25(B[?12l[?25h(B[?12l[?25h[?1006l[?1000l[?1002l[?1003l[?2004l[?2004h[?25lshard-00023-of-00050.tfrecord\r\nshard-00024-of-00050.tfrecord\r\nshard-00025-of-00050.tfrecord\r\nshard-00026-of-00050.tfrecord\r\nshard-00027-of-00050.tfrecord\r\nshard-00028-of-00050.tfrecord\r\nshard-00029-of-00050.tfrecord\r\nshard-00030-of-00050.tfrecord\r\nshard-00031-of-00050.tfrecord\r\nshard-00032-of-00050.tfrecord\r\nshard-00033-of-00050.tfrecord\r\nshard-00034-of-00050.tfrecord\r\nshard-00035-of-00050.tfrecord\r\nshard-00036-of-00050.tfrecord\r\nshard-00037-of-00050.tfrecord\r\nshard-00038-of-00050.tfrecord\r\nshard-00039-of-00050.tfrecord\r\nshard-00040-of-00050.tfrecord\r\nshard-00041-of-00050.tfrecord\r\nshard-00042-of-00050.tfrecord\r\nshard-00043-of-00050.tfrecord\r\nshard-00044-of-00050.tfrecord\r\nshard-00045-of-00050.tfrecord\r\nshard-00046-of-00050.tfrecord\r\nshard-00047-of-00050.tfrecord\r\nshard-00048-of-00050.tfrecord\r\nshard-00049-of-00050.tfrecord\r\n\r\nsent 3,832,822,618 bytes received 909 bytes 15,300,692.72 bytes/sec\r\ntotal size is 75,584,447,445 speedup is 19.72\r\n[tum_cte0515@hkn1990 jafar]$ \r\n[0] 0:bash* ""hkn1990.localdomain"" 10:32 10-Jul-25(B[?12l[?25h(B[?12l[?25h[?1006l[?1000l[?1002l[?1003l[?2004l[?25lshard-00018-of-00050.tfrecord\r\nshard-00019-of-00050.tfrecord\r\nshard-00020-of-00050.tfrecord\r\nshard-00021-of-00050.tfrecord\r\nshard-00022-of-00050.tfrecord\r\nshard-00023-of-00050.tfrecord\r\nshard-00024-of-00050.tfrecord\r\nshard-00025-of-00050.tfrecord\r\nshard-00026-of-00050.tfrecord\r\nshard-00027-of-00050.tfrecord\r\nshard-00028-of-00050.tfrecord\r\nshard-00029-of-00050.tfrecord\r\nshard-00030-of-00050.tfrecord\r\nshard-00031-of-00050.tfrecord\r\nshard-00032-of-00050.tfrecord\r\nshard-00033-of-00050.tfrecord\r\nshard-00034-of-00050.tfrecord\r\nshard-00035-of-00050.tfrecord\r\nshard-00036-of-00050.tfrecord\r\nshard-00037-of-00050.tfrecord\r\nshard-00038-of-00050.tfrecord\r\nshard-00039-of-00050.tfrecord\r\nshard-00040-of-00050.tfrecord\r\nshard-00041-of-00050.tfrecord\r\nshard-00042-of-00050.tfrecord\r\nshard-00043-of-00050.tfrecord\r\nshard-00044-of-00050.tfrecord\r\nshard-00045-of-00050.tfrecord\r\nshard-00046-of-00050.tfrecord\r\nshard-00047-of-00050.tfrecord\r\nshard-00048-of-00050.tfrecord\r\nshard-00049-of-00050.tfrecord\r\n\r\nsent 3,832,822,618 bytes received 909 bytes 15,300,692.72 bytes/sec\r\ntotal size is 75,584,447,445 speedup is 19.72\r\n[tum_cte0515@hkn1990 jafar]$ \r\n[0] 0:bash* ""hkn1990.localdomain"" 10:32 
10-Jul-25(B[?12l[?25h[?2004h(B[?12l[?25h[?1006l[?1000l[?1002l[?1003l[?2004l[?2004h[?25lshard-00017-of-00050.tfrecord\r\nshard-00018-of-00050.tfrecord\r\nshard-00019-of-00050.tfrecord\r\nshard-00020-of-00050.tfrecord\r\nshard-00021-of-00050.tfrecord\r\nshard-00022-of-00050.tfrecord\r\nshard-00023-of-00050.tfrecord\r\nshard-00024-of-00050.tfrecord\r\nshard-00025-of-00050.tfrecord\r\nshard-00026-of-00050.tfrecord\r\nshard-00027-of-00050.tfrecord\r\nshard-00028-of-00050.tfrecord\r\nshard-00029-of-00050.tfrecord\r\nshard-00030-of-00050.tfrecord\r\nshard-00031-of-00050.tfrecord\r\nshard-00032-of-00050.tfrecord\r\nshard-00033-of-00050.tfrecord\r\nshard-00034-of-00050.tfrecord\r\nshard-00035-of-00050.tfrecord\r\nshard-00036-of-00050.tfrecord\r\nshard-00037-of-00050.tfrecord\r\nshard-00038-of-00050.tfrecord\r\nshard-00039-of-00050.tfrecord\r\nshard-00040-of-00050.tfrecord\r\nshard-00041-of-00050.tfrecord\r\nshard-00042-of-00050.tfrecord\r\nshard-00043-of-00050.tfrecord\r\nshard-00044-of-00050.tfrecord\r\nshard-00045-of-00050.tfrecord\r\nshard-00046-of-00050.tfrecord\r\nshard-00047-of-00050.tfrecord\r\nshard-00048-of-00050.tfrecord\r\nshard-00049-of-00050.tfrecord\r\n\r\nsent 3,832,822,618 bytes received 909 bytes 15,300,692.72 bytes/sec\r\ntotal size is 75,584,447,445 speedup is 19.72\r\n[tum_cte0515@hkn1990 jafar]$ \r\n[0] 0:bash* ""hkn1990.localdomain"" 10:32 10-Jul-25(B[?12l[?25h",,terminal_output +146,379380,"TERMINAL",0,0,"(B[?12l[?25h[?1006l[?1000l[?1002l[?1003l[?2004l[?2004h[?25lshard-00016-of-00050.tfrecord\r\nshard-00017-of-00050.tfrecord\r\nshard-00018-of-00050.tfrecord\r\nshard-00019-of-00050.tfrecord\r\nshard-00020-of-00050.tfrecord\r\nshard-00021-of-00050.tfrecord\r\nshard-00022-of-00050.tfrecord\r\nshard-00023-of-00050.tfrecord\r\nshard-00024-of-00050.tfrecord\r\nshard-00025-of-00050.tfrecord\r\nshard-00026-of-00050.tfrecord\r\nshard-00027-of-00050.tfrecord\r\nshard-00028-of-00050.tfrecord\r\nshard-00029-of-00050.tfrecord\r\nshard-00030-of-00050.tfrecord\r\nshard-00031-of-00050.tfrecord\r\nshard-00032-of-00050.tfrecord\r\nshard-00033-of-00050.tfrecord\r\nshard-00034-of-00050.tfrecord\r\nshard-00035-of-00050.tfrecord\r\nshard-00036-of-00050.tfrecord\r\nshard-00037-of-00050.tfrecord\r\nshard-00038-of-00050.tfrecord\r\nshard-00039-of-00050.tfrecord\r\nshard-00040-of-00050.tfrecord\r\nshard-00041-of-00050.tfrecord\r\nshard-00042-of-00050.tfrecord\r\nshard-00043-of-00050.tfrecord\r\nshard-00044-of-00050.tfrecord\r\nshard-00045-of-00050.tfrecord\r\nshard-00046-of-00050.tfrecord\r\nshard-00047-of-00050.tfrecord\r\nshard-00048-of-00050.tfrecord\r\nshard-00049-of-00050.tfrecord\r\n\r\nsent 3,832,822,618 bytes received 909 bytes 15,300,692.72 bytes/sec\r\ntotal size is 75,584,447,445 speedup is 19.72\r\n[tum_cte0515@hkn1990 jafar]$ \r\n[0] 0:bash* ""hkn1990.localdomain"" 10:32 10-Jul-25(B[?12l[?25h\r[tum_cte0515@hkn1990 jafar]$ 
(B[?12l[?25h[?1006l[?1000l[?1002l[?1003l[?2004l(B[?12l[?25h[?1006l[?1000l[?1002l[?1003l[?2004l[?2004h[?25lshard-00013-of-00050.tfrecord\r\nshard-00014-of-00050.tfrecord\r\nshard-00015-of-00050.tfrecord\r\nshard-00016-of-00050.tfrecord\r\nshard-00017-of-00050.tfrecord\r\nshard-00018-of-00050.tfrecord\r\nshard-00019-of-00050.tfrecord\r\nshard-00020-of-00050.tfrecord\r\nshard-00021-of-00050.tfrecord\r\nshard-00022-of-00050.tfrecord\r\nshard-00023-of-00050.tfrecord\r\nshard-00024-of-00050.tfrecord\r\nshard-00025-of-00050.tfrecord\r\nshard-00026-of-00050.tfrecord\r\nshard-00027-of-00050.tfrecord\r\nshard-00028-of-00050.tfrecord\r\nshard-00029-of-00050.tfrecord\r\nshard-00030-of-00050.tfrecord\r\nshard-00031-of-00050.tfrecord\r\nshard-00032-of-00050.tfrecord\r\nshard-00033-of-00050.tfrecord\r\nshard-00034-of-00050.tfrecord\r\nshard-00035-of-00050.tfrecord\r\nshard-00036-of-00050.tfrecord\r\nshard-00037-of-00050.tfrecord\r\nshard-00038-of-00050.tfrecord\r\nshard-00039-of-00050.tfrecord\r\nshard-00040-of-00050.tfrecord\r\nshard-00041-of-00050.tfrecord\r\nshard-00042-of-00050.tfrecord\r\nshard-00043-of-00050.tfrecord\r\nshard-00044-of-00050.tfrecord\r\nshard-00045-of-00050.tfrecord\r\nshard-00046-of-00050.tfrecord\r\nshard-00047-of-00050.tfrecord\r\nshard-00048-of-00050.tfrecord\r\nshard-00049-of-00050.tfrecord\r\n\r\nsent 3,832,822,618 bytes received 909 bytes 15,300,692.72 bytes/sec\r\ntotal size is 75,584,447,445 speedup is 19.72\r\n[tum_cte0515@hkn1990 jafar]$ \r\n[0] 0:bash* ""hkn1990.localdomain"" 10:32 10-Jul-25(B[?12l[?25h",,terminal_output +147,379381,"TERMINAL",0,0,"\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B",,terminal_output +148,379680,"TERMINAL",0,0,"\r[tum_cte0515@hkn1990 jafar]$ ",,terminal_output +149,387478,"TERMINAL",0,0,"q",,terminal_output +150,387541,"TERMINAL",0,0,"u",,terminal_output +151,387701,"TERMINAL",0,0,"eu",,terminal_output +152,387888,"TERMINAL",0,0,"e",,terminal_output +153,387952,"TERMINAL",0,0,"\n[?2004l",,terminal_output +154,388015,"TERMINAL",0,0,"[?25lEvery 1.0s: squeue --me\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n[?12l[?25hhkn1990.localdomain: Thu Jul 10 10:32:41 2025JOBID PARTITION NAME USER STTIME NODES NODELIST(REASON)3333586 accelerat interact tum_cte0 R 12:07:16 2 hkn[0803,0806]3333584 accelerat interact tum_cte0 R 12:12:35 1 hkn05233331283 accelerat train_la tum_cte0 R 1-09:54:28 2 hkn[0533,0536]3331284 accelerat train_la tum_cte0 R 1-09:54:28 2 hkn[0601-0602]3331285 accelerat train_la tum_cte0 R 1-09:54:28 2 hkn[0701,0710]3331286 accelerat train_la tum_cte0 R 1-09:54:28 2 hkn[0425,0428]3331287 accelerat train_la tum_cte0 R 1-09:54:28 2 hkn[0430-0431]3331288 accelerat train_la tum_cte0 R 1-09:54:28 2 hkn[0809,0811]3331282 accelerat train_to tum_cte0 R 1-09:55:09 2 hkn[0513,0516]",,terminal_output +155,389060,"TERMINAL",0,0,"27699999910[?25l\r\n[0] 0:watch* ""hkn1990.localdomain"" 10:32 10-Jul-25(B[?12l[?25h",,terminal_output 
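The `queue` command recorded above is not a standard Slurm or coreutils binary; the header it keeps redrawing ("Every 1.0s: squeue --me") suggests a local wrapper around watch. A minimal sketch of such a helper, assuming a plain alias (the definition itself never appears in the recording):
    # Hypothetical reconstruction of the `queue` helper; only the
    # "Every 1.0s: squeue --me" watch header is actually recorded.
    alias queue='watch -n 1 squeue --me'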
+156,389598,"TERMINAL",0,0,"[?25lshard-00014-of-00050.tfrecord\r\nshard-00015-of-00050.tfrecord\r\nshard-00016-of-00050.tfrecord\r\nshard-00017-of-00050.tfrecord\r\nshard-00018-of-00050.tfrecord\r\nshard-00019-of-00050.tfrecord\r\nshard-00020-of-00050.tfrecord\r\nshard-00021-of-00050.tfrecord\r\nshard-00022-of-00050.tfrecord\r\nshard-00023-of-00050.tfrecord\r\nshard-00024-of-00050.tfrecord\r\nshard-00025-of-00050.tfrecord\r\nshard-00026-of-00050.tfrecord\r\nshard-00027-of-00050.tfrecord\r\nshard-00028-of-00050.tfrecord\r\nshard-00029-of-00050.tfrecord\r\nshard-00030-of-00050.tfrecord\r\nshard-00031-of-00050.tfrecord\r\nshard-00032-of-00050.tfrecord\r\nshard-00033-of-00050.tfrecord\r\nshard-00034-of-00050.tfrecord\r\nshard-00035-of-00050.tfrecord\r\nshard-00036-of-00050.tfrecord\r\nshard-00037-of-00050.tfrecord\r\nshard-00038-of-00050.tfrecord\r\nshard-00039-of-00050.tfrecord\r\nshard-00040-of-00050.tfrecord\r\nshard-00041-of-00050.tfrecord\r\nshard-00042-of-00050.tfrecord\r\nshard-00043-of-00050.tfrecord\r\nshard-00044-of-00050.tfrecord\r\nshard-00045-of-00050.tfrecord\r\nshard-00046-of-00050.tfrecord\r\nshard-00047-of-00050.tfrecord\r\nshard-00048-of-00050.tfrecord\r\nshard-00049-of-00050.tfrecord\r\n\r\nsent 3,832,822,618 bytes received 909 bytes 15,300,692.72 bytes/sec\r\ntotal size is 75,584,447,445 speedup is 19.72\r\n[tum_cte0515@hkn1990 jafar]$ queue\r\n[?12l[?25h[?25l\r\n[0] 0:bash* ""hkn1990.localdomain"" 10:32 10-Jul-25(B[?12l[?25h[tum_cte0515@hkn1990 jafar]$ [?2004h",,terminal_output +157,391451,"TERMINAL",0,0,"sl",,terminal_output +158,391717,"TERMINAL",0,0,"u",,terminal_output +159,391864,"TERMINAL",0,0,"r",,terminal_output +160,392495,"TERMINAL",0,0,"",,terminal_output +161,392637,"TERMINAL",0,0,"",,terminal_output +162,392780,"TERMINAL",0,0,"",,terminal_output +163,392915,"TERMINAL",0,0,"",,terminal_output +164,393112,"TERMINAL",0,0,"",,terminal_output +165,393232,"TERMINAL",0,0,"e",,terminal_output +166,393454,"TERMINAL",0,0,"n",,terminal_output +167,393614,"TERMINAL",0,0,"v ",,terminal_output +168,394115,"TERMINAL",0,0,"»",,terminal_output +169,394681,"TERMINAL",0,0,"",,terminal_output +170,394917,"TERMINAL",0,0,"|",,terminal_output +171,395177,"TERMINAL",0,0," ",,terminal_output +172,395534,"TERMINAL",0,0,"g",,terminal_output +173,395671,"TERMINAL",0,0,"r",,terminal_output +174,395849,"TERMINAL",0,0,"e",,terminal_output +175,395911,"TERMINAL",0,0,"p",,terminal_output +176,396155,"TERMINAL",0,0," ",,terminal_output +177,397214,"TERMINAL",0,0,"L",,terminal_output +178,397507,"TERMINAL",0,0,"",,terminal_output +179,397748,"TERMINAL",0,0,"S",,terminal_output +180,397810,"TERMINAL",0,0,"L",,terminal_output +181,398150,"TERMINAL",0,0,"U",,terminal_output +182,398212,"TERMINAL",0,0,"R",,terminal_output +183,398380,"TERMINAL",0,0,"M",,terminal_output +184,399150,"TERMINAL",0,0,"\n[?2004l[tum_cte0515@hkn1990 jafar]$ [?2004h",,terminal_output +185,401474,"TERMINAL",0,0,"s",,terminal_output +186,401538,"TERMINAL",0,0,"m",,terminal_output +187,401651,"TERMINAL",0,0,"i",,terminal_output +188,401845,"TERMINAL",0,0,"\n[?2004l",,terminal_output +189,401971,"TERMINAL",0,0,"\nbash: smi: command not found...\r\n",,terminal_output +190,403125,"TERMINAL",0,0,"^C\n[tum_cte0515@hkn1990 jafar]$ [?2004h",,terminal_output +191,403684,"TERMINAL",0,0,"logout\r\n[?2004l(B[?1l>[?12l[?25h[?1000l[?1002l[?1003l[?1006l[?1005l[?7727l[?1004l[?1049l[exited]\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +192,413238,"TERMINAL",0,0,"\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ 
]633;B\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B",,terminal_output +193,413503,"TERMINAL",0,0,"\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B",,terminal_output +194,413646,"TERMINAL",0,0,"\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B",,terminal_output +195,413708,"TERMINAL",0,0,"\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B",,terminal_output +196,414078,"TERMINAL",0,0,"srun",,terminal_focus +197,419715,"TERMINAL",0,0,"i",,terminal_output +198,419870,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +199,419978,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +200,420332,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +201,420811,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +202,420869,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +203,420931,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +204,421066,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn0803.localdomain: Thu Jul 10 10:33:14 2025Partition dev_cpuonly: 11 nodes idle\rPartition cpuonly: 37 nodes idle\rPartition dev_accelerated:\t 0 nodes idle\rPartition accelerated:\t 3 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 7 nodes idle",,terminal_output +205,422169,"TERMINAL",0,0,"5",,terminal_output +206,422948,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0803:~/Projects/jafar[?2004h]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B",,terminal_output +207,424071,"TERMINAL",0,0,"\r(reverse-i-search)`': ",,terminal_output +208,424417,"TERMINAL",0,0,"s': smi",,terminal_output +209,424602,"TERMINAL",0,0,"[?25lsa': salloc --time=24:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=5 --gres=gpu:1 --cpus-per-task=5\r[?25h",,terminal_output +210,424662,"TERMINAL",0,0,"[?25ls[1@l': sal[?25h",,terminal_output +211,424849,"TERMINAL",0,0,"[?25ls[1@l': sall[?25h",,terminal_output +212,425485,"TERMINAL",0,0,"\r]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;Bsalloc --time=24:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=5 --gres=gpu:1 --cpus-per-task=5",,terminal_output +213,426191,"TERMINAL",0,0,"",,terminal_output +214,426770,"TERMINAL",0,0,"",,terminal_output +215,426931,"TERMINAL",0,0,"",,terminal_output +216,427136,"TERMINAL",0,0,"",,terminal_output +217,427558,"TERMINAL",0,0,"[?25l4[?25h",,terminal_output +218,427698,"TERMINAL",0,0,"[?25l2[?25h",,terminal_output +219,428178,"TERMINAL",0,0,"[?25l0:[1@1[?25h[1@2",,terminal_output +220,428655,"TERMINAL",0,0,"",,terminal_output +221,428822,"TERMINAL",0,0,"",,terminal_output +222,428970,"TERMINAL",0,0,"",,terminal_output +223,429144,"TERMINAL",0,0,"",,terminal_output +224,429324,"TERMINAL",0,0,"",,terminal_output +225,429455,"TERMINAL",0,0,"",,terminal_output +226,429681,"TERMINAL",0,0,"",,terminal_output +227,429799,"TERMINAL",0,0,"",,terminal_output +228,430002,"TERMINAL",0,0,"",,terminal_output +229,432128,"TERMINAL",0,0,"[?25l1[?25h",,terminal_output +230,432251,"TERMINAL",0,0,"[?25l [1@2[?25h",,terminal_output +231,433345,"TERMINAL",0,0,"",,terminal_output +232,433588,"TERMINAL",0,0,"",,terminal_output +233,434128,"TERMINAL",0,0,"[?25l5[?25h",,terminal_output +234,434329,"TERMINAL",0,0,"[?25l [1@4[?25h",,terminal_output +235,434932,"TERMINAL",0,0,"",,terminal_output +236,435124,"TERMINAL",0,0,"",,terminal_output +237,435428,"TERMINAL",0,0,"",,terminal_output +238,435538,"TERMINAL",0,0,"",,terminal_output +239,437382,"TERMINAL",0,0,"[?25l1 
--cpus-per-task=5[?25h",,terminal_output +240,437486,"TERMINAL",0,0,"[?25l4 --cpus-per-task=5[?25h",,terminal_output +241,440155,"TERMINAL",0,0,"\r\n[?2004l\rsalloc: Pending job allocation 3334542\r\nsalloc: job 3334542 queued and waiting for resources\r\n",,terminal_output +242,443336,"TERMINAL",0,0,"bash",,terminal_focus +243,444968,"TERMINAL",0,0,"queue",,terminal_command +244,445018,"TERMINAL",0,0,"]633;E;2025-07-10 10:33:38 queue;46e99295-9803-4583-b36f-e400fb9e619d]633;C",,terminal_output +245,445084,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Thu Jul 10 10:33:38 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3334542 accelerat interact tum_cte0 PD\t0:00\t 2 (Priority)3333586 accelerat interact tum_cte0 R 12:08:13\t 2 hkn[0803,0806]3333584 accelerat interact tum_cte0 R 12:13:32\t 1 hkn05233331283 accelerat train_la tum_cte0 R 1-09:55:25\t 2 hkn[0533,0536]3331284 accelerat train_la tum_cte0 R 1-09:55:25\t 2 hkn[0601-0602]3331285 accelerat train_la tum_cte0 R 1-09:55:25\t 2 hkn[0701,0710]3331286 accelerat train_la tum_cte0 R 1-09:55:25\t 2 hkn[0425,0428]3331287 accelerat train_la tum_cte0 R 1-09:55:25\t 2 hkn[0430-0431]3331288 accelerat train_la tum_cte0 R 1-09:55:25\t 2 hkn[0809,0811]3331282 accelerat train_to tum_cte0 R 1-09:56:06\t 2 hkn[0513,0516]",,terminal_output +246,446128,"TERMINAL",0,0,"9436666667",,terminal_output +247,447257,"TERMINAL",0,0,"40547777778",,terminal_output +248,448227,"TERMINAL",0,0,"17699999910",,terminal_output +249,449306,"TERMINAL",0,0,"3873030303030301",,terminal_output +250,450301,"TERMINAL",0,0,"4981111112",,terminal_output +251,451355,"TERMINAL",0,0,"52092222223",,terminal_output +252,452411,"TERMINAL",0,0,"61403333334",,terminal_output +253,453503,"TERMINAL",0,0,"7214444445",,terminal_output +254,454527,"TERMINAL",0,0,"8325555556",,terminal_output +255,455352,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +256,457216,"TERMINAL",0,0,"scancel 3333586",,terminal_command +257,457266,"TERMINAL",0,0,"]633;E;2025-07-10 10:33:51 scancel 3333586;46e99295-9803-4583-b36f-e400fb9e619d]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar",,terminal_output +258,457316,"TERMINAL",0,0,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\r\nslurmstepd: error: *** STEP 3333586.4 ON hkn0803 CANCELLED AT 2025-07-10T10:33:51 ***\r\nsalloc: Job allocation 3334542 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;tum_cte0515@hkn0803:~/Projects/jafar[?2004h]633;Ajafar[tum_cte0515@hkn0803 jafar]$ ]633;B",,terminal_output +259,459763,"TERMINAL",0,0,"srun",,terminal_focus +260,466826,"TERMINAL",0,0,"[?2004l\r\r\nexit\r\n",,terminal_output +261,466869,"TERMINAL",0,0,"srun: error: hkn0803: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;1",,terminal_output +262,471027,"TERMINAL",0,0,"salloc --time=12:00:00 --partition=accelerated --nodes=2 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5",,terminal_command +263,471079,"TERMINAL",0,0,"]633;E;2025-07-10 10:34:04 salloc --time=12:00:00 --partition=accelerated --nodes=2 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5;16c08be6-3885-420f-ac53-f5272aed6e54]633;Csalloc: Pending job allocation 3334543\r\nsalloc: job 3334543 queued and waiting for resources\r\n",,terminal_output +264,472217,"TERMINAL",0,0,"bash",,terminal_focus +265,474715,"TERMINAL",0,0,"queue",,terminal_command 
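The `scancel 3333586` / `salloc` sequence just above replaces the old interactive job with a request for two accelerated nodes carrying four GPUs each, i.e. eight devices in total, which is what the later "Running on 8 devices." line reflects. The same request as a standalone sketch, with every flag value copied from the recorded command:
    # Two nodes x 4 GPUs = 8 devices, one task per GPU, 5 CPUs per task.
    salloc --time=12:00:00 \
        --partition=accelerated \
        --nodes=2 \
        --ntasks-per-node=4 \
        --gres=gpu:4 \
        --cpus-per-task=5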
+266,474752,"TERMINAL",0,0,"]633;E;2025-07-10 10:34:08 queue;46e99295-9803-4583-b36f-e400fb9e619d]633;C",,terminal_output +267,474816,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Thu Jul 10 10:34:08 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3333586 accelerat interact tum_cte0 CG 12:08:26\t 1 hkn08033334543 accelerat interact tum_cte0 PD\t0:00\t 2 (Priority)3333584 accelerat interact tum_cte0 R 12:14:02\t 1 hkn05233331283 accelerat train_la tum_cte0 R 1-09:55:55\t 2 hkn[0533,0536]3331284 accelerat train_la tum_cte0 R 1-09:55:55\t 2 hkn[0601-0602]3331285 accelerat train_la tum_cte0 R 1-09:55:55\t 2 hkn[0701,0710]3331286 accelerat train_la tum_cte0 R 1-09:55:55\t 2 hkn[0425,0428]3331287 accelerat train_la tum_cte0 R 1-09:55:55\t 2 hkn[0430-0431]3331288 accelerat train_la tum_cte0 R 1-09:55:55\t 2 hkn[0809,0811]3331282 accelerat train_to tum_cte0 R 1-09:56:36\t 2 hkn[0513,0516]",,terminal_output +268,475865,"TERMINAL",0,0,"936666667",,terminal_output +269,476899,"TERMINAL",0,0,"1047777778",,terminal_output +270,477438,"TERMINAL",0,0,"salloc",,terminal_focus +271,477957,"TERMINAL",0,0,"158888889",,terminal_output +272,479106,"TERMINAL",0,0,"2699999940",,terminal_output +273,480128,"TERMINAL",0,0,"376:006:006:006:006:006:001",,terminal_output +274,480795,"TERMINAL",0,0,"",,terminal_focus +275,481114,"TERMINAL",0,0,"481111112",,terminal_output +276,482221,"TERMINAL",0,0,"592222223",,terminal_output +277,483178,"TERMINAL",0,0,"6114444445",,terminal_output +278,484234,"TERMINAL",0,0,"825555556",,terminal_output +279,485276,"TERMINAL",0,0,"936666667",,terminal_output +280,486323,"TERMINAL",0,0,"2047777778",,terminal_output +281,487373,"TERMINAL",0,0,"salloc",,terminal_focus +282,487437,"TERMINAL",0,0,"158888889",,terminal_output +283,488244,"TERMINAL",0,0,"salloc: job 3334543 has been allocated resources\r\nsalloc: Granted job allocation 3334543\r\n",,terminal_output +284,488352,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output +285,488420,"TERMINAL",0,0,"2 Rhkn[0719,0806]699999950",,terminal_output +286,489549,"TERMINAL",0,0,"3171010101010101",,terminal_output +287,490531,"TERMINAL",0,0,"4281111112",,terminal_output +288,491605,"TERMINAL",0,0,"5392222223",,terminal_output +289,492597,"TERMINAL",0,0,"64203333334",,terminal_output +290,493762,"TERMINAL",0,0,"7514444445",,terminal_output +291,494705,"TERMINAL",0,0,"8625555556",,terminal_output +292,495797,"TERMINAL",0,0,"9736666667",,terminal_output +293,496821,"TERMINAL",0,0,"30847777778",,terminal_output +294,497844,"TERMINAL",0,0,"\r14543 R 0:09\t 2 hkn[0719,0806]358412:14:25\t 1 hkn05231283train_la1-09:56:18\t 2 hkn[0533,0536]48601-060258701,071068425,04287830-043188809,0812to59513,0516",,terminal_output +295,498942,"TERMINAL",0,0,"21069999997:00",,terminal_output +296,499932,"TERMINAL",0,0,"3172020202020201",,terminal_output +297,501018,"TERMINAL",0,0,"4281111112",,terminal_output +298,502045,"TERMINAL",0,0,"5392222223",,terminal_output +299,503067,"TERMINAL",0,0,"64303333334",,terminal_output +300,504192,"TERMINAL",0,0,"7514444445",,terminal_output +301,505139,"TERMINAL",0,0,"8625555556",,terminal_output +302,506242,"TERMINAL",0,0,"9847777778",,terminal_output +303,507261,"TERMINAL",0,0,"41958888889",,terminal_output +304,508287,"TERMINAL",0,0,"220699999910",,terminal_output +305,509337,"TERMINAL",0,0,"3173030303030301",,terminal_output +306,510374,"TERMINAL",0,0,"4281111112",,terminal_output +307,511418,"TERMINAL",0,0,"5392222223",,terminal_output 
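Once job 3334543 is granted on hkn[0719,0806], the device count the training script will see follows directly from the job geometry. A small sanity-check sketch, an assumption rather than part of the recorded session (both variable names appear verbatim in the `env | grep SLURM` dump below):
    # 2 nodes x 4 GPUs per node = 8 devices; the --batch_size passed to
    # train_dynamics.py must divide evenly across them.
    ndevices=$(( SLURM_JOB_NUM_NODES * SLURM_GPUS_ON_NODE ))
    echo "training will shard across ${ndevices} devices"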
+308,512456,"TERMINAL",0,0,"64403333334",,terminal_output +309,513611,"TERMINAL",0,0,"7514444445",,terminal_output +310,514545,"TERMINAL",0,0,"8625555556",,terminal_output +311,515459,"TERMINAL",0,0,"salloc: Nodes hkn[0719,0806] are ready for job\r\n",,terminal_output +312,515589,"TERMINAL",0,0,"9736666667",,terminal_output +313,516683,"TERMINAL",0,0,"50847777778",,terminal_output +314,517197,"TERMINAL",0,0,"]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h[tum_cte0515@hkn0719 jafar]$ ",,terminal_output +315,517704,"TERMINAL",0,0,"1958888889",,terminal_output +316,518712,"TERMINAL",0,0,"230699999920",,terminal_output +317,519857,"TERMINAL",0,0,"3174040404040401",,terminal_output +318,520803,"TERMINAL",0,0,"4281111112",,terminal_output +319,521877,"TERMINAL",0,0,"5392222223",,terminal_output +320,522931,"TERMINAL",0,0,"64503333334",,terminal_output +321,523208,"TERMINAL",0,0,"s",,terminal_output +322,523260,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +323,523321,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +324,523383,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +325,523532,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +326,523722,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +327,523810,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +328,523906,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +329,523975,"TERMINAL",0,0,"7514444445",,terminal_output +330,524037,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +331,524681,"TERMINAL",0,0,"env/",,terminal_output +332,524910,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +333,525015,"TERMINAL",0,0,"in/",,terminal_output +334,525015,"TERMINAL",0,0,"8625555556",,terminal_output +335,525234,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +336,525337,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +337,525519,"TERMINAL",0,0,"tivate",,terminal_output +338,525715,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +339,526029,"TERMINAL",0,0,"9736666667",,terminal_output +340,526378,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_output +341,526528,"TERMINAL",0,0,"alloc --time=12:00:00 --partition=accelerated --nodes=2 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5",,terminal_output +342,526678,"TERMINAL",0,0,"\ridling",,terminal_output +343,527132,"TERMINAL",0,0,"5:00847777778",,terminal_output +344,527262,"TERMINAL",0,0,"env | grep SLURM",,terminal_output +345,527437,"TERMINAL",0,0,"smi",,terminal_output +346,527591,"TERMINAL",0,0,"env | grep SLURM",,terminal_output +347,527932,"TERMINAL",0,0,"smi",,terminal_output +348,528101,"TERMINAL",0,0,"env | grep SLURM",,terminal_output +349,528112,"TERMINAL",0,0,"1958888889",,terminal_output +350,529052,"TERMINAL",0,0,"smi",,terminal_output +351,529157,"TERMINAL",0,0,"240699999930",,terminal_output +352,529265,"TERMINAL",0,0,"env | grep SLURM",,terminal_output +353,529545,"TERMINAL",0,0,"smi",,terminal_output +354,529942,"TERMINAL",0,0,"env | grep SLURM",,terminal_output +355,530188,"TERMINAL",0,0,"3285151515151512",,terminal_output 
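The `env | grep SLURM` that follows dumps the full environment Slurm set up inside the allocation. When only the task/GPU geometry matters, a narrower filter is easier to read; a minimal variant using only variable names present in the recorded dump:
    env | grep -E 'SLURM_(NNODES|NTASKS|GPUS_ON_NODE|CPUS_PER_TASK)='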
+356,530338,"TERMINAL",0,0,"\r\n[?2004l\rSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=409404\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0719\r\nSLURM_JOB_START_TIME=1752136462\r\nSLURM_STEP_NODELIST=hkn0719\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1752179662\r\nSLURM_PMI2_SRUN_PORT=43929\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=2\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3334543\r\nSLURM_PTY_PORT=44699\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_JOB_QOS=normal\r\nSLURM_PTY_WIN_ROW=48\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=8\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0719\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=228\r\nSLURM_NODELIST=hkn[0719,0806]\r\nSLURM_SRUN_COMM_PORT=38371\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=8\r\nSLURM_NNODES=2\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3334543\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0719\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38371\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0719,0806]\r\n]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +357,531332,"TERMINAL",0,0,"5392222223",,terminal_output +358,532263,"TERMINAL",0,0,"645:003333334",,terminal_output +359,533304,"TERMINAL",0,0,"7514444445",,terminal_output +360,534344,"TERMINAL",0,0,"8625555556",,terminal_output +361,535386,"TERMINAL",0,0,"9736666667",,terminal_output +362,536423,"TERMINAL",0,0,"10847777778",,terminal_output +363,537461,"TERMINAL",0,0,"1958888889",,terminal_output +364,538505,"TERMINAL",0,0,"250699999940",,terminal_output +365,539621,"TERMINAL",0,0,"3177:007:007:007:007:007:001",,terminal_output +366,540645,"TERMINAL",0,0,"4281111112",,terminal_output +367,541647,"TERMINAL",0,0,"5392222223",,terminal_output +368,541866,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"",shellscript,tab +369,542692,"TERMINAL",0,0,"64103333334",,terminal_output +370,543825,"TERMINAL",0,0,"7514444445",,terminal_output +371,544778,"TERMINAL",0,0,"8625555556",,terminal_output +372,545498,"TERMINAL",0,0,"bash",,terminal_focus +373,545823,"TERMINAL",0,0,"9736666667",,terminal_output +374,546406,"TERMINAL",0,0,"srun",,terminal_focus +375,546868,"TERMINAL",0,0,"20847777778",,terminal_output +376,547479,"TERMINAL",0,0,"s",,terminal_output +377,547543,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +378,547604,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +379,547954,"TERMINAL",0,0,"1958888889",,terminal_output +380,547964,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",,terminal_output +381,548651,"TERMINAL",0,0,"\rslurm/jobs/mihir/horeka/yolo-runs/tester.sh\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource 
.venv/bin/activate\r\n\r\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\r\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\r\n\r\njob_name=""debug""\r\nslurm_job_id=""debug-mihir""\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_lam_action_space_scaling_20/3318547/lam_1751657975_200000/\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/train_tokenizer_batch_size_scaling_16_node/3321526/tokenizer_22000/\r\n\r\nenv | grep SLURM\r\n\r\nsrun python train_dynamics.py \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=82 \\r\n --min_lr=0 \\r\n --max_lr=1.4e-4 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --num_latent_actions=20 \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-yolorun-tf-records-$slurm_job_id \\r\n --tags dynamics yolo-run tf_records \\r\n --entity instant-uv \\r\n --project jafar \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $tf_records_dir \\r\n --lam_checkpoint=$lam_ckpt_dir\r\n ",,terminal_output +382,548803,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=409404\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0719\r\nSLURM_JOB_START_TIME=1752136462\r\nSLURM_STEP_NODELIST=hkn0719\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1752179662\r\nSLURM_PMI2_SRUN_PORT=43929\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=2\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3334543\r\nSLURM_PTY_PORT=44699\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=48\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=8\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0719\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=228\r\nSLURM_NODELIST=hkn[0719,0806]\r\nSLURM_SRUN_COMM_PORT=38371\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=8\r\nSLURM_NNODES=2\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3334543\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0719\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38371\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0719,0806]\r\n",,terminal_output +383,548934,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +384,548994,"TERMINAL",0,0,"21:00699999950",,terminal_output +385,550004,"TERMINAL",0,0,"3171010101010101",,terminal_output +386,551041,"TERMINAL",0,0,"4281111112",,terminal_output +387,552089,"TERMINAL",0,0,"5392222223",,terminal_output +388,553137,"TERMINAL",0,0,"64203333334",,terminal_output +389,554318,"TERMINAL",0,0,"7514444445",,terminal_output +390,555218,"TERMINAL",0,0,"8736666667",,terminal_output +391,555304,"TERMINAL",0,0,"2025-07-10 
10:35:29.094264: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:35:29.094501: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:35:29.094854: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:35:29.094972: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136529.107349 2687759 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136529.107555 2687757 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136529.107896 2687760 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136529.108223 2687758 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1752136529.111618 2687759 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1752136529.112228 2687757 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1752136529.112244 2687760 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1752136529.112461 2687758 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1752136529.125414 2687759 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136529.125429 2687759 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136529.125431 2687759 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136529.125432 2687759 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136529.125866 2687758 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136529.125881 2687758 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136529.125883 2687758 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136529.125884 2687758 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136529.125831 2687760 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136529.125847 2687760 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136529.125849 2687760 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136529.125851 2687760 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136529.126137 2687757 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136529.126154 2687757 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136529.126156 2687757 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136529.126157 2687757 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +392,556318,"TERMINAL",0,0,"30847777778",,terminal_output +393,557340,"TERMINAL",0,0,"1958888889",,terminal_output +394,558336,"TERMINAL",0,0,"21069999998:00",,terminal_output +395,559388,"TERMINAL",0,0,"3172020202020201",,terminal_output +396,560428,"TERMINAL",0,0,"4281111112",,terminal_output +397,561476,"TERMINAL",0,0,"5392222223",,terminal_output +398,562523,"TERMINAL",0,0,"64303333334",,terminal_output +399,563585,"TERMINAL",0,0,"7514444445",,terminal_output +400,564204,"TERMINAL",0,0,"W0000 00:00:1752136537.989385 2687757 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136537.991307 2687758 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136538.007143 2687760 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. 
Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136538.010763 2687759 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +401,564619,"TERMINAL",0,0,"8625555556",,terminal_output +402,565663,"TERMINAL",0,0,"9736666667",,terminal_output +403,566712,"TERMINAL",0,0,"40847777778",,terminal_output +404,567782,"TERMINAL",0,0,"1958888889",,terminal_output +405,568804,"TERMINAL",0,0,"220699999910",,terminal_output +406,569841,"TERMINAL",0,0,"3173030303030301",,terminal_output +407,570883,"TERMINAL",0,0,"4281111112",,terminal_output +408,571981,"TERMINAL",0,0,"5392222223",,terminal_output +409,573003,"TERMINAL",0,0,"64403333334",,terminal_output +410,574027,"TERMINAL",0,0,"7514444445",,terminal_output +411,575053,"TERMINAL",0,0,"2025-07-10 10:35:48.867135: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:35:48.867140: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:35:48.867137: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:35:48.867162: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n",,terminal_output +412,575084,"TERMINAL",0,0,"8625555556",,terminal_output +413,575119,"TERMINAL",0,0,"WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136548.927457 409663 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136548.927286 409665 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136548.927282 409666 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136548.927714 409664 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1752136548.934107 409663 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1752136548.934111 409664 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been 
registered\r\nE0000 00:00:1752136548.934106 409665 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1752136548.934104 409666 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\n",,terminal_output +414,575266,"TERMINAL",0,0,"W0000 00:00:1752136549.091935 409663 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136549.091960 409663 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136549.091964 409663 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136549.091966 409663 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136549.091938 409664 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136549.091963 409664 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136549.091966 409664 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136549.091967 409664 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136549.091932 409665 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136549.091959 409665 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136549.091962 409665 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136549.091963 409665 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136549.091938 409666 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136549.091962 409666 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136549.091964 409666 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136549.091966 409666 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +415,576112,"TERMINAL",0,0,"9736666667",,terminal_output +416,577160,"TERMINAL",0,0,"50847777778",,terminal_output +417,578242,"TERMINAL",0,0,"130699999920",,terminal_output +418,579255,"TERMINAL",0,0,"3174040404040401",,terminal_output +419,580303,"TERMINAL",0,0,"4281111112",,terminal_output +420,581354,"TERMINAL",0,0,"5392222223",,terminal_output +421,582425,"TERMINAL",0,0,"64503333334",,terminal_output +422,583550,"TERMINAL",0,0,"7514444445",,terminal_output +423,584495,"TERMINAL",0,0,"8625555556",,terminal_output +424,585545,"TERMINAL",0,0,"9736666667",,terminal_output +425,586624,"TERMINAL",0,0,"6:00847777778",,terminal_output +426,587665,"TERMINAL",0,0,"1958888889",,terminal_output +427,588777,"TERMINAL",0,0,"240699999930",,terminal_output +428,589798,"TERMINAL",0,0,"3175050505050501",,terminal_output +429,590780,"TERMINAL",0,0,"4281111112",,terminal_output +430,591823,"TERMINAL",0,0,"5392222223",,terminal_output +431,592868,"TERMINAL",0,0,"646:003333334",,terminal_output +432,593995,"TERMINAL",0,0,"7514444445",,terminal_output +433,594961,"TERMINAL",0,0,"8625555556",,terminal_output +434,596044,"TERMINAL",0,0,"9736666667",,terminal_output +435,597072,"TERMINAL",0,0,"10847777778",,terminal_output +436,598198,"TERMINAL",0,0,"1958888889",,terminal_output +437,599220,"TERMINAL",0,0,"250699999940",,terminal_output +438,600245,"TERMINAL",0,0,"3288:018:018:018:018:018:012",,terminal_output +439,601267,"TERMINAL",0,0,"5392222223",,terminal_output +440,602295,"TERMINAL",0,0,"64103333334",,terminal_output +441,603329,"TERMINAL",0,0,"7514444445",,terminal_output +442,604374,"TERMINAL",0,0,"8625555556",,terminal_output +443,605463,"TERMINAL",0,0,"9736666667",,terminal_output +444,606489,"TERMINAL",0,0,"20847777778",,terminal_output +445,607516,"TERMINAL",0,0,"1958888889",,terminal_output +446,608746,"TERMINAL",0,0,"22:00699999950",,terminal_output +447,609720,"TERMINAL",0,0,"3171010101010101",,terminal_output +448,610625,"TERMINAL",0,0,"W0000 00:00:1752136584.446538 409663 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136584.446610 409664 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136584.446495 409665 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136584.446591 409666 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. 
Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +449,610769,"TERMINAL",0,0,"4281111112",,terminal_output +450,611921,"TERMINAL",0,0,"5392222223",,terminal_output +451,612860,"TERMINAL",0,0,"64203333334",,terminal_output +452,613964,"TERMINAL",0,0,"7514444445",,terminal_output +453,614987,"TERMINAL",0,0,"8625555556",,terminal_output +454,615997,"TERMINAL",0,0,"9736666667",,terminal_output +455,617047,"TERMINAL",0,0,"30847777778",,terminal_output +456,618093,"TERMINAL",0,0,"1958888889",,terminal_output +457,619136,"TERMINAL",0,0,"21069999999:00",,terminal_output +458,620212,"TERMINAL",0,0,"3282121212121212",,terminal_output +459,621338,"TERMINAL",0,0,"5392222223",,terminal_output +460,622300,"TERMINAL",0,0,"64303333334",,terminal_output +461,623348,"TERMINAL",0,0,"7514444445",,terminal_output +462,624395,"TERMINAL",0,0,"8625555556",,terminal_output +463,625442,"TERMINAL",0,0,"9736666667",,terminal_output +464,626483,"TERMINAL",0,0,"40847777778",,terminal_output +465,627582,"TERMINAL",0,0,"1958888889",,terminal_output +466,628573,"TERMINAL",0,0,"220699999910",,terminal_output +467,629632,"TERMINAL",0,0,"3173030303030301",,terminal_output +468,630658,"TERMINAL",0,0,"4281111112",,terminal_output +469,631781,"TERMINAL",0,0,"5392222223",,terminal_output +470,632745,"TERMINAL",0,0,"64403333334",,terminal_output +471,633829,"TERMINAL",0,0,"7514444445",,terminal_output +472,634830,"TERMINAL",0,0,"8625555556",,terminal_output +473,635880,"TERMINAL",0,0,"9736666667",,terminal_output +474,636920,"TERMINAL",0,0,"50847777778",,terminal_output +475,638027,"TERMINAL",0,0,"1958888889",,terminal_output +476,638133,"TERMINAL",0,0,"initializing jax distributed\r\njax distributed initialized\r\nRunning on 8 devices.\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 127, in \r\ninitializing jax distributed\r\njax distributed initialized\r\nRunning on 8 devices.\r\n raise ValueError(\r\nValueError: Global batch size 82 must be divisible by number of devices 8.\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 127, in \r\n raise ValueError(\r\nValueError: Global batch size 82 must be divisible by number of devices 8.\r\ninitializing jax distributed\r\njax distributed initialized\r\nRunning on 8 devices.\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 127, in \r\ninitializing jax distributed\r\njax distributed initialized\r\nRunning on 8 devices.\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 127, in \r\n raise ValueError(\r\nValueError: Global batch size 82 must be divisible by number of devices 8.\r\n raise ValueError(\r\nValueError: Global batch size 82 must be divisible by number of devices 8.\r\ninitializing jax distributed\r\njax distributed initialized\r\nRunning on 8 devices.\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 127, in \r\ninitializing jax distributed\r\njax distributed initialized\r\nRunning on 8 devices.\r\n raise ValueError(\r\nValueError: Global batch size 82 must be divisible by number of devices 
8.\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 127, in \r\ninitializing jax distributed\r\njax distributed initialized\r\nRunning on 8 devices.\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 127, in \r\n raise ValueError(\r\nValueError: Global batch size 82 must be divisible by number of devices 8.\r\n raise ValueError(\r\nValueError: Global batch size 82 must be divisible by number of devices 8.\r\ninitializing jax distributed\r\njax distributed initialized\r\nRunning on 8 devices.\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 127, in \r\n raise ValueError(\r\nValueError: Global batch size 82 must be divisible by number of devices 8.\r\n",,terminal_output +477,639004,"TERMINAL",0,0,"230699999920",,terminal_output +478,639329,"TERMINAL",0,0,"srun: error: hkn0806: tasks 4,6: Exited with exit code 1\r\n",,terminal_output +479,639465,"TERMINAL",0,0,"srun: error: hkn0806: task 7: Exited with exit code 1\r\n",,terminal_output +480,639770,"TERMINAL",0,0,"srun: error: hkn0806: task 5: Exited with exit code 1\r\n",,terminal_output +481,640078,"TERMINAL",0,0,"3174040404040401",,terminal_output +482,640693,"TERMINAL",0,0,"srun: error: hkn0719: tasks 0,2: Exited with exit code 1\r\n",,terminal_output +483,640827,"TERMINAL",0,0,"srun: error: hkn0719: task 1: Exited with exit code 1\r\n",,terminal_output +484,641055,"TERMINAL",0,0,"srun: error: hkn0719: task 3: Exited with exit code 1\r\n]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +485,641117,"TERMINAL",0,0,"4281111112",,terminal_output +486,642242,"TERMINAL",0,0,"5392222223",,terminal_output +487,643164,"TERMINAL",0,0,"64503333334",,terminal_output +488,644284,"TERMINAL",0,0,"7625555556",,terminal_output +489,645298,"TERMINAL",0,0,"9736666667",,terminal_output +490,646326,"TERMINAL",0,0,"7:00847777778",,terminal_output +491,647346,"TERMINAL",0,0,"1958888889",,terminal_output +492,648385,"TERMINAL",0,0,"240699999930",,terminal_output +493,649433,"TERMINAL",0,0,"3175050505050501",,terminal_output +494,650471,"TERMINAL",0,0,"4281111112",,terminal_output +495,651515,"TERMINAL",0,0,"5392222223",,terminal_output +496,652560,"TERMINAL",0,0,"647:003333334",,terminal_output +497,653698,"TERMINAL",0,0,"7514444445",,terminal_output +498,654720,"TERMINAL",0,0,"8625555556",,terminal_output +499,655689,"TERMINAL",0,0,"9736666667",,terminal_output +500,656735,"TERMINAL",0,0,"10847777778",,terminal_output +501,657790,"TERMINAL",0,0,"1958888889",,terminal_output +502,658814,"TERMINAL",0,0,"250699999940",,terminal_output +503,659941,"TERMINAL",0,0,"3179:009:009:009:009:009:001",,terminal_output +504,660965,"TERMINAL",0,0,"4281111112",,terminal_output +505,661995,"TERMINAL",0,0,"5392222223",,terminal_output +506,662981,"TERMINAL",0,0,"64103333334",,terminal_output +507,664038,"TERMINAL",0,0,"7514444445",,terminal_output +508,665055,"TERMINAL",0,0,"8625555556",,terminal_output +509,666096,"TERMINAL",0,0,"9736666667",,terminal_output +510,667211,"TERMINAL",0,0,"20847777778",,terminal_output +511,668238,"TERMINAL",0,0,"13:00699999950",,terminal_output +512,669272,"TERMINAL",0,0,"3171010101010101",,terminal_output +513,670297,"TERMINAL",0,0,"4281111112",,terminal_output +514,671333,"TERMINAL",0,0,"5392222223",,terminal_output 
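Editor's note: the failure captured above is the global-batch divisibility guard in train_dynamics.py (line 127 per the traceback): a global batch of 82 cannot be split evenly across the job's 8 devices, which is why the tester.sh edit a few events later (sequence 536-543) rewrites the batch size to 96 (96 / 8 = 12 per device). A minimal sketch of such a guard, assuming a hypothetical helper name (validate_global_batch); jax.device_count() is the real API:

import jax

def validate_global_batch(batch_size: int) -> int:
    """Return the per-device batch size, failing fast on uneven splits."""
    num_devices = jax.device_count()  # all devices across the distributed job
    if batch_size % num_devices != 0:
        raise ValueError(
            f"Global batch size {batch_size} must be divisible "
            f"by number of devices {num_devices}."
        )
    return batch_size // num_devices

# With 8 devices: 82 raises exactly as in the log; 96 yields 12 per device.
per_device = validate_global_batch(96)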
+515,672371,"TERMINAL",0,0,"64203333334",,terminal_output +516,673410,"TERMINAL",0,0,"7514444445",,terminal_output +517,674458,"TERMINAL",0,0,"8625555556",,terminal_output +518,675497,"TERMINAL",0,0,"9736666667",,terminal_output +519,676634,"TERMINAL",0,0,"30847777778",,terminal_output +520,677576,"TERMINAL",0,0,"1958888889",,terminal_output +521,678684,"TERMINAL",0,0,"210699999910:00:00",,terminal_output +522,679811,"TERMINAL",0,0,"3172020202020201",,terminal_output +523,680837,"TERMINAL",0,0,"4281111112",,terminal_output +524,681775,"TERMINAL",0,0,"5392222223",,terminal_output +525,682885,"TERMINAL",0,0,"64303333334",,terminal_output +526,683903,"TERMINAL",0,0,"7514444445",,terminal_output +527,684334,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",0,0,"",shellscript,tab +528,684912,"TERMINAL",0,0,"8625555556",,terminal_output +529,685976,"TERMINAL",0,0,"9736666667",,terminal_output +530,687081,"TERMINAL",0,0,"40847777778",,terminal_output +531,688071,"TERMINAL",0,0,"1958888889",,terminal_output +532,689229,"TERMINAL",0,0,"220699999910",,terminal_output +533,689810,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"",shellscript,tab +534,690170,"TERMINAL",0,0,"3173030303030301",,terminal_output +535,691475,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",829,0,"",shellscript,selection_mouse +536,692183,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",828,1,"",shellscript,content +537,692198,"TERMINAL",0,0,"4392222223",,terminal_output +538,692292,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,1,"",shellscript,content +539,693649,"TERMINAL",0,0,"65414444445",,terminal_output +540,694289,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,0,"9",shellscript,content +541,694291,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",828,0,"",shellscript,selection_keyboard +542,694436,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",828,0,"6",shellscript,content +543,694437,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",829,0,"",shellscript,selection_keyboard +544,695558,"TERMINAL",0,0,"8736666667",,terminal_output +545,697344,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/yolo-runs/tester.sh",,terminal_output +546,698026,"TERMINAL",0,0,"50958888889",,terminal_output +547,698582,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\r\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\r\n\r\njob_name=""debug""\r\nslurm_job_id=""debug-mihir""\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_lam_action_space_scaling_20/3318547/lam_1751657975_200000/\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/train_tokenizer_batch_size_scaling_16_node/3321526/tokenizer_22000/\r\n\r\nenv | grep SLURM\r\n\r\nsrun python train_dynamics.py \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=96 \\r\n --min_lr=0 \\r\n --max_lr=1.4e-4 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --num_latent_actions=20 \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-yolorun-tf-records-$slurm_job_id \\r\n --tags dynamics yolo-run tf_records \\r\n --entity instant-uv \\r\n --project jafar \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $tf_records_dir \\r\n 
--lam_checkpoint=$lam_ckpt_dir\r\n ",,terminal_output +548,698714,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=409404\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0719\r\nSLURM_JOB_START_TIME=1752136462\r\nSLURM_STEP_NODELIST=hkn0719\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1752179662\r\nSLURM_PMI2_SRUN_PORT=43929\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=2\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3334543\r\nSLURM_PTY_PORT=44699\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=48\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=8\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0719\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=228\r\nSLURM_NODELIST=hkn[0719,0806]\r\nSLURM_SRUN_COMM_PORT=38371\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=8\r\nSLURM_NNODES=2\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3334543\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0719\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38371\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0719,0806]\r\n",,terminal_output +549,698831,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +550,700814,"TERMINAL",0,0,"232841414141414122",,terminal_output +551,701066,"TERMINAL",0,0,"2025-07-10 10:37:54.816739: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136674.829901 2688401 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1752136674.834093 2688401 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1752136674.847554 2688401 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136674.847569 2688401 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136674.847571 2688401 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136674.847572 2688401 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\n2025-07-10 10:37:54.858291: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:37:54.861519: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:37:54.862106: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136674.871264 2688403 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136674.874524 2688400 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136674.875019 2688402 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1752136674.875666 2688403 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1752136674.878949 2688400 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1752136674.879249 2688402 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1752136674.889156 2688403 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136674.889171 2688403 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136674.889172 2688403 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136674.889174 2688403 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136674.891885 2688402 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136674.891901 2688402 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136674.891902 2688402 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136674.891904 2688402 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136674.892043 2688400 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136674.892059 2688400 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136674.892061 2688400 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136674.892062 2688400 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +552,701311,"TERMINAL",0,0,"2025-07-10 10:37:55.105226: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:37:55.105313: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:37:55.106647: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:37:55.106766: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136675.118382 410361 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136675.118381 410363 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136675.119944 410360 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136675.119924 410362 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1752136675.122710 410363 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1752136675.122820 410361 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1752136675.124143 410360 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1752136675.124145 410362 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1752136675.136808 410361 computation_placer.cc:177] computation 
placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136675.136825 410361 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136675.136827 410361 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136675.136828 410361 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136675.136807 410363 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136675.136824 410363 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136675.136826 410363 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136675.136827 410363 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136675.137398 410360 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136675.137414 410360 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136675.137416 410360 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136675.137417 410360 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136675.137398 410362 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136675.137414 410362 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136675.137416 410362 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136675.137418 410362 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +553,702662,"TERMINAL",0,0,"54503333334",,terminal_output +554,703773,"TERMINAL",0,0,"7514444445",,terminal_output +555,704203,"TERMINAL",0,0,"W0000 00:00:1752136678.022347 2688401 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136678.033014 2688402 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. 
Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +556,704261,"TERMINAL",0,0,"W0000 00:00:1752136678.049487 2688403 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136678.059020 2688400 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +557,704796,"TERMINAL",0,0,"8625555556",,terminal_output +558,705105,"TERMINAL",0,0,"W0000 00:00:1752136678.875727 410363 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136678.901679 410362 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136678.907110 410360 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136678.916025 410361 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. 
Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +559,705817,"TERMINAL",0,0,"9736666667",,terminal_output +560,707827,"TERMINAL",0,0,"8:00958888889",,terminal_output +561,710943,"TERMINAL",0,0,"242851515151515132",,terminal_output +562,715854,"TERMINAL",0,0,"578:036666667",,terminal_output +563,720059,"TERMINAL",0,0,"1051710:00:0010:00:0010:00:0010:00:0010:00:0010:00:0041",,terminal_output +564,723650,"TERMINAL",0,0,"45114444445",,terminal_output +565,726127,"TERMINAL",0,0,"8736666667",,terminal_output +566,727220,"TERMINAL",0,0,"20958888889",,terminal_output +567,728234,"TERMINAL",0,0,"24:00699999950",,terminal_output +568,729295,"TERMINAL",0,0,"3171010101010101",,terminal_output +569,730398,"TERMINAL",0,0,"4281111112",,terminal_output +570,731386,"TERMINAL",0,0,"5392222223",,terminal_output +571,732429,"TERMINAL",0,0,"64203333334",,terminal_output +572,733477,"TERMINAL",0,0,"7514444445",,terminal_output +573,734522,"TERMINAL",0,0,"8625555556",,terminal_output +574,735563,"TERMINAL",0,0,"9736666667",,terminal_output +575,737565,"TERMINAL",0,0,"30958888889",,terminal_output +576,740225,"TERMINAL",0,0,"21172020202020201:01",,terminal_output +577,740610,"TERMINAL",0,0,"2025-07-10 10:38:34.435862: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +578,740717,"TERMINAL",0,0,"2025-07-10 10:38:34.542349: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +579,740776,"TERMINAL",0,0,"2025-07-10 10:38:34.602055: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +580,740916,"TERMINAL",0,0,"2025-07-10 10:38:34.743703: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +581,742848,"TERMINAL",0,0,"44303333334",,terminal_output +582,743808,"TERMINAL",0,0,"2025-07-10 10:38:37.550162: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +583,743912,"TERMINAL",0,0,"2025-07-10 10:38:37.702265: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +584,743913,"TERMINAL",0,0,"7514444445",,terminal_output +585,744015,"TERMINAL",0,0,"2025-07-10 10:38:37.805313: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +586,744143,"TERMINAL",0,0,"2025-07-10 10:38:37.966032: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +587,744527,"TERMINAL",0,0,"2025-07-10 10:38:38.356607: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +588,744768,"TERMINAL",0,0,"2025-07-10 10:38:38.596352: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +589,744940,"TERMINAL",0,0,"8625555556",,terminal_output +590,746066,"TERMINAL",0,0,"9736666667",,terminal_output +591,746575,"TERMINAL",0,0,"2025-07-10 10:38:40.343950: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +592,746739,"TERMINAL",0,0,"2025-07-10 10:38:40.567781: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +593,747084,"TERMINAL",0,0,"40847777778",,terminal_output +594,748084,"TERMINAL",0,0,"1958888889",,terminal_output +595,748143,"TERMINAL",0,0,"2025-07-10 10:38:41.943296: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +596,748250,"TERMINAL",0,0,"2025-07-10 10:38:42.063782: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +597,749135,"TERMINAL",0,0,"220699999910",,terminal_output +598,749851,"TERMINAL",0,0,"2025-07-10 10:38:43.605190: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +599,750016,"TERMINAL",0,0,"2025-07-10 10:38:43.843504: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +600,750185,"TERMINAL",0,0,"3283131313131312",,terminal_output +601,751283,"TERMINAL",0,0,"5392222223",,terminal_output +602,752334,"TERMINAL",0,0,"64403333334",,terminal_output +603,753361,"TERMINAL",0,0,"7514444445",,terminal_output +604,754414,"TERMINAL",0,0,"8625555556",,terminal_output +605,755461,"TERMINAL",0,0,"9736666667",,terminal_output +606,756511,"TERMINAL",0,0,"50847777778",,terminal_output +607,757557,"TERMINAL",0,0,"1958888889",,terminal_output +608,758437,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +609,758604,"TERMINAL",0,0,"230699999920",,terminal_output +610,759682,"TERMINAL",0,0,"3174040404040401",,terminal_output +611,759792,"TERMINAL",0,0,"wandb: creating run\r\n",,terminal_output +612,759907,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_103852-vtned0wj\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-yolorun-tf-records-debug-mihir\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/vtned0wj\r\n",,terminal_output +613,760809,"TERMINAL",0,0,"4281111112",,terminal_output +614,761746,"TERMINAL",0,0,"5392222223",,terminal_output +615,762856,"TERMINAL",0,0,"64503333334",,terminal_output +616,763879,"TERMINAL",0,0,"7514444445",,terminal_output +617,764896,"TERMINAL",0,0,"8625555556",,terminal_output +618,766037,"TERMINAL",0,0,"9736666667",,terminal_output +619,767057,"TERMINAL",0,0,"9:00847777778",,terminal_output +620,768080,"TERMINAL",0,0,"1958888889",,terminal_output +621,769082,"TERMINAL",0,0,"240699999930",,terminal_output +622,770230,"TERMINAL",0,0,"3175050505050501",,terminal_output +623,771178,"TERMINAL",0,0,"4392222223",,terminal_output +624,772286,"TERMINAL",0,0,"649:003333334",,terminal_output +625,773317,"TERMINAL",0,0,"7514444445",,terminal_output +626,774325,"TERMINAL",0,0,"8625555556",,terminal_output +627,775357,"TERMINAL",0,0,"9736666667",,terminal_output +628,776393,"TERMINAL",0,0,"10847777778",,terminal_output +629,777439,"TERMINAL",0,0,"1958888889",,terminal_output +630,778489,"TERMINAL",0,0,"250699999940",,terminal_output +631,779647,"TERMINAL",0,0,"3171:001:001:001:001:001:001",,terminal_output +632,780674,"TERMINAL",0,0,"4281111112",,terminal_output +633,781635,"TERMINAL",0,0,"5392222223",,terminal_output 
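Editor's note: the wandb lines above (run dynamics-yolorun-tf-records-debug-mihir under instant-uv/jafar) correspond to the --name, --tags, --entity, and --project flags echoed from tester.sh. A minimal sketch of that plausible mapping, with values lifted from the log; wandb.init itself is the real API, the flag-to-argument wiring is an assumption:

import wandb

# Hypothetical mapping of the tester.sh flags seen earlier in this session.
run = wandb.init(
    entity="instant-uv",
    project="jafar",
    name="dynamics-yolorun-tf-records-debug-mihir",
    tags=["dynamics", "yolo-run", "tf_records"],
)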
+634,782673,"TERMINAL",0,0,"64103333334",,terminal_output +635,783733,"TERMINAL",0,0,"7514444445",,terminal_output +636,784872,"TERMINAL",0,0,"8625555556",,terminal_output +637,785899,"TERMINAL",0,0,"9736666667",,terminal_output +638,786923,"TERMINAL",0,0,"20847777778",,terminal_output +639,787884,"TERMINAL",0,0,"1958888889",,terminal_output +640,788927,"TERMINAL",0,0,"25:00699999950",,terminal_output +641,789995,"TERMINAL",0,0,"3171010101010101",,terminal_output +642,791021,"TERMINAL",0,0,"4281111112",,terminal_output +643,792054,"TERMINAL",0,0,"5392222223",,terminal_output +644,792673,"TERMINAL",0,0,"initializing jax distributed\r\njax distributed initialized\r\nRunning on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 229, in \r\n for videos in dataloader:\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 226, in \r\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in dataloader) # type: ignore\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tensorflow/python/data/ops/dataset_ops.py"", line 4788, in __next__\r\n return nest.map_structure(to_numpy, next(self._iterator))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tensorflow/python/data/ops/iterator_ops.py"", line 826, in __next__\r\n return self._next_internal()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tensorflow/python/data/ops/iterator_ops.py"", line 776, in _next_internal\r\n ret = gen_dataset_ops.iterator_get_next(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tensorflow/python/ops/gen_dataset_ops.py"", line 3086, in iterator_get_next\r\n _ops.raise_from_not_ok_status(e, name)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tensorflow/python/framework/ops.py"", line 6006, in raise_from_not_ok_status\r\n raise core._status_to_exception(e) from None # pylint: disable=protected-access\r\ntensorflow.python.framework.errors_impl.UnknownError: {{function_node __wrapped__IteratorGetNext_output_types_1_device_/job:localhost/replica:0/task:0/device:CPU:0}} /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord/shard-00447-of-00500.tfrecord; Input/output error [Op:IteratorGetNext] name: \r\n",,terminal_output +645,793086,"TERMINAL",0,0,"initializing jax distributed\r\njax distributed initialized\r\nRunning on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 229, in \r\n for videos in dataloader:\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 226, in \r\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in dataloader) # type: ignore\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tensorflow/python/data/ops/dataset_ops.py"", line 4788, in __next__\r\n return nest.map_structure(to_numpy, next(self._iterator))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tensorflow/python/data/ops/iterator_ops.py"", line 826, in __next__\r\n return self._next_internal()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tensorflow/python/data/ops/iterator_ops.py"", line 776, in _next_internal\r\n ret = gen_dataset_ops.iterator_get_next(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tensorflow/python/ops/gen_dataset_ops.py"", line 3086, in iterator_get_next\r\n _ops.raise_from_not_ok_status(e, name)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tensorflow/python/framework/ops.py"", line 6006, in raise_from_not_ok_status\r\n raise core._status_to_exception(e) from None # pylint: disable=protected-access\r\ntensorflow.python.framework.errors_impl.UnknownError: {{function_node __wrapped__IteratorGetNext_output_types_1_device_/job:localhost/replica:0/task:0/device:CPU:0}} /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord/shard-00249-of-00500.tfrecord; Input/output error [Op:IteratorGetNext] name: \r\n",,terminal_output +646,793148,"TERMINAL",0,0,"64203333334",,terminal_output +647,794202,"TERMINAL",0,0,"7514444445",,terminal_output +648,795121,"TERMINAL",0,0,"initializing jax distributed\r\njax distributed initialized\r\nRunning on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 229, in \r\n for videos in dataloader:\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 226, in \r\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in dataloader) # type: ignore\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tensorflow/python/data/ops/dataset_ops.py"", line 4788, in __next__\r\n return nest.map_structure(to_numpy, next(self._iterator))\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tensorflow/python/data/ops/iterator_ops.py"", line 826, in __next__\r\n return self._next_internal()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tensorflow/python/data/ops/iterator_ops.py"", line 776, in _next_internal\r\n ret = gen_dataset_ops.iterator_get_next(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tensorflow/python/ops/gen_dataset_ops.py"", line 3086, in iterator_get_next\r\n _ops.raise_from_not_ok_status(e, name)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tensorflow/python/framework/ops.py"", line 6006, in raise_from_not_ok_status\r\n raise core._status_to_exception(e) from None # pylint: 
disable=protected-access\r\ntensorflow.python.framework.errors_impl.UnknownError: {{function_node __wrapped__IteratorGetNext_output_types_1_device_/job:localhost/replica:0/task:0/device:CPU:0}} /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord/shard-00312-of-00500.tfrecord; Input/output error [Op:IteratorGetNext] name: \r\n",,terminal_output +649,795215,"TERMINAL",0,0,"8736666667",,terminal_output +650,796245,"TERMINAL",0,0,"30847777778",,terminal_output +651,797362,"TERMINAL",0,0,"1958888889",,terminal_output +652,798313,"TERMINAL",0,0,"21069999992:00",,terminal_output +653,799809,"TERMINAL",0,0,"3172020202020201",,terminal_output +654,800850,"TERMINAL",0,0,"4281111112",,terminal_output +655,801895,"TERMINAL",0,0,"5392222223",,terminal_output +656,802998,"TERMINAL",0,0,"64303333334",,terminal_output +657,803977,"TERMINAL",0,0,"7514444445",,terminal_output +658,805050,"TERMINAL",0,0,"8625555556",,terminal_output +659,806070,"TERMINAL",0,0,"9736666667",,terminal_output +660,807194,"TERMINAL",0,0,"40847777778",,terminal_output +661,808220,"TERMINAL",0,0,"1958888889",,terminal_output +662,809246,"TERMINAL",0,0,"221730303030303011",,terminal_output +663,810269,"TERMINAL",0,0,"4281111112",,terminal_output +664,811334,"TERMINAL",0,0,"5392222223",,terminal_output +665,812423,"TERMINAL",0,0,"64403333334",,terminal_output +666,813447,"TERMINAL",0,0,"7514444445",,terminal_output +667,814448,"TERMINAL",0,0,"8625555556",,terminal_output +668,815498,"TERMINAL",0,0,"9736666667",,terminal_output +669,816583,"TERMINAL",0,0,"50847777778",,terminal_output +670,817639,"TERMINAL",0,0,"1958888889",,terminal_output +671,818667,"TERMINAL",0,0,"230699999920",,terminal_output +672,819684,"TERMINAL",0,0,"3174040404040401",,terminal_output +673,820818,"TERMINAL",0,0,"4281111112",,terminal_output +674,821837,"TERMINAL",0,0,"5392222223",,terminal_output +675,822866,"TERMINAL",0,0,"64503333334",,terminal_output +676,823891,"TERMINAL",0,0,"7514444445",,terminal_output +677,825014,"TERMINAL",0,0,"8625555556",,terminal_output +678,825966,"TERMINAL",0,0,"9736666667",,terminal_output +679,827060,"TERMINAL",0,0,"40:00847777778",,terminal_output +680,828085,"TERMINAL",0,0,"1958888889",,terminal_output +681,828700,"TERMINAL",0,0,"2025-07-10 10:40:02.480332: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-10 10:40:02.480366: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +682,829109,"TERMINAL",0,0,"240699999930",,terminal_output +683,830243,"TERMINAL",0,0,"3175050505050501",,terminal_output +684,831177,"TERMINAL",0,0,"4392222223",,terminal_output +685,832284,"TERMINAL",0,0,"6420:003333334",,terminal_output +686,833308,"TERMINAL",0,0,"7514444445",,terminal_output +687,834355,"TERMINAL",0,0,"8625555556",,terminal_output +688,835360,"TERMINAL",0,0,"9736666667",,terminal_output +689,836410,"TERMINAL",0,0,"10847777778",,terminal_output +690,837458,"TERMINAL",0,0,"1958888889",,terminal_output +691,838505,"TERMINAL",0,0,"250699999940",,terminal_output +692,839551,"TERMINAL",0,0,"3172:002:002:002:002:002:001",,terminal_output +693,840594,"TERMINAL",0,0,"4281111112",,terminal_output +694,841705,"TERMINAL",0,0,"5392222223",,terminal_output +695,842729,"TERMINAL",0,0,"64103333334",,terminal_output +696,843757,"TERMINAL",0,0,"7514444445",,terminal_output +697,844777,"TERMINAL",0,0,"8625555556",,terminal_output +698,845820,"TERMINAL",0,0,"9736666667",,terminal_output +699,846929,"TERMINAL",0,0,"20847777778",,terminal_output +700,847954,"TERMINAL",0,0,"1958888889",,terminal_output +701,848971,"TERMINAL",0,0,"26:00699999950",,terminal_output +702,850110,"TERMINAL",0,0,"3171010101010101",,terminal_output +703,851127,"TERMINAL",0,0,"4281111112",,terminal_output +704,852122,"TERMINAL",0,0,"5392222223",,terminal_output +705,853177,"TERMINAL",0,0,"64203333334",,terminal_output +706,854215,"TERMINAL",0,0,"7625555556",,terminal_output +707,855327,"TERMINAL",0,0,"9736666667",,terminal_output +708,856358,"TERMINAL",0,0,"30847777778",,terminal_output +709,857353,"TERMINAL",0,0,"1958888889",,terminal_output +710,858401,"TERMINAL",0,0,"21069999993:00",,terminal_output +711,859448,"TERMINAL",0,0,"3172020202020201",,terminal_output +712,860500,"TERMINAL",0,0,"4281111112",,terminal_output +713,861533,"TERMINAL",0,0,"5392222223",,terminal_output +714,862609,"TERMINAL",0,0,"64303333334",,terminal_output +715,863722,"TERMINAL",0,0,"7514444445",,terminal_output +716,864663,"TERMINAL",0,0,"8625555556",,terminal_output +717,865708,"TERMINAL",0,0,"9736666667",,terminal_output +718,866745,"TERMINAL",0,0,"40847777778",,terminal_output +719,867819,"TERMINAL",0,0,"1958888889",,terminal_output +720,868842,"TERMINAL",0,0,"220699999910",,terminal_output +721,869970,"TERMINAL",0,0,"3173030303030301",,terminal_output +722,870940,"TERMINAL",0,0,"4281111112",,terminal_output +723,871240,"TERMINAL",0,0,"^Csrun: interrupt (one more within 1 sec to abort)\r\nsrun: StepId=3334543.1 tasks 0-7: running\r\n",,terminal_output +724,871808,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3334543.1\r\nsrun: forcing job termination\r\nsrun: Job step aborted: Waiting up to 32 seconds for job step to finish.\r\nslurmstepd: error: *** STEP 3334543.1 ON hkn0719 CANCELLED AT 2025-07-10T10:40:45 ***\r\n",,terminal_output +725,872010,"TERMINAL",0,0,"5392222223",,terminal_output +726,872023,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3334543.1\r\nsrun: job abort in progress\r\n",,terminal_output +727,872186,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3334543.1\r\n",,terminal_output +728,872386,"TERMINAL",0,0,"^C",,terminal_output +729,872632,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3334543.1\r\nsrun: job abort in progress\r\n",,terminal_output +730,872876,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3334543.1\r\n",,terminal_output 
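Editor's note: the UnknownError tracebacks above report Input/output errors on individual shards (00447, 00249, 00312) of open_ai_minecraft_tfrecord, and the session then browses to a replacement directory (open_ai_minecraft_tfrecord_uncurrupted-2, listed below). A minimal sketch for locating unreadable shards before training, assuming the shard naming seen in the listing and a hypothetical helper name (find_bad_shards); tf.data.TFRecordDataset, tf.errors.OpError, and glob are real APIs:

import glob
import tensorflow as tf

def find_bad_shards(pattern: str) -> list[str]:
    """Fully read each shard; collect those that raise mid-iteration."""
    bad = []
    for path in sorted(glob.glob(pattern)):
        try:
            for _ in tf.data.TFRecordDataset(path):
                pass  # touching every record forces any latent read error
        except tf.errors.OpError:
            bad.append(path)
    return bad

# e.g. find_bad_shards(".../open_ai_minecraft_tfrecord/shard-*-of-00500.tfrecord")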
+731,873026,"TERMINAL",0,0,"]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +732,873038,"TERMINAL",0,0,"64403333334",,terminal_output +733,873097,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +734,873367,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +735,874099,"TERMINAL",0,0,"7514444445",,terminal_output +736,875193,"TERMINAL",0,0,"8625555556",,terminal_output +737,875757,"TERMINAL",0,0,"watch",,terminal_focus +738,884913,"TERMINAL",0,0,"cd $ws_dir",,terminal_command +739,887163,"TERMINAL",0,0,"cd data",,terminal_command +740,887682,"TERMINAL",0,0,"ls",,terminal_command +741,887733,"TERMINAL",0,0,"]633;E;2025-07-10 10:41:01 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C",,terminal_output +742,887874,"TERMINAL",0,0,"checkpoints dummy_arrayrecords knoms_mp4 knoms_tfrecords_200_shards knoms_tfrecords_500_shards_overfit_10 open_ai_minecraft_first_try_tfrecord overfit_dir_openai_npy\r\ncoinrun dummy_arrayrecords_500_shards knoms_mp4_clips knoms_tfrecords_2_shards_overfit open_ai_minecraft open_ai_minecraft_npy overfit_dir_openai_tfrecord\r\ndata_knoms knoms_arrayrecords_500_shards knoms_npy knoms_tfrecords_500_shards open_ai_minecraft_first_try open_ai_minecraft_tfrecord procgen_env_16_episodes_20000\r\ndummy knoms_arrayrecords_500_shards_optimized_layout knoms_tfrecords knoms_tfrecords_500_shards_overfit_1 open_ai_minecraft_first_try_npy overfit_dir\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data]633;D;0",,terminal_output +743,889245,"TERMINAL",0,0,"cd ..",,terminal_command +744,889633,"TERMINAL",0,0,"ls",,terminal_command +745,889676,"TERMINAL",0,0,"]633;E;2025-07-10 10:41:03 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C",,terminal_output +746,889729,"TERMINAL",0,0,"checkpoints count_items.sh data data_new huggingface logs scripts\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared]633;D;0",,terminal_output +747,892232,"TERMINAL",0,0,"cd data_new/",,terminal_command +748,892547,"TERMINAL",0,0,"ls",,terminal_command +749,892592,"TERMINAL",0,0,"]633;E;2025-07-10 10:41:06 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C",,terminal_output +750,892687,"TERMINAL",0,0,"open_ai_minecraft open_ai_minecraft_arrayrecords_chunked open_ai_minecraft_arrayrecords_chunked_subset open_ai_minecraft_npy open_ai_minecraft_tfrecord_uncurrupted-2\r\nopen_ai_minecraft_arrayrecords open_ai_minecraft_arrayrecords_chunked_compressed open_ai_minecraft_arrayrecords_chunked_uncompressed open_ai_minecraft_tfrecord_uncurrupted\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new]633;D;0",,terminal_output +751,909444,"TERMINAL",0,0,"cd open_ai_minecraft_tfrecord_uncurrupted-2/",,terminal_command +752,909754,"TERMINAL",0,0,"ls",,terminal_command +753,909804,"TERMINAL",0,0,"]633;E;2025-07-10 10:41:23 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C",,terminal_output +754,910471,"TERMINAL",0,0,"shard-00000-of-00500.tfrecord shard-00072-of-00500.tfrecord shard-00144-of-00500.tfrecord shard-00216-of-00500.tfrecord shard-00288-of-00500.tfrecord shard-00360-of-00500.tfrecord shard-00432-of-00500.tfrecord\r\nshard-00001-of-00500.tfrecord shard-00073-of-00500.tfrecord shard-00145-of-00500.tfrecord shard-00217-of-00500.tfrecord shard-00289-of-00500.tfrecord 
shard-00361-of-00500.tfrecord shard-00433-of-00500.tfrecord\r\nshard-00002-of-00500.tfrecord shard-00074-of-00500.tfrecord shard-00146-of-00500.tfrecord shard-00218-of-00500.tfrecord shard-00290-of-00500.tfrecord shard-00362-of-00500.tfrecord shard-00434-of-00500.tfrecord\r\nshard-00003-of-00500.tfrecord shard-00075-of-00500.tfrecord shard-00147-of-00500.tfrecord shard-00219-of-00500.tfrecord shard-00291-of-00500.tfrecord shard-00363-of-00500.tfrecord shard-00435-of-00500.tfrecord\r\nshard-00004-of-00500.tfrecord shard-00076-of-00500.tfrecord shard-00148-of-00500.tfrecord shard-00220-of-00500.tfrecord shard-00292-of-00500.tfrecord shard-00364-of-00500.tfrecord shard-00436-of-00500.tfrecord\r\nshard-00005-of-00500.tfrecord shard-00077-of-00500.tfrecord shard-00149-of-00500.tfrecord shard-00221-of-00500.tfrecord shard-00293-of-00500.tfrecord shard-00365-of-00500.tfrecord shard-00437-of-00500.tfrecord\r\nshard-00006-of-00500.tfrecord shard-00078-of-00500.tfrecord shard-00150-of-00500.tfrecord shard-00222-of-00500.tfrecord shard-00294-of-00500.tfrecord shard-00366-of-00500.tfrecord shard-00438-of-00500.tfrecord\r\nshard-00007-of-00500.tfrecord shard-00079-of-00500.tfrecord shard-00151-of-00500.tfrecord shard-00223-of-00500.tfrecord shard-00295-of-00500.tfrecord shard-00367-of-00500.tfrecord shard-00439-of-00500.tfrecord\r\nshard-00008-of-00500.tfrecord shard-00080-of-00500.tfrecord shard-00152-of-00500.tfrecord shard-00224-of-00500.tfrecord shard-00296-of-00500.tfrecord shard-00368-of-00500.tfrecord shard-00440-of-00500.tfrecord\r\nshard-00009-of-00500.tfrecord shard-00081-of-00500.tfrecord shard-00153-of-00500.tfrecord shard-00225-of-00500.tfrecord shard-00297-of-00500.tfrecord shard-00369-of-00500.tfrecord shard-00441-of-00500.tfrecord\r\nshard-00010-of-00500.tfrecord shard-00082-of-00500.tfrecord shard-00154-of-00500.tfrecord shard-00226-of-00500.tfrecord shard-00298-of-00500.tfrecord shard-00370-of-00500.tfrecord shard-00442-of-00500.tfrecord\r\nshard-00011-of-00500.tfrecord shard-00083-of-00500.tfrecord shard-00155-of-00500.tfrecord shard-00227-of-00500.tfrecord shard-00299-of-00500.tfrecord shard-00371-of-00500.tfrecord shard-00443-of-00500.tfrecord\r\nshard-00012-of-00500.tfrecord shard-00084-of-00500.tfrecord shard-00156-of-00500.tfrecord shard-00228-of-00500.tfrecord shard-00300-of-00500.tfrecord shard-00372-of-00500.tfrecord shard-00444-of-00500.tfrecord\r\nshard-00013-of-00500.tfrecord shard-00085-of-00500.tfrecord shard-00157-of-00500.tfrecord shard-00229-of-00500.tfrecord shard-00301-of-00500.tfrecord shard-00373-of-00500.tfrecord shard-00445-of-00500.tfrecord\r\nshard-00014-of-00500.tfrecord shard-00086-of-00500.tfrecord shard-00158-of-00500.tfrecord shard-00230-of-00500.tfrecord shard-00302-of-00500.tfrecord shard-00374-of-00500.tfrecord shard-00446-of-00500.tfrecord\r\nshard-00015-of-00500.tfrecord shard-00087-of-00500.tfrecord shard-00159-of-00500.tfrecord shard-00231-of-00500.tfrecord shard-00303-of-00500.tfrecord shard-00375-of-00500.tfrecord shard-00447-of-00500.tfrecord\r\nshard-00016-of-00500.tfrecord shard-00088-of-00500.tfrecord shard-00160-of-00500.tfrecord shard-00232-of-00500.tfrecord shard-00304-of-00500.tfrecord shard-00376-of-00500.tfrecord shard-00448-of-00500.tfrecord\r\nshard-00017-of-00500.tfrecord shard-00089-of-00500.tfrecord shard-00161-of-00500.tfrecord shard-00233-of-00500.tfrecord shard-00305-of-00500.tfrecord shard-00377-of-00500.tfrecord shard-00449-of-00500.tfrecord\r\nshard-00018-of-00500.tfrecord shard-00090-of-00500.tfrecord 
shard-00162-of-00500.tfrecord shard-00234-of-00500.tfrecord shard-00306-of-00500.tfrecord shard-00378-of-00500.tfrecord shard-00450-of-00500.tfrecord\r\nshard-00019-of-00500.tfrecord shard-00091-of-00500.tfrecord shard-00163-of-00500.tfrecord shard-00235-of-00500.tfrecord shard-00307-of-00500.tfrecord shard-00379-of-00500.tfrecord shard-00451-of-00500.tfrecord\r\nshard-00020-of-00500.tfrecord shard-00092-of-00500.tfrecord shard-00164-of-00500.tfrecord shard-00236-of-00500.tfrecord shard-00308-of-00500.tfrecord shard-00380-of-00500.tfrecord shard-00452-of-00500.tfrecord\r\nshard-00021-of-00500.tfrecord shard-00093-of-00500.tfrecord shard-00165-of-00500.tfrecord shard-00237-of-00500.tfrecord shard-00309-of-00500.tfrecord shard-00381-of-00500.tfrecord shard-00453-of-00500.tfrecord\r\nshard-00022-of-00500.tfrecord shard-00094-of-00500.tfrecord shard-00166-of-00500.tfrecord shard-00238-of-00500.tfrecord shard-00310-of-00500.tfrecord shard-00382-of-00500.tfrecord shard-00454-of-00500.tfrecord\r\nshard-00023-of-00500.tfrecord shard-00095-of-00500.tfrecord shard-00167-of-00500.tfrecord shard-00239-of-00500.tfrecord shard-00311-of-00500.tfrecord shard-00383-of-00500.tfrecord shard-00455-of-00500.tfrecord\r\nshard-00024-of-00500.tfrecord shard-00096-of-00500.tfrecord shard-00168-of-00500.tfrecord shard-00240-of-00500.tfrecord shard-00312-of-00500.tfrecord shard-00384-of-00500.tfrecord shard-00456-of-00500.tfrecord\r\nshard-00025-of-00500.tfrecord shard-00097-of-00500.tfrecord shard-00169-of-00500.tfrecord shard-00241-of-00500.tfrecord shard-00313-of-00500.tfrecord shard-00385-of-00500.tfrecord shard-00457-of-00500.tfrecord\r\nshard-00026-of-00500.tfrecord shard-00098-of-00500.tfrecord shard-00170-of-00500.tfrecord shard-00242-of-00500.tfrecord shard-00314-of-00500.tfrecord shard-00386-of-00500.tfrecord shard-00458-of-00500.tfrecord\r\nshard-00027-of-00500.tfrecord shard-00099-of-00500.tfrecord shard-00171-of-00500.tfrecord shard-00243-of-00500.tfrecord shard-00315-of-00500.tfrecord shard-00387-of-00500.tfrecord shard-00459-of-00500.tfrecord\r\nshard-00028-of-00500.tfrecord shard-00100-of-00500.tfrecord shard-00172-of-00500.tfrecord shard-00244-of-00500.tfrecord shard-00316-of-00500.tfrecord shard-00388-of-00500.tfrecord shard-00460-of-00500.tfrecord\r\nshard-00029-of-00500.tfrecord shard-00101-of-00500.tfrecord shard-00173-of-00500.tfrecord shard-00245-of-00500.tfrecord shard-00317-of-00500.tfrecord shard-00389-of-00500.tfrecord shard-00461-of-00500.tfrecord\r\nshard-00030-of-00500.tfrecord shard-00102-of-00500.tfrecord shard-00174-of-00500.tfrecord shard-00246-of-00500.tfrecord shard-00318-of-00500.tfrecord shard-00390-of-00500.tfrecord shard-00462-of-00500.tfrecord\r\nshard-00031-of-00500.tfrecord shard-00103-of-00500.tfrecord shard-00175-of-00500.tfrecord shard-00247-of-00500.tfrecord shard-00319-of-00500.tfrecord shard-00391-of-00500.tfrecord shard-00463-of-00500.tfrecord\r\nshard-00032-of-00500.tfrecord shard-00104-of-00500.tfrecord shard-00176-of-00500.tfrecord shard-00248-of-00500.tfrecord shard-00320-of-00500.tfrecord shard-00392-of-00500.tfrecord shard-00464-of-00500.tfrecord\r\nshard-00033-of-00500.tfrecord shard-00105-of-00500.tfrecord shard-00177-of-00500.tfrecord shard-00249-of-00500.tfrecord shard-00321-of-00500.tfrecord shard-00393-of-00500.tfrecord shard-00465-of-00500.tfrecord\r\nshard-00034-of-00500.tfrecord shard-00106-of-00500.tfrecord shard-00178-of-00500.tfrecord shard-00250-of-00500.tfrecord shard-00322-of-00500.tfrecord shard-00394-of-00500.tfrecord 
shard-00466-of-00500.tfrecord\r\nshard-00035-of-00500.tfrecord shard-00107-of-00500.tfrecord shard-00179-of-00500.tfrecord shard-00251-of-00500.tfrecord shard-00323-of-00500.tfrecord shard-00395-of-00500.tfrecord shard-00467-of-00500.tfrecord\r\nshard-00036-of-00500.tfrecord shard-00108-of-00500.tfrecord shard-00180-of-00500.tfrecord shard-00252-of-00500.tfrecord shard-00324-of-00500.tfrecord shard-00396-of-00500.tfrecord shard-00468-of-00500.tfrecord\r\nshard-00037-of-00500.tfrecord shard-00109-of-00500.tfrecord shard-00181-of-00500.tfrecord shard-00253-of-00500.tfrecord shard-00325-of-00500.tfrecord shard-00397-of-00500.tfrecord shard-00469-of-00500.tfrecord\r\nshard-00038-of-00500.tfrecord shard-00110-of-00500.tfrecord shard-00182-of-00500.tfrecord shard-00254-of-00500.tfrecord shard-00326-of-00500.tfrecord shard-00398-of-00500.tfrecord shard-00470-of-00500.tfrecord\r\nshard-00039-of-00500.tfrecord shard-00111-of-00500.tfrecord shard-00183-of-00500.tfrecord shard-00255-of-00500.tfrecord shard-00327-of-00500.tfrecord shard-00399-of-00500.tfrecord shard-00471-of-00500.tfrecord\r\nshard-00040-of-00500.tfrecord shard-00112-of-00500.tfrecord shard-00184-of-00500.tfrecord shard-00256-of-00500.tfrecord shard-00328-of-00500.tfrecord shard-00400-of-00500.tfrecord shard-00472-of-00500.tfrecord\r\nshard-00041-of-00500.tfrecord shard-00113-of-00500.tfrecord shard-00185-of-00500.tfrecord shard-00257-of-00500.tfrecord shard-00329-of-00500.tfrecord shard-00401-of-00500.tfrecord shard-00473-of-00500.tfrecord\r\nshard-00042-of-00500.tfrecord shard-00114-of-00500.tfrecord shard-00186-of-00500.tfrecord shard-00258-of-00500.tfrecord shard-00330-of-00500.tfrecord shard-00402-of-00500.tfrecord shard-00474-of-00500.tfrecord\r\nshard-00043-of-00500.tfrecord shard-00115-of-00500.tfrecord shard-00187-of-00500.tfrecord shard-00259-of-00500.tfrecord shard-00331-of-00500.tfrecord shard-00403-of-00500.tfrecord shard-00475-of-00500.tfrecord\r\nshard-00044-of-00500.tfrecord shard-00116-of-00500.tfrecord shard-00188-of-00500.tfrecord shard-00260-of-00500.tfrecord shard-00332-of-00500.tfrecord shard-00404-of-00500.tfrecord shard-00476-of-00500.tfrecord\r\nshard-00045-of-00500.tfrecord shard-00117-of-00500.tfrecord shard-00189-of-00500.tfrecord shard-00261-of-00500.tfrecord shard-00333-of-00500.tfrecord shard-00405-of-00500.tfrecord shard-00477-of-00500.tfrecord\r\nshard-00046-of-00500.tfrecord shard-00118-of-00500.tfrecord shard-00190-of-00500.tfrecord shard-00262-of-00500.tfrecord shard-00334-of-00500.tfrecord shard-00406-of-00500.tfrecord shard-00478-of-00500.tfrecord\r\nshard-00047-of-00500.tfrecord shard-00119-of-00500.tfrecord shard-00191-of-00500.tfrecord shard-00263-of-00500.tfrecord shard-00335-of-00500.tfrecord shard-00407-of-00500.tfrecord shard-00479-of-00500.tfrecord\r\nshard-00048-of-00500.tfrecord shard-00120-of-00500.tfrecord shard-00192-of-00500.tfrecord shard-00264-of-00500.tfrecord shard-00336-of-00500.tfrecord shard-00408-of-00500.tfrecord shard-00480-of-00500.tfrecord\r\nshard-00049-of-00500.tfrecord shard-00121-of-00500.tfrecord shard-00193-of-00500.tfrecord shard-00265-of-00500.tfrecord shard-00337-of-00500.tfrecord shard-00409-of-00500.tfrecord shard-00481-of-00500.tfrecord\r\nshard-00050-of-00500.tfrecord shard-00122-of-00500.tfrecord shard-00194-of-00500.tfrecord shard-00266-of-00500.tfrecord shard-00338-of-00500.tfrecord shard-00410-of-00500.tfrecord shard-00482-of-00500.tfrecord\r\nshard-00051-of-00500.tfrecord shard-00123-of-00500.tfrecord shard-00195-of-00500.tfrecord 
shard-00267-of-00500.tfrecord shard-00339-of-00500.tfrecord shard-00411-of-00500.tfrecord shard-00483-of-00500.tfrecord\r\nshard-00052-of-00500.tfrecord shard-00124-of-00500.tfrecord shard-00196-of-00500.tfrecord shard-00268-of-00500.tfrecord shard-00340-of-00500.tfrecord shard-00412-of-00500.tfrecord shard-00484-of-00500.tfrecord\r\nshard-00053-of-00500.tfrecord shard-00125-of-00500.tfrecord shard-00197-of-00500.tfrecord shard-00269-of-00500.tfrecord shard-00341-of-00500.tfrecord shard-00413-of-00500.tfrecord shard-00485-of-00500.tfrecord\r\nshard-00054-of-00500.tfrecord shard-00126-of-00500.tfrecord shard-00198-of-00500.tfrecord shard-00270-of-00500.tfrecord shard-00342-of-00500.tfrecord shard-00414-of-00500.tfrecord shard-00486-of-00500.tfrecord\r\nshard-00055-of-00500.tfrecord shard-00127-of-00500.tfrecord shard-00199-of-00500.tfrecord shard-00271-of-00500.tfrecord shard-00343-of-00500.tfrecord shard-00415-of-00500.tfrecord shard-00487-of-00500.tfrecord\r\nshard-00056-of-00500.tfrecord shard-00128-of-00500.tfrecord shard-00200-of-00500.tfrecord shard-00272-of-00500.tfrecord shard-00344-of-00500.tfrecord shard-00416-of-00500.tfrecord shard-00488-of-00500.tfrecord\r\nshard-00057-of-00500.tfrecord shard-00129-of-00500.tfrecord shard-00201-of-00500.tfrecord shard-00273-of-00500.tfrecord shard-00345-of-00500.tfrecord shard-00417-of-00500.tfrecord shard-00489-of-00500.tfrecord\r\nshard-00058-of-00500.tfrecord shard-00130-of-00500.tfrecord shard-00202-of-00500.tfrecord shard-00274-of-00500.tfrecord shard-00346-of-00500.tfrecord shard-00418-of-00500.tfrecord shard-00490-of-00500.tfrecord\r\nshard-00059-of-00500.tfrecord shard-00131-of-00500.tfrecord shard-00203-of-00500.tfrecord shard-00275-of-00500.tfrecord shard-00347-of-00500.tfrecord shard-00419-of-00500.tfrecord shard-00491-of-00500.tfrecord\r\nshard-00060-of-00500.tfrecord shard-00132-of-00500.tfrecord shard-00204-of-00500.tfrecord shard-00276-of-00500.tfrecord shard-00348-of-00500.tfrecord shard-00420-of-00500.tfrecord shard-00492-of-00500.tfrecord\r\nshard-00061-of-00500.tfrecord shard-00133-of-00500.tfrecord shard-00205-of-00500.tfrecord shard-00277-of-00500.tfrecord shard-00349-of-00500.tfrecord shard-00421-of-00500.tfrecord shard-00493-of-00500.tfrecord\r\nshard-00062-of-00500.tfrecord shard-00134-of-00500.tfrecord shard-00206-of-00500.tfrecord shard-00278-of-00500.tfrecord shard-00350-of-00500.tfrecord shard-00422-of-00500.tfrecord shard-00494-of-00500.tfrecord\r\nshard-00063-of-00500.tfrecord shard-00135-of-00500.tfrecord shard-00207-of-00500.tfrecord shard-00279-of-00500.tfrecord shard-00351-of-00500.tfrecord shard-00423-of-00500.tfrecord shard-00495-of-00500.tfrecord\r\nshard-00064-of-00500.tfrecord shard-00136-of-00500.tfrecord shard-00208-of-00500.tfrecord shard-00280-of-00500.tfrecord shard-00352-of-00500.tfrecord shard-00424-of-00500.tfrecord shard-00496-of-00500.tfrecord\r\nshard-00065-of-00500.tfrecord shard-00137-of-00500.tfrecord shard-00209-of-00500.tfrecord shard-00281-of-00500.tfrecord shard-00353-of-00500.tfrecord shard-00425-of-00500.tfrecord shard-00497-of-00500.tfrecord\r\nshard-00066-of-00500.tfrecord shard-00138-of-00500.tfrecord shard-00210-of-00500.tfrecord shard-00282-of-00500.tfrecord shard-00354-of-00500.tfrecord shard-00426-of-00500.tfrecord shard-00498-of-00500.tfrecord\r\nshard-00067-of-00500.tfrecord shard-00139-of-00500.tfrecord shard-00211-of-00500.tfrecord shard-00283-of-00500.tfrecord shard-00355-of-00500.tfrecord shard-00427-of-00500.tfrecord 
shard-00499-of-00500.tfrecord\r\nshard-00068-of-00500.tfrecord shard-00140-of-00500.tfrecord shard-00212-of-00500.tfrecord shard-00284-of-00500.tfrecord shard-00356-of-00500.tfrecord shard-00428-of-00500.tfrecord\r\nshard-00069-of-00500.tfrecord shard-00141-of-00500.tfrecord shard-00213-of-00500.tfrecord shard-00285-of-00500.tfrecord shard-00357-of-00500.tfrecord shard-00429-of-00500.tfrecord\r\nshard-00070-of-00500.tfrecord shard-00142-of-00500.tfrecord shard-00214-of-00500.tfrecord shard-00286-of-00500.tfrecord shard-00358-of-00500.tfrecord shard-00430-of-00500.tfrecord\r\nshard-00071-of-00500.tfrecord shard-00143-of-00500.tfrecord shard-00215-of-00500.tfrecord shard-00287-of-00500.tfrecord shard-00359-of-00500.tfrecord shard-00431-of-00500.tfrecord\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_tfrecord_uncurrupted-2]633;D;0",,terminal_output +755,911948,"TERMINAL",0,0,"pwd",,terminal_command +756,914724,"TERMINAL",0,0,"srun",,terminal_focus +757,916900,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/yolo-runs/tester.sh",,terminal_output +758,917507,"TERMINAL",0,0,"\r",,terminal_output +759,918648,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"",shellscript,tab +760,919736,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",222,0,"",shellscript,selection_mouse +761,919900,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",218,4,"cord",shellscript,selection_mouse +762,919918,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",204,18,"minecraft_tfrecord",shellscript,selection_mouse +763,919973,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",191,31,"data/open_ai_minecraft_tfrecord",shellscript,selection_mouse +764,919975,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",179,43,"a_ws_shared/data/open_ai_minecraft_tfrecord",shellscript,selection_mouse +765,919975,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",119,103,"\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord",shellscript,selection_mouse +766,920197,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",140,82,"/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord",shellscript,selection_mouse +767,920213,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",139,83,"s/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord",shellscript,selection_mouse +768,920298,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",138,84,"fs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord",shellscript,selection_mouse +769,920364,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",137,85,"kfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord",shellscript,selection_mouse +770,920433,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",136,86,"hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord",shellscript,selection_mouse +771,921374,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",136,86,"",shellscript,content +772,921528,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",135,1,"",shellscript,content +773,921723,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",135,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_tfrecord_uncurrupted-2",shellscript,content +774,924034,"models/dynamics.py",0,0,"from typing import Dict, Any\n\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\n\nfrom utils.nn import STTransformer\n\n\nclass DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n 
num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(self.model_dim)\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)\n \n\n else:\n mask = None\n\n # --- Predict transition ---\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n logits = self.dynamics(vid_embed)\n return dict(token_logits=logits, mask=mask)\n",python,tab +775,927690,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/yolo-runs/tester.sh",,terminal_output +776,929387,"TERMINAL",0,0,"bash",,terminal_focus +777,930457,"TERMINAL",0,0,"srun",,terminal_focus +778,931086,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_tfrecord_uncurrupted-2\r\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\r\n\r\njob_name=""debug""\r\nslurm_job_id=""debug-mihir""\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_lam_action_space_scaling_20/3318547/lam_1751657975_200000/\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/train_tokenizer_batch_size_scaling_16_node/3321526/tokenizer_22000/\r\n\r\nenv | grep SLURM\r\n\r\nsrun python train_dynamics.py \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=96 \\r\n --min_lr=0 \\r\n --max_lr=1.4e-4 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --num_latent_actions=20 \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-yolorun-tf-records-$slurm_job_id \\r\n --tags dynamics yolo-run tf_records \\r\n --entity instant-uv \\r\n --project jafar \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $tf_records_dir \\r\n --lam_checkpoint=$lam_ckpt_dir\r\n ",,terminal_output 
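Note on the models/dynamics.py content recorded above (row 774): the training-time masking was changed so that masked token embeddings are overwritten with Gaussian noise instead of the learned mask_token (the replaced line is kept as a comment in the recording). Below is a minimal, hedged sketch of just that masking step, assuming (B, T, N, D)-shaped embeddings as in the recording; the standalone helper mask_with_noise and its signature are illustrative, not part of the repository.

```python
import jax
import jax.numpy as jnp


def mask_with_noise(vid_embed, mask_rng, mask_limit):
    """Mask (B, T, N, D) token embeddings with Gaussian noise.

    Mirrors the recorded change: sample a mask probability in
    [mask_limit, 1), draw a Bernoulli mask, keep frame 0 unmasked,
    then overwrite masked positions with noise instead of a mask token.
    """
    rng1, rng2, rng3 = jax.random.split(mask_rng, 3)
    mask_prob = jax.random.uniform(rng1, minval=mask_limit)
    mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])
    mask = mask.at[:, 0].set(False)  # the conditioning frame is never masked
    # As in the recording, a single (1, 1, 1, D) noise vector is drawn and
    # broadcast to every masked position.
    noise = jax.random.normal(rng3, (1, 1, 1, vid_embed.shape[-1]))
    return jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed), mask
```

One visible trade-off of this choice: masked positions no longer share a single learned "this is masked" embedding, so the dynamics transformer must infer masking from context rather than from a fixed token.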
+779,931232,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=409404\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0719\r\nSLURM_JOB_START_TIME=1752136462\r\nSLURM_STEP_NODELIST=hkn0719\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1752179662\r\nSLURM_PMI2_SRUN_PORT=43929\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=2\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3334543\r\nSLURM_PTY_PORT=44699\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=48\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=8\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0719\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=228\r\nSLURM_NODELIST=hkn[0719,0806]\r\nSLURM_SRUN_COMM_PORT=38371\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=8\r\nSLURM_NNODES=2\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3334543\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0719\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38371\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0719,0806]\r\n",,terminal_output +780,931382,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +781,932245,"TERMINAL",0,0,"bash",,terminal_focus +782,935927,"TERMINAL",0,0,"2025-07-10 10:41:49.630596: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:41:49.630930: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:41:49.631038: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:41:49.632004: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136909.643456 416473 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136909.644107 416472 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136909.644064 416474 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory 
for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136909.646193 416471 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1752136909.647886 416473 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1752136909.648234 416474 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1752136909.648765 416472 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1752136909.650493 416471 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1752136909.662179 416473 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136909.662196 416473 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136909.662198 416473 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136909.662200 416473 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136909.662169 416474 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136909.662186 416474 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136909.662187 416474 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136909.662189 416474 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136909.662606 416472 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136909.662621 416472 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136909.662623 416472 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136909.662625 416472 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136909.663604 416471 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136909.663621 416471 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136909.663624 416471 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136909.663627 416471 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +783,936853,"TERMINAL",0,0,"2025-07-10 10:41:50.633047: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:41:50.633514: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:41:50.633607: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-10 10:41:50.633828: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136910.646374 2694299 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136910.646688 2694298 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136910.646688 2694300 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1752136910.646882 2694301 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1752136910.650882 2694298 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1752136910.650896 2694299 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1752136910.650958 2694301 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1752136910.651058 2694300 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1752136910.664155 2694298 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136910.664168 2694298 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136910.664170 2694298 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136910.664171 2694298 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136910.664149 2694300 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136910.664164 2694300 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136910.664166 2694300 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136910.664167 2694300 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136910.664359 2694301 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136910.664376 2694301 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136910.664378 2694301 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136910.664379 2694301 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136910.664581 2694299 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136910.664597 2694299 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136910.664599 2694299 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1752136910.664600 2694299 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +784,943594,"TERMINAL",0,0,"W0000 00:00:1752136917.419468 416471 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136917.419468 416473 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136917.419470 416474 gpu_device.cc:2341] Cannot dlopen some GPU libraries. 
Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136917.420374 416472 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +785,945233,"TERMINAL",0,0,"W0000 00:00:1752136919.041799 2694301 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136919.042423 2694298 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136919.042442 2694299 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1752136919.042323 2694300 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +786,979678,"TERMINAL",0,0,"2025-07-10 10:42:33.507785: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +787,979735,"TERMINAL",0,0,"2025-07-10 10:42:33.548295: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +788,980666,"TERMINAL",0,0,"2025-07-10 10:42:34.485546: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +789,981076,"TERMINAL",0,0,"2025-07-10 10:42:34.854398: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +790,982790,"TERMINAL",0,0,"2025-07-10 10:42:36.616577: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +791,982853,"TERMINAL",0,0,"2025-07-10 10:42:36.675008: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +792,983841,"TERMINAL",0,0,"2025-07-10 10:42:37.635655: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +793,984145,"TERMINAL",0,0,"2025-07-10 10:42:37.909600: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +794,984480,"TERMINAL",0,0,"2025-07-10 10:42:38.309708: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +795,984685,"TERMINAL",0,0,"2025-07-10 10:42:38.507174: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +796,985679,"TERMINAL",0,0,"2025-07-10 10:42:39.497054: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +797,985805,"TERMINAL",0,0,"2025-07-10 10:42:39.634759: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +798,987935,"TERMINAL",0,0,"2025-07-10 10:42:41.764020: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +799,988078,"TERMINAL",0,0,"2025-07-10 10:42:41.906492: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +800,988996,"TERMINAL",0,0,"2025-07-10 10:42:42.824000: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +801,989061,"TERMINAL",0,0,"2025-07-10 10:42:42.885144: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +802,998107,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +803,999096,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_104251-3wth3u43\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-yolorun-tf-records-debug-mihir\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/3wth3u43\r\n",,terminal_output +804,1057028,"TERMINAL",0,0,"srun",,terminal_focus +805,1062482,"TERMINAL",0,0,"2025-07-10 10:43:56.310693: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +806,1066366,"TERMINAL",0,0,"2025-07-10 10:44:00.196874: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-10 10:44:00.196920: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +807,1234339,"TERMINAL",0,0,"^Csrun: interrupt (one more within 1 sec to abort)\r\nsrun: StepId=3334543.2 tasks 0-7: running\r\n",,terminal_output +808,1234662,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3334543.2\r\nsrun: forcing job termination\r\nsrun: Job step aborted: Waiting up to 32 seconds for job step to finish.\r\nslurmstepd: error: *** STEP 3334543.2 ON hkn0719 CANCELLED AT 2025-07-10T10:46:48 ***\r\n",,terminal_output +809,1234874,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3334543.2\r\nsrun: job abort in progress\r\n",,terminal_output +810,1235036,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3334543.2\r\n",,terminal_output +811,1235703,"TERMINAL",0,0,"^C",,terminal_output +812,1235854,"TERMINAL",0,0,"]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +813,1235926,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +814,1850379,"TERMINAL",0,0,"bash",,terminal_focus +815,1881220,"TERMINAL",0,0,"cd /pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords",,terminal_command +816,1881281,"TERMINAL",0,0,"]633;E;2025-07-10 10:57:35 cd /pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords;46e99295-9803-4583-b36f-e400fb9e619d]633;C]0;tum_cte0515@hkn1990:/pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords]633;D;0",,terminal_output +817,1881699,"TERMINAL",0,0,"ls",,terminal_command +818,1884155,"TERMINAL",0,0,"cd 10fps_160x90/",,terminal_command +819,1885090,"TERMINAL",0,0,"ls",,terminal_command +820,1885141,"TERMINAL",0,0,"]633;E;2025-07-10 10:57:38 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C",,terminal_output +821,1923374,"TERMINAL",0,0,"^C\r\n]0;tum_cte0515@hkn1990:/pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords/10fps_160x90]633;D;130",,terminal_output +822,1929380,"TERMINAL",0,0,"ls -l | wc -l",,terminal_command +823,1929419,"TERMINAL",0,0,"]633;E;2025-07-10 10:58:23 ls -l | wc -l;46e99295-9803-4583-b36f-e400fb9e619d]633;C",,terminal_output +824,1983289,"TERMINAL",0,0,"57191\r\n]0;tum_cte0515@hkn1990:/pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords/10fps_160x90]633;D;0",,terminal_output +825,2181335,"TERMINAL",0,0,"srun",,terminal_focus +826,2547524,"TERMINAL",0,0,"bash",,terminal_focus +827,2553498,"TERMINAL",0,0,"pwd",,terminal_command +828,2558461,"TERMINAL",0,0,"srun",,terminal_focus +829,2569055,"train_dynamics.py",0,0,"",python,tab +830,2569775,"TERMINAL",0,0,"bash",,terminal_focus +831,2571281,"TERMINAL",0,0,"srun",,terminal_focus +832,2572764,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"",shellscript,tab +833,2573540,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",744,0,"",shellscript,selection_mouse +834,2574205,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",118,0,"",shellscript,selection_mouse +835,2574774,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",306,0,"",shellscript,selection_mouse +836,2575453,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",240,0,"",shellscript,selection_mouse +837,2575688,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",239,1,"2",shellscript,selection_mouse 
+838,2577956,"TERMINAL",0,0,"[?25lgi[?25h",,terminal_output +839,2578017,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +840,2578137,"TERMINAL",0,0,"[?25lt[?25h[?25l [?25h",,terminal_output +841,2578380,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +842,2578443,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +843,2578496,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +844,2578676,"TERMINAL",0,0,"[?25lc[?25h[?25lk[?25h",,terminal_output +845,2578867,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +846,2578929,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +847,2579188,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +848,2579784,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +849,2579847,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +850,2580266,"TERMINAL",0,0,"[?25lr[?25h[?25lu[?25h",,terminal_output +851,2580526,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +852,2580682,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +853,2580745,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +854,2580797,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +855,2580963,"TERMINAL",0,0,"[?25l-[?25h",,terminal_output +856,2581262,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +857,2581327,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +858,2581933,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +859,2582029,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +860,2582644,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +861,2582707,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +862,2582778,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +863,2582972,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +864,2583087,"TERMINAL",0,0,"error: Your local changes to the following files would be overwritten by checkout:\r\n\ttrain_dynamics.py\r\nPlease commit your changes or stash them before you switch branches.\r\nAborting\r\n]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +865,2585623,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +866,2585705,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +867,2585773,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +868,2585881,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +869,2586061,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +870,2586214,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +871,2586275,"TERMINAL",0,0,"[?25lf[?25h",,terminal_output +872,2586487,"TERMINAL",0,0,"[?25lf[?25h[?25l [?25h",,terminal_output +873,2587905,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +874,2588113,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +875,2588402,"TERMINAL",0,0,"ain_",,terminal_output +876,2589567,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +877,2589779,"TERMINAL",0,0,"[?25ly[?25h",,terminal_output +878,2589981,"TERMINAL",0,0,"namics.py",,terminal_output +879,2590343,"TERMINAL",0,0,"\r\n[?2004l\r[?1h=\rdiff --git a/train_dynamics.py b/train_dynamics.py\r\nindex 2864031..cc1f62d 100644\r\n--- a/train_dynamics.py\r\n+++ b/train_dynamics.py\r\n@@ -115,7 +115,9 @@ def train_step(state, inputs):\r\n \r\n \r\n if __name__ == ""__main__"":\r\n+ print(""initializing jax distributed"")\r\n jax.distributed.initialize()\r\n+ print(""jax distributed initialized"")\r\n num_devices = jax.device_count()\r\n if num_devices == 0:\r\n raise ValueError(""No JAX devices found."")\r\n\r[?1l>]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +880,2591327,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +881,2591389,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +882,2591537,"TERMINAL",0,0,"[?25lt[?25h[?25l [?25h",,terminal_output 
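The git diff recorded just above (row 879) brackets jax.distributed.initialize() in train_dynamics.py with prints to localize where a multi-node launch stalls. A minimal sketch of that debugging pattern under a SLURM launch, as in the recording; the per-rank prefix via SLURM_PROCID is an added assumption, not part of the recorded diff.

```python
import os
import jax

rank = os.environ.get("SLURM_PROCID", "?")  # hypothetical per-rank log prefix
print(f"[rank {rank}] initializing jax distributed", flush=True)
jax.distributed.initialize()  # blocks until all processes have connected
print(f"[rank {rank}] jax distributed initialized", flush=True)
print(f"[rank {rank}] global devices: {jax.device_count()}", flush=True)
```

If the first print appears on every rank but the second never does, the hang is inside the distributed handshake (e.g. a rank that never started or an unreachable coordinator) rather than later in the training script.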
+883,2591778,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +884,2592020,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +885,2592141,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +886,2592235,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +887,2592292,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +888,2592525,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +889,2592867,"TERMINAL",0,0,"Saved working directory and index state WIP on runner: 316eae6 removed tmp\r\n",,terminal_output +890,2592938,"TERMINAL",0,0,"]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +891,2593194,"TERMINAL",0,0,"git stash",,terminal_output +892,2593364,"TERMINAL",0,0,"diff train_dynamics.py",,terminal_output +893,2594232,"TERMINAL",0,0,"\r\n[?2004l\r[?1h=\r\r[?1l>]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +894,2597204,"TERMINAL",0,0,"git diff train_dynamics.py",,terminal_output +895,2597392,"TERMINAL",0,0,"stash",,terminal_output +896,2597768,"TERMINAL",0,0,"diff train_dynamics.py",,terminal_output +897,2598258,"TERMINAL",0,0,"checkout runner-grain",,terminal_output +898,2598793,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +899,2598992,"TERMINAL",0,0,"Switched to branch 'runner-grain'\r\n]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +900,2600366,"",0,0,"Switched from branch 'runner' to 'runner-grain'",,git_branch_checkout +901,2600712,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom genie import Genie, restore_genie_components\nfrom models.tokenizer import TokenizerVQVAE\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n # Optimization\n batch_size: int = 36\n min_lr: float = 3e-6\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n 
outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n print(""initializing jax distributed"")\n jax.distributed.initialize()\n print(""jax distributed initialized"")\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=jnp.float32\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args,\n )\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = 
optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Restore checkpoint ---\n train_state = restore_genie_components(\n train_state, replicated_sharding, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in dataloader) # type: ignore\n step = 0\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""genie_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +902,2600872,"train_dynamics.py",862,6248," data_dir: str = """"\n # Optimization\n batch_size: int = 36\n min_lr: float = 0.0\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 
250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=jnp.float32\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args,\n )\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n 
print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Restore checkpoint ---\n train_state = restore_genie_components(\n train_state, replicated_sharding, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n array_record_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n",python,content +903,2602201,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_tfrecord_uncurrupted-2\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_lam_action_space_scaling_20/3318547/lam_1751657975_200000/\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/train_tokenizer_batch_size_scaling_16_node/3321526/tokenizer_22000/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log \\n --num_latent_actions=20 \\n --log_checkpoint_interval=1000 \\n --name=dynamics-yolorun-tf-records-$slurm_job_id \\n --tags dynamics yolo-run tf_records \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $tf_records_dir \\n --lam_checkpoint=$lam_ckpt_dir\n ",shellscript,tab +904,2603505,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",119,0,"",shellscript,selection_mouse +905,2604077,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",240,0,"",shellscript,selection_mouse +906,2604240,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",229,11,"currupted-2",shellscript,selection_mouse +907,2604256,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",195,45,"_new/open_ai_minecraft_tfrecord_uncurrupted-2",shellscript,selection_mouse +908,2604312,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",119,121,"\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_tfrecord_uncurrupted-2",shellscript,selection_mouse +909,2604313,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",118,122,"\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_tfrecord_uncurrupted-2",shellscript,selection_mouse 
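The `dynamics_loss_fn` captured in the tab above averages cross-entropy and accuracy only over masked tokens rather than the whole sequence. A minimal sketch of just that masking step, with shapes assumed for illustration (batch, time, patches, vocab):

```python
# Minimal sketch of the masked loss in the recorded dynamics_loss_fn.
# Shapes are assumptions: logits (B, T, N, V); labels and mask (B, T, N).
import jax.numpy as jnp
import optax

def masked_ce(logits, labels, mask):
    # Per-token cross entropy against integer labels, as in the recorded file.
    ce = optax.softmax_cross_entropy_with_integer_labels(logits, labels)
    # Normalize by the number of masked tokens, not the full sequence length.
    ce_loss = (mask * ce).sum() / mask.sum()
    acc = (mask * (logits.argmax(-1) == labels)).sum() / mask.sum()
    return ce_loss, acc

loss, acc = masked_ce(
    jnp.zeros((2, 16, 8, 1024)),
    jnp.zeros((2, 16, 8), dtype=jnp.int32),
    jnp.ones((2, 16, 8)),
)
```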
+910,2604394,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",119,121,"\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_tfrecord_uncurrupted-2",shellscript,selection_mouse +911,2604610,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",140,100,"/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_tfrecord_uncurrupted-2",shellscript,selection_mouse +912,2604624,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",138,102,"fs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_tfrecord_uncurrupted-2",shellscript,selection_mouse +913,2604674,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",137,103,"kfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_tfrecord_uncurrupted-2",shellscript,selection_mouse +914,2604675,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",136,104,"hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_tfrecord_uncurrupted-2",shellscript,selection_mouse +915,2604675,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",135,105,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_tfrecord_uncurrupted-2",shellscript,selection_mouse +916,2604728,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",134,106,"=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_tfrecord_uncurrupted-2",shellscript,selection_mouse +917,2605306,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",135,105,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_tfrecord_uncurrupted-2",shellscript,selection_mouse +918,2605961,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",135,105,"",shellscript,content +919,2606266,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",135,0,"/pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords/10fps_160x90",shellscript,content +920,2611730,"TERMINAL",0,0,"git checkout runner-grain",,terminal_output +921,2612002,"TERMINAL",0,0,"diff train_dynamics.py",,terminal_output +922,2612375,"TERMINAL",0,0,"stash",,terminal_output +923,2612585,"TERMINAL",0,0,"diff train_dynamics.py",,terminal_output +924,2612950,"TERMINAL",0,0,"checkout runner-grain",,terminal_output +925,2613261,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/yolo-runs/tester.sh",,terminal_output +926,2615060,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"",shellscript,tab +927,2616738,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1254,0,"",shellscript,selection_mouse +928,2616902,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1254,2,"la",shellscript,selection_mouse +929,2616918,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1254,4,"lam_",shellscript,selection_mouse +930,2616934,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1254,6,"lam_ck",shellscript,selection_mouse +931,2616989,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1254,8,"lam_ckpt",shellscript,selection_mouse +932,2616991,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1254,9,"lam_ckpt_",shellscript,selection_mouse +933,2616992,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1254,10,"lam_ckpt_d",shellscript,selection_mouse +934,2617001,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1254,11,"lam_ckpt_di",shellscript,selection_mouse +935,2617059,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1254,12,"lam_ckpt_dir",shellscript,selection_mouse +936,2619550,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1254,12,"t",shellscript,content 
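The content diff recorded at sequence 902 swaps the `.tfrecord` file listing for an `.array_record` one, and the tester.sh edit above repoints the data directory at the arrayrecords path to match. A hedged sketch of that discovery step (the suffix default and the commented example path are assumptions, not part of the recorded code):

```python
# Sketch of the record-file discovery switched in the recorded diff.
import os

def list_record_files(data_dir: str, suffix: str = ".array_record") -> list[str]:
    # train_dynamics.py passes the resulting list straight to get_dataloader.
    return sorted(
        os.path.join(data_dir, name)
        for name in os.listdir(data_dir)
        if name.endswith(suffix)
    )

# files = list_record_files("/path/to/arrayrecords")  # hypothetical path
```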
+937,2619552,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1255,0,"",shellscript,selection_keyboard +938,2619702,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1255,0,"f",shellscript,content +939,2619703,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1256,0,"",shellscript,selection_keyboard +940,2620485,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1256,0,"_",shellscript,content +941,2620486,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1257,0,"",shellscript,selection_keyboard +942,2620675,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1257,0,"r",shellscript,content +943,2620677,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1258,0,"",shellscript,selection_keyboard +944,2620852,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1258,0,"e",shellscript,content +945,2620853,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1259,0,"",shellscript,selection_keyboard +946,2621008,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1259,0,"c",shellscript,content +947,2621009,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1260,0,"",shellscript,selection_keyboard +948,2621109,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1260,0,"o",shellscript,content +949,2621110,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1261,0,"",shellscript,selection_keyboard +950,2621273,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1261,0,"r",shellscript,content +951,2621274,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1262,0,"",shellscript,selection_keyboard +952,2621422,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1262,0,"d",shellscript,content +953,2621423,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1263,0,"",shellscript,selection_keyboard +954,2621557,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1263,0,"s",shellscript,content +955,2621558,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1264,0,"",shellscript,selection_keyboard +956,2621819,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1264,0,"_",shellscript,content +957,2621820,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1265,0,"",shellscript,selection_keyboard +958,2622013,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1265,0,"d",shellscript,content +959,2622014,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1266,0,"",shellscript,selection_keyboard +960,2622170,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1266,0,"i",shellscript,content +961,2622171,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1267,0,"",shellscript,selection_keyboard +962,2622225,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1267,0,"r",shellscript,content +963,2622226,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1268,0,"",shellscript,selection_keyboard +964,2624411,"TERMINAL",0,0,"[?25l#[?25h",,terminal_output +965,2625331,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\ntf_records_dir=/pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords/10fps_160x90\r\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\r\n\r\njob_name=""debug""\r\nslurm_job_id=""debug-mihir""\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_lam_action_space_scaling_20/3318547/lam_1751657975_200000/\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/train_tokenizer_batch_size_scaling_16_node/3321526/tokenizer_22000/\r\n\r\nenv | grep SLURM\r\n\r\nsrun python train_dynamics.py \\r\n 
--ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=96 \\r\n --min_lr=0 \\r\n --max_lr=1.4e-4 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --num_latent_actions=20 \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-yolorun-tf-records-$slurm_job_id \\r\n --tags dynamics yolo-run tf_records \\r\n --entity instant-uv \\r\n --project jafar \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $tf_records_dir \\r\n --lam_checkpoint=$tf_records_dir\r\n ",,terminal_output +966,2625447,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=409404\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0719\r\nSLURM_JOB_START_TIME=1752136462\r\nSLURM_STEP_NODELIST=hkn0719\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1752179662\r\nSLURM_PMI2_SRUN_PORT=43929\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=2\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3334543\r\nSLURM_PTY_PORT=44699\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=48\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=8\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0719\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=228\r\nSLURM_NODELIST=hkn[0719,0806]\r\nSLURM_SRUN_COMM_PORT=38371\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=8\r\nSLURM_NNODES=2\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3334543\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0719\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38371\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0719,0806]\r\n",,terminal_output +967,2625584,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +968,2667629,"TERMINAL",0,0,"2025-07-10 11:10:41.459076: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +969,2668236,"TERMINAL",0,0,"2025-07-10 11:10:41.961632: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-10 11:10:41.969242: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +970,2668670,"TERMINAL",0,0,"2025-07-10 11:10:42.499646: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +971,2670684,"TERMINAL",0,0,"2025-07-10 11:10:44.507459: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +972,2671204,"TERMINAL",0,0,"2025-07-10 11:10:44.936335: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-10 11:10:45.004044: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +973,2671459,"TERMINAL",0,0,"2025-07-10 11:10:45.284026: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +974,2671655,"TERMINAL",0,0,"2025-07-10 11:10:45.484795: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +975,2671836,"TERMINAL",0,0,"2025-07-10 11:10:45.662156: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +976,2673763,"TERMINAL",0,0,"2025-07-10 11:10:47.583173: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +977,2674700,"TERMINAL",0,0,"2025-07-10 11:10:48.527924: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +978,2674864,"TERMINAL",0,0,"2025-07-10 11:10:48.691177: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +979,2675280,"TERMINAL",0,0,"2025-07-10 11:10:49.107314: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +980,2677022,"TERMINAL",0,0,"2025-07-10 11:10:50.805394: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +981,2678268,"TERMINAL",0,0,"2025-07-10 11:10:52.094526: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +982,2685545,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +983,2686121,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_111059-nodygl4a\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-yolorun-tf-records-debug-mihir\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/nodygl4a\r\n",,terminal_output +984,2715300,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 207, in \r\n train_state = restore_genie_components(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 282, in restore_genie_components\r\n PyTreeCheckpointer()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\r\n restored = self._restore(directory, args=ckpt_args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\r\n return self._handler.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 803, in restore\r\n structure, 
use_zarr3_metadata = self._get_internal_metadata(directory)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 959, in _get_internal_metadata\r\n raise FileNotFoundError(\r\nFileNotFoundError: No structure could be identified for the checkpoint at /pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords/10fps_160x90.\r\nRunning on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 207, in \r\n train_state = restore_genie_components(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 282, in restore_genie_components\r\n PyTreeCheckpointer()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\r\n restored = self._restore(directory, args=ckpt_args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\r\n return self._handler.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 803, in restore\r\n structure, use_zarr3_metadata = self._get_internal_metadata(directory)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 959, in _get_internal_metadata\r\n raise FileNotFoundError(\r\nFileNotFoundError: No structure could be identified for the checkpoint at /pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords/10fps_160x90.\r\n",,terminal_output +985,2715358,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 207, in \r\n train_state = restore_genie_components(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 282, in restore_genie_components\r\n PyTreeCheckpointer()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\r\n restored = self._restore(directory, args=ckpt_args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\r\n return self._handler.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 803, in restore\r\n structure, use_zarr3_metadata = 
self._get_internal_metadata(directory)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 959, in _get_internal_metadata\r\n raise FileNotFoundError(\r\nFileNotFoundError: No structure could be identified for the checkpoint at /pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords/10fps_160x90.\r\n",,terminal_output +986,2715420,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 207, in \r\n train_state = restore_genie_components(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 282, in restore_genie_components\r\n PyTreeCheckpointer()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\r\n restored = self._restore(directory, args=ckpt_args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\r\n return self._handler.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 803, in restore\r\n structure, use_zarr3_metadata = self._get_internal_metadata(directory)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 959, in _get_internal_metadata\r\n raise FileNotFoundError(\r\nFileNotFoundError: No structure could be identified for the checkpoint at /pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords/10fps_160x90.\r\n",,terminal_output +987,2716915,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 207, in \r\n train_state = restore_genie_components(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 282, in restore_genie_components\r\n PyTreeCheckpointer()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\r\n restored = self._restore(directory, args=ckpt_args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\r\n return self._handler.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 803, in restore\r\n structure, use_zarr3_metadata = 
self._get_internal_metadata(directory)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 959, in _get_internal_metadata\r\n raise FileNotFoundError(\r\nFileNotFoundError: No structure could be identified for the checkpoint at /pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords/10fps_160x90.\r\n",,terminal_output +988,2717384,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 207, in \r\n train_state = restore_genie_components(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 282, in restore_genie_components\r\n PyTreeCheckpointer()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\r\n restored = self._restore(directory, args=ckpt_args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\r\n return self._handler.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 803, in restore\r\n structure, use_zarr3_metadata = self._get_internal_metadata(directory)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 959, in _get_internal_metadata\r\n raise FileNotFoundError(\r\nFileNotFoundError: No structure could be identified for the checkpoint at /pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords/10fps_160x90.\r\n",,terminal_output +989,2717442,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 207, in \r\n train_state = restore_genie_components(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 282, in restore_genie_components\r\n PyTreeCheckpointer()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\r\n restored = self._restore(directory, args=ckpt_args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\r\n return self._handler.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 803, in restore\r\n structure, use_zarr3_metadata = 
self._get_internal_metadata(directory)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 959, in _get_internal_metadata\r\n raise FileNotFoundError(\r\nFileNotFoundError: No structure could be identified for the checkpoint at /pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords/10fps_160x90.\r\n",,terminal_output +990,2717679,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 207, in \r\n train_state = restore_genie_components(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 282, in restore_genie_components\r\n PyTreeCheckpointer()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\r\n restored = self._restore(directory, args=ckpt_args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\r\n return self._handler.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 803, in restore\r\n structure, use_zarr3_metadata = self._get_internal_metadata(directory)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 959, in _get_internal_metadata\r\n raise FileNotFoundError(\r\nFileNotFoundError: No structure could be identified for the checkpoint at /pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords/10fps_160x90.\r\n",,terminal_output +991,2718337,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run dynamics-yolorun-tf-records-debug-mihir at: https://wandb.ai/instant-uv/jafar/runs/nodygl4a\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_111059-nodygl4a/logs\r\n",,terminal_output +992,2719806,"TERMINAL",0,0,"srun: error: hkn0806: task 5: Exited with exit code 1\r\n",,terminal_output +993,2719960,"TERMINAL",0,0,"srun: error: hkn0719: tasks 0-1: Exited with exit code 1\r\nsrun: error: hkn0806: tasks 4,6: Exited with exit code 1\r\n",,terminal_output +994,2720171,"TERMINAL",0,0,"srun: error: hkn0806: task 7: Exited with exit code 1\r\n",,terminal_output +995,2720279,"TERMINAL",0,0,"srun: error: hkn0719: tasks 2-3: Exited with exit code 1\r\n]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +996,3440098,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/yolo-runs/tester.sh",,terminal_output +997,3444649,"TERMINAL",0,0,"\r",,terminal_output +998,3445805,"TERMINAL",0,0,"bash",,terminal_focus +999,3447495,"TERMINAL",0,0,"srun",,terminal_focus +1000,3449788,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"",shellscript,tab 
+1001,3455184,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1267,0,"",shellscript,selection_mouse +1002,3456210,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1254,14,"lam_ckpt_dir",shellscript,content +1003,3456534,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",135,110,"",shellscript,content +1004,3457480,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",135,0,"/pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords/10fps_160x90",shellscript,content +1005,3465626,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/yolo-runs/tester.sh",,terminal_output +1006,3465783,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\ntf_records_dir=/pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords/10fps_160x90\r\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\r\n\r\njob_name=""debug""\r\nslurm_job_id=""debug-mihir""\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_lam_action_space_scaling_20/3318547/lam_1751657975_200000/\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/train_tokenizer_batch_size_scaling_16_node/3321526/tokenizer_22000/\r\n\r\nenv | grep SLURM\r\n\r\nsrun python train_dynamics.py \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=96 \\r\n --min_lr=0 \\r\n --max_lr=1.4e-4 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --num_latent_actions=20 \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-yolorun-tf-records-$slurm_job_id \\r\n --tags dynamics yolo-run tf_records \\r\n --entity instant-uv \\r\n --project jafar \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $tf_records_dir \\r\n --lam_checkpoint=$lam_ckpt_dir\r\n ",,terminal_output 
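The first run failed because the earlier edit had set `--lam_checkpoint=$tf_records_dir`, so Orbax's `PyTreeCheckpointer` raised `FileNotFoundError: No structure could be identified for the checkpoint` against the data directory; the edit at sequence 1002 restores `$lam_ckpt_dir`. A small pre-flight guard could fail faster with a clearer message; the helper name and its heuristic are assumptions, not part of the recorded code:

```python
# Hypothetical pre-flight check before restore_genie_components; it only
# verifies the path exists and is non-empty, which would already have
# caught the data-directory mix-up seen in the traceback above.
import os

def assert_checkpoint_dir(path: str) -> None:
    if not os.path.isdir(path):
        raise FileNotFoundError(f"Checkpoint directory not found: {path}")
    if not os.listdir(path):
        raise FileNotFoundError(f"Checkpoint directory is empty: {path}")
```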
+1007,3465929,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=409404\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0719\r\nSLURM_JOB_START_TIME=1752136462\r\nSLURM_STEP_NODELIST=hkn0719\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1752179662\r\nSLURM_PMI2_SRUN_PORT=43929\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=2\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3334543\r\nSLURM_PTY_PORT=44699\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=48\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=8\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0719\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=228\r\nSLURM_NODELIST=hkn[0719,0806]\r\nSLURM_SRUN_COMM_PORT=38371\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=8\r\nSLURM_NNODES=2\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3334543\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0719\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38371\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0719,0806]\r\n",,terminal_output +1008,3466070,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +1009,3504858,"TERMINAL",0,0,"2025-07-10 11:24:38.595116: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-10 11:24:38.677037: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-10 11:24:38.685498: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1010,3505264,"TERMINAL",0,0,"2025-07-10 11:24:38.981566: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1011,3507823,"TERMINAL",0,0,"2025-07-10 11:24:41.572035: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
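The corrected script re-runs across the two allocated nodes (4 GPUs each). The recorded train loop wraps the dataloader so each process contributes only its host-local shard, and `jax.make_array_from_process_local_data` assembles the global batch along the one-axis "data" mesh. A sketch of that wrapping; the 5-D (batch, time, height, width, channel) layout matches the recorded `videos_sharding`, other details are assumptions:

```python
# Sketch of the host-local-to-global batch assembly in the recorded loop.
import jax
from jax.sharding import Mesh, NamedSharding, PartitionSpec
from jax.experimental.mesh_utils import create_device_mesh

mesh = Mesh(create_device_mesh((jax.device_count(),)), axis_names=("data",))
videos_sharding = NamedSharding(mesh, PartitionSpec("data", None, None, None, None))

def as_global_batches(local_batches):
    # Each process passes its local slice; JAX stitches the global array
    # together along the "data" axis across all participating processes.
    for elem in local_batches:
        yield jax.make_array_from_process_local_data(videos_sharding, elem)
```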
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1012,3507877,"TERMINAL",0,0,"2025-07-10 11:24:41.670276: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-10 11:24:41.688943: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1013,3508084,"TERMINAL",0,0,"2025-07-10 11:24:41.910508: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1014,3508746,"TERMINAL",0,0,"2025-07-10 11:24:42.477038: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-10 11:24:42.503347: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1015,3510793,"TERMINAL",0,0,"2025-07-10 11:24:44.593239: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1016,3511113,"TERMINAL",0,0,"2025-07-10 11:24:44.821276: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1017,3512025,"TERMINAL",0,0,"2025-07-10 11:24:45.796607: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1018,3512128,"TERMINAL",0,0,"2025-07-10 11:24:45.907380: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1019,3513966,"TERMINAL",0,0,"2025-07-10 11:24:47.724234: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1020,3514077,"TERMINAL",0,0,"2025-07-10 11:24:47.904372: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1021,3521609,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +1022,3522465,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_112455-53w9ks6h\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-yolorun-tf-records-debug-mihir\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/53w9ks6h\r\n",,terminal_output +1023,3560073,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 217, in \r\n dataloader = get_dataloader(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py"", line 123, in get_dataloader\r\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_sources.py"", line 81, in __init__\r\n super().__init__(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 261, in __init__\r\n self._read_instructions = _get_read_instructions(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 169, in _get_read_instructions\r\n return _run_in_parallel(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 111, in _run_in_parallel\r\n raise completed_future.exception()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/concurrent/futures/thread.py"", line 58, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 164, in get_read_instruction\r\n reader.close()\r\nRuntimeError: ArrayRecord file should be at least 64KB big\r\n",,terminal_output +1024,3560905,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 217, in \r\n dataloader = get_dataloader(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py"", line 123, in get_dataloader\r\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_sources.py"", line 81, in __init__\r\n super().__init__(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 261, in __init__\r\n self._read_instructions = _get_read_instructions(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 169, in _get_read_instructions\r\n return _run_in_parallel(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 111, in _run_in_parallel\r\n raise completed_future.exception()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/concurrent/futures/thread.py"", line 58, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 164, in get_read_instruction\r\n reader.close()\r\nRuntimeError: ArrayRecord file should be at least 64KB big\r\n",,terminal_output +1025,3561145,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 217, in \r\n dataloader = get_dataloader(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py"", line 123, in get_dataloader\r\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_sources.py"", line 81, in __init__\r\n super().__init__(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 261, in __init__\r\n self._read_instructions = _get_read_instructions(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 169, in _get_read_instructions\r\n return _run_in_parallel(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 111, in _run_in_parallel\r\n raise completed_future.exception()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/concurrent/futures/thread.py"", line 58, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 164, in get_read_instruction\r\n reader.close()\r\nRuntimeError: ArrayRecord file should be at least 64KB big\r\n",,terminal_output +1026,3561954,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 217, in \r\n dataloader = get_dataloader(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py"", line 123, in get_dataloader\r\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_sources.py"", line 81, in __init__\r\n super().__init__(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 261, in __init__\r\n self._read_instructions = _get_read_instructions(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 169, in _get_read_instructions\r\n return _run_in_parallel(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 111, in _run_in_parallel\r\n raise completed_future.exception()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/concurrent/futures/thread.py"", line 58, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 164, in get_read_instruction\r\n reader.close()\r\nRuntimeError: ArrayRecord file should be at least 64KB big\r\n",,terminal_output +1027,3562066,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run dynamics-yolorun-tf-records-debug-mihir at: https://wandb.ai/instant-uv/jafar/runs/53w9ks6h\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_112455-53w9ks6h/logs\r\n",,terminal_output +1028,3562479,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 217, in \r\n dataloader = get_dataloader(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py"", line 123, in get_dataloader\r\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_sources.py"", line 81, in __init__\r\n super().__init__(paths)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 261, in __init__\r\n self._read_instructions = _get_read_instructions(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 169, in _get_read_instructions\r\n return _run_in_parallel(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 111, in _run_in_parallel\r\n raise completed_future.exception()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/concurrent/futures/thread.py"", line 58, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 164, in get_read_instruction\r\n reader.close()\r\nRuntimeError: ArrayRecord file should be at least 64KB big\r\n",,terminal_output +1029,3562536,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 217, in \r\n dataloader = get_dataloader(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py"", line 123, in get_dataloader\r\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_sources.py"", line 81, in __init__\r\n super().__init__(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 261, in __init__\r\n self._read_instructions = _get_read_instructions(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 169, in _get_read_instructions\r\n return _run_in_parallel(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 111, in _run_in_parallel\r\n raise completed_future.exception()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/concurrent/futures/thread.py"", line 58, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 164, in get_read_instruction\r\n reader.close()\r\nRuntimeError: ArrayRecord file should be at least 64KB big\r\n",,terminal_output +1030,3564433,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 217, in \r\n dataloader = get_dataloader(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py"", line 123, in get_dataloader\r\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_sources.py"", line 81, in __init__\r\n super().__init__(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 261, in __init__\r\n self._read_instructions = _get_read_instructions(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 169, in _get_read_instructions\r\n return _run_in_parallel(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 111, in _run_in_parallel\r\n raise completed_future.exception()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/concurrent/futures/thread.py"", line 58, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 164, in get_read_instruction\r\n reader.close()\r\nRuntimeError: ArrayRecord file should be at least 64KB big\r\n",,terminal_output +1031,3576016,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 217, in \r\n dataloader = get_dataloader(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py"", line 123, in get_dataloader\r\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_sources.py"", line 81, in __init__\r\n super().__init__(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 261, in __init__\r\n self._read_instructions = _get_read_instructions(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 169, in _get_read_instructions\r\n return _run_in_parallel(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 111, in _run_in_parallel\r\n raise completed_future.exception()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/concurrent/futures/thread.py"", line 58, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 164, in get_read_instruction\r\n reader.close()\r\nRuntimeError: ArrayRecord file should be at least 64KB big\r\n",,terminal_output +1032,3578037,"TERMINAL",0,0,"srun: error: hkn0719: tasks 1-2: Exited with exit code 1\r\n",,terminal_output +1033,3578177,"TERMINAL",0,0,"srun: error: hkn0806: tasks 4-6: Exited with exit code 1\r\n",,terminal_output +1034,3578485,"TERMINAL",0,0,"srun: error: hkn0719: task 3: Exited with exit code 1\r\nsrun: error: hkn0806: task 7: Exited with exit code 1\r\n",,terminal_output +1035,3578547,"TERMINAL",0,0,"srun: error: hkn0719: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +1036,3591563,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"",shellscript,tab +1037,3591947,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",239,0,"",shellscript,selection_mouse +1038,3592755,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",135,110,"",shellscript,content +1039,3592957,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",135,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_tfrecord_uncurrupted-2",shellscript,content +1040,3595438,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",135,105,"",shellscript,content +1041,3596670,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",135,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord",shellscript,content +1042,3597623,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,2,"",shellscript,content +1043,3597927,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,0,"82",shellscript,content +1044,3598787,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,2,"",shellscript,content +1045,3598994,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,0,"48",shellscript,content +1046,3599230,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,2,"",shellscript,content +1047,3599425,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,0,"96",shellscript,content +1048,3599661,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",93,0,"# ",shellscript,content +1049,3599661,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",63,0,"# ",shellscript,content +1050,3599661,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",33,0,"# ",shellscript,content +1051,3601216,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",97,2,"",shellscript,content +1052,3601217,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",65,2,"",shellscript,content +1053,3601217,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",33,2,"",shellscript,content +1054,3601403,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,2,"",shellscript,content +1055,3601566,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,0,"48",shellscript,content +1056,3601693,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,2,"",shellscript,content +1057,3601855,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,0,"82",shellscript,content +1058,3602083,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,2,"",shellscript,content +1059,3602154,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",827,0,"96",shellscript,content +1060,3602296,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",135,87,"",shellscript,content +1061,3602450,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",135,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_tfrecord_uncurrupted-2",shellscript,content 
+1062,3602636,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",135,105,"",shellscript,content +1063,3602999,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",135,0,"/pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords/10fps_160x90",shellscript,content +1064,3608155,"TERMINAL",0,0,"bash",,terminal_focus +1065,3609924,"TERMINAL",0,0,"srun",,terminal_focus +1066,3613068,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=10:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_dyn_yolorun\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\n# array_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_lam_action_space_scaling_20/3318547/lam_1751657975_200000/\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/train_tokenizer_batch_size_scaling_16_node/3321526/tokenizer_22000/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log \\n --num_latent_actions=20 \\n --log_checkpoint_interval=1000 \\n --name=dynamics-yolorun-tf-records-$slurm_job_id \\n --tags dynamics yolo-run tf_records \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $tf_records_dir \\n --lam_checkpoint=$lam_ckpt_dir\n",shellscript,tab +1067,3613787,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1522,0,"",shellscript,selection_mouse +1068,3613789,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1521,0,"",shellscript,selection_command +1069,3614727,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1285,0,"",shellscript,selection_mouse +1070,3618794,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",724,0,"",shellscript,selection_mouse +1071,3620452,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",648,0,"",shellscript,selection_command +1072,3621361,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",648,0,"#",shellscript,content +1073,3621363,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",649,0,"",shellscript,selection_keyboard +1074,3621438,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",649,0," ",shellscript,content +1075,3621438,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",650,0,"",shellscript,selection_keyboard +1076,3621738,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",649,0,"",shellscript,selection_command 
+1077,3621888,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",525,0,"",shellscript,selection_command +1078,3622423,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",524,0,"",shellscript,selection_command +1079,3622935,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",524,1,"",shellscript,content +1080,3623098,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",524,1,"",shellscript,content +1081,3623196,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",646,0,"",shellscript,selection_command +1082,3623484,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",751,0,"",shellscript,selection_command +1083,3623986,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",752,0,"",shellscript,selection_command +1084,3623999,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",777,0,"",shellscript,selection_command +1085,3624045,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",804,0,"",shellscript,selection_command +1086,3624074,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",805,0,"",shellscript,selection_command +1087,3624110,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",917,0,"",shellscript,selection_command +1088,3624132,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",942,0,"",shellscript,selection_command +1089,3624191,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",943,0,"",shellscript,selection_command +1090,3624192,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",960,0,"",shellscript,selection_command +1091,3624214,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",961,0,"",shellscript,selection_command +1092,3624271,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1112,0,"",shellscript,selection_command +1093,3624280,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1267,0,"",shellscript,selection_command +1094,3624314,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1268,0,"",shellscript,selection_command +1095,3624359,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1285,0,"",shellscript,selection_command +1096,3624372,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1286,0,"",shellscript,selection_command +1097,3624402,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1318,0,"",shellscript,selection_command +1098,3624464,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1351,0,"",shellscript,selection_command +1099,3624465,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1373,0,"",shellscript,selection_command +1100,3624480,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1390,0,"",shellscript,selection_command +1101,3624525,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1412,0,"",shellscript,selection_command +1102,3624560,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1444,0,"",shellscript,selection_command +1103,3624605,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1456,0,"",shellscript,selection_command +1104,3624631,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1486,0,"",shellscript,selection_command +1105,3624641,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1523,0,"",shellscript,selection_command 
+1106,3624695,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1578,0,"",shellscript,selection_command +1107,3625003,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1620,0,"",shellscript,selection_command +1108,3625148,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1646,0,"",shellscript,selection_command +1109,3625296,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1668,0,"",shellscript,selection_command +1110,3625481,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1717,0,"",shellscript,selection_command +1111,3625845,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1749,0,"",shellscript,selection_command +1112,3626213,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1748,0,"",shellscript,selection_command +1113,3626393,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1747,0,"",shellscript,selection_command +1114,3627047,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1733,14,"",shellscript,content +1115,3627467,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1733,0,"a",shellscript,content +1116,3627468,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1734,0,"",shellscript,selection_keyboard +1117,3628355,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1734,0,"r",shellscript,content +1118,3628356,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1735,0,"",shellscript,selection_keyboard +1119,3628788,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",1733,2,"array_records_dir",shellscript,content +1120,3633098,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/yolo-runs/tester.sh",,terminal_output +1121,3635725,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\ntf_records_dir=/pfs/work8/workspace/ffhk/scratch/tum_ind3695-jafar_workspace/data/open_ai_minecraft_arrayrecords/10fps_160x90\r\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\r\n\r\njob_name=""debug""\r\nslurm_job_id=""debug-mihir""\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_lam_action_space_scaling_20/3318547/lam_1751657975_200000/\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/train_tokenizer_batch_size_scaling_16_node/3321526/tokenizer_22000/\r\n\r\nenv | grep SLURM\r\n\r\nsrun python train_dynamics.py \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=96 \\r\n --min_lr=0 \\r\n --max_lr=1.4e-4 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --num_latent_actions=20 \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-yolorun-tf-records-$slurm_job_id \\r\n --tags dynamics yolo-run tf_records \\r\n --entity instant-uv \\r\n --project jafar \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $tf_records_dir \\r\n --lam_checkpoint=$lam_ckpt_dir\r\n ",,terminal_output 
+1122,3635960,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=409404\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0719\r\nSLURM_JOB_START_TIME=1752136462\r\nSLURM_STEP_NODELIST=hkn0719\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1752179662\r\nSLURM_PMI2_SRUN_PORT=43929\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=2\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3334543\r\nSLURM_PTY_PORT=44699\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=48\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=8\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0719\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=228\r\nSLURM_NODELIST=hkn[0719,0806]\r\nSLURM_SRUN_COMM_PORT=38371\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=8\r\nSLURM_NNODES=2\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3334543\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0719\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38371\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0719,0806]\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +1123,3672890,"TERMINAL",0,0,"2025-07-10 11:27:26.676896: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1124,3673054,"TERMINAL",0,0,"2025-07-10 11:27:26.881155: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1125,3674021,"TERMINAL",0,0,"2025-07-10 11:27:27.740663: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1126,3674129,"TERMINAL",0,0,"2025-07-10 11:27:27.942898: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1127,3675863,"TERMINAL",0,0,"2025-07-10 11:27:29.634473: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1128,3676072,"TERMINAL",0,0,"2025-07-10 11:27:29.899245: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1129,3676878,"TERMINAL",0,0,"2025-07-10 11:27:30.705593: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1130,3677191,"TERMINAL",0,0,"2025-07-10 11:27:30.936493: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1131,3677494,"TERMINAL",0,0,"2025-07-10 11:27:31.320723: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1132,3677556,"TERMINAL",0,0,"2025-07-10 11:27:31.363769: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1133,3678100,"TERMINAL",0,0,"2025-07-10 11:27:31.921396: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1134,3679926,"TERMINAL",0,0,"2025-07-10 11:27:33.753220: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1135,3680688,"TERMINAL",0,0,"2025-07-10 11:27:34.517123: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1136,3680819,"TERMINAL",0,0,"2025-07-10 11:27:34.644621: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1137,3681433,"TERMINAL",0,0,"2025-07-10 11:27:35.261380: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1138,3683147,"TERMINAL",0,0,"2025-07-10 11:27:36.965952: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1139,3691017,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +1140,3691530,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_112744-dj2wt6td\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-yolorun-tf-records-debug-mihir\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/dj2wt6td\r\n",,terminal_output +1141,3732795,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 217, in \r\n dataloader = get_dataloader(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py"", line 123, in get_dataloader\r\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_sources.py"", line 81, in __init__\r\n super().__init__(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 261, in __init__\r\n self._read_instructions = _get_read_instructions(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 169, in _get_read_instructions\r\n return _run_in_parallel(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 111, in _run_in_parallel\r\n raise completed_future.exception()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/concurrent/futures/thread.py"", line 58, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 164, in get_read_instruction\r\n reader.close()\r\nRuntimeError: ArrayRecord file should be at least 64KB 
big\r\n",,terminal_output +1142,3733270,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 217, in \r\n dataloader = get_dataloader(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py"", line 123, in get_dataloader\r\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_sources.py"", line 81, in __init__\r\n super().__init__(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 261, in __init__\r\n self._read_instructions = _get_read_instructions(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 169, in _get_read_instructions\r\n return _run_in_parallel(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 111, in _run_in_parallel\r\n raise completed_future.exception()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/concurrent/futures/thread.py"", line 58, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 164, in get_read_instruction\r\n reader.close()\r\nRuntimeError: ArrayRecord file should be at least 64KB big\r\n",,terminal_output +1143,3735562,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 217, in \r\n dataloader = get_dataloader(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py"", line 123, in get_dataloader\r\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_sources.py"", line 81, in __init__\r\n super().__init__(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 261, in __init__\r\n self._read_instructions = _get_read_instructions(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 169, in _get_read_instructions\r\n return _run_in_parallel(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 111, in _run_in_parallel\r\n raise completed_future.exception()\r\n File 
""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/concurrent/futures/thread.py"", line 58, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 164, in get_read_instruction\r\n reader.close()\r\nRuntimeError: ArrayRecord file should be at least 64KB big\r\n",,terminal_output +1144,3736659,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 217, in \r\n dataloader = get_dataloader(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py"", line 123, in get_dataloader\r\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_sources.py"", line 81, in __init__\r\n super().__init__(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 261, in __init__\r\n self._read_instructions = _get_read_instructions(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 169, in _get_read_instructions\r\n return _run_in_parallel(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 111, in _run_in_parallel\r\n raise completed_future.exception()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/concurrent/futures/thread.py"", line 58, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 164, in get_read_instruction\r\n reader.close()\r\nRuntimeError: ArrayRecord file should be at least 64KB big\r\n",,terminal_output +1145,3737723,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run dynamics-yolorun-tf-records-debug-mihir at: https://wandb.ai/instant-uv/jafar/runs/dj2wt6td\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_112744-dj2wt6td/logs\r\n",,terminal_output +1146,3743728,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 217, in \r\n dataloader = get_dataloader(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py"", line 123, in get_dataloader\r\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_sources.py"", line 81, in __init__\r\n super().__init__(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 261, in __init__\r\n self._read_instructions = _get_read_instructions(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 169, in _get_read_instructions\r\n return _run_in_parallel(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 111, in _run_in_parallel\r\n raise completed_future.exception()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/concurrent/futures/thread.py"", line 58, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 164, in get_read_instruction\r\n reader.close()\r\nRuntimeError: ArrayRecord file should be at least 64KB big\r\n",,terminal_output +1147,3746621,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 217, in \r\n dataloader = get_dataloader(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py"", line 123, in get_dataloader\r\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_sources.py"", line 81, in __init__\r\n super().__init__(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 261, in __init__\r\n self._read_instructions = _get_read_instructions(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 169, in _get_read_instructions\r\n return _run_in_parallel(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 111, in _run_in_parallel\r\n raise completed_future.exception()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/concurrent/futures/thread.py"", line 58, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 164, in get_read_instruction\r\n reader.close()\r\nRuntimeError: ArrayRecord file should be at least 64KB big\r\n",,terminal_output +1148,3747014,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter 
counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 217, in \r\n dataloader = get_dataloader(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py"", line 123, in get_dataloader\r\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_sources.py"", line 81, in __init__\r\n super().__init__(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 261, in __init__\r\n self._read_instructions = _get_read_instructions(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 169, in _get_read_instructions\r\n return _run_in_parallel(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 111, in _run_in_parallel\r\n raise completed_future.exception()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/concurrent/futures/thread.py"", line 58, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 164, in get_read_instruction\r\n reader.close()\r\nRuntimeError: ArrayRecord file should be at least 64KB big\r\n",,terminal_output +1149,3748771,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349920, 'dynamics': 29735424, 'total': 87074960}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 217, in \r\n dataloader = get_dataloader(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py"", line 123, in get_dataloader\r\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/grain/_src/python/data_sources.py"", line 81, in __init__\r\n super().__init__(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 261, in __init__\r\n self._read_instructions = _get_read_instructions(paths)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 169, in _get_read_instructions\r\n return _run_in_parallel(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 111, in _run_in_parallel\r\n raise completed_future.exception()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/concurrent/futures/thread.py"", line 58, 
in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/array_record/python/array_record_data_source.py"", line 164, in get_read_instruction\r\n reader.close()\r\nRuntimeError: ArrayRecord file should be at least 64KB big\r\n",,terminal_output +1150,3750777,"TERMINAL",0,0,"srun: error: hkn0719: task 2: Exited with exit code 1\r\n",,terminal_output +1151,3750885,"TERMINAL",0,0,"srun: error: hkn0806: task 6: Exited with exit code 1\r\nsrun: error: hkn0719: task 1: Exited with exit code 1\r\n",,terminal_output +1152,3751017,"TERMINAL",0,0,"srun: error: hkn0806: tasks 4-5: Exited with exit code 1\r\n",,terminal_output +1153,3751260,"TERMINAL",0,0,"srun: error: hkn0719: task 3: Exited with exit code 1\r\nsrun: error: hkn0806: task 7: Exited with exit code 1\r\nsrun: error: hkn0719: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +1154,3755364,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"",shellscript,tab +1155,3760062,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",0,0,"",shellscript,tab +1156,3761106,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",522,0,"",shellscript,selection_mouse +1157,3761108,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",521,0,"",shellscript,selection_command +1158,3761548,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch",556,0,"",shellscript,selection_mouse +1159,3764706,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"",shellscript,tab +1160,3765971,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",244,0,"",shellscript,selection_command +1161,3766136,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",245,0,"\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked",shellscript,content +1162,3766147,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",246,0,"",shellscript,selection_command +1163,3766550,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",120,0,"",shellscript,selection_command +1164,3766847,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",120,126,"",shellscript,content +1165,3766932,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",242,0,"",shellscript,selection_command +1166,3767175,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",308,0,"",shellscript,selection_command +1167,3767635,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",309,0,"",shellscript,selection_command +1168,3767698,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",326,0,"",shellscript,selection_command +1169,3767699,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",353,0,"",shellscript,selection_command +1170,3767715,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",354,0,"",shellscript,selection_command +1171,3767775,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",413,0,"",shellscript,selection_command +1172,3767777,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",438,0,"",shellscript,selection_command +1173,3767833,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",439,0,"",shellscript,selection_command +1174,3767847,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",590,0,"",shellscript,selection_command +1175,3767869,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",745,0,"",shellscript,selection_command +1176,3767932,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",746,0,"",shellscript,selection_command +1177,3767933,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",763,0,"",shellscript,selection_command 
+1178,3767971,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",764,0,"",shellscript,selection_command +1179,3767993,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",796,0,"",shellscript,selection_command +1180,3768022,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",829,0,"",shellscript,selection_command +1181,3768057,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",851,0,"",shellscript,selection_command +1182,3768077,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",868,0,"",shellscript,selection_command +1183,3768114,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",890,0,"",shellscript,selection_command +1184,3768165,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",922,0,"",shellscript,selection_command +1185,3768171,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",934,0,"",shellscript,selection_command +1186,3768225,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",964,0,"",shellscript,selection_command +1187,3768238,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1001,0,"",shellscript,selection_command +1188,3768292,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1056,0,"",shellscript,selection_command +1189,3768384,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1098,0,"",shellscript,selection_command +1190,3768545,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1124,0,"",shellscript,selection_command +1191,3768691,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1146,0,"",shellscript,selection_command +1192,3768825,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1195,0,"",shellscript,selection_command +1193,3769170,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1227,0,"",shellscript,selection_command +1194,3769427,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1226,0,"",shellscript,selection_command +1195,3769654,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1225,0,"",shellscript,selection_command +1196,3770076,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1211,14,"",shellscript,content +1197,3770644,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1211,0,"a",shellscript,content +1198,3770645,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1212,0,"",shellscript,selection_keyboard +1199,3770814,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1212,0,"r",shellscript,content +1200,3770815,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1213,0,"",shellscript,selection_keyboard +1201,3770973,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1213,0,"r",shellscript,content +1202,3770973,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1214,0,"",shellscript,selection_keyboard +1203,3771218,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1211,3,"array_records_dir",shellscript,content +1204,3771619,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1227,0,"",shellscript,selection_command +1205,3831241,"models/dynamics.py",0,0,"from typing import Dict, Any\n\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\n\nfrom utils.nn import STTransformer\n\n\nclass DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(self.model_dim)\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = 
self.patch_embed(batch[""video_tokens""])\n if training:\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)\n \n\n else:\n mask = None\n\n # --- Predict transition ---\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n logits = self.dynamics(vid_embed)\n return dict(token_logits=logits, mask=mask)\n",python,tab +1206,3831348,"models/dynamics.py",1262,384," vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,content +1207,3832680,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"",shellscript,tab +1208,3849184,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"#!/usr/bin/env bash\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\n# array_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords/10fps_160x90\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""0000""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=1000 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,tab +1209,3852467,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",365,0,"",shellscript,selection_mouse +1210,3852468,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",364,0,"",shellscript,selection_command +1211,3876942,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",470,0,"",shellscript,selection_mouse +1212,3877661,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",336,0,"",shellscript,selection_mouse +1213,3877810,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",320,45,"open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse +1214,3877954,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",237,129,"array_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset\n",shellscript,selection_mouse +1215,3878448,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",336,0,"",shellscript,selection_mouse +1216,3878449,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",320,45,"open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse 
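The edit sequence above (rows 1205-1206) shows a short-lived experiment in models/dynamics.py: masked video-token embeddings were filled with gaussian noise instead of the learned mask token, then the change was reverted to the original mask-token line. A minimal sketch contrasting the two variants, assuming the shapes from the DynamicsMaskGIT snippet (vid_embed of shape (B, T, N, model_dim), mask of shape (B, T, N), mask_token of shape (1, 1, 1, model_dim)):

import jax
import jax.numpy as jnp

def apply_mask(vid_embed, mask, mask_token, rng, use_noise: bool):
    mask = jnp.expand_dims(mask, -1)  # broadcast the boolean mask over model_dim
    if use_noise:
        # experimental variant from the recorded edit: gaussian noise at masked positions
        noise = jax.random.normal(rng, mask_token.shape)
        return jnp.where(mask, noise, vid_embed)
    # original (and restored) variant: the learned mask token at masked positions
    return jnp.where(mask, mask_token, vid_embed)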
+1217,3878613,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",237,129,"array_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset\n",shellscript,selection_mouse +1218,3879738,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",255,0,"",shellscript,selection_mouse +1219,3879738,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",254,2,"=/",shellscript,selection_mouse +1220,3879866,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",254,2,"=/",shellscript,selection_mouse +1221,3879878,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",254,6,"=/hkfs",shellscript,selection_mouse +1222,3879936,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",254,11,"=/hkfs/work",shellscript,selection_mouse +1223,3879937,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",254,21,"=/hkfs/work/workspace",shellscript,selection_mouse +1224,3879941,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",254,29,"=/hkfs/work/workspace/scratch",shellscript,selection_mouse +1225,3879957,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",254,41,"=/hkfs/work/workspace/scratch/tum_ind3695",shellscript,selection_mouse +1226,3879974,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",254,56,"=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared",shellscript,selection_mouse +1227,3880033,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",254,65,"=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new",shellscript,selection_mouse +1228,3880033,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",254,111,"=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse +1229,3880139,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",254,177,"=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'",shellscript,selection_mouse +1230,3880524,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",254,111,"=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse +1231,3881066,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",361,0,"",shellscript,selection_mouse +1232,3881445,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",363,0,"",shellscript,selection_mouse +1233,3881598,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",320,45,"open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse +1234,3881765,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",320,111,"open_ai_minecraft_arrayrecords_chunked_subset\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'",shellscript,selection_mouse +1235,3881783,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",320,45,"open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse 
+1236,3881841,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",169,196,"jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords/10fps_160x90\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse +1237,3881842,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",157,208,"tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords/10fps_160x90\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse +1238,3881859,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",149,216,"scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords/10fps_160x90\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse +1239,3881921,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",148,217,"/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords/10fps_160x90\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse +1240,3881922,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",266,99,"workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse +1241,3881983,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",320,75,"open_ai_minecraft_arrayrecords_chunked_subset\nws_dir='/hkfs/work/workspace/",shellscript,selection_mouse +1242,3881984,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",320,74,"open_ai_minecraft_arrayrecords_chunked_subset\nws_dir='/hkfs/work/workspace",shellscript,selection_mouse +1243,3882175,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",261,104,"work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse +1244,3882192,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",260,105,"/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse +1245,3882262,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",256,109,"hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse +1246,3882376,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",255,110,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse +1247,3882442,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",254,111,"=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse +1248,3882913,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",255,110,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse 
+1249,3883024,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",256,109,"hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse +1250,3883944,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",255,110,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked_subset",shellscript,selection_mouse +1251,3929355,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"",shellscript,tab +1252,3930661,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",174,0,"",shellscript,selection_mouse +1253,3936666,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +1254,3937502,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",222,0,"",shellscript,selection_mouse +1255,3937959,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",350,0,"",shellscript,selection_mouse +1256,3938342,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",365,0,"\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked",shellscript,content +1257,3938353,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",366,0,"",shellscript,selection_command +1258,3938888,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",237,0,"",shellscript,selection_command +1259,3939182,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",237,129,"",shellscript,content +1260,3946703,"train_dynamics.py",0,0,"",python,tab +1261,3947331,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +1262,3995226,"models/tokenizer.py",0,0,"from typing import Dict, Any, Tuple\n\nimport flax.linen as nn\n\nfrom utils.preprocess import patchify, unpatchify\nfrom utils.nn import STTransformer, VectorQuantizer\n\n\nclass TokenizerVQVAE(nn.Module):\n """"""ST-ViVit VQ-VAE""""""\n\n in_dim: int\n model_dim: int\n latent_dim: int\n num_latents: int\n patch_size: int\n num_blocks: int\n num_heads: int\n dropout: float\n codebook_dropout: float\n\n def setup(self):\n self.encoder = STTransformer(\n self.model_dim,\n self.latent_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.vq = VectorQuantizer(\n self.latent_dim,\n self.num_latents,\n self.codebook_dropout,\n )\n self.out_dim = self.in_dim * self.patch_size**2\n self.decoder = STTransformer(\n self.model_dim,\n self.out_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n H, W = batch[""videos""].shape[2:4]\n outputs = self.vq_encode(batch[""videos""], training)\n recon = self.decoder(outputs[""z_q""]) # (B, T, H_down * W_down, C)\n recon = nn.sigmoid(recon)\n outputs[""recon""] = unpatchify(recon, self.patch_size, H, W)\n return outputs\n\n def vq_encode(self, videos: Any, training: bool = True) -> Dict[str, Any]:\n # --- Preprocess + encode ---\n B, T = videos.shape[:2]\n x = patchify(videos, self.patch_size)\n N = x.shape[2]\n x = self.encoder(x) # (B, T, N, E)\n\n # --- Vector quantize ---\n x = x.reshape(B * T * N, self.latent_dim)\n z_q, z, emb, indices = self.vq(x, training)\n z_q = z_q.reshape(B, T, N, self.latent_dim)\n indices = indices.reshape(B, T, N)\n return dict(z_q=z_q, z=z, emb=emb, indices=indices)\n\n def decode(self, indices: Any, video_hw: Tuple[int, int]):\n z = 
self.vq.codebook[indices]\n recon = self.decoder(z)\n recon = nn.sigmoid(recon)\n return unpatchify(recon, self.patch_size, *video_hw)\n",python,tab +1263,3997589,"models/tokenizer.py",524,0,"",python,selection_mouse +1264,3997592,"models/tokenizer.py",523,0,"",python,selection_command +1265,4004981,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +1266,4006707,"train_tokenizer_bak.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\nfrom utils.logger import CompositeLogger\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 0.0\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log_dir: str = ""logs/"" \n loggers: list[str] = field(default_factory=lambda: [""console""]) # options: console, local, tb, wandb\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if 
args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if jax.process_index() == 0:\n cfg = vars(args).copy()\n cfg[""model_param_count""] = param_counts\n logger = CompositeLogger(args.loggers, cfg)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer__\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n array_record_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in dataloader) # type: ignore\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n 
for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n step += 1\n\n # --- Logging ---\n if step % args.log_interval == 0 and jax.process_index() == 0:\n logger.log_metrics(\n {\n ""loss"": loss,\n **metrics,\n },\n step\n )\n if step % args.log_image_interval == 0:\n\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=np.asarray(gt_seq[0] * 255.).astype(np.uint8),\n recon=np.asarray(recon_seq[0] * 255.).astype(np.uint8),\n true_vs_recon=np.asarray(comparison_seq.astype(np.uint8)\n ),\n )\n logger.log_images(log_images, step)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +1267,4007790,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 0.0\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - 
jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args,\n )\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n train_state = 
jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer__\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n array_record_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in dataloader) # type: ignore\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +1268,4009689,"train_tokenizer.py",570,0,"",python,selection_mouse +1269,4059127,"train_tokenizer.py",8194,0,"",python,selection_mouse +1270,4059663,"train_tokenizer.py",7926,0,"",python,selection_mouse +1271,4059664,"train_tokenizer.py",7925,0,"",python,selection_command +1272,4060915,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +1273,4063406,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",392,0,"",shellscript,selection_mouse +1274,4063955,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",144,0,"",shellscript,selection_mouse +1275,4064127,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",139,9,"workspace",shellscript,selection_mouse +1276,4064298,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",108,129,"# 
array_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords/10fps_160x90\n",shellscript,selection_mouse +1277,4064900,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",273,0,"",shellscript,selection_mouse +1278,4065057,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",266,9,"workspace",shellscript,selection_mouse +1279,4065199,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",237,122,"array_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n",shellscript,selection_mouse +1280,4065790,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",273,0,"",shellscript,selection_mouse +1281,4066413,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",266,9,"workspace",shellscript,selection_mouse +1282,4066578,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",237,122,"array_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n",shellscript,selection_mouse +1283,4067180,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",273,0,"",shellscript,selection_mouse +1284,4067376,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",266,9,"workspace",shellscript,selection_mouse +1285,4068215,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",151,0,"",shellscript,selection_mouse +1286,4068367,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",149,7,"scratch",shellscript,selection_mouse +1287,4069043,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",151,0,"",shellscript,selection_mouse +1288,4069044,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",149,7,"scratch",shellscript,selection_mouse +1289,4741822,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +1290,4741900,"TERMINAL",0,0,"[?25li[?25h[?25lt[?25h",,terminal_output +1291,4742063,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1292,4742216,"TERMINAL",0,0,"[?25lb[?25h[?25lr[?25h",,terminal_output +1293,4742460,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +1294,4742601,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +1295,4742664,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1296,4742842,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +1297,4743524,"TERMINAL",0,0,"\r\n[?2004l\r[?1h=\r add-wandb-name-and-tags\r\n convert-to-jax-array-in-iter\r\n correct-batched-sampling\r\n dev\r\n dont-let-tf-see-gpu\r\n feat/explicit-image-dims\r\n fix-sampling\r\n grain-dataloader\r\n logging-variants\r\n main\r\n preprocess_video\r\n revised-dataloader\r\n runner\r\n* runner-grain\r\n speedup-tfrecord-preprocessing\r\n tmp\r\n\r[?1l>]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +1298,4744907,"TERMINAL",0,0,"[?25lg[?25h[?25li[?25h[?25lt[?25h",,terminal_output +1299,4745039,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1300,4745214,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1301,4745276,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1302,4745370,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +1303,4745472,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1304,4745546,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1305,4745658,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1306,4745764,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +1307,4745883,"TERMINAL",0,0,"On branch runner-grain\r\nUntracked 
files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tlocal-logs/\r\n\tlogs/\r\n\tread_tf_record.py\r\n\trequirements-franz.txt\r\n\tsample.py_bak\r\n\tscripts_cremers/\r\n\tscripts_horeka/\r\n\tslurm-3309772.out\r\n\tslurm/\r\n\ttrain_tokenizer_bak.py\r\n\tutils/logger_bak.py\r\n\tutils/visualizer.py\r\n\r\nnothing added to commit but untracked files present (use ""git add"" to track)\r\n]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +1308,4747828,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +1309,4747880,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +1310,4747943,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1311,4748076,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1312,4748316,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +1313,4748804,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1314,4748869,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +1315,4748930,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1316,4749068,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1317,4749175,"TERMINAL",0,0,"[?25lk[?25h",,terminal_output +1318,4749358,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +1319,4749418,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1320,4749600,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1321,4749657,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1322,4749998,"TERMINAL",0,0,"[?25lg[?25h[?25lr[?25h",,terminal_output +1323,4750270,"TERMINAL",0,0,"[?25la[?25h[?25li[?25h[?25ln[?25h",,terminal_output +1324,4750587,"TERMINAL",0,0,"[?25l-[?25h",,terminal_output +1325,4750855,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +1326,4751053,"TERMINAL",0,0,"[?25lta[?25h",,terminal_output +1327,4751219,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +1328,4751393,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +1329,4752157,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +1330,4752303,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1331,4752441,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +1332,4752608,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +1333,4752749,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +1334,4752854,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +1335,4752907,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +1336,4753109,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1337,4753176,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +1338,4753238,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +1339,4753354,"TERMINAL",0,0,"Switched to branch 'grain-dataloader'\r\nYour branch is up to date with 'origin/grain-dataloader'.\r\n]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +1340,4753947,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +1341,4754056,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +1342,4754127,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1343,4754223,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1344,4754368,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +1345,4754563,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1346,4754723,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +1347,4754891,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +1348,4754953,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +1349,4755569,"",0,0,"Switched from branch 'runner-grain' to 'grain-dataloader'",,git_branch_checkout +1350,4756709,"TERMINAL",0,0,"remote: Enumerating objects: 15, done.\r\nremote: Counting objects: 6% (1/15)\rremote: Counting objects: 13% (2/15)\rremote: Counting objects: 20% (3/15)\rremote: Counting objects: 26% (4/15)\rremote: Counting objects: 33% 
(5/15)\rremote: Counting objects: 40% (6/15)\rremote: Counting objects: 46% (7/15)\rremote: Counting objects: 53% (8/15)\rremote: Counting objects: 60% (9/15)\rremote: Counting objects: 66% (10/15)\rremote: Counting objects: 73% (11/15)\rremote: Counting objects: 80% (12/15)\rremote: Counting objects: 86% (13/15)\rremote: Counting objects: 93% (14/15)\rremote: Counting objects: 100% (15/15)\rremote: Counting objects: 100% (15/15), done.\r\nremote: Compressing objects: 25% (1/4)\rremote: Compressing objects: 50% (2/4)\rremote: Compressing objects: 75% (3/4)\rremote: Compressing objects: 100% (4/4)\rremote: Compressing objects: 100% (4/4), done.\r\nremote: Total 15 (delta 11), reused 13 (delta 11), pack-reused 0 (from 0)\r\n",,terminal_output +1351,4756797,"TERMINAL",0,0,"Unpacking objects: 6% (1/15)\rUnpacking objects: 13% (2/15)\rUnpacking objects: 20% (3/15)\r",,terminal_output +1352,4756855,"TERMINAL",0,0,"Unpacking objects: 26% (4/15)\rUnpacking objects: 33% (5/15)\rUnpacking objects: 40% (6/15)\r",,terminal_output +1353,4756909,"TERMINAL",0,0,"Unpacking objects: 46% (7/15)\rUnpacking objects: 53% (8/15)\rUnpacking objects: 60% (9/15)\rUnpacking objects: 66% (10/15)\rUnpacking objects: 73% (11/15)\rUnpacking objects: 80% (12/15)\rUnpacking objects: 86% (13/15)\rUnpacking objects: 93% (14/15)\rUnpacking objects: 100% (15/15)\rUnpacking objects: 100% (15/15), 4.34 KiB | 20.00 KiB/s, done.\r\n",,terminal_output +1354,4757086,"TERMINAL",0,0,"From github.com:p-doom/jafar\r\n 7410790..75647c1 grain-dataloader -> origin/grain-dataloader\r\n",,terminal_output +1355,4757144,"TERMINAL",0,0," b9e4ff6..b4614ee main -> origin/main\r\n",,terminal_output +1356,4757248,"TERMINAL",0,0,"Updating 7410790..75647c1\r\nFast-forward\r\n",,terminal_output +1357,4757347,"TERMINAL",0,0," genie.py | 250 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------------------------------------------------------------------------\r\n sample.py | 65 +++++++++++++++++++++++---------------------------\r\n train_dynamics.py | 8 +++----\r\n train_lam.py | 96 +++++++++++++++++++++++++++++++++++++++++++++++---------------------------\r\n train_tokenizer.py | 96 +++++++++++++++++++++++++++++++++++++++++++++++---------------------------\r\n utils/dataloader.py | 2 +-\r\n utils/preprocess_dataset.py | 59 ++++++++++++++++++++++++++--------------------\r\n 7 files changed, 333 insertions(+), 243 deletions(-)\r\n]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +1358,4787692,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"#!/usr/bin/env bash\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\n# array_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords/10fps_160x90\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""0000""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=1000 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer 
batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,tab +1359,4790347,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",691,0,"",shellscript,selection_mouse +1360,4791045,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",651,0,"",shellscript,selection_mouse +1361,4792096,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",617,0,"",shellscript,selection_mouse +1362,4792697,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",651,0,"",shellscript,selection_mouse +1363,4793262,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",688,0,"",shellscript,selection_mouse +1364,4793789,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",690,0,"",shellscript,selection_mouse +1365,4794342,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",691,0,"",shellscript,selection_mouse +1366,4795267,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",723,0,"",shellscript,selection_mouse +1367,4796381,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",755,0,"",shellscript,selection_command +1368,4796623,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",756,0,"",shellscript,selection_command +1369,4796806,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",757,0,"",shellscript,selection_command +1370,4796952,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",758,0,"",shellscript,selection_command +1371,4797087,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",759,0,"",shellscript,selection_command +1372,4797244,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",760,0,"",shellscript,selection_command +1373,4798034,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",759,1,"",shellscript,content +1374,4798140,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",758,1,"",shellscript,content +1375,4798704,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",757,0,"",shellscript,selection_command +1376,4798819,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",724,0,"",shellscript,selection_command +1377,4799032,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",692,0,"",shellscript,selection_command +1378,4799175,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",670,0,"",shellscript,selection_command +1379,4799356,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",653,0,"",shellscript,selection_command +1380,4799448,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",631,0,"",shellscript,selection_command +1381,4799821,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",598,0,"",shellscript,selection_command +1382,4801485,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader 
import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 0.0\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n 
num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args,\n )\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer__\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n array_record_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in dataloader) # type: ignore\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n 
image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +1383,4801572,"train_tokenizer.py",284,9296,"\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 0.0\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), 
grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args,\n )\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add('model_state', ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler)\n handler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\n handler_registry.add('dataloader_state', grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add('dataloader_state', grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n \n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n \n checkpoint_manager = ocp.CheckpointManager(\n os.path.abspath(args.ckpt_dir),\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = 
get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n \n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_train_state = jax.tree_util.tree_map(ocp.utils.to_shape_dtype_struct, train_state)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n )\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(grain_iterator),\n )\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()",python,content +1384,4807093,"train_tokenizer.py",633,0,"",python,selection_mouse +1385,4807219,"train_tokenizer.py",628,9,"num_steps",python,selection_mouse +1386,4809445,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +1387,4811410,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",599,0,"\n",shellscript,content +1388,4811966,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",600,0,"-",shellscript,content +1389,4811967,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",601,0,"",shellscript,selection_keyboard +1390,4812649,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",600,1,"",shellscript,content 
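Edit 1383 above rewrites train_tokenizer.py so that model parameters and the grain dataloader position are checkpointed together through a single Orbax CheckpointManager. A condensed sketch of that pattern, following the calls that appear in the recorded file; train_state, grain_iterator, and step stand in for the objects built in the surrounding training loop:

import orbax.checkpoint as ocp
import grain

# Register save/restore handlers for both checkpointables, as in the recording.
registry = ocp.handlers.DefaultCheckpointHandlerRegistry()
registry.add("model_state", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler)
registry.add("model_state", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)
registry.add("dataloader_state", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler)
registry.add("dataloader_state", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler)

manager = ocp.CheckpointManager("/tmp/ckpts", handler_registry=registry)

# One composite save captures the train state and the iterator position together,
# so a restart resumes both the model and the exact place in the data stream.
manager.save(
    step,
    args=ocp.args.Composite(
        model_state=ocp.args.StandardSave(train_state),
        dataloader_state=grain.checkpoint.CheckpointSave(grain_iterator),
    ),
)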
+1391,4813068,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",600,0," ",shellscript,content +1392,4813833,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",604,0,"-",shellscript,content +1393,4813834,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",605,0,"",shellscript,selection_keyboard +1394,4813951,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",605,0,"-",shellscript,content +1395,4813952,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",606,0,"",shellscript,selection_keyboard +1396,4814226,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",606,0,"num_steps",shellscript,content +1397,4815828,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",615,0,"=",shellscript,content +1398,4815833,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",616,0,"",shellscript,selection_keyboard +1399,4816629,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",616,0,"2",shellscript,content +1400,4816630,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",617,0,"",shellscript,selection_keyboard +1401,4816686,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",617,0,"0",shellscript,content +1402,4816687,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",618,0,"",shellscript,selection_keyboard +1403,4817289,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",617,1,"",shellscript,content +1404,4818091,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",616,1,"",shellscript,content +1405,4818239,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",616,0,"3",shellscript,content +1406,4818239,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",617,0,"",shellscript,selection_keyboard +1407,4818329,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",617,0,"0",shellscript,content +1408,4818330,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",618,0,"",shellscript,selection_keyboard +1409,4819573,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",618,0," ",shellscript,content +1410,4819574,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",619,0,"",shellscript,selection_keyboard +1411,4819700,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",619,0,"\",shellscript,content +1412,4819700,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",620,0,"",shellscript,selection_keyboard +1413,4820148,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",619,0,"",shellscript,selection_command +1414,4820817,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",837,0,"",shellscript,selection_mouse +1415,4821285,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1011,0,"",shellscript,selection_mouse +1416,4821286,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1010,0,"",shellscript,selection_command +1417,4821829,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1012,0,"",shellscript,selection_mouse +1418,4822461,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",675,0,"",shellscript,selection_mouse +1419,4822461,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",674,0,"",shellscript,selection_command 
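Back in the recorded train loop, each host's Grain iterator yields only its process-local slice of the global batch, and jax.make_array_from_process_local_data stitches the slices into one array sharded over the "data" mesh axis. A runnable single-process sketch with assumed video shapes:

```python
# Sketch of the global-batch assembly from the train loop above.
# Shapes are assumed; on one process the "global" batch equals the local one.
import jax
import numpy as np
from jax.experimental.mesh_utils import create_device_mesh
from jax.sharding import Mesh, NamedSharding, PartitionSpec

mesh = Mesh(create_device_mesh((jax.device_count(),)), axis_names=("data",))
videos_sharding = NamedSharding(mesh, PartitionSpec("data", None, None, None, None))

# (B_local, T, H, W, C); leading dim chosen so it divides evenly over devices.
local_batch = np.zeros((jax.local_device_count(), 16, 64, 64, 3), dtype=np.float32)
global_batch = jax.make_array_from_process_local_data(videos_sharding, local_batch)
print(global_batch.shape)  # batch dim = B_local * jax.process_count()
```

This is why the recorded script deliberately passes the global batch size to get_dataloader: Grain shards the dataset across processes, and the assembly step above reconstitutes the global view.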
+1420,4823099,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1011,0,"",shellscript,selection_mouse +1421,4823114,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1010,0,"",shellscript,selection_command +1422,4824176,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",548,0,"",shellscript,selection_mouse +1423,4824751,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",543,0,"",shellscript,selection_mouse +1424,4824896,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",533,14,"CHECKPOINT_DIR",shellscript,selection_mouse +1425,4830545,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",514,0,"",shellscript,selection_mouse +1426,4832240,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",480,0,"",shellscript,selection_mouse +1427,4832392,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",480,6,"ws_dir",shellscript,selection_mouse +1428,4832646,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",480,18,"ws_dir/checkpoints",shellscript,selection_mouse +1429,4832706,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",480,19,"ws_dir/checkpoints/",shellscript,selection_mouse +1430,4832729,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",480,20,"ws_dir/checkpoints/$",shellscript,selection_mouse +1431,4832783,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",480,28,"ws_dir/checkpoints/$job_name",shellscript,selection_mouse +1432,4833098,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",502,0,"",shellscript,selection_mouse +1433,4833646,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",501,1,"o",shellscript,selection_mouse +1434,4833677,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",500,2,"jo",shellscript,selection_mouse +1435,4833687,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",499,3,"$jo",shellscript,selection_mouse +1436,4833696,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",498,4,"/$jo",shellscript,selection_mouse +1437,4834080,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",498,0,"",shellscript,selection_mouse +1438,4834998,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",425,0,"",shellscript,selection_mouse +1439,4836560,"TERMINAL",0,0,"bash",,terminal_focus +1440,4842628,"TERMINAL",0,0,"cd $ws_dir",,terminal_command +1441,4843045,"TERMINAL",0,0,"ls",,terminal_command +1442,4843097,"TERMINAL",0,0,"]633;E;2025-07-10 11:46:56 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C",,terminal_output +1443,4843196,"TERMINAL",0,0,"checkpoints count_items.sh data data_new huggingface logs scripts\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared]633;D;0",,terminal_output +1444,4846239,"TERMINAL",0,0,"cd checkpoints/",,terminal_command +1445,4846975,"TERMINAL",0,0,"ls",,terminal_command +1446,4847037,"TERMINAL",0,0,"]633;E;2025-07-10 11:47:00 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C",,terminal_output +1447,4847108,"TERMINAL",0,0,"0000 3290367 3292213 3292331 3292337 3294603 3296575 3297582 3297727 3299068 3300233 3301026 3307618 3310437 3313565 dyn tokenizer\r\n3290283 3290391 3292221 3292332 3292338 3296502 3297569 3297586 3299016 3299069 3300290 3301027 3307619 3311671 3313570 dynamics_ckpt_dir tokenizer_ckpt_dir\r\n3290284 3290392 3292258 3292333 3292339 
3296540 3297575 3297606 3299062 3299258 3300658 3301029 3309662 3311672 3313571 lam train_lam_minecraft_overfit_sample\r\n3290295 3290439 3292328 3292334 3294600 3296571 3297576 3297671 3299063 3299259 3300663 3301030 3309663 3313562 3313572 lam-1-action train_tokenizer_batch_size_scaling_16_node\r\n3290296 3290440 3292329 3292335 3294601 3296573 3297577 3297693 3299065 3299272 3300672 3301031 3309699 3313563 3316022 lam_ckpt_dir train_tokenizer_minecraft_overfit_sample\r\n3290366 3291405 3292330 3292336 3294602 3296574 3297578 3297706 3299066 3299579 3301025 3306801 3310436 3313564 debug lam_main_test\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints]633;D;0",,terminal_output +1448,4849230,"TERMINAL",0,0,"cd debug/",,terminal_command +1449,4849571,"TERMINAL",0,0,"ls",,terminal_command +1450,4853427,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",463,0,"",shellscript,selection_mouse +1451,4853937,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",461,0,"",shellscript,selection_mouse +1452,4855232,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",460,1,"",shellscript,content +1453,4855368,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",459,1,"",shellscript,content +1454,4855500,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",458,1,"",shellscript,content +1455,4855696,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",457,1,"",shellscript,content +1456,4856124,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",457,0,"m",shellscript,content +1457,4856128,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",458,0,"",shellscript,selection_keyboard +1458,4856304,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",458,0,"i",shellscript,content +1459,4856305,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",459,0,"",shellscript,selection_keyboard +1460,4856393,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",459,0,"h",shellscript,content +1461,4856394,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",460,0,"",shellscript,selection_keyboard +1462,4856491,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",460,0,"i",shellscript,content +1463,4856492,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",461,0,"",shellscript,selection_keyboard +1464,4856500,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",461,0,"r",shellscript,content +1465,4856501,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",462,0,"",shellscript,selection_keyboard +1466,4856723,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",462,0,"-",shellscript,content +1467,4856724,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",463,0,"",shellscript,selection_keyboard +1468,4856945,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",463,0,"d",shellscript,content +1469,4856946,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",464,0,"",shellscript,selection_keyboard +1470,4857150,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",464,0,"e",shellscript,content +1471,4857151,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",465,0,"",shellscript,selection_keyboard 
+1472,4857235,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",465,0,"b",shellscript,content +1473,4857236,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",466,0,"",shellscript,selection_keyboard +1474,4857306,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",466,0,"u",shellscript,content +1475,4857307,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",467,0,"",shellscript,selection_keyboard +1476,4857420,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",467,0,"g",shellscript,content +1477,4857423,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",468,0,"",shellscript,selection_keyboard +1478,4857550,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",468,0,"-",shellscript,content +1479,4857551,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",469,0,"",shellscript,selection_keyboard +1480,4858967,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",469,0,"c",shellscript,content +1481,4858968,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",470,0,"",shellscript,selection_keyboard +1482,4859064,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",470,0,"h",shellscript,content +1483,4859065,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",471,0,"",shellscript,selection_keyboard +1484,4859187,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",471,0,"e",shellscript,content +1485,4859188,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",472,0,"",shellscript,selection_keyboard +1486,4859364,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",472,0,"c",shellscript,content +1487,4859365,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",473,0,"",shellscript,selection_keyboard +1488,4859435,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",473,0,"k",shellscript,content +1489,4859435,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",474,0,"",shellscript,selection_keyboard +1490,4859636,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",474,0,"p",shellscript,content +1491,4859637,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",475,0,"",shellscript,selection_keyboard +1492,4859835,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",475,0,"o",shellscript,content +1493,4859836,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",476,0,"",shellscript,selection_keyboard +1494,4859989,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",476,0,"i",shellscript,content +1495,4859990,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",477,0,"",shellscript,selection_keyboard +1496,4860128,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",477,0,"n",shellscript,content +1497,4860129,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",478,0,"",shellscript,selection_keyboard +1498,4860311,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",478,0,"t",shellscript,content +1499,4860312,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",479,0,"",shellscript,selection_keyboard +1500,4860387,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",479,0,"i",shellscript,content 
+1501,4860387,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",480,0,"",shellscript,selection_keyboard +1502,4860471,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",480,0,"n",shellscript,content +1503,4860472,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",481,0,"",shellscript,selection_keyboard +1504,4860628,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",481,0,"g",shellscript,content +1505,4860629,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",482,0,"",shellscript,selection_keyboard +1506,4861196,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",481,1,"",shellscript,content +1507,4861340,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",480,1,"",shellscript,content +1508,4861480,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",479,1,"",shellscript,content +1509,4861622,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",478,1,"",shellscript,content +1510,4861764,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",477,1,"",shellscript,content +1511,4861893,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",476,1,"",shellscript,content +1512,4862198,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",475,1,"",shellscript,content +1513,4862319,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",474,1,"",shellscript,content +1514,4862452,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",473,1,"",shellscript,content +1515,4862605,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",472,1,"",shellscript,content +1516,4862737,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",471,1,"",shellscript,content +1517,4862880,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",470,1,"",shellscript,content +1518,4863011,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",469,1,"",shellscript,content +1519,4863362,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",469,0,"g",shellscript,content +1520,4863363,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",470,0,"",shellscript,selection_keyboard +1521,4863472,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",470,0,"r",shellscript,content +1522,4863473,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",471,0,"",shellscript,selection_keyboard +1523,4863678,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",471,0,"a",shellscript,content +1524,4863679,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",472,0,"",shellscript,selection_keyboard +1525,4863691,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",472,0,"i",shellscript,content +1526,4863692,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",473,0,"",shellscript,selection_keyboard +1527,4863753,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",473,0,"n",shellscript,content +1528,4863753,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",474,0,"",shellscript,selection_keyboard +1529,4863970,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",474,0,"-",shellscript,content 
+1530,4863971,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",475,0,"",shellscript,selection_keyboard +1531,4864198,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",475,0,"c",shellscript,content +1532,4864198,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",476,0,"",shellscript,selection_keyboard +1533,4864290,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",476,0,"h",shellscript,content +1534,4864290,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",477,0,"",shellscript,selection_keyboard +1535,4864403,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",477,0,"e",shellscript,content +1536,4864403,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",478,0,"",shellscript,selection_keyboard +1537,4864503,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",478,0,"c",shellscript,content +1538,4864504,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",479,0,"",shellscript,selection_keyboard +1539,4864644,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",479,0,"k",shellscript,content +1540,4864645,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",480,0,"",shellscript,selection_keyboard +1541,4864838,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",480,0,"p",shellscript,content +1542,4864839,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",481,0,"",shellscript,selection_keyboard +1543,4865020,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",481,0,"o",shellscript,content +1544,4865021,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",482,0,"",shellscript,selection_keyboard +1545,4865198,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",482,0,"i",shellscript,content +1546,4865199,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",483,0,"",shellscript,selection_keyboard +1547,4865298,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",483,0,"n",shellscript,content +1548,4865298,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",484,0,"",shellscript,selection_keyboard +1549,4865417,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",484,0,"t",shellscript,content +1550,4865418,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",485,0,"",shellscript,selection_keyboard +1551,4865473,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",485,0,"i",shellscript,content +1552,4865474,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",486,0,"",shellscript,selection_keyboard +1553,4865563,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",486,0,"n",shellscript,content +1554,4865564,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",487,0,"",shellscript,selection_keyboard +1555,4865620,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",487,0,"g",shellscript,content +1556,4865621,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",488,0,"",shellscript,selection_keyboard +1557,4868385,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",425,0,"",shellscript,selection_mouse +1558,4868950,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",490,0,"",shellscript,selection_mouse 
+1559,4869583,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",472,0,"",shellscript,selection_mouse +1560,4871914,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",487,0,"",shellscript,selection_mouse +1561,4872779,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",488,0,"",shellscript,selection_mouse +1562,4873783,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",488,0,"-",shellscript,content +1563,4873784,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",489,0,"",shellscript,selection_keyboard +1564,4873998,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",489,0,"o",shellscript,content +1565,4873999,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",490,0,"",shellscript,selection_keyboard +1566,4874151,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",490,0,"l",shellscript,content +1567,4874152,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",491,0,"",shellscript,selection_keyboard +1568,4874461,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",491,0,"d",shellscript,content +1569,4874461,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",492,0,"",shellscript,selection_keyboard +1570,4875086,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",491,0,"",shellscript,selection_command +1571,4876414,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",812,0,"",shellscript,selection_mouse +1572,4876421,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",811,0,"",shellscript,selection_command +1573,4877191,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1043,0,"",shellscript,selection_mouse +1574,4877865,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",706,0,"",shellscript,selection_mouse +1575,4877866,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",705,0,"",shellscript,selection_command +1576,4879469,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",808,0,"",shellscript,selection_mouse +1577,4880044,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",824,0,"",shellscript,selection_mouse +1578,4880046,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",823,0,"",shellscript,selection_command +1579,4880595,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",824,0,"",shellscript,selection_mouse +1580,4880595,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",823,0,"",shellscript,selection_command +1581,4881098,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",811,0,"",shellscript,selection_mouse +1582,4882467,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",810,0,"",shellscript,selection_mouse +1583,4883474,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",809,1,"",shellscript,content +1584,4883611,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",808,1,"",shellscript,content +1585,4883781,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",808,0,"2",shellscript,content +1586,4883781,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",809,0,"",shellscript,selection_keyboard +1587,4906588,"TERMINAL",0,0,"srun",,terminal_focus 
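The srun launch that follows runs 8 tasks across 2 nodes (SLURM_NTASKS=8, 4 GPUs per node in the environment dump below), and the script's "Running on 8 devices." line comes from the multi-process JAX bring-up. A minimal sketch, assuming a SLURM launch with one process per GPU:

```python
# Minimal multi-host JAX setup, as in train_tokenizer.py's __main__ block.
# Assumes SLURM with one process per GPU (here 2 nodes x 4 GPUs = 8 tasks).
import jax

jax.distributed.initialize()    # discovers peer processes from the SLURM env
if jax.device_count() == 0:
    raise ValueError("No JAX devices found.")
print(f"Running on {jax.device_count()} devices.")    # global count: 8 here
print(jax.process_index(), jax.local_device_count())  # e.g. 0..7, 1 device each
```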
+1588,4908210,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +1589,4912586,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1590,4912649,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +1591,4912710,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1592,4913065,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",,terminal_output +1593,4913401,"TERMINAL",0,0,"\rslurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh\r\n[?2004l\r",,terminal_output +1594,4913586,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=409404\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0719\r\nSLURM_JOB_START_TIME=1752136462\r\nSLURM_STEP_NODELIST=hkn0719\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1752179662\r\nSLURM_PMI2_SRUN_PORT=43929\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=2\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3334543\r\nSLURM_PTY_PORT=44699\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=48\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=8\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0719\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=228\r\nSLURM_NODELIST=hkn[0719,0806]\r\nSLURM_SRUN_COMM_PORT=38371\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=8\r\nSLURM_NNODES=2\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3334543\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0719\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38371\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0719,0806]\r\n",,terminal_output +1595,4913735,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +1596,4943435,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 177, in \r\n lr_schedule = optax.warmup_cosine_decay_schedule(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/optax/schedules/_schedule.py"", line 605, in warmup_cosine_decay_schedule\r\n cosine_decay_schedule(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/optax/schedules/_schedule.py"", line 361, in cosine_decay_schedule\r\n raise ValueError(\r\nValueError: The cosine_decay_schedule requires positive decay_steps, got decay_steps=-9970.\r\n",,terminal_output +1597,4943684,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 
18978416, 'total': 37989616}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 177, in \r\n lr_schedule = optax.warmup_cosine_decay_schedule(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/optax/schedules/_schedule.py"", line 605, in warmup_cosine_decay_schedule\r\n cosine_decay_schedule(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/optax/schedules/_schedule.py"", line 361, in cosine_decay_schedule\r\n raise ValueError(\r\nValueError: The cosine_decay_schedule requires positive decay_steps, got decay_steps=-9970.\r\n",,terminal_output +1598,4943756,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 177, in \r\n lr_schedule = optax.warmup_cosine_decay_schedule(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/optax/schedules/_schedule.py"", line 605, in warmup_cosine_decay_schedule\r\n cosine_decay_schedule(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/optax/schedules/_schedule.py"", line 361, in cosine_decay_schedule\r\n raise ValueError(\r\nValueError: The cosine_decay_schedule requires positive decay_steps, got decay_steps=-9970.\r\n",,terminal_output +1599,4944371,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 177, in \r\n lr_schedule = optax.warmup_cosine_decay_schedule(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/optax/schedules/_schedule.py"", line 605, in warmup_cosine_decay_schedule\r\n cosine_decay_schedule(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/optax/schedules/_schedule.py"", line 361, in cosine_decay_schedule\r\n raise ValueError(\r\nValueError: The cosine_decay_schedule requires positive decay_steps, got decay_steps=-9970.\r\n",,terminal_output +1600,4946907,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 177, in \r\n lr_schedule = optax.warmup_cosine_decay_schedule(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/optax/schedules/_schedule.py"", line 605, in warmup_cosine_decay_schedule\r\n cosine_decay_schedule(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/optax/schedules/_schedule.py"", line 361, in cosine_decay_schedule\r\n raise ValueError(\r\nValueError: The cosine_decay_schedule requires positive decay_steps, 
got decay_steps=-9970.\r\n",,terminal_output +1601,4947174,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +1602,4947685,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 177, in \r\n lr_schedule = optax.warmup_cosine_decay_schedule(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/optax/schedules/_schedule.py"", line 605, in warmup_cosine_decay_schedule\r\n cosine_decay_schedule(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/optax/schedules/_schedule.py"", line 361, in cosine_decay_schedule\r\n raise ValueError(\r\nValueError: The cosine_decay_schedule requires positive decay_steps, got decay_steps=-9970.\r\n",,terminal_output +1603,4947794,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_114841-9we7712k\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run tokenizer-batch-size-scaling-2-node-sqrt-lr-mihir-debug-grain-checkpointing-old\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/9we7712k\r\n",,terminal_output +1604,4947936,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 177, in \r\n lr_schedule = optax.warmup_cosine_decay_schedule(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/optax/schedules/_schedule.py"", line 605, in warmup_cosine_decay_schedule\r\n cosine_decay_schedule(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/optax/schedules/_schedule.py"", line 361, in cosine_decay_schedule\r\n raise ValueError(\r\nValueError: The cosine_decay_schedule requires positive decay_steps, got decay_steps=-9970.\r\n",,terminal_output +1605,4948536,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 177, in \r\n lr_schedule = optax.warmup_cosine_decay_schedule(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/optax/schedules/_schedule.py"", line 605, in warmup_cosine_decay_schedule\r\n cosine_decay_schedule(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/optax/schedules/_schedule.py"", line 361, in cosine_decay_schedule\r\n raise ValueError(\r\nValueError: The cosine_decay_schedule requires positive decay_steps, got 
decay_steps=-9970.\r\n",,terminal_output +1606,4949329,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run tokenizer-batch-size-scaling-2-node-sqrt-lr-mihir-debug-grain-checkpointing-old at: https://wandb.ai/instant-uv/jafar/runs/9we7712k\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_114841-9we7712k/logs\r\n",,terminal_output +1607,4950244,"TERMINAL",0,0,"srun: error: hkn0719: tasks 0,2: Exited with exit code 1\r\nsrun: error: hkn0806: tasks 4,6: Exited with exit code 1\r\n",,terminal_output +1608,4950364,"TERMINAL",0,0,"srun: error: hkn0719: task 1: Exited with exit code 1\r\nsrun: error: hkn0806: task 7: Exited with exit code 1\r\n",,terminal_output +1609,4950559,"TERMINAL",0,0,"srun: error: hkn0719: task 3: Exited with exit code 1\r\nsrun: error: hkn0806: task 5: Exited with exit code 1\r\n]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +1610,4969374,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +1611,4974428,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",648,0,"",shellscript,selection_mouse +1612,4975411,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",647,0,"",shellscript,selection_command +1613,4975867,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",631,21,"",shellscript,content +1614,4975881,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",635,0,"",shellscript,selection_command +1615,4978576,"train_tokenizer.py",0,0,"",python,tab +1616,4981425,"train_tokenizer.py",990,0,"",python,selection_mouse +1617,4981566,"train_tokenizer.py",980,12,"warmup_steps",python,selection_mouse +1618,4982203,"train_tokenizer.py",1021,0,"",python,selection_mouse +1619,4982207,"train_tokenizer.py",1020,0,"",python,selection_command +1620,4982584,"train_tokenizer.py",1012,9,"Tokenizer",python,selection_mouse +1621,4982585,"train_tokenizer.py",1013,8,"okenizer",python,selection_command +1622,4982736,"train_tokenizer.py",1006,16," # Tokenizer\n",python,selection_mouse +1623,4982881,"train_tokenizer.py",1021,0,"",python,selection_mouse +1624,4982882,"train_tokenizer.py",1020,0,"",python,selection_command +1625,4983378,"train_tokenizer.py",1003,0,"",python,selection_mouse +1626,4983514,"train_tokenizer.py",1000,5,"10000",python,selection_mouse +1627,4985296,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +1628,4986591,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",631,0," --num_steps=30 \\n",shellscript,content +1629,4987420,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",646,0,"",shellscript,selection_command +1630,4987682,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",651,0,"\n ",shellscript,content +1631,4988478,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",656,0,"-",shellscript,content +1632,4988479,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",657,0,"",shellscript,selection_keyboard +1633,4988597,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",657,0,"-",shellscript,content +1634,4988598,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",658,0,"",shellscript,selection_keyboard +1635,4988769,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",658,0,"w",shellscript,content 
+1636,4988770,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",659,0,"",shellscript,selection_keyboard +1637,4988938,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",659,0,"a",shellscript,content +1638,4988939,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",660,0,"",shellscript,selection_keyboard +1639,4989096,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",660,0,"r",shellscript,content +1640,4989096,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",661,0,"",shellscript,selection_keyboard +1641,4989168,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",661,0,"m",shellscript,content +1642,4989169,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",662,0,"",shellscript,selection_keyboard +1643,4989348,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",662,0,"u",shellscript,content +1644,4989348,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",663,0,"",shellscript,selection_keyboard +1645,4989479,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",663,0,"p",shellscript,content +1646,4989480,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",664,0,"",shellscript,selection_keyboard +1647,4989864,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",664,0,"_",shellscript,content +1648,4989865,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",665,0,"",shellscript,selection_keyboard +1649,4990277,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",665,0,"s",shellscript,content +1650,4990278,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",666,0,"",shellscript,selection_keyboard +1651,4990407,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",666,0,"t",shellscript,content +1652,4990408,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",667,0,"",shellscript,selection_keyboard +1653,4990574,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",667,0,"e",shellscript,content +1654,4990575,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",668,0,"",shellscript,selection_keyboard +1655,4990834,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",668,0,"s",shellscript,content +1656,4990835,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",669,0,"",shellscript,selection_keyboard +1657,4991384,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",668,1,"",shellscript,content +1658,4991663,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",668,0,"p",shellscript,content +1659,4991663,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",669,0,"",shellscript,selection_keyboard +1660,4991749,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",669,0,"s",shellscript,content +1661,4991749,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",670,0,"",shellscript,selection_keyboard +1662,4992041,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",670,0,"=",shellscript,content +1663,4992042,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",671,0,"",shellscript,selection_keyboard +1664,4992185,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",671,0,"1",shellscript,content 
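The repeated ValueError above is simple arithmetic: optax.warmup_cosine_decay_schedule hands decay_steps - warmup_steps to its cosine phase, and the recorded script passes args.num_steps as decay_steps, so --num_steps=30 against the script's default warmup_steps of 10000 yields 30 - 10000 = -9970. Hence the --warmup_steps=1 being typed into tester.sh here. A small sketch reproducing the failure and the fix; the learning-rate values are placeholders:

```python
# Reproduces the recorded crash, then the fix applied in tester.sh.
import optax

try:
    optax.warmup_cosine_decay_schedule(
        init_value=0.0, peak_value=3e-4,      # placeholder min/max lr
        warmup_steps=10_000, decay_steps=30,  # cosine phase: 30 - 10_000 = -9_970
    )
except ValueError as e:
    print(e)  # "...requires positive decay_steps, got decay_steps=-9970."

lr_schedule = optax.warmup_cosine_decay_schedule(
    init_value=0.0, peak_value=3e-4, warmup_steps=1, decay_steps=30,
)
print(lr_schedule(0), lr_schedule(29))  # one warmup step, then cosine decay
```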
+1665,4992185,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",672,0,"",shellscript,selection_keyboard +1666,4993361,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",672,0," ",shellscript,content +1667,4993362,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",673,0,"",shellscript,selection_keyboard +1668,4993457,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",673,0,"\",shellscript,content +1669,4993458,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",674,0,"",shellscript,selection_keyboard +1670,4994052,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",673,0,"",shellscript,selection_command +1671,4997616,"train_tokenizer.py",0,0,"",python,tab +1672,5004037,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",,terminal_output +1673,5004184,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +1674,5004339,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=409404\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0719\r\nSLURM_JOB_START_TIME=1752136462\r\nSLURM_STEP_NODELIST=hkn0719\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1752179662\r\nSLURM_PMI2_SRUN_PORT=43929\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=2\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3334543\r\nSLURM_PTY_PORT=44699\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=48\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=8\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0719\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=228\r\nSLURM_NODELIST=hkn[0719,0806]\r\nSLURM_SRUN_COMM_PORT=38371\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=8\r\nSLURM_NNODES=2\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3334543\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0719\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38371\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0719,0806]\r\n",,terminal_output +1675,5004444,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +1676,5033007,"TERMINAL",0,0,"bash",,terminal_focus +1677,5033948,"TERMINAL",0,0,"ls",,terminal_command +1678,5035700,"TERMINAL",0,0,"cd mihir-debug-grain-checkpointing-old/",,terminal_command +1679,5036021,"TERMINAL",0,0,"ls",,terminal_command +1680,5036040,"TERMINAL",0,0,"]633;E;2025-07-10 11:50:09 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug/mihir-debug-grain-checkpointing-old]633;D;0",,terminal_output +1681,5036955,"TERMINAL",0,0,"ls",,terminal_command +1682,5038207,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. 
Use `wandb login --relogin` to force relogin\r\n",,terminal_output +1683,5038932,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_115012-hd13xmoh\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run tokenizer-batch-size-scaling-2-node-sqrt-lr-mihir-debug-grain-checkpointing-old\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/hd13xmoh\r\n",,terminal_output +1684,5041795,"TERMINAL",0,0,"watch -n1 ls",,terminal_command +1685,5041846,"TERMINAL",0,0,"]633;E;2025-07-10 11:50:15 watch -n1 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C[?1049h(B[?7hEvery 1.0s: lshkn1990.localdomain: Thu Jul 10 11:50:15 2025",,terminal_output +1686,5042850,"TERMINAL",0,0,"6",,terminal_output +1687,5043941,"TERMINAL",0,0,"7",,terminal_output +1688,5044878,"TERMINAL",0,0,"8",,terminal_output +1689,5045989,"TERMINAL",0,0,"9",,terminal_output +1690,5047015,"TERMINAL",0,0,"20",,terminal_output +1691,5047931,"TERMINAL",0,0,"1",,terminal_output +1692,5048941,"TERMINAL",0,0,"2",,terminal_output +1693,5049984,"TERMINAL",0,0,"3",,terminal_output +1694,5050975,"TERMINAL",0,0,"4",,terminal_output +1695,5051996,"TERMINAL",0,0,"5",,terminal_output +1696,5053011,"TERMINAL",0,0,"6",,terminal_output +1697,5054025,"TERMINAL",0,0,"7",,terminal_output +1698,5055040,"TERMINAL",0,0,"8",,terminal_output +1699,5056053,"TERMINAL",0,0,"9",,terminal_output +1700,5057071,"TERMINAL",0,0,"30",,terminal_output +1701,5058172,"TERMINAL",0,0,"1",,terminal_output +1702,5059106,"TERMINAL",0,0,"2",,terminal_output +1703,5060124,"TERMINAL",0,0,"3",,terminal_output +1704,5061145,"TERMINAL",0,0,"4",,terminal_output +1705,5062156,"TERMINAL",0,0,"5",,terminal_output +1706,5063172,"TERMINAL",0,0,"6",,terminal_output +1707,5064217,"TERMINAL",0,0,"7",,terminal_output +1708,5065240,"TERMINAL",0,0,"9",,terminal_output +1709,5066220,"TERMINAL",0,0,"40",,terminal_output +1710,5067288,"TERMINAL",0,0,"1",,terminal_output +1711,5068312,"TERMINAL",0,0,"2",,terminal_output +1712,5069337,"TERMINAL",0,0,"3",,terminal_output +1713,5070286,"TERMINAL",0,0,"4",,terminal_output +1714,5071385,"TERMINAL",0,0,"5",,terminal_output +1715,5072322,"TERMINAL",0,0,"6",,terminal_output +1716,5073432,"TERMINAL",0,0,"7",,terminal_output +1717,5074353,"TERMINAL",0,0,"8",,terminal_output +1718,5075380,"TERMINAL",0,0,"9",,terminal_output +1719,5076401,"TERMINAL",0,0,"50",,terminal_output +1720,5077426,"TERMINAL",0,0,"1",,terminal_output +1721,5078449,"TERMINAL",0,0,"2",,terminal_output +1722,5079440,"TERMINAL",0,0,"3",,terminal_output +1723,5080498,"TERMINAL",0,0,"4",,terminal_output +1724,5081461,"TERMINAL",0,0,"5",,terminal_output +1725,5082545,"TERMINAL",0,0,"6",,terminal_output +1726,5083490,"TERMINAL",0,0,"7",,terminal_output +1727,5084635,"TERMINAL",0,0,"8",,terminal_output +1728,5085617,"TERMINAL",0,0,"9",,terminal_output +1729,5086753,"TERMINAL",0,0,"1:00",,terminal_output +1730,5087561,"TERMINAL",0,0,"1",,terminal_output +1731,5088191,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +1732,5088576,"TERMINAL",0,0,"2",,terminal_output +1733,5089652,"TERMINAL",0,0,"3",,terminal_output +1734,5089869,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",829,0,"",shellscript,selection_mouse 
+1735,5090036,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",807,23,"log_checkpoint_interval",shellscript,selection_mouse +1736,5090369,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",801,34," --log_checkpoint_interval=2 \\n",shellscript,selection_mouse +1737,5090606,"TERMINAL",0,0,"4",,terminal_output +1738,5090961,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",829,0,"",shellscript,selection_mouse +1739,5090961,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",807,23,"log_checkpoint_interval",shellscript,selection_mouse +1740,5091146,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",801,34," --log_checkpoint_interval=2 \\n",shellscript,selection_mouse +1741,5091623,"TERMINAL",0,0,"5",,terminal_output +1742,5091876,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",832,0,"",shellscript,selection_mouse +1743,5091877,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",831,1,"2",shellscript,selection_mouse +1744,5092048,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",807,25,"log_checkpoint_interval=2",shellscript,selection_mouse +1745,5092121,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",831,15,"2 \\n --log \",shellscript,selection_mouse +1746,5092202,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",807,25,"log_checkpoint_interval=2",shellscript,selection_mouse +1747,5092641,"TERMINAL",0,0,"6",,terminal_output +1748,5092688,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",811,0,"",shellscript,selection_mouse +1749,5092689,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",807,23,"log_checkpoint_interval",shellscript,selection_mouse +1750,5092911,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",807,39,"log_checkpoint_interval=2 \\n --log \",shellscript,selection_mouse +1751,5092970,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",807,23,"log_checkpoint_interval",shellscript,selection_mouse +1752,5092990,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",807,24,"log_checkpoint_interval=",shellscript,selection_mouse +1753,5093004,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",807,25,"log_checkpoint_interval=2",shellscript,selection_mouse +1754,5093062,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",807,26,"log_checkpoint_interval=2 ",shellscript,selection_mouse +1755,5093062,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",807,27,"log_checkpoint_interval=2 \",shellscript,selection_mouse +1756,5093452,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",834,0,"",shellscript,selection_mouse +1757,5093456,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",833,0,"",shellscript,selection_command +1758,5093595,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",834,0,"",shellscript,selection_mouse +1759,5093597,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",833,0,"",shellscript,selection_command +1760,5093671,"TERMINAL",0,0,"7",,terminal_output +1761,5093756,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",832,1," ",shellscript,selection_mouse +1762,5093774,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",832,2," \",shellscript,selection_command 
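The selections above land on --log_checkpoint_interval=2 in tester.sh, and the `watch -n1 ls` loop below polls the run's checkpoint directory for the resulting step folders. With the CheckpointManagerOptions recorded in train_tokenizer.py, those folders should appear zero-padded to six digits; a sketch of the relevant options, with values taken from the recorded run:

```python
# Checkpoint cadence as configured in this run: save every 2 steps, keep the
# 3 newest, and name step directories 000002, 000004, ... as seen under
# `watch -n1 ls` in the checkpoint directory.
import orbax.checkpoint as ocp

options = ocp.CheckpointManagerOptions(
    save_interval_steps=2,        # matches --log_checkpoint_interval=2
    max_to_keep=3,                # older step dirs are garbage-collected
    step_format_fixed_length=6,   # zero-padded step directory names
    cleanup_tmp_directories=True,
)
```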
+1763,5093785,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",807,27,"log_checkpoint_interval=2 \",shellscript,selection_mouse +1764,5094004,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",806,28,"-log_checkpoint_interval=2 \",shellscript,selection_mouse +1765,5094429,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",806,0,"",shellscript,selection_mouse +1766,5094676,"TERMINAL",0,0,"8",,terminal_output +1767,5095247,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",768,0,"",shellscript,selection_mouse +1768,5095248,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",767,0,"",shellscript,selection_command +1769,5095689,"TERMINAL",0,0,"9",,terminal_output +1770,5096744,"TERMINAL",0,0,"srun",,terminal_focus +1771,5096756,"TERMINAL",0,0,"10",,terminal_output +1772,5097722,"TERMINAL",0,0,"1",,terminal_output +1773,5098737,"TERMINAL",0,0,"2",,terminal_output +1774,5099756,"TERMINAL",0,0,"3",,terminal_output +1775,5100785,"TERMINAL",0,0,"4",,terminal_output +1776,5101788,"TERMINAL",0,0,"5",,terminal_output +1777,5102816,"TERMINAL",0,0,"6",,terminal_output +1778,5103818,"TERMINAL",0,0,"7",,terminal_output +1779,5104869,"TERMINAL",0,0,"8",,terminal_output +1780,5105848,"TERMINAL",0,0,"9",,terminal_output +1781,5106937,"TERMINAL",0,0,"20",,terminal_output +1782,5107952,"TERMINAL",0,0,"1",,terminal_output +1783,5108966,"TERMINAL",0,0,"2",,terminal_output +1784,5110092,"TERMINAL",0,0,"3",,terminal_output +1785,5111020,"TERMINAL",0,0,"4",,terminal_output +1786,5112039,"TERMINAL",0,0,"5",,terminal_output +1787,5112961,"TERMINAL",0,0,"6",,terminal_output +1788,5113985,"TERMINAL",0,0,"7",,terminal_output +1789,5114993,"TERMINAL",0,0,"8",,terminal_output +1790,5116009,"TERMINAL",0,0,"9",,terminal_output +1791,5117055,"TERMINAL",0,0,"30",,terminal_output +1792,5118041,"TERMINAL",0,0,"1",,terminal_output +1793,5119058,"TERMINAL",0,0,"2",,terminal_output +1794,5120082,"TERMINAL",0,0,"3",,terminal_output +1795,5120099,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +1796,5120538,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\nRunning on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\nRunning on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +1797,5121091,"TERMINAL",0,0,"4",,terminal_output +1798,5122108,"TERMINAL",0,0,"5",,terminal_output +1799,5122893,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +1800,5123199,"TERMINAL",0,0,"6",,terminal_output +1801,5123200,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +1802,5124225,"TERMINAL",0,0,"7",,terminal_output 
+1803,5125247,"TERMINAL",0,0,"8",,terminal_output +1804,5126271,"TERMINAL",0,0,"9",,terminal_output +1805,5127296,"TERMINAL",0,0,"41",,terminal_output +1806,5128212,"TERMINAL",0,0,"2",,terminal_output +1807,5129242,"TERMINAL",0,0,"3",,terminal_output +1808,5130237,"TERMINAL",0,0,"4",,terminal_output +1809,5131291,"TERMINAL",0,0,"5",,terminal_output +1810,5132268,"TERMINAL",0,0,"6",,terminal_output +1811,5133340,"TERMINAL",0,0,"7",,terminal_output +1812,5134354,"TERMINAL",0,0,"8",,terminal_output +1813,5135321,"TERMINAL",0,0,"9",,terminal_output +1814,5136333,"TERMINAL",0,0,"50",,terminal_output +1815,5136537,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +1816,5136959,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\nRunning on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +1817,5137434,"TERMINAL",0,0,"1",,terminal_output +1818,5138367,"TERMINAL",0,0,"2",,terminal_output +1819,5139390,"TERMINAL",0,0,"3",,terminal_output +1820,5139624,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +1821,5139995,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +1822,5140505,"TERMINAL",0,0,"4",,terminal_output +1823,5141427,"TERMINAL",0,0,"5",,terminal_output +1824,5141806,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +1825,5142145,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +1826,5142451,"TERMINAL",0,0,"6",,terminal_output +1827,5143452,"TERMINAL",0,0,"7",,terminal_output +1828,5144467,"TERMINAL",0,0,"8",,terminal_output +1829,5145484,"TERMINAL",0,0,"9",,terminal_output +1830,5146504,"TERMINAL",0,0,"2:00",,terminal_output +1831,5147523,"TERMINAL",0,0,"1",,terminal_output +1832,5148536,"TERMINAL",0,0,"2",,terminal_output +1833,5149572,"TERMINAL",0,0,"3",,terminal_output +1834,5150576,"TERMINAL",0,0,"4",,terminal_output +1835,5151587,"TERMINAL",0,0,"5",,terminal_output +1836,5152613,"TERMINAL",0,0,"6",,terminal_output +1837,5153625,"TERMINAL",0,0,"7",,terminal_output +1838,5154641,"TERMINAL",0,0,"8",,terminal_output +1839,5155658,"TERMINAL",0,0,"9",,terminal_output +1840,5156705,"TERMINAL",0,0,"10",,terminal_output +1841,5157682,"TERMINAL",0,0,"1",,terminal_output +1842,5158700,"TERMINAL",0,0,"2",,terminal_output +1843,5159784,"TERMINAL",0,0,"3",,terminal_output +1844,5160766,"TERMINAL",0,0,"4",,terminal_output +1845,5161748,"TERMINAL",0,0,"5",,terminal_output +1846,5162763,"TERMINAL",0,0,"6",,terminal_output +1847,5163853,"TERMINAL",0,0,"7",,terminal_output +1848,5164798,"TERMINAL",0,0,"8",,terminal_output +1849,5165812,"TERMINAL",0,0,"9",,terminal_output +1850,5166829,"TERMINAL",0,0,"20",,terminal_output 
+1851,5167849,"TERMINAL",0,0,"1",,terminal_output +1852,5168869,"TERMINAL",0,0,"2",,terminal_output +1853,5169894,"TERMINAL",0,0,"3",,terminal_output +1854,5170917,"TERMINAL",0,0,"4",,terminal_output +1855,5171942,"TERMINAL",0,0,"5",,terminal_output +1856,5172919,"TERMINAL",0,0,"6",,terminal_output +1857,5173935,"TERMINAL",0,0,"7",,terminal_output +1858,5175014,"TERMINAL",0,0,"8",,terminal_output +1859,5175967,"TERMINAL",0,0,"9",,terminal_output +1860,5177065,"TERMINAL",0,0,"30",,terminal_output +1861,5178003,"TERMINAL",0,0,"1",,terminal_output +1862,5179015,"TERMINAL",0,0,"2",,terminal_output +1863,5180034,"TERMINAL",0,0,"3",,terminal_output +1864,5181046,"TERMINAL",0,0,"4",,terminal_output +1865,5182062,"TERMINAL",0,0,"5",,terminal_output +1866,5183081,"TERMINAL",0,0,"6",,terminal_output +1867,5184095,"TERMINAL",0,0,"7",,terminal_output +1868,5185114,"TERMINAL",0,0,"8",,terminal_output +1869,5186130,"TERMINAL",0,0,"9",,terminal_output +1870,5187200,"TERMINAL",0,0,"40",,terminal_output +1871,5188161,"TERMINAL",0,0,"1",,terminal_output +1872,5189182,"TERMINAL",0,0,"2",,terminal_output +1873,5190198,"TERMINAL",0,0,"4",,terminal_output +1874,5191294,"TERMINAL",0,0,"5",,terminal_output +1875,5192230,"TERMINAL",0,0,"6",,terminal_output +1876,5193246,"TERMINAL",0,0,"7",,terminal_output +1877,5194353,"TERMINAL",0,0,"8",,terminal_output +1878,5195288,"TERMINAL",0,0,"9",,terminal_output +1879,5196299,"TERMINAL",0,0,"50",,terminal_output +1880,5197318,"TERMINAL",0,0,"1",,terminal_output +1881,5198360,"TERMINAL",0,0,"2",,terminal_output +1882,5199357,"TERMINAL",0,0,"3",,terminal_output +1883,5200411,"TERMINAL",0,0,"4",,terminal_output +1884,5201431,"TERMINAL",0,0,"5",,terminal_output +1885,5202398,"TERMINAL",0,0,"6",,terminal_output +1886,5203416,"TERMINAL",0,0,"7",,terminal_output +1887,5204432,"TERMINAL",0,0,"8",,terminal_output +1888,5205538,"TERMINAL",0,0,"9",,terminal_output +1889,5206457,"TERMINAL",0,0,"3:00",,terminal_output +1890,5207476,"TERMINAL",0,0,"1",,terminal_output +1891,5208497,"TERMINAL",0,0,"2",,terminal_output +1892,5209509,"TERMINAL",0,0,"3",,terminal_output +1893,5210523,"TERMINAL",0,0,"4",,terminal_output +1894,5211541,"TERMINAL",0,0,"5",,terminal_output +1895,5212558,"TERMINAL",0,0,"6",,terminal_output +1896,5213618,"TERMINAL",0,0,"7",,terminal_output +1897,5214588,"TERMINAL",0,0,"8",,terminal_output +1898,5215613,"TERMINAL",0,0,"9",,terminal_output +1899,5216627,"TERMINAL",0,0,"10",,terminal_output +1900,5217716,"TERMINAL",0,0,"1",,terminal_output +1901,5218655,"TERMINAL",0,0,"2",,terminal_output +1902,5219670,"TERMINAL",0,0,"3",,terminal_output +1903,5220791,"TERMINAL",0,0,"4",,terminal_output +1904,5221753,"TERMINAL",0,0,"5",,terminal_output +1905,5222728,"TERMINAL",0,0,"6",,terminal_output +1906,5223743,"TERMINAL",0,0,"7",,terminal_output +1907,5224783,"TERMINAL",0,0,"8",,terminal_output +1908,5225805,"TERMINAL",0,0,"9",,terminal_output +1909,5226792,"TERMINAL",0,0,"20",,terminal_output +1910,5227853,"TERMINAL",0,0,"1",,terminal_output +1911,5228822,"TERMINAL",0,0,"2",,terminal_output +1912,5229837,"TERMINAL",0,0,"3",,terminal_output +1913,5230853,"TERMINAL",0,0,"4",,terminal_output +1914,5231871,"TERMINAL",0,0,"5",,terminal_output +1915,5232971,"TERMINAL",0,0,"6",,terminal_output +1916,5233902,"TERMINAL",0,0,"7",,terminal_output +1917,5235022,"TERMINAL",0,0,"8",,terminal_output +1918,5236045,"TERMINAL",0,0,"9",,terminal_output +1919,5236967,"TERMINAL",0,0,"30",,terminal_output +1920,5237999,"TERMINAL",0,0,"1",,terminal_output 
+1921,5239018,"TERMINAL",0,0,"2",,terminal_output +1922,5240042,"TERMINAL",0,0,"3",,terminal_output +1923,5241055,"TERMINAL",0,0,"4",,terminal_output +1924,5242049,"TERMINAL",0,0,"5",,terminal_output +1925,5243047,"TERMINAL",0,0,"6",,terminal_output +1926,5244066,"TERMINAL",0,0,"7",,terminal_output +1927,5245076,"TERMINAL",0,0,"8",,terminal_output +1928,5246093,"TERMINAL",0,0,"9",,terminal_output +1929,5247113,"TERMINAL",0,0,"40",,terminal_output +1930,5248198,"TERMINAL",0,0,"1",,terminal_output +1931,5249212,"TERMINAL",0,0,"3",,terminal_output +1932,5250231,"TERMINAL",0,0,"4",,terminal_output +1933,5251243,"TERMINAL",0,0,"5",,terminal_output +1934,5252258,"TERMINAL",0,0,"6",,terminal_output +1935,5253275,"TERMINAL",0,0,"7",,terminal_output +1936,5254295,"TERMINAL",0,0,"8",,terminal_output +1937,5255315,"TERMINAL",0,0,"9",,terminal_output +1938,5255942,"TERMINAL",0,0,"watch",,terminal_focus +1939,5256327,"TERMINAL",0,0,"50",,terminal_output +1940,5257342,"TERMINAL",0,0,"1",,terminal_output +1941,5258368,"TERMINAL",0,0,"2",,terminal_output +1942,5259372,"TERMINAL",0,0,"3",,terminal_output +1943,5260390,"TERMINAL",0,0,"4",,terminal_output +1944,5261440,"TERMINAL",0,0,"5",,terminal_output +1945,5262426,"TERMINAL",0,0,"6",,terminal_output +1946,5263492,"TERMINAL",0,0,"7",,terminal_output +1947,5264458,"TERMINAL",0,0,"8",,terminal_output +1948,5265158,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug/mihir-debug-grain-checkpointing-old]633;D;0",,terminal_output +1949,5265682,"TERMINAL",0,0,"ls",,terminal_command +1950,5266405,"TERMINAL",0,0,"ls",,terminal_command +1951,5266426,"TERMINAL",0,0,"]633;E;2025-07-10 11:54:00 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug/mihir-debug-grain-checkpointing-old]633;D;0",,terminal_output +1952,5268411,"TERMINAL",0,0,"watch -n1 ls",,terminal_command +1953,5268463,"TERMINAL",0,0,"]633;E;2025-07-10 11:54:02 watch -n1 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C[?1049h(B[?7hEvery 1.0s: lshkn1990.localdomain: Thu Jul 10 11:54:02 2025",,terminal_output +1954,5269225,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug/mihir-debug-grain-checkpointing-old]633;D;0",,terminal_output +1955,5278540,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +1956,5279838,"TERMINAL",0,0,"srun",,terminal_focus +1957,5283548,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +1958,5285399,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",825,0,"",shellscript,selection_mouse +1959,5285562,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",807,23,"log_checkpoint_interval",shellscript,selection_mouse +1960,5289695,"train_tokenizer.py",0,0,"",python,tab +1961,5292302,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +1962,5302004,"TERMINAL",0,0,"Step 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.1537831425666809\r\nStep 4, loss: 0.1349092721939087\r\nStep 5, loss: 0.12313485145568848\r\nStep 6, loss: 0.11425906419754028\r\nStep 7, loss: 0.11623465269804001\r\nStep 8, loss: 0.11064581573009491\r\nStep 9, loss: 0.11033858358860016\r\nStep 10, loss: 0.1102488785982132\r\nStep 11, loss: 
0.12258508056402206\r\nStep 12, loss: 0.11272275447845459\r\nStep 13, loss: 0.10657089948654175\r\nStep 14, loss: 0.10401155799627304\r\nStep 15, loss: 0.11549561470746994\r\nStep 16, loss: 0.0998886376619339\r\nStep 17, loss: 0.10765416920185089\r\nStep 18, loss: 0.09560668468475342\r\nStep 19, loss: 0.10045348107814789\r\nStep 20, loss: 0.11126334220170975\r\nStep 21, loss: 0.1092255562543869\r\nStep 22, loss: 0.10868747532367706\r\nStep 23, loss: 0.10156326740980148\r\nStep 24, loss: 0.09680286794900894\r\nStep 25, loss: 0.09754930436611176\r\nStep 26, loss: 0.09146726876497269\r\nStep 27, loss: 0.10419808328151703\r\nStep 28, loss: 0.09570800513029099\r\nStep 29, loss: 0.1064097061753273\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.1537831425666809\r\nStep 4, loss: 0.1349092721939087\r\nStep 5, loss: 0.12313485145568848\r\nStep 6, loss: 0.11425906419754028\r\nStep 7, loss: 0.11623465269804001\r\nStep 8, loss: 0.11064581573009491\r\nStep 9, loss: 0.11033858358860016\r\nStep 10, loss: 0.1102488785982132\r\nStep 11, loss: 0.12258508056402206\r\nStep 12, loss: 0.11272275447845459\r\nStep 13, loss: 0.10657089948654175\r\nStep 14, loss: 0.10401155799627304\r\nStep 15, loss: 0.11549561470746994\r\nStep 16, loss: 0.0998886376619339\r\nStep 17, loss: 0.10765416920185089\r\nStep 18, loss: 0.09560668468475342\r\nStep 19, loss: 0.10045348107814789\r\nStep 20, loss: 0.11126334220170975\r\nStep 21, loss: 0.1092255562543869\r\nStep 22, loss: 0.10868747532367706\r\nStep 23, loss: 0.10156326740980148\r\nStep 24, loss: 0.09680286794900894\r\nStep 25, loss: 0.09754930436611176\r\nStep 26, loss: 0.09146726876497269\r\nStep 27, loss: 0.10419808328151703\r\nStep 28, loss: 0.09570800513029099\r\nStep 29, loss: 0.1064097061753273\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.1537831425666809\r\nStep 4, loss: 0.1349092721939087\r\nStep 5, loss: 0.12313485145568848\r\nStep 6, loss: 0.11425906419754028\r\nStep 7, loss: 0.11623465269804001\r\nStep 8, loss: 0.11064581573009491\r\nStep 9, loss: 0.11033858358860016\r\nStep 10, loss: 0.1102488785982132\r\nStep 11, loss: 0.12258508056402206\r\nStep 12, loss: 0.11272275447845459\r\nStep 13, loss: 0.10657089948654175\r\nStep 14, loss: 0.10401155799627304\r\nStep 15, loss: 0.11549561470746994\r\nStep 16, loss: 0.0998886376619339\r\nStep 17, loss: 0.10765416920185089\r\nStep 18, loss: 0.09560668468475342\r\nStep 19, loss: 0.10045348107814789\r\nStep 20, loss: 0.11126334220170975\r\nStep 21, loss: 0.1092255562543869\r\nStep 22, loss: 0.10868747532367706\r\nStep 23, loss: 0.10156326740980148\r\nStep 24, loss: 0.09680286794900894\r\nStep 25, loss: 0.09754930436611176\r\nStep 26, loss: 0.09146726876497269\r\nStep 27, loss: 0.10419808328151703\r\nStep 28, loss: 0.09570800513029099\r\nStep 29, loss: 0.1064097061753273\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.1537831425666809\r\nStep 4, loss: 0.1349092721939087\r\nStep 5, loss: 0.12313485145568848\r\nStep 6, loss: 0.11425906419754028\r\nStep 7, loss: 0.11623465269804001\r\nStep 8, loss: 0.11064581573009491\r\nStep 9, loss: 0.11033858358860016\r\nStep 10, loss: 0.1102488785982132\r\nStep 11, loss: 0.12258508056402206\r\nStep 12, loss: 0.11272275447845459\r\nStep 13, loss: 0.10657089948654175\r\nStep 14, loss: 0.10401155799627304\r\nStep 15, loss: 0.11549561470746994\r\nStep 
16, loss: 0.0998886376619339\r\nStep 17, loss: 0.10765416920185089\r\nStep 18, loss: 0.09560668468475342\r\nStep 19, loss: 0.10045348107814789\r\nStep 20, loss: 0.11126334220170975\r\nStep 21, loss: 0.1092255562543869\r\nStep 22, loss: 0.10868747532367706\r\nStep 23, loss: 0.10156326740980148\r\nStep 24, loss: 0.09680286794900894\r\nStep 25, loss: 0.09754930436611176\r\nStep 26, loss: 0.09146726876497269\r\nStep 27, loss: 0.10419808328151703\r\nStep 28, loss: 0.09570800513029099\r\nStep 29, loss: 0.1064097061753273\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.1537831425666809\r\nStep 4, loss: 0.1349092721939087\r\nStep 5, loss: 0.12313485145568848\r\nStep 6, loss: 0.11425906419754028\r\nStep 7, loss: 0.11623465269804001\r\nStep 8, loss: 0.11064581573009491\r\nStep 9, loss: 0.11033858358860016\r\nStep 10, loss: 0.1102488785982132\r\nStep 11, loss: 0.12258508056402206\r\nStep 12, loss: 0.11272275447845459\r\nStep 13, loss: 0.10657089948654175\r\nStep 14, loss: 0.10401155799627304\r\nStep 15, loss: 0.11549561470746994\r\nStep 16, loss: 0.0998886376619339\r\nStep 17, loss: 0.10765416920185089\r\nStep 18, loss: 0.09560668468475342\r\nStep 19, loss: 0.10045348107814789\r\nStep 20, loss: 0.11126334220170975\r\nStep 21, loss: 0.1092255562543869\r\nStep 22, loss: 0.10868747532367706\r\nStep 23, loss: 0.10156326740980148\r\nStep 24, loss: 0.09680286794900894\r\nStep 25, loss: 0.09754930436611176\r\nStep 26, loss: 0.09146726876497269\r\nStep 27, loss: 0.10419808328151703\r\nStep 28, loss: 0.09570800513029099\r\nStep 29, loss: 0.1064097061753273\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.1537831425666809\r\nStep 4, loss: 0.1349092721939087\r\nStep 5, loss: 0.12313485145568848\r\nStep 6, loss: 0.11425906419754028\r\nStep 7, loss: 0.11623465269804001\r\nStep 8, loss: 0.11064581573009491\r\nStep 9, loss: 0.11033858358860016\r\nStep 10, loss: 0.1102488785982132\r\nStep 11, loss: 0.12258508056402206\r\nStep 12, loss: 0.11272275447845459\r\nStep 13, loss: 0.10657089948654175\r\nStep 14, loss: 0.10401155799627304\r\nStep 15, loss: 0.11549561470746994\r\nStep 16, loss: 0.0998886376619339\r\nStep 17, loss: 0.10765416920185089\r\nStep 18, loss: 0.09560668468475342\r\nStep 19, loss: 0.10045348107814789\r\nStep 20, loss: 0.11126334220170975\r\nStep 21, loss: 0.1092255562543869\r\nStep 22, loss: 0.10868747532367706\r\nStep 23, loss: 0.10156326740980148\r\nStep 24, loss: 0.09680286794900894\r\nStep 25, loss: 0.09754930436611176\r\nStep 26, loss: 0.09146726876497269\r\nStep 27, loss: 0.10419808328151703\r\nStep 28, loss: 0.09570800513029099\r\nStep 29, loss: 0.1064097061753273\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.1537831425666809\r\nStep 4, loss: 0.1349092721939087\r\nStep 5, loss: 0.12313485145568848\r\nStep 6, loss: 0.11425906419754028\r\nStep 7, loss: 0.11623465269804001\r\nStep 8, loss: 0.11064581573009491\r\nStep 9, loss: 0.11033858358860016\r\nStep 10, loss: 0.1102488785982132\r\nStep 11, loss: 0.12258508056402206\r\nStep 12, loss: 0.11272275447845459\r\nStep 13, loss: 0.10657089948654175\r\nStep 14, loss: 0.10401155799627304\r\nStep 15, loss: 0.11549561470746994\r\nStep 16, loss: 0.0998886376619339\r\nStep 17, loss: 0.10765416920185089\r\nStep 18, loss: 0.09560668468475342\r\nStep 19, loss: 0.10045348107814789\r\nStep 20, loss: 
0.11126334220170975\r\nStep 21, loss: 0.1092255562543869\r\nStep 22, loss: 0.10868747532367706\r\nStep 23, loss: 0.10156326740980148\r\nStep 24, loss: 0.09680286794900894\r\nStep 25, loss: 0.09754930436611176\r\nStep 26, loss: 0.09146726876497269\r\nStep 27, loss: 0.10419808328151703\r\nStep 28, loss: 0.09570800513029099\r\nStep 29, loss: 0.1064097061753273\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.1537831425666809\r\nStep 4, loss: 0.1349092721939087\r\nStep 5, loss: 0.12313485145568848\r\nStep 6, loss: 0.11425906419754028\r\nStep 7, loss: 0.11623465269804001\r\nStep 8, loss: 0.11064581573009491\r\nStep 9, loss: 0.11033858358860016\r\nStep 10, loss: 0.1102488785982132\r\nStep 11, loss: 0.12258508056402206\r\nStep 12, loss: 0.11272275447845459\r\nStep 13, loss: 0.10657089948654175\r\nStep 14, loss: 0.10401155799627304\r\nStep 15, loss: 0.11549561470746994\r\nStep 16, loss: 0.0998886376619339\r\nStep 17, loss: 0.10765416920185089\r\nStep 18, loss: 0.09560668468475342\r\nStep 19, loss: 0.10045348107814789\r\nStep 20, loss: 0.11126334220170975\r\nStep 21, loss: 0.1092255562543869\r\nStep 22, loss: 0.10868747532367706\r\nStep 23, loss: 0.10156326740980148\r\nStep 24, loss: 0.09680286794900894\r\nStep 25, loss: 0.09754930436611176\r\nStep 26, loss: 0.09146726876497269\r\nStep 27, loss: 0.10419808328151703\r\nStep 28, loss: 0.09570800513029099\r\nStep 29, loss: 0.1064097061753273\r\n",,terminal_output +1963,5302416,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 8, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 10, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 9, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +1964,5302478,"TERMINAL",0,0,"Filtering out episode with length 4, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 4, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +1965,5302565,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 15, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 2, which is shorter than the requested sequence length 16.\r\n",,terminal_output +1966,5302636,"TERMINAL",0,0,"Filtering out episode with length 8, which is 
shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +1967,5302751,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 7, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 3, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 5, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 11, which is shorter than the requested sequence length 16.\r\n",,terminal_output +1968,5302820,"TERMINAL",0,0,"bash",,terminal_focus +1969,5303731,"TERMINAL",0,0,"Filtering out episode with length 12, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 14, which is shorter than the requested sequence length 16.\r\n",,terminal_output +1970,5304277,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 6, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +1971,5304346,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +1972,5305908,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run tokenizer-batch-size-scaling-2-node-sqrt-lr-mihir-debug-grain-checkpointing-old at: https://wandb.ai/instant-uv/jafar/runs/hd13xmoh\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_115012-hd13xmoh/logs\r\n",,terminal_output +1973,5307122,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d 
'\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 12 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +1974,5307237,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +1975,5308089,"TERMINAL",0,0,"]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +1976,5314471,"TERMINAL",0,0,"ls",,terminal_command +1977,5315295,"TERMINAL",0,0,"ls",,terminal_command +1978,5316298,"TERMINAL",0,0,"cd ..",,terminal_command +1979,5316791,"TERMINAL",0,0,"ls",,terminal_command +1980,5339215,"TERMINAL",0,0,"cd /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared//checkpoints/debug/mihir-debug-grain-checkpointing-old",,terminal_command +1981,5340003,"TERMINAL",0,0,"ls",,terminal_command +1982,5340007,"TERMINAL",0,0,"]633;E;2025-07-10 11:55:13 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug/mihir-debug-grain-checkpointing-old]633;D;0",,terminal_output +1983,5360569,"TERMINAL",0,0,"srun",,terminal_focus +1984,5361570,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +1985,5362492,"train_tokenizer.py",0,0,"",python,tab +1986,5363976,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +1987,5364742,"train_tokenizer.py",0,0,"",python,tab +1988,5365964,"train_tokenizer.py",585,0,"",python,selection_mouse +1989,5368179,"train_tokenizer.py",584,0,"",python,selection_mouse +1990,5369139,"train_tokenizer.py",600,0,"",python,selection_mouse +1991,5369691,"train_tokenizer.py",604,0,"",python,selection_mouse +1992,5369830,"train_tokenizer.py",601,4,"Args",python,selection_mouse +1993,5371055,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +1994,5372562,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",832,0,"",shellscript,selection_mouse +1995,5376637,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",529,0,"",shellscript,selection_mouse +1996,5380728,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",421,0,"",shellscript,selection_mouse +1997,5381273,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",422,0,"",shellscript,selection_mouse +1998,5382908,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",422,1,"",shellscript,content +1999,5385296,"TERMINAL",0,0,"bash",,terminal_focus +2000,5388013,"TERMINAL",0,0,"srun",,terminal_focus +2001,5389008,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",,terminal_output 
+2002,5394028,"TERMINAL",0,0,"bash",,terminal_focus +2003,5396361,"TERMINAL",0,0,"ls",,terminal_command +2004,5397385,"TERMINAL",0,0,"cd ..",,terminal_command +2005,5397749,"TERMINAL",0,0,"ls",,terminal_command +2006,5402268,"TERMINAL",0,0,"cd mihir-debug-grain-checkpointing-old/",,terminal_command +2007,5404850,"TERMINAL",0,0,"ls -la",,terminal_command +2008,5406961,"TERMINAL",0,0,"cd ..",,terminal_command +2009,5410937,"TERMINAL",0,0,"rm .rf mihir-debug-grain-checkpointing-old/",,terminal_command +2010,5415991,"TERMINAL",0,0,"rm -rf mihir-debug-grain-checkpointing-old/",,terminal_command +2011,5416000,"TERMINAL",0,0,"]633;E;2025-07-10 11:56:29 rm -rf mihir-debug-grain-checkpointing-old/;46e99295-9803-4583-b36f-e400fb9e619d]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug]633;D;0",,terminal_output +2012,5418621,"TERMINAL",0,0,"ls",,terminal_command +2013,5418637,"TERMINAL",0,0,"]633;E;2025-07-10 11:56:32 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C0000 debug-mihir\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug]633;D;0",,terminal_output +2014,5419915,"TERMINAL",0,0,"srun",,terminal_focus +2015,5421100,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +2016,5421435,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=409404\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0719\r\nSLURM_JOB_START_TIME=1752136462\r\nSLURM_STEP_NODELIST=hkn0719\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1752179662\r\nSLURM_PMI2_SRUN_PORT=43929\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=2\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3334543\r\nSLURM_PTY_PORT=44699\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=48\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=8\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0719\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=228\r\nSLURM_NODELIST=hkn[0719,0806]\r\nSLURM_SRUN_COMM_PORT=38371\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=8\r\nSLURM_NNODES=2\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3334543\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0719\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38371\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0719,0806]\r\n",,terminal_output +2017,5421570,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +2018,5458189,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. 
Use `wandb login --relogin` to force relogin\r\n",,terminal_output +2019,5458776,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_115712-k9nteunj\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run tokenizer-batch-size-scaling-2-node-sqrt-lr-mihir-debug-grain-checkpointing-old\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/k9nteunj\r\n",,terminal_output +2020,5487252,"TERMINAL",0,0,"bash",,terminal_focus +2021,5488069,"TERMINAL",0,0,"ls",,terminal_command +2022,5514186,"train_tokenizer.py",0,0,"",python,tab +2023,5518265,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2024,5518321,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2025,5518519,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\nRunning on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2026,5518593,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2027,5518644,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2028,5528888,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +2029,5544221,"TERMINAL",0,0,"ls",,terminal_command +2030,5544229,"TERMINAL",0,0,"]633;E;2025-07-10 11:58:38 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C0000 debug-mihir mihir-debug-grain-checkpointing-old\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug]633;D;0",,terminal_output +2031,5545954,"TERMINAL",0,0,"cd mihir-debug-grain-checkpointing-old/",,terminal_command +2032,5546320,"TERMINAL",0,0,"ls",,terminal_command +2033,5547084,"TERMINAL",0,0,"ls",,terminal_command +2034,5547685,"TERMINAL",0,0,"ls",,terminal_command +2035,5550878,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2036,5551267,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\nRunning on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2037,5558632,"train_tokenizer.py",0,0,"",python,tab 
+2038,5559199,"train_tokenizer.py",1222,0,"",python,selection_mouse +2039,5560464,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2040,5560850,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2041,5562600,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2042,5562909,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2043,5565468,"train_tokenizer.py",1498,0,"",python,selection_command +2044,5565665,"train_tokenizer.py",6711,0,"",python,selection_command +2045,5568021,"train_tokenizer.py",6758,0,"",python,selection_mouse +2046,5568022,"train_tokenizer.py",6757,0,"",python,selection_command +2047,5568570,"train_tokenizer.py",6724,0,"",python,selection_mouse +2048,5568743,"train_tokenizer.py",6711,23,"log_checkpoint_interval",python,selection_mouse +2049,5618313,"TERMINAL",0,0,"srun",,terminal_focus +2050,5631752,"train_tokenizer.py",0,0,"",python,tab +2051,5633025,"train_tokenizer.py",6758,0,"",python,selection_mouse +2052,5633036,"train_tokenizer.py",6757,0,"",python,selection_command +2053,5633549,"train_tokenizer.py",6758,0,"",python,selection_mouse +2054,5633550,"train_tokenizer.py",6757,0,"",python,selection_command +2055,5633751,"train_tokenizer.py",6758,0,"",python,selection_mouse +2056,5633751,"train_tokenizer.py",6757,0,"",python,selection_command +2057,5634155,"train_tokenizer.py",6716,0,"",python,selection_mouse +2058,5634413,"train_tokenizer.py",6711,23,"log_checkpoint_interval",python,selection_mouse +2059,5635774,"train_tokenizer.py",6758,0,"",python,selection_mouse +2060,5635775,"train_tokenizer.py",6757,0,"",python,selection_command +2061,5636373,"train_tokenizer.py",6822,0,"",python,selection_mouse +2062,5636532,"train_tokenizer.py",6803,23,"cleanup_tmp_directories",python,selection_mouse +2063,5637124,"train_tokenizer.py",6659,0,"",python,selection_mouse +2064,5637285,"train_tokenizer.py",6652,24,"CheckpointManagerOptions",python,selection_mouse +2065,5642067,"train_tokenizer.py",6925,0,"",python,selection_mouse +2066,5642961,"train_tokenizer.py",6917,0,"",python,selection_mouse +2067,5643103,"train_tokenizer.py",6916,4,"args",python,selection_mouse +2068,5643334,"train_tokenizer.py",6916,5,"args.",python,selection_mouse +2069,5643352,"train_tokenizer.py",6916,13,"args.ckpt_dir",python,selection_mouse +2070,5643743,"train_tokenizer.py",6923,0,"",python,selection_mouse +2071,5643744,"train_tokenizer.py",6921,8,"ckpt_dir",python,selection_mouse +2072,5643958,"train_tokenizer.py",6920,9,".ckpt_dir",python,selection_mouse +2073,5643981,"train_tokenizer.py",6916,13,"args.ckpt_dir",python,selection_mouse +2074,5644136,"train_tokenizer.py",6915,14,"(args.ckpt_dir",python,selection_mouse +2075,5644199,"train_tokenizer.py",6908,21,"abspath(args.ckpt_dir",python,selection_mouse +2076,5644402,"train_tokenizer.py",6907,22,".abspath(args.ckpt_dir",python,selection_mouse +2077,5644421,"train_tokenizer.py",6903,26,"path.abspath(args.ckpt_dir",python,selection_mouse +2078,5644502,"train_tokenizer.py",6902,27,".path.abspath(args.ckpt_dir",python,selection_mouse 
+2079,5644521,"train_tokenizer.py",6900,29,"os.path.abspath(args.ckpt_dir",python,selection_mouse +2080,5644967,"train_tokenizer.py",6900,0,"",python,selection_mouse +2081,5644968,"train_tokenizer.py",6900,2,"os",python,selection_mouse +2082,5645189,"train_tokenizer.py",6900,3,"os.",python,selection_mouse +2083,5645207,"train_tokenizer.py",6900,7,"os.path",python,selection_mouse +2084,5645245,"train_tokenizer.py",6900,8,"os.path.",python,selection_mouse +2085,5645299,"train_tokenizer.py",6900,15,"os.path.abspath",python,selection_mouse +2086,5645352,"train_tokenizer.py",6900,16,"os.path.abspath(",python,selection_mouse +2087,5645358,"train_tokenizer.py",6900,20,"os.path.abspath(args",python,selection_mouse +2088,5645415,"train_tokenizer.py",6900,66,"os.path.abspath(args.ckpt_dir),\n options=checkpoint_options",python,selection_mouse +2089,5645657,"train_tokenizer.py",6900,67,"os.path.abspath(args.ckpt_dir),\n options=checkpoint_options,",python,selection_mouse +2090,5645858,"train_tokenizer.py",6900,30,"os.path.abspath(args.ckpt_dir)",python,selection_mouse +2091,5646205,"train_tokenizer.py",6930,0,"",python,selection_mouse +2092,5646206,"train_tokenizer.py",6929,2,"),",python,selection_mouse +2093,5646356,"train_tokenizer.py",6929,2,"),",python,selection_mouse +2094,5646368,"train_tokenizer.py",6921,10,"ckpt_dir),",python,selection_mouse +2095,5646419,"train_tokenizer.py",6929,37,"),\n options=checkpoint_options",python,selection_mouse +2096,5646558,"train_tokenizer.py",6929,19,"),\n options=",python,selection_mouse +2097,5646579,"train_tokenizer.py",6929,18,"),\n options",python,selection_mouse +2098,5646742,"train_tokenizer.py",6929,11,"),\n ",python,selection_mouse +2099,5646805,"train_tokenizer.py",6929,10,"),\n ",python,selection_mouse +2100,5646875,"train_tokenizer.py",6899,32," os.path.abspath(args.ckpt_dir),",python,selection_mouse +2101,5647092,"train_tokenizer.py",6900,31,"os.path.abspath(args.ckpt_dir),",python,selection_mouse +2102,5647585,"train_tokenizer.py",6901,0,"",python,selection_mouse +2103,5647586,"train_tokenizer.py",6900,2,"os",python,selection_mouse +2104,5647804,"train_tokenizer.py",6900,47,"os.path.abspath(args.ckpt_dir),\n options",python,selection_mouse +2105,5647826,"train_tokenizer.py",6900,48,"os.path.abspath(args.ckpt_dir),\n options=",python,selection_mouse +2106,5647883,"train_tokenizer.py",6900,66,"os.path.abspath(args.ckpt_dir),\n options=checkpoint_options",python,selection_mouse +2107,5647883,"train_tokenizer.py",6900,15,"os.path.abspath",python,selection_mouse +2108,5647939,"train_tokenizer.py",6900,16,"os.path.abspath(",python,selection_mouse +2109,5647940,"train_tokenizer.py",6900,20,"os.path.abspath(args",python,selection_mouse +2110,5647993,"train_tokenizer.py",6900,21,"os.path.abspath(args.",python,selection_mouse +2111,5648063,"train_tokenizer.py",6900,29,"os.path.abspath(args.ckpt_dir",python,selection_mouse +2112,5648347,"train_tokenizer.py",6900,30,"os.path.abspath(args.ckpt_dir)",python,selection_mouse +2113,5648675,"train_tokenizer.py",6930,0,"",python,selection_mouse +2114,5648676,"train_tokenizer.py",6929,2,"),",python,selection_mouse +2115,5648835,"train_tokenizer.py",6929,2,"),",python,selection_mouse +2116,5648849,"train_tokenizer.py",6929,38,"),\n options=checkpoint_options,",python,selection_mouse +2117,5648902,"train_tokenizer.py",6929,37,"),\n options=checkpoint_options",python,selection_mouse +2118,5648956,"train_tokenizer.py",6916,15,"args.ckpt_dir),",python,selection_mouse 
+2119,5649009,"train_tokenizer.py",6915,16,"(args.ckpt_dir),",python,selection_mouse +2120,5649014,"train_tokenizer.py",6908,23,"abspath(args.ckpt_dir),",python,selection_mouse +2121,5649129,"train_tokenizer.py",6907,24,".abspath(args.ckpt_dir),",python,selection_mouse +2122,5649148,"train_tokenizer.py",6903,28,"path.abspath(args.ckpt_dir),",python,selection_mouse +2123,5649247,"train_tokenizer.py",6902,29,".path.abspath(args.ckpt_dir),",python,selection_mouse +2124,5649306,"train_tokenizer.py",6900,31,"os.path.abspath(args.ckpt_dir),",python,selection_mouse +2125,5649378,"train_tokenizer.py",6899,32," os.path.abspath(args.ckpt_dir),",python,selection_mouse +2126,5649445,"train_tokenizer.py",6898,33," os.path.abspath(args.ckpt_dir),",python,selection_mouse +2127,5649905,"train_tokenizer.py",6898,0,"",python,selection_mouse +2128,5650271,"train_tokenizer.py",6900,0,"",python,selection_mouse +2129,5650423,"train_tokenizer.py",6900,2,"os",python,selection_mouse +2130,5650627,"train_tokenizer.py",6900,3,"os.",python,selection_mouse +2131,5650634,"train_tokenizer.py",6900,7,"os.path",python,selection_mouse +2132,5650649,"train_tokenizer.py",6900,8,"os.path.",python,selection_mouse +2133,5650706,"train_tokenizer.py",6900,15,"os.path.abspath",python,selection_mouse +2134,5650714,"train_tokenizer.py",6900,20,"os.path.abspath(args",python,selection_mouse +2135,5650733,"train_tokenizer.py",6900,66,"os.path.abspath(args.ckpt_dir),\n options=checkpoint_options",python,selection_mouse +2136,5650829,"train_tokenizer.py",6900,67,"os.path.abspath(args.ckpt_dir),\n options=checkpoint_options,",python,selection_mouse +2137,5651292,"train_tokenizer.py",6967,0,"",python,selection_mouse +2138,5651298,"train_tokenizer.py",6966,0,"",python,selection_command +2139,5655155,"TERMINAL",0,0,"bash",,terminal_focus +2140,5658624,"TERMINAL",0,0,"ls",,terminal_command +2141,5660903,"TERMINAL",0,0,"cd ..",,terminal_command +2142,5661152,"TERMINAL",0,0,"ls",,terminal_command +2143,5662952,"TERMINAL",0,0,"cd ..",,terminal_command +2144,5663573,"TERMINAL",0,0,"ls",,terminal_command +2145,5663617,"TERMINAL",0,0,"]633;E;2025-07-10 12:00:37 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C",,terminal_output +2146,5663720,"TERMINAL",0,0,"0000 3290367 3292213 3292331 3292337 3294603 3296575 3297582 3297727 3299068 3300233 3301026 3307618 3310437 3313565 dyn tokenizer\r\n3290283 3290391 3292221 3292332 3292338 3296502 3297569 3297586 3299016 3299069 3300290 3301027 3307619 3311671 3313570 dynamics_ckpt_dir tokenizer_ckpt_dir\r\n3290284 3290392 3292258 3292333 3292339 3296540 3297575 3297606 3299062 3299258 3300658 3301029 3309662 3311672 3313571 lam train_lam_minecraft_overfit_sample\r\n3290295 3290439 3292328 3292334 3294600 3296571 3297576 3297671 3299063 3299259 3300663 3301030 3309663 3313562 3313572 lam-1-action train_tokenizer_batch_size_scaling_16_node\r\n3290296 3290440 3292329 3292335 3294601 3296573 3297577 3297693 3299065 3299272 3300672 3301031 3309699 3313563 3316022 lam_ckpt_dir train_tokenizer_minecraft_overfit_sample\r\n3290366 3291405 3292330 3292336 3294602 3296574 3297578 3297706 3299066 3299579 3301025 3306801 3310436 3313564 debug lam_main_test\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints]633;D;0",,terminal_output +2147,5671518,"TERMINAL",0,0,"cd debug/l",,terminal_command +2148,5671764,"TERMINAL",0,0,"ls",,terminal_command +2149,5673482,"TERMINAL",0,0,"cd mihir-debug-grain-checkpointing-old/",,terminal_command +2150,5673829,"TERMINAL",0,0,"ls",,terminal_command 
+2151,5676175,"TERMINAL",0,0,"srun",,terminal_focus +2152,5677490,"train_tokenizer.py",0,0,"",python,tab +2153,5697474,"TERMINAL",0,0,"bash",,terminal_focus +2154,5702154,"train_tokenizer.py",6964,0,"",python,selection_mouse +2155,5702324,"train_tokenizer.py",6948,18,"checkpoint_options",python,selection_mouse +2156,5702699,"train_tokenizer.py",6925,0,"",python,selection_mouse +2157,5712380,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +2158,5713572,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",260,0,"",shellscript,selection_mouse +2159,5714519,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",576,0,"",shellscript,selection_mouse +2160,5715174,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",503,0,"",shellscript,selection_mouse +2161,5715699,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",493,0,"",shellscript,selection_mouse +2162,5716402,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",511,0,"",shellscript,selection_mouse +2163,5716563,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",510,6,"ws_dir",shellscript,selection_mouse +2164,5716793,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",510,18,"ws_dir/checkpoints",shellscript,selection_mouse +2165,5716801,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",510,67,"ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR",shellscript,selection_mouse +2166,5716916,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",510,42,"ws_dir/checkpoints/$job_name/$slurm_job_id",shellscript,selection_mouse +2167,5717576,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",551,0,"",shellscript,selection_mouse +2168,5717970,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",577,0,"",shellscript,selection_mouse +2169,5717981,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",576,0,"",shellscript,selection_command +2170,5718191,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",552,24,"\nmkdir -p $CHECKPOINT_DI",shellscript,selection_mouse +2171,5718193,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",552,25,"\nmkdir -p $CHECKPOINT_DIR",shellscript,selection_command +2172,5718203,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",551,26,"d\nmkdir -p $CHECKPOINT_DIR",shellscript,selection_mouse +2173,5718219,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",549,28,"_id\nmkdir -p $CHECKPOINT_DIR",shellscript,selection_mouse +2174,5718273,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",547,30,"ob_id\nmkdir -p $CHECKPOINT_DIR",shellscript,selection_mouse +2175,5718274,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",546,31,"job_id\nmkdir -p $CHECKPOINT_DIR",shellscript,selection_mouse +2176,5718286,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",545,32,"_job_id\nmkdir -p $CHECKPOINT_DIR",shellscript,selection_mouse +2177,5718745,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",552,0,"",shellscript,selection_mouse +2178,5718751,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",551,0,"",shellscript,selection_command 
+2179,5718908,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",540,12,"slurm_job_id",shellscript,selection_mouse +2180,5718909,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",541,11,"lurm_job_id",shellscript,selection_command +2181,5719053,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",541,36,"lurm_job_id\nmkdir -p $CHECKPOINT_DIR",shellscript,selection_mouse +2182,5719125,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",541,37,"lurm_job_id\nmkdir -p $CHECKPOINT_DIR\n",shellscript,selection_mouse +2183,5719387,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",541,36,"lurm_job_id\nmkdir -p $CHECKPOINT_DIR",shellscript,selection_mouse +2184,5719652,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",510,31,"ws_dir/checkpoints/$job_name/$s",shellscript,selection_mouse +2185,5719710,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,32,"$ws_dir/checkpoints/$job_name/$s",shellscript,selection_mouse +2186,5720760,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,0,"",shellscript,selection_mouse +2187,5720761,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",508,2,"=$",shellscript,selection_mouse +2188,5720962,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",508,2,"=$",shellscript,selection_mouse +2189,5720974,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",508,8,"=$ws_dir",shellscript,selection_mouse +2190,5721026,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",508,9,"=$ws_dir/",shellscript,selection_mouse +2191,5721027,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",508,20,"=$ws_dir/checkpoints",shellscript,selection_mouse +2192,5721086,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",508,22,"=$ws_dir/checkpoints/$",shellscript,selection_mouse +2193,5721105,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",508,30,"=$ws_dir/checkpoints/$job_name",shellscript,selection_mouse +2194,5721163,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",508,44,"=$ws_dir/checkpoints/$job_name/$slurm_job_id",shellscript,selection_mouse +2195,5721526,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",543,0,"",shellscript,selection_mouse +2196,5723943,"TERMINAL",0,0,"Step 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.15378324687480927\r\nStep 4, loss: 0.13490642607212067\r\nStep 5, loss: 0.12313499301671982\r\nStep 6, loss: 0.1142599880695343\r\nStep 7, loss: 0.11624960601329803\r\nStep 8, loss: 0.11065180599689484\r\nStep 9, loss: 0.11036459356546402\r\nStep 10, loss: 0.11022736877202988\r\nStep 11, loss: 0.12252587825059891\r\nStep 12, loss: 0.11272387206554413\r\nStep 13, loss: 0.10641051828861237\r\nStep 14, loss: 0.10383390635251999\r\nStep 15, loss: 0.1150636076927185\r\nStep 16, loss: 0.09955383092164993\r\nStep 17, loss: 0.10739227384328842\r\nStep 18, loss: 0.09532461315393448\r\nStep 19, loss: 0.10048551857471466\r\nStep 20, loss: 0.11120827496051788\r\nStep 21, loss: 0.10904798656702042\r\nStep 22, loss: 0.1087569072842598\r\nStep 23, loss: 0.1015014573931694\r\nStep 24, loss: 0.09657584875822067\r\nStep 25, loss: 0.09748344123363495\r\nStep 26, loss: 0.09136204421520233\r\nStep 27, loss: 0.10394073277711868\r\nStep 28, loss: 
0.09545308351516724\r\nStep 29, loss: 0.10612607002258301\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.15378324687480927\r\nStep 4, loss: 0.13490642607212067\r\nStep 5, loss: 0.12313499301671982\r\nStep 6, loss: 0.1142599880695343\r\nStep 7, loss: 0.11624960601329803\r\nStep 8, loss: 0.11065180599689484\r\nStep 9, loss: 0.11036459356546402\r\nStep 10, loss: 0.11022736877202988\r\nStep 11, loss: 0.12252587825059891\r\nStep 12, loss: 0.11272387206554413\r\nStep 13, loss: 0.10641051828861237\r\nStep 14, loss: 0.10383390635251999\r\nStep 15, loss: 0.1150636076927185\r\nStep 16, loss: 0.09955383092164993\r\nStep 17, loss: 0.10739227384328842\r\nStep 18, loss: 0.09532461315393448\r\nStep 19, loss: 0.10048551857471466\r\nStep 20, loss: 0.11120827496051788\r\nStep 21, loss: 0.10904798656702042\r\nStep 22, loss: 0.1087569072842598\r\nStep 23, loss: 0.1015014573931694\r\nStep 24, loss: 0.09657584875822067\r\nStep 25, loss: 0.09748344123363495\r\nStep 26, loss: 0.09136204421520233\r\nStep 27, loss: 0.10394073277711868\r\nStep 28, loss: 0.09545308351516724\r\nStep 29, loss: 0.10612607002258301\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.15378324687480927\r\nStep 4, loss: 0.13490642607212067\r\nStep 5, loss: 0.12313499301671982\r\nStep 6, loss: 0.1142599880695343\r\nStep 7, loss: 0.11624960601329803\r\nStep 8, loss: 0.11065180599689484\r\nStep 9, loss: 0.11036459356546402\r\nStep 10, loss: 0.11022736877202988\r\nStep 11, loss: 0.12252587825059891\r\nStep 12, loss: 0.11272387206554413\r\nStep 13, loss: 0.10641051828861237\r\nStep 14, loss: 0.10383390635251999\r\nStep 15, loss: 0.1150636076927185\r\nStep 16, loss: 0.09955383092164993\r\nStep 17, loss: 0.10739227384328842\r\nStep 18, loss: 0.09532461315393448\r\nStep 19, loss: 0.10048551857471466\r\nStep 20, loss: 0.11120827496051788\r\nStep 21, loss: 0.10904798656702042\r\nStep 22, loss: 0.1087569072842598\r\nStep 23, loss: 0.1015014573931694\r\nStep 24, loss: 0.09657584875822067\r\nStep 25, loss: 0.09748344123363495\r\nStep 26, loss: 0.09136204421520233\r\nStep 27, loss: 0.10394073277711868\r\nStep 28, loss: 0.09545308351516724\r\nStep 29, loss: 0.10612607002258301\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.15378324687480927\r\nStep 4, loss: 0.13490642607212067\r\nStep 5, loss: 0.12313499301671982\r\nStep 6, loss: 0.1142599880695343\r\nStep 7, loss: 0.11624960601329803\r\nStep 8, loss: 0.11065180599689484\r\nStep 9, loss: 0.11036459356546402\r\nStep 10, loss: 0.11022736877202988\r\nStep 11, loss: 0.12252587825059891\r\nStep 12, loss: 0.11272387206554413\r\nStep 13, loss: 0.10641051828861237\r\nStep 14, loss: 0.10383390635251999\r\nStep 15, loss: 0.1150636076927185\r\nStep 16, loss: 0.09955383092164993\r\nStep 17, loss: 0.10739227384328842\r\nStep 18, loss: 0.09532461315393448\r\nStep 19, loss: 0.10048551857471466\r\nStep 20, loss: 0.11120827496051788\r\nStep 21, loss: 0.10904798656702042\r\nStep 22, loss: 0.1087569072842598\r\nStep 23, loss: 0.1015014573931694\r\nStep 24, loss: 0.09657584875822067\r\nStep 25, loss: 0.09748344123363495\r\nStep 26, loss: 0.09136204421520233\r\nStep 27, loss: 0.10394073277711868\r\nStep 28, loss: 0.09545308351516724\r\nStep 29, loss: 0.10612607002258301\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 
3, loss: 0.15378324687480927\r\nStep 4, loss: 0.13490642607212067\r\nStep 5, loss: 0.12313499301671982\r\nStep 6, loss: 0.1142599880695343\r\nStep 7, loss: 0.11624960601329803\r\nStep 8, loss: 0.11065180599689484\r\nStep 9, loss: 0.11036459356546402\r\nStep 10, loss: 0.11022736877202988\r\nStep 11, loss: 0.12252587825059891\r\nStep 12, loss: 0.11272387206554413\r\nStep 13, loss: 0.10641051828861237\r\nStep 14, loss: 0.10383390635251999\r\nStep 15, loss: 0.1150636076927185\r\nStep 16, loss: 0.09955383092164993\r\nStep 17, loss: 0.10739227384328842\r\nStep 18, loss: 0.09532461315393448\r\nStep 19, loss: 0.10048551857471466\r\nStep 20, loss: 0.11120827496051788\r\nStep 21, loss: 0.10904798656702042\r\nStep 22, loss: 0.1087569072842598\r\nStep 23, loss: 0.1015014573931694\r\nStep 24, loss: 0.09657584875822067\r\nStep 25, loss: 0.09748344123363495\r\nStep 26, loss: 0.09136204421520233\r\nStep 27, loss: 0.10394073277711868\r\nStep 28, loss: 0.09545308351516724\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.15378324687480927\r\nStep 4, loss: 0.13490642607212067\r\nStep 5, loss: 0.12313499301671982\r\nStep 6, loss: 0.1142599880695343\r\nStep 7, loss: 0.11624960601329803\r\nStep 8, loss: 0.11065180599689484\r\nStep 9, loss: 0.11036459356546402\r\nStep 10, loss: 0.11022736877202988\r\nStep 11, loss: 0.12252587825059891\r\nStep 12, loss: 0.11272387206554413\r\nStep 13, loss: 0.10641051828861237\r\nStep 14, loss: 0.10383390635251999\r\nStep 15, loss: 0.1150636076927185\r\nStep 16, loss: 0.09955383092164993\r\nStep 17, loss: 0.10739227384328842\r\nStep 18, loss: 0.09532461315393448\r\nStep 19, loss: 0.10048551857471466\r\nStep 20, loss: 0.11120827496051788\r\nStep 21, loss: 0.10904798656702042\r\nStep 22, loss: 0.1087569072842598\r\nStep 23, loss: 0.1015014573931694\r\nStep 24, loss: 0.09657584875822067\r\nStep 25, loss: 0.09748344123363495\r\nStep 26, loss: 0.09136204421520233\r\nStep 27, loss: 0.10394073277711868\r\nStep 28, loss: 0.09545308351516724\r\nStep 29, loss: 0.10612607002258301\r\nStep 29, loss: 0.10612607002258301\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.15378324687480927\r\nStep 4, loss: 0.13490642607212067\r\nStep 5, loss: 0.12313499301671982\r\nStep 6, loss: 0.1142599880695343\r\nStep 7, loss: 0.11624960601329803\r\nStep 8, loss: 0.11065180599689484\r\nStep 9, loss: 0.11036459356546402\r\nStep 10, loss: 0.11022736877202988\r\nStep 11, loss: 0.12252587825059891\r\nStep 12, loss: 0.11272387206554413\r\nStep 13, loss: 0.10641051828861237\r\nStep 14, loss: 0.10383390635251999\r\nStep 15, loss: 0.1150636076927185\r\nStep 16, loss: 0.09955383092164993\r\nStep 17, loss: 0.10739227384328842\r\nStep 18, loss: 0.09532461315393448\r\nStep 19, loss: 0.10048551857471466\r\nStep 20, loss: 0.11120827496051788\r\nStep 21, loss: 0.10904798656702042\r\nStep 22, loss: 0.1087569072842598\r\nStep 23, loss: 0.1015014573931694\r\nStep 24, loss: 0.09657584875822067\r\nStep 25, loss: 0.09748344123363495\r\nStep 26, loss: 0.09136204421520233\r\nStep 27, loss: 0.10394073277711868\r\nStep 28, loss: 0.09545308351516724\r\nStep 29, loss: 0.10612607002258301\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.15378324687480927\r\nStep 4, loss: 0.13490642607212067\r\nStep 5, loss: 0.12313499301671982\r\nStep 6, loss: 0.1142599880695343\r\nStep 7, loss: 
0.11624960601329803\r\nStep 8, loss: 0.11065180599689484\r\nStep 9, loss: 0.11036459356546402\r\nStep 10, loss: 0.11022736877202988\r\nStep 11, loss: 0.12252587825059891\r\nStep 12, loss: 0.11272387206554413\r\nStep 13, loss: 0.10641051828861237\r\nStep 14, loss: 0.10383390635251999\r\nStep 15, loss: 0.1150636076927185\r\nStep 16, loss: 0.09955383092164993\r\nStep 17, loss: 0.10739227384328842\r\nStep 18, loss: 0.09532461315393448\r\nStep 19, loss: 0.10048551857471466\r\nStep 20, loss: 0.11120827496051788\r\nStep 21, loss: 0.10904798656702042\r\nStep 22, loss: 0.1087569072842598\r\nStep 23, loss: 0.1015014573931694\r\nStep 24, loss: 0.09657584875822067\r\nStep 25, loss: 0.09748344123363495\r\nStep 26, loss: 0.09136204421520233\r\nStep 27, loss: 0.10394073277711868\r\nStep 28, loss: 0.09545308351516724\r\nStep 29, loss: 0.10612607002258301\r\n",,terminal_output +2197,5724191,"TERMINAL",0,0,"Filtering out episode with length 8, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2198,5724248,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2199,5724311,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 8, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2200,5724375,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 9, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 4, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 4, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 2, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2201,5724563,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 7, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 5, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 11, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2202,5724676,"TERMINAL",0,0,"Filtering out episode with length 10, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 15, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 3, which is shorter than the requested sequence length 
16.\r\n",,terminal_output +2203,5725665,"TERMINAL",0,0,"Filtering out episode with length 12, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 14, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2204,5726211,"train_tokenizer.py",0,0,"",python,tab +2205,5726301,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 6, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 14, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2206,5727869,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run tokenizer-batch-size-scaling-2-node-sqrt-lr-mihir-debug-grain-checkpointing-old at: https://wandb.ai/instant-uv/jafar/runs/k9nteunj\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_115712-k9nteunj/logs\r\n",,terminal_output +2207,5728915,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +2208,5728972,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +2209,5729033,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 
10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +2210,5729141,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +2211,5729936,"TERMINAL",0,0,"]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +2212,5730255,"train_tokenizer.py",6910,0,"",python,selection_mouse +2213,5730403,"train_tokenizer.py",6908,7,"abspath",python,selection_mouse +2214,5733765,"train_tokenizer.py",6895,0,"",python,selection_mouse +2215,5733890,"train_tokenizer.py",6892,8," ",python,selection_mouse +2216,5734788,"train_tokenizer.py",6899,0,"",python,selection_mouse +2217,5734789,"train_tokenizer.py",6892,8," ",python,selection_mouse +2218,5735694,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +2219,5737298,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,0,"",shellscript,selection_mouse +2220,5737480,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,2,"$w",shellscript,selection_mouse +2221,5737493,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,4,"$ws_",shellscript,selection_mouse +2222,5737509,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,7,"$ws_dir",shellscript,selection_mouse +2223,5737560,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,14,"$ws_dir/checkp",shellscript,selection_mouse +2224,5737561,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,17,"$ws_dir/checkpoin",shellscript,selection_mouse +2225,5737567,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,20,"$ws_dir/checkpoints/",shellscript,selection_mouse +2226,5737585,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,22,"$ws_dir/checkpoints/$j",shellscript,selection_mouse +2227,5737602,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,23,"$ws_dir/checkpoints/$jo",shellscript,selection_mouse +2228,5737661,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,26,"$ws_dir/checkpoints/$job_n",shellscript,selection_mouse +2229,5737661,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,27,"$ws_dir/checkpoints/$job_na",shellscript,selection_mouse +2230,5737662,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,28,"$ws_dir/checkpoints/$job_nam",shellscript,selection_mouse +2231,5737668,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,30,"$ws_dir/checkpoints/$job_name/",shellscript,selection_mouse +2232,5737685,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,31,"$ws_dir/checkpoints/$job_name/$",shellscript,selection_mouse +2233,5737737,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,32,"$ws_dir/checkpoints/$job_name/$s",shellscript,selection_mouse +2234,5737738,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,33,"$ws_dir/checkpoints/$job_name/$sl",shellscript,selection_mouse 
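The "Filtering out episode with length N, which is shorter than the requested sequence length 16." messages recorded above come from the data loader dropping episodes that cannot yield a full training window. A minimal sketch of that filtering rule, assuming a plain list-of-episodes representation (the real pipeline reads ArrayRecords through Grain):

# Hedged sketch of the episode-length filter implied by the log messages above.
# `episodes` and `seq_len` are illustrative names, not the project's actual API.
def filter_episodes(episodes, seq_len=16):
    kept = []
    for ep in episodes:
        if len(ep) < seq_len:
            print(
                f"Filtering out episode with length {len(ep)}, "
                f"which is shorter than the requested sequence length {seq_len}."
            )
            continue
        kept.append(ep)
    return kept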
+2235,5737738,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,34,"$ws_dir/checkpoints/$job_name/$slu",shellscript,selection_mouse +2236,5737752,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,36,"$ws_dir/checkpoints/$job_name/$slurm",shellscript,selection_mouse +2237,5737804,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,37,"$ws_dir/checkpoints/$job_name/$slurm_",shellscript,selection_mouse +2238,5737805,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,39,"$ws_dir/checkpoints/$job_name/$slurm_jo",shellscript,selection_mouse +2239,5737818,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",493,16,"\nCHECKPOINT_DIR=",shellscript,selection_mouse +2240,5738268,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,43,"$ws_dir/checkpoints/$job_name/$slurm_job_id",shellscript,selection_mouse +2241,5739350,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,0,"",shellscript,selection_mouse +2242,5739530,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,1,"/",shellscript,selection_mouse +2243,5739531,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,2,"/h",shellscript,selection_mouse +2244,5739540,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,3,"/hk",shellscript,selection_mouse +2245,5739554,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,5,"/hkfs",shellscript,selection_mouse +2246,5739606,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,7,"/hkfs/w",shellscript,selection_mouse +2247,5739607,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,9,"/hkfs/wor",shellscript,selection_mouse +2248,5739607,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,11,"/hkfs/work/",shellscript,selection_mouse +2249,5739621,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,13,"/hkfs/work/wo",shellscript,selection_mouse +2250,5739687,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,15,"/hkfs/work/work",shellscript,selection_mouse +2251,5739688,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,17,"/hkfs/work/worksp",shellscript,selection_mouse +2252,5739688,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,18,"/hkfs/work/workspa",shellscript,selection_mouse +2253,5739690,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,20,"/hkfs/work/workspace",shellscript,selection_mouse +2254,5739706,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,21,"/hkfs/work/workspace/",shellscript,selection_mouse +2255,5739728,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,22,"/hkfs/work/workspace/s",shellscript,selection_mouse +2256,5739740,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,24,"/hkfs/work/workspace/scr",shellscript,selection_mouse +2257,5739793,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,26,"/hkfs/work/workspace/scrat",shellscript,selection_mouse +2258,5739794,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,27,"/hkfs/work/workspace/scratc",shellscript,selection_mouse 
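The resource_tracker UserWarnings recorded earlier ("There appear to be 10 leaked shared_memory objects to clean up at shutdown") are the stdlib multiprocessing module noticing SharedMemory segments that were never unlinked before the workers exited. A minimal sketch of the cleanup that avoids the warning, using only stdlib shared memory (Grain's worker pools manage this internally):

from multiprocessing import shared_memory

# Hedged sketch: create, use, and fully release a shared-memory block.
shm = shared_memory.SharedMemory(create=True, size=1024)
try:
    shm.buf[:5] = b"hello"  # producer writes into the shared buffer
finally:
    shm.close()    # detach this process's mapping
    shm.unlink()   # destroy the segment; skipping this triggers the warning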
+2259,5739794,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,29,"/hkfs/work/workspace/scratch/",shellscript,selection_mouse +2260,5739804,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,30,"/hkfs/work/workspace/scratch/t",shellscript,selection_mouse +2261,5739837,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,32,"/hkfs/work/workspace/scratch/tum",shellscript,selection_mouse +2262,5739859,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,35,"/hkfs/work/workspace/scratch/tum_in",shellscript,selection_mouse +2263,5739877,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,38,"/hkfs/work/workspace/scratch/tum_ind36",shellscript,selection_mouse +2264,5739902,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,40,"/hkfs/work/workspace/scratch/tum_ind3695",shellscript,selection_mouse +2265,5739960,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,42,"/hkfs/work/workspace/scratch/tum_ind3695-j",shellscript,selection_mouse +2266,5739960,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,43,"/hkfs/work/workspace/scratch/tum_ind3695-ja",shellscript,selection_mouse +2267,5739961,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,44,"/hkfs/work/workspace/scratch/tum_ind3695-jaf",shellscript,selection_mouse +2268,5739965,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,45,"/hkfs/work/workspace/scratch/tum_ind3695-jafa",shellscript,selection_mouse +2269,5739982,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,46,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_",shellscript,selection_mouse +2270,5740033,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,47,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_w",shellscript,selection_mouse +2271,5740040,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,48,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws",shellscript,selection_mouse +2272,5740046,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,49,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_",shellscript,selection_mouse +2273,5740103,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,50,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_s",shellscript,selection_mouse +2274,5740104,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,51,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_sh",shellscript,selection_mouse +2275,5740104,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,52,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_sha",shellscript,selection_mouse +2276,5740160,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,53,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shar",shellscript,selection_mouse +2277,5740160,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,54,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_share",shellscript,selection_mouse +2278,5740216,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,55,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared",shellscript,selection_mouse +2279,5740218,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,56,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared'",shellscript,selection_mouse 
+2280,5740714,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",367,55,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared",shellscript,selection_mouse +2281,5751258,"TERMINAL",0,0,"s",,terminal_command +2282,5751269,"TERMINAL",0,0,"]633;E;2025-07-10 12:02:05 s;46e99295-9803-4583-b36f-e400fb9e619d]633;Cbash: s: command not found...\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug/mihir-debug-grain-checkpointing-old]633;D;127",,terminal_output +2283,5751911,"TERMINAL",0,0,"ls",,terminal_command +2284,5751925,"TERMINAL",0,0,"]633;E;2025-07-10 12:02:05 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug/mihir-debug-grain-checkpointing-old]633;D;0",,terminal_output +2285,5752999,"TERMINAL",0,0,"python",,terminal_command +2286,5753049,"TERMINAL",0,0,"]633;E;2025-07-10 12:02:06 python;46e99295-9803-4583-b36f-e400fb9e619d]633;C",,terminal_output +2287,5753111,"TERMINAL",0,0,"Python 3.10.18 (main, Jun 4 2025, 17:36:27) [Clang 20.1.4 ] on linux\r\nType ""help"", ""copyright"", ""credits"" or ""license"" for more information.\r\n",,terminal_output +2288,5753165,"TERMINAL",0,0,">>> ",,terminal_output +2289,5754802,"TERMINAL",0,0,"i",,terminal_output +2290,5754948,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +2291,5755178,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +2292,5755519,"TERMINAL",0,0,"[?25lo\r[?25h",,terminal_output +2293,5755700,"TERMINAL",0,0,"[?25lp[?25h[?25lo[?25h",,terminal_output +2294,5755916,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +2295,5756156,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +2296,5756277,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +2297,5756566,"TERMINAL",0,0,"[?25lo\r[?25h",,terminal_output +2298,5756655,"TERMINAL",0,0,"[?25l \r[?25h",,terminal_output +2299,5756798,"TERMINAL",0,0,"[?25lt\r[?25h",,terminal_output +2300,5756960,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +2301,5757023,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +2302,5757164,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +2303,5757300,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +2304,5757376,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +2305,5757524,"TERMINAL",0,0,"\r\n>>> ",,terminal_output +2306,5760567,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",360,0,"",shellscript,selection_mouse +2307,5760733,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",359,6,"ws_dir",shellscript,selection_mouse +2308,5765221,"TERMINAL",0,0,"ws_dir",,terminal_output +2309,5766112,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +2310,5766301,"TERMINAL",0,0,"[?25l=[?25h[?25l [?25h",,terminal_output +2311,5775962,"TERMINAL",0,0,"[?25l""[?25h",,terminal_output +2312,5776499,"TERMINAL",0,0,"[?25l/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/chec[?25h[?25lkpoints/debug/mihir-debug-grain-checkpointing-old[?25h",,terminal_output +2313,5776778,"TERMINAL",0,0,"[?25l""[?25h",,terminal_output +2314,5777071,"TERMINAL",0,0,"\r\n>>> ",,terminal_output +2315,5778246,"TERMINAL",0,0,"o",,terminal_output +2316,5778426,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +2317,5778592,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +2318,5779310,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +2319,5779381,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +2320,5779558,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +2321,5780143,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output 
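The selections above walk the `CHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id` line in tester.sh and the workspace path it expands from. A minimal Python equivalent of that path composition; only ws_dir is taken verbatim from the session, the other values are assumptions for illustration:

import os

# Mirrors the shell variables inspected above.
ws_dir = "/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared"
job_name = os.environ.get("JOB_NAME", "debug")          # assumed default
slurm_job_id = os.environ.get("SLURM_JOB_ID", "0")      # assumed default

checkpoint_dir = os.path.join(ws_dir, "checkpoints", job_name, slurm_job_id)
print(checkpoint_dir)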
+2322,5780398,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +2323,5780627,"TERMINAL",0,0,"[?25lt[?25h[?25lh[?25h",,terminal_output +2324,5781216,"TERMINAL",0,0,"[?25l([?25h",,terminal_output +2325,5781634,"TERMINAL",0,0,"[?25lw[?25h",,terminal_output +2326,5781798,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +2327,5782316,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +2328,5782426,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +2329,5782489,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +2330,5782594,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +2331,5783116,"TERMINAL",0,0,"[?25l)[?25h",,terminal_output +2332,5784574,"TERMINAL",0,0,"\r\nTraceback (most recent call last):\r\n File """", line 1, in \r\nAttributeError: module 'os' has no attribute 'abspath'. Did you mean: '_fspath'?\r\n>>> ",,terminal_output +2333,5785717,"TERMINAL",0,0,"\r>>> os.abspath(ws_dir)",,terminal_output +2334,5788317,"train_tokenizer.py",0,0,"",python,tab +2335,5790841,"TERMINAL",0,0,"[?25l[?25h",,terminal_output +2336,5791106,"TERMINAL",0,0,"[?25l[?25h",,terminal_output +2337,5791791,"TERMINAL",0,0,"[?25l[?25h",,terminal_output +2338,5792386,"TERMINAL",0,0,"[?25l[?25h",,terminal_output +2339,5792848,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +2340,5792980,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +2341,5793403,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +2342,5793633,"TERMINAL",0,0,"\r[1@pa",,terminal_output +2343,5793695,"TERMINAL",0,0,"[?25la\r[1@ab[?25h",,terminal_output +2344,5793947,"TERMINAL",0,0,"[?25lba\r[1@ta[?25h\r[1@ha",,terminal_output +2345,5794172,"TERMINAL",0,0,"[?25la\r[1@.a[?25h",,terminal_output +2346,5794385,"TERMINAL",0,0,"\r\n'/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug/mihir-debug-grain-checkpointing-old'\r\n>>> ",,terminal_output +2347,5802129,"TERMINAL",0,0,"\r>>> os.path.abspath(ws_dir)",,terminal_output +2348,5802690,"TERMINAL",0,0," ",,terminal_output +2349,5803022,"TERMINAL",0,0,"[?25l=[?25h",,terminal_output +2350,5803148,"TERMINAL",0,0,"[?25l=[?25h",,terminal_output +2351,5803209,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +2352,5803672,"TERMINAL",0,0,"[?25lw[?25h",,terminal_output +2353,5803921,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +2354,5804077,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +2355,5804453,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +2356,5804514,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +2357,5804646,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +2358,5804849,"TERMINAL",0,0,"\r\nTrue\r\n>>> ",,terminal_output +2359,5807011,"train_tokenizer.py",7100,0,"",python,selection_mouse +2360,5807022,"train_tokenizer.py",7099,0,"",python,selection_command +2361,5807532,"train_tokenizer.py",6996,0,"",python,selection_mouse +2362,5808051,"train_tokenizer.py",7016,0,"",python,selection_mouse +2363,5808053,"train_tokenizer.py",7015,0,"",python,selection_command +2364,5844017,"TERMINAL",0,0,"^D\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug/mihir-debug-grain-checkpointing-old]633;D;0",,terminal_output +2365,5845594,"TERMINAL",0,0,"ls",,terminal_command +2366,5846418,"TERMINAL",0,0,"ls",,terminal_command +2367,5846436,"TERMINAL",0,0,"]633;E;2025-07-10 12:03:40 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug/mihir-debug-grain-checkpointing-old]633;D;0",,terminal_output +2368,5846945,"TERMINAL",0,0,"ls",,terminal_command +2369,5846960,"TERMINAL",0,0,"]633;E;2025-07-10 12:03:40 
ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug/mihir-debug-grain-checkpointing-old]633;D;0",,terminal_output +2370,5849322,"TERMINAL",0,0,"srun",,terminal_focus +2371,5864652,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +2372,5867546,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",806,0,"",shellscript,selection_mouse +2373,5867653,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",806,1,"l",shellscript,selection_mouse +2374,5867673,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",806,4,"log_",shellscript,selection_mouse +2375,5867686,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",806,6,"log_ch",shellscript,selection_mouse +2376,5867740,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",806,39,"log_checkpoint_interval=2 \\n --log \",shellscript,selection_mouse +2377,5868228,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",806,26,"log_checkpoint_interval=2 ",shellscript,selection_mouse +2378,5868502,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",806,25,"log_checkpoint_interval=2",shellscript,selection_mouse +2379,5869279,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",831,0,"",shellscript,selection_mouse +2380,5869411,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",830,1,"2",shellscript,selection_mouse +2381,5869464,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",830,15,"2 \\n --log \",shellscript,selection_mouse +2382,5869745,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",830,14,"2 \\n --log ",shellscript,selection_mouse +2383,5869801,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",830,13,"2 \\n --log",shellscript,selection_mouse +2384,5869876,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",806,25,"log_checkpoint_interval=2",shellscript,selection_mouse +2385,5870061,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",805,26,"-log_checkpoint_interval=2",shellscript,selection_mouse +2386,5870597,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",804,0,"",shellscript,selection_mouse +2387,5870774,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",800,4," ",shellscript,selection_mouse +2388,5870848,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",800,5," -",shellscript,selection_mouse +2389,5870904,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",800,29," --log_checkpoint_interval",shellscript,selection_mouse +2390,5871460,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",800,30," --log_checkpoint_interval=",shellscript,selection_mouse +2391,5871479,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",800,31," --log_checkpoint_interval=2",shellscript,selection_mouse +2392,5871496,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",800,32," --log_checkpoint_interval=2 ",shellscript,selection_mouse +2393,5878049,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1007,0,"",shellscript,selection_mouse +2394,5878052,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1006,0,"",shellscript,selection_command 
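The Python REPL session recorded above first hits `AttributeError: module 'os' has no attribute 'abspath'`, then succeeds with `os.path.abspath(ws_dir)`, and finally confirms `os.path.abspath(ws_dir) == ws_dir` is True, i.e. the checkpoint path is already absolute. The same check as a standalone snippet, with the path copied from the session:

import os

ws_dir = "/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug/mihir-debug-grain-checkpointing-old"

# abspath lives in os.path, not os; calling os.abspath raises AttributeError.
assert os.path.abspath(ws_dir) == ws_dir  # True: already absolute
print(os.path.isabs(ws_dir))              # the more direct equivalent check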
+2395,5878950,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",818,0,"",shellscript,selection_mouse +2396,5879075,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",806,23,"log_checkpoint_interval",shellscript,selection_mouse +2397,5879382,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",806,24,"log_checkpoint_interval=",shellscript,selection_mouse +2398,5879441,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",806,25,"log_checkpoint_interval=2",shellscript,selection_mouse +2399,5879442,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",806,26,"log_checkpoint_interval=2 ",shellscript,selection_mouse +2400,5879457,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",806,27,"log_checkpoint_interval=2 \",shellscript,selection_mouse +2401,5882784,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",821,0,"",shellscript,selection_mouse +2402,5885628,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",493,0,"",shellscript,selection_mouse +2403,5886108,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",520,0,"",shellscript,selection_mouse +2404,5886851,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",568,0,"",shellscript,selection_mouse +2405,5887501,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",509,0,"",shellscript,selection_mouse +2406,5887688,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",508,2,"=$",shellscript,selection_mouse +2407,5887848,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",508,2,"=$",shellscript,selection_mouse +2408,5887903,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",508,8,"=$ws_dir",shellscript,selection_mouse +2409,5888538,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",516,0,"",shellscript,selection_mouse +2410,5889192,"TERMINAL",0,0,"bash",,terminal_focus +2411,5890947,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",493,0,"",shellscript,selection_mouse +2412,5891838,"TERMINAL",0,0,"srun",,terminal_focus +2413,5892670,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +2414,5892745,"TERMINAL",0,0,"[?25lw[?25h",,terminal_output +2415,5892855,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +2416,5892927,"TERMINAL",0,0,"\r\n[?2004l\r/home/hk-project-p0023960/tum_cte0515/Projects/jafar\r\n]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +2417,5897408,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +2418,5898542,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",465,0,"",shellscript,selection_mouse +2419,5899026,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",510,0,"",shellscript,selection_mouse +2420,5899546,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",516,0,"",shellscript,selection_mouse +2421,5901618,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",552,0,"\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id",shellscript,content +2422,5901631,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",553,0,"",shellscript,selection_command +2423,5902154,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",494,0,"",shellscript,selection_command 
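Around this point the `--log_checkpoint_interval=2` flag in tester.sh is being inspected, and a `max_to_keep` setting in train_tokenizer.py is selected further down in the session. A hedged sketch of how such flags commonly map onto an Orbax CheckpointManager; `save_interval_steps` and `max_to_keep` are real CheckpointManagerOptions fields, while the surrounding variable names and values stand in for the script's actual args:

import orbax.checkpoint as ocp

# Assumed wiring of --log_checkpoint_interval into Orbax, not the project's
# verbatim code.
log_checkpoint_interval = 2
options = ocp.CheckpointManagerOptions(
    save_interval_steps=log_checkpoint_interval,
    max_to_keep=3,
)
mngr = ocp.CheckpointManager("/tmp/ckpt_test", options=options)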
+2424,5903126,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",494,0,"#",shellscript,content +2425,5903128,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",495,0,"",shellscript,selection_keyboard +2426,5903188,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",495,0," ",shellscript,content +2427,5903188,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",496,0,"",shellscript,selection_keyboard +2428,5903484,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",495,0,"",shellscript,selection_command +2429,5903755,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",556,0,"",shellscript,selection_command +2430,5904058,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",557,0,"",shellscript,selection_command +2431,5904559,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",558,0,"",shellscript,selection_command +2432,5904621,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",559,0,"",shellscript,selection_command +2433,5904621,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",560,0,"",shellscript,selection_command +2434,5904687,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",561,0,"",shellscript,selection_command +2435,5904688,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",562,0,"",shellscript,selection_command +2436,5904705,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",563,0,"",shellscript,selection_command +2437,5904758,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",564,0,"",shellscript,selection_command +2438,5904759,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",565,0,"",shellscript,selection_command +2439,5904829,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",566,0,"",shellscript,selection_command +2440,5904830,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",567,0,"",shellscript,selection_command +2441,5904950,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",568,0,"",shellscript,selection_command +2442,5905099,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",569,0,"",shellscript,selection_command +2443,5905249,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",570,0,"",shellscript,selection_command +2444,5905520,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",570,1,"",shellscript,content +2445,5905860,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",570,1,"",shellscript,content +2446,5906030,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",570,1,"",shellscript,content +2447,5906266,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",570,1,"",shellscript,content +2448,5906441,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",570,1,"",shellscript,content +2449,5906624,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",570,1,"",shellscript,content +2450,5906789,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",570,1,"",shellscript,content +2451,5907512,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",570,0,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar",shellscript,content 
+2452,5914411,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",621,1,"",shellscript,content +2453,5914610,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",620,1,"",shellscript,content +2454,5914678,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",619,1,"",shellscript,content +2455,5914825,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",618,1,"",shellscript,content +2456,5914948,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",617,1,"",shellscript,content +2457,5915336,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",616,1,"",shellscript,content +2458,5915469,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",615,1,"",shellscript,content +2459,5915631,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",614,1,"",shellscript,content +2460,5915756,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",613,1,"",shellscript,content +2461,5915904,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",612,1,"",shellscript,content +2462,5916044,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",611,1,"",shellscript,content +2463,5916193,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",610,1,"",shellscript,content +2464,5916334,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",609,1,"",shellscript,content +2465,5916463,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",608,1,"",shellscript,content +2466,5916775,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",607,1,"",shellscript,content +2467,5918884,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",607,0,"/",shellscript,content +2468,5918885,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",608,0,"",shellscript,selection_keyboard +2469,5920069,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",608,0,"c",shellscript,content +2470,5920070,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",609,0,"",shellscript,selection_keyboard +2471,5920168,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",609,0,"h",shellscript,content +2472,5920169,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",610,0,"",shellscript,selection_keyboard +2473,5920632,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",610,0,"p",shellscript,content +2474,5920633,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",611,0,"",shellscript,selection_keyboard +2475,5921054,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",611,0,"o",shellscript,content +2476,5921055,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",612,0,"",shellscript,selection_keyboard +2477,5921370,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",611,1,"",shellscript,content +2478,5921467,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",610,1,"",shellscript,content +2479,5922025,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",609,1,"",shellscript,content +2480,5922834,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",609,0,"k",shellscript,content +2481,5922835,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",610,0,"",shellscript,selection_keyboard 
+2482,5923024,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",610,0,"p",shellscript,content +2483,5923025,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",611,0,"",shellscript,selection_keyboard +2484,5923142,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",611,0,"t",shellscript,content +2485,5923143,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",612,0,"",shellscript,selection_keyboard +2486,5923473,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",612,0,"_",shellscript,content +2487,5923474,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",613,0,"",shellscript,selection_keyboard +2488,5923831,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",613,0,"t",shellscript,content +2489,5923832,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",614,0,"",shellscript,selection_keyboard +2490,5923912,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",614,0,"e",shellscript,content +2491,5923913,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",615,0,"",shellscript,selection_keyboard +2492,5924080,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",615,0,"s",shellscript,content +2493,5924080,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",616,0,"",shellscript,selection_keyboard +2494,5924138,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",616,0,"t",shellscript,content +2495,5924138,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",617,0,"",shellscript,selection_keyboard +2496,5927262,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",623,0,"",shellscript,selection_mouse +2497,5927395,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",618,11,"checkpoints",shellscript,selection_mouse +2498,5927662,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",618,11,"",shellscript,content +2499,5927982,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",617,1,"",shellscript,content +2500,5928949,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",667,0,"",shellscript,selection_mouse +2501,5929467,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",666,0,"",shellscript,selection_mouse +2502,5932840,"TERMINAL",0,0,"bash",,terminal_focus +2503,5933778,"TERMINAL",0,0,"srun",,terminal_focus +2504,5934863,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +2505,5942808,"TERMINAL",0,0,"pwd",,terminal_output +2506,5943250,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",,terminal_output +2507,5944122,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output 
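Running the tester script dumps the job's SLURM environment (the block that follows: SLURM_PROCID, SLURM_NTASKS=8, SLURM_JOB_NUM_NODES=2, and so on). A minimal sketch of reading that layout from Python, as a multi-process training launcher might; the variable names come from the dump below, the derived logic and defaults are assumptions:

import os

# Distributed layout from the SLURM variables printed below; the defaults are
# assumed fallbacks for single-process runs. Eight tasks on two nodes matches
# the dump.
proc_id = int(os.environ.get("SLURM_PROCID", "0"))
num_procs = int(os.environ.get("SLURM_NTASKS", "1"))
num_nodes = int(os.environ.get("SLURM_JOB_NUM_NODES", "1"))
job_id = os.environ.get("SLURM_JOB_ID", "local")

print(f"process {proc_id}/{num_procs} on {num_nodes} node(s), job {job_id}")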
+2508,5944279,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=409404\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0719\r\nSLURM_JOB_START_TIME=1752136462\r\nSLURM_STEP_NODELIST=hkn0719\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1752179662\r\nSLURM_PMI2_SRUN_PORT=43929\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=2\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3334543\r\nSLURM_PTY_PORT=44699\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=48\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=8\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0719\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=228\r\nSLURM_NODELIST=hkn[0719,0806]\r\nSLURM_SRUN_COMM_PORT=38371\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=8\r\nSLURM_NNODES=2\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3334543\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0719\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38371\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0719,0806]\r\n",,terminal_output +2509,5944420,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +2510,5982039,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. 
Use `wandb login --relogin` to force relogin\r\n",,terminal_output +2511,5982648,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_120555-6g6x000v\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run tokenizer-batch-size-scaling-2-node-sqrt-lr-mihir-debug-grain-checkpointing-old\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/6g6x000v\r\n",,terminal_output +2512,6000188,"TERMINAL",0,0,"bash",,terminal_focus +2513,6003071,"TERMINAL",0,0,"ls",,terminal_command +2514,6004995,"TERMINAL",0,0,"cd ..",,terminal_command +2515,6005715,"TERMINAL",0,0,"cd",,terminal_command +2516,6010451,"TERMINAL",0,0,"ls",,terminal_command +2517,6015390,"TERMINAL",0,0,"cd ckpt_test/",,terminal_command +2518,6015653,"TERMINAL",0,0,"ls",,terminal_command +2519,6016881,"TERMINAL",0,0,"cd debug/",,terminal_command +2520,6017225,"TERMINAL",0,0,"ls",,terminal_command +2521,6018940,"TERMINAL",0,0,"cd mihir-debug-grain-checkpointing-old/",,terminal_command +2522,6019582,"TERMINAL",0,0,"ls",,terminal_command +2523,6020490,"TERMINAL",0,0,"ls",,terminal_command +2524,6020520,"TERMINAL",0,0,"]633;E;2025-07-10 12:06:34 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C]0;tum_cte0515@hkn1990:~/ckpt_test/debug/mihir-debug-grain-checkpointing-old]633;D;0",,terminal_output +2525,6059656,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2526,6059991,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2527,6060050,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\nRunning on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2528,6064432,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2529,6064781,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2530,6065961,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2531,6066315,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2532,6066376,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 
0...\r\n",,terminal_output +2533,6066893,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2534,6067339,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2535,6070006,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2536,6070255,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2537,6157843,"TERMINAL",0,0,"srun",,terminal_focus +2538,6187843,"TERMINAL",0,0,"bash",,terminal_focus +2539,6189239,"TERMINAL",0,0,"srun",,terminal_focus +2540,6189954,"TERMINAL",0,0,"bash",,terminal_focus +2541,6190808,"TERMINAL",0,0,"ls",,terminal_command +2542,6191558,"TERMINAL",0,0,"ls",,terminal_command +2543,6192240,"TERMINAL",0,0,"ls",,terminal_command +2544,6193174,"TERMINAL",0,0,"pwd",,terminal_command +2545,6195121,"TERMINAL",0,0,"ls",,terminal_command +2546,6203257,"train_tokenizer.py",0,0,"",python,tab +2547,6206519,"train_tokenizer.py",6752,0,"",python,selection_mouse +2548,6206666,"train_tokenizer.py",6744,11,"max_to_keep",python,selection_mouse +2549,6207333,"train_tokenizer.py",6758,0,"",python,selection_mouse +2550,6207340,"train_tokenizer.py",6757,0,"",python,selection_command +2551,6207506,"train_tokenizer.py",6758,0,"",python,selection_mouse +2552,6207516,"train_tokenizer.py",6757,0,"",python,selection_command +2553,6208012,"train_tokenizer.py",6707,0,"",python,selection_mouse +2554,6208171,"train_tokenizer.py",6706,4,"args",python,selection_mouse +2555,6208428,"train_tokenizer.py",6706,28,"args.log_checkpoint_interval",python,selection_mouse +2556,6208740,"train_tokenizer.py",6720,0,"",python,selection_mouse +2557,6208740,"train_tokenizer.py",6711,23,"log_checkpoint_interval",python,selection_mouse +2558,6212425,"train_tokenizer.py",6725,0,"",python,selection_mouse +2559,6212426,"train_tokenizer.py",6711,23,"log_checkpoint_interval",python,selection_mouse +2560,6226820,"TERMINAL",0,0,"Step 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.153783917427063\r\nStep 4, loss: 0.13490808010101318\r\nStep 5, loss: 0.12313539534807205\r\nStep 6, loss: 0.11425895243883133\r\nStep 7, loss: 0.116230808198452\r\nStep 8, loss: 0.11064261198043823\r\nStep 9, loss: 0.1103254184126854\r\nStep 10, loss: 0.11026494950056076\r\nStep 11, loss: 0.12261614948511124\r\nStep 12, loss: 0.11275434494018555\r\nStep 13, loss: 0.10663222521543503\r\nStep 14, loss: 0.10399676859378815\r\nStep 15, loss: 0.11558172106742859\r\nStep 16, loss: 0.10003791749477386\r\nStep 17, loss: 0.1081681028008461\r\nStep 18, loss: 0.0952216386795044\r\nStep 19, loss: 0.10122641921043396\r\nStep 20, loss: 0.11139006912708282\r\nStep 21, loss: 0.10945642739534378\r\nStep 22, loss: 0.10919532924890518\r\nStep 23, loss: 0.102116659283638\r\nStep 24, loss: 0.09697520732879639\r\nStep 25, loss: 0.09762175381183624\r\nStep 26, loss: 0.09154018759727478\r\nStep 27, loss: 0.10423220694065094\r\nStep 28, loss: 0.09575474262237549\r\nStep 29, loss: 0.10642489045858383\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 
0.21907024085521698\r\nStep 3, loss: 0.153783917427063\r\nStep 4, loss: 0.13490808010101318\r\nStep 5, loss: 0.12313539534807205\r\nStep 6, loss: 0.11425895243883133\r\nStep 7, loss: 0.116230808198452\r\nStep 8, loss: 0.11064261198043823\r\nStep 9, loss: 0.1103254184126854\r\nStep 10, loss: 0.11026494950056076\r\nStep 11, loss: 0.12261614948511124\r\nStep 12, loss: 0.11275434494018555\r\nStep 13, loss: 0.10663222521543503\r\nStep 14, loss: 0.10399676859378815\r\nStep 15, loss: 0.11558172106742859\r\nStep 16, loss: 0.10003791749477386\r\nStep 17, loss: 0.1081681028008461\r\nStep 18, loss: 0.0952216386795044\r\nStep 19, loss: 0.10122641921043396\r\nStep 20, loss: 0.11139006912708282\r\nStep 21, loss: 0.10945642739534378\r\nStep 22, loss: 0.10919532924890518\r\nStep 23, loss: 0.102116659283638\r\nStep 24, loss: 0.09697520732879639\r\nStep 25, loss: 0.09762175381183624\r\nStep 26, loss: 0.09154018759727478\r\nStep 27, loss: 0.10423220694065094\r\nStep 28, loss: 0.09575474262237549\r\nStep 29, loss: 0.10642489045858383\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.153783917427063\r\nStep 4, loss: 0.13490808010101318\r\nStep 5, loss: 0.12313539534807205\r\nStep 6, loss: 0.11425895243883133\r\nStep 7, loss: 0.116230808198452\r\nStep 8, loss: 0.11064261198043823\r\nStep 9, loss: 0.1103254184126854\r\nStep 10, loss: 0.11026494950056076\r\nStep 11, loss: 0.12261614948511124\r\nStep 12, loss: 0.11275434494018555\r\nStep 13, loss: 0.10663222521543503\r\nStep 14, loss: 0.10399676859378815\r\nStep 15, loss: 0.11558172106742859\r\nStep 16, loss: 0.10003791749477386\r\nStep 17, loss: 0.1081681028008461\r\nStep 18, loss: 0.0952216386795044\r\nStep 19, loss: 0.10122641921043396\r\nStep 20, loss: 0.11139006912708282\r\nStep 21, loss: 0.10945642739534378\r\nStep 22, loss: 0.10919532924890518\r\nStep 23, loss: 0.102116659283638\r\nStep 24, loss: 0.09697520732879639\r\nStep 25, loss: 0.09762175381183624\r\nStep 26, loss: 0.09154018759727478\r\nStep 27, loss: 0.10423220694065094\r\nStep 28, loss: 0.09575474262237549\r\nStep 29, loss: 0.10642489045858383\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.153783917427063\r\nStep 4, loss: 0.13490808010101318\r\nStep 5, loss: 0.12313539534807205\r\nStep 6, loss: 0.11425895243883133\r\nStep 7, loss: 0.116230808198452\r\nStep 8, loss: 0.11064261198043823\r\nStep 9, loss: 0.1103254184126854\r\nStep 10, loss: 0.11026494950056076\r\nStep 11, loss: 0.12261614948511124\r\nStep 12, loss: 0.11275434494018555\r\nStep 13, loss: 0.10663222521543503\r\nStep 14, loss: 0.10399676859378815\r\nStep 15, loss: 0.11558172106742859\r\nStep 16, loss: 0.10003791749477386\r\nStep 17, loss: 0.1081681028008461\r\nStep 18, loss: 0.0952216386795044\r\nStep 19, loss: 0.10122641921043396\r\nStep 20, loss: 0.11139006912708282\r\nStep 21, loss: 0.10945642739534378\r\nStep 22, loss: 0.10919532924890518\r\nStep 23, loss: 0.102116659283638\r\nStep 24, loss: 0.09697520732879639\r\nStep 25, loss: 0.09762175381183624\r\nStep 26, loss: 0.09154018759727478\r\nStep 27, loss: 0.10423220694065094\r\nStep 28, loss: 0.09575474262237549\r\nStep 29, loss: 0.10642489045858383\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.153783917427063\r\nStep 4, loss: 0.13490808010101318\r\nStep 5, loss: 0.12313539534807205\r\nStep 6, loss: 0.11425895243883133\r\nStep 7, loss: 
0.116230808198452\r\nStep 8, loss: 0.11064261198043823\r\nStep 9, loss: 0.1103254184126854\r\nStep 10, loss: 0.11026494950056076\r\nStep 11, loss: 0.12261614948511124\r\nStep 12, loss: 0.11275434494018555\r\nStep 13, loss: 0.10663222521543503\r\nStep 14, loss: 0.10399676859378815\r\nStep 15, loss: 0.11558172106742859\r\nStep 16, loss: 0.10003791749477386\r\nStep 17, loss: 0.1081681028008461\r\nStep 18, loss: 0.0952216386795044\r\nStep 19, loss: 0.10122641921043396\r\nStep 20, loss: 0.11139006912708282\r\nStep 21, loss: 0.10945642739534378\r\nStep 22, loss: 0.10919532924890518\r\nStep 23, loss: 0.102116659283638\r\nStep 24, loss: 0.09697520732879639\r\nStep 25, loss: 0.09762175381183624\r\nStep 26, loss: 0.09154018759727478\r\nStep 27, loss: 0.10423220694065094\r\nStep 28, loss: 0.09575474262237549\r\nStep 29, loss: 0.10642489045858383\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.153783917427063\r\nStep 4, loss: 0.13490808010101318\r\nStep 5, loss: 0.12313539534807205\r\nStep 6, loss: 0.11425895243883133\r\nStep 7, loss: 0.116230808198452\r\nStep 8, loss: 0.11064261198043823\r\nStep 9, loss: 0.1103254184126854\r\nStep 10, loss: 0.11026494950056076\r\nStep 11, loss: 0.12261614948511124\r\nStep 12, loss: 0.11275434494018555\r\nStep 13, loss: 0.10663222521543503\r\nStep 14, loss: 0.10399676859378815\r\nStep 15, loss: 0.11558172106742859\r\nStep 16, loss: 0.10003791749477386\r\nStep 17, loss: 0.1081681028008461\r\nStep 18, loss: 0.0952216386795044\r\nStep 19, loss: 0.10122641921043396\r\nStep 20, loss: 0.11139006912708282\r\nStep 21, loss: 0.10945642739534378\r\nStep 22, loss: 0.10919532924890518\r\nStep 23, loss: 0.102116659283638\r\nStep 24, loss: 0.09697520732879639\r\nStep 25, loss: 0.09762175381183624\r\nStep 26, loss: 0.09154018759727478\r\nStep 27, loss: 0.10423220694065094\r\nStep 28, loss: 0.09575474262237549\r\nStep 29, loss: 0.10642489045858383\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.153783917427063\r\nStep 4, loss: 0.13490808010101318\r\nStep 5, loss: 0.12313539534807205\r\nStep 6, loss: 0.11425895243883133\r\nStep 7, loss: 0.116230808198452\r\nStep 8, loss: 0.11064261198043823\r\nStep 9, loss: 0.1103254184126854\r\nStep 10, loss: 0.11026494950056076\r\nStep 11, loss: 0.12261614948511124\r\nStep 12, loss: 0.11275434494018555\r\nStep 13, loss: 0.10663222521543503\r\nStep 14, loss: 0.10399676859378815\r\nStep 15, loss: 0.11558172106742859\r\nStep 16, loss: 0.10003791749477386\r\nStep 17, loss: 0.1081681028008461\r\nStep 18, loss: 0.0952216386795044\r\nStep 19, loss: 0.10122641921043396\r\nStep 20, loss: 0.11139006912708282\r\nStep 21, loss: 0.10945642739534378\r\nStep 22, loss: 0.10919532924890518\r\nStep 23, loss: 0.102116659283638\r\nStep 24, loss: 0.09697520732879639\r\nStep 25, loss: 0.09762175381183624\r\nStep 26, loss: 0.09154018759727478\r\nStep 27, loss: 0.10423220694065094\r\nStep 28, loss: 0.09575474262237549\r\nStep 29, loss: 0.10642489045858383\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.153783917427063\r\nStep 4, loss: 0.13490808010101318\r\nStep 5, loss: 0.12313539534807205\r\nStep 6, loss: 0.11425895243883133\r\nStep 7, loss: 0.116230808198452\r\nStep 8, loss: 0.11064261198043823\r\nStep 9, loss: 0.1103254184126854\r\nStep 10, loss: 0.11026494950056076\r\nStep 11, loss: 0.12261614948511124\r\nStep 12, loss: 
0.11275434494018555\r\nStep 13, loss: 0.10663222521543503\r\nStep 14, loss: 0.10399676859378815\r\nStep 15, loss: 0.11558172106742859\r\nStep 16, loss: 0.10003791749477386\r\nStep 17, loss: 0.1081681028008461\r\nStep 18, loss: 0.0952216386795044\r\nStep 19, loss: 0.10122641921043396\r\nStep 20, loss: 0.11139006912708282\r\nStep 21, loss: 0.10945642739534378\r\nStep 22, loss: 0.10919532924890518\r\nStep 23, loss: 0.102116659283638\r\nStep 24, loss: 0.09697520732879639\r\nStep 25, loss: 0.09762175381183624\r\nStep 26, loss: 0.09154018759727478\r\nStep 27, loss: 0.10423220694065094\r\nStep 28, loss: 0.09575474262237549\r\nStep 29, loss: 0.10642489045858383\r\n",,terminal_output +2561,6227060,"TERMINAL",0,0,"Filtering out episode with length 15, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2562,6227119,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2563,6227224,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 8, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 10, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2564,6227341,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 9, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2565,6227400,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 7, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2566,6227460,"TERMINAL",0,0,"Filtering out episode with length 4, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 4, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2567,6227576,"TERMINAL",0,0,"Filtering out episode with length 5, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 11, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 2, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 3, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 8, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2568,6228623,"TERMINAL",0,0,"Filtering out episode with length 12, which is shorter than the requested sequence length 
16.\r\nFiltering out episode with length 14, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2569,6228763,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +2570,6228981,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2571,6229145,"TERMINAL",0,0,"Filtering out episode with length 6, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2572,6229339,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 14, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2573,6230032,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1153,0,"",shellscript,selection_mouse +2574,6230147,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run tokenizer-batch-size-scaling-2-node-sqrt-lr-mihir-debug-grain-checkpointing-old at: https://wandb.ai/instant-uv/jafar/runs/6g6x000v\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_120555-6g6x000v/logs\r\n",,terminal_output +2575,6230180,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1152,1,"\n",shellscript,selection_mouse +2576,6230198,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1096,57,"\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2577,6230217,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",934,219,"\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2578,6230269,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",866,287,"_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2579,6230270,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",820,333," --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2580,6230270,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",818,335," --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2581,6230280,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",796,357," --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n 
--log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2582,6230335,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",763,390," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2583,6230348,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",740,413," --warmup_steps=1 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2584,6230399,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",719,434," --num_steps=30 \\n --warmup_steps=1 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2585,6230400,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",686,467,"srun python train_tokenizer.py \\n --num_steps=30 \\n --warmup_steps=1 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2586,6231244,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There 
appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +2587,6231296,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +2588,6232256,"TERMINAL",0,0,"]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +2589,6243841,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1153,0,"",shellscript,selection_mouse +2590,6244650,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1131,22,"ir $array_records_dir\n",shellscript,selection_mouse +2591,6244666,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1108,45,"ct jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2592,6244718,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1015,138,"s tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2593,6244719,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",943,210,"me=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2594,6244728,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",930,223,"og \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2595,6244755,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",895,258,"log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2596,6244756,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",863,290,"log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n 
--name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2597,6244809,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",841,312,"max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2598,6244815,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",824,329,"min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2599,6244930,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",823,330,"-min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2600,6244951,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",840,313,"-max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2601,6244963,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",839,314,"--max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2602,6245015,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",861,292,"--log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2603,6245016,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",892,261," --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2604,6245031,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",926,227," --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse 
+2605,6245085,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",939,214,"--name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2606,6245086,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1011,142,"-tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2607,6245098,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1012,141,"tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2608,6245155,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1078,75,"ntity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2609,6245156,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1080,73,"ity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2610,6245156,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1108,45,"ct jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2611,6245163,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1109,44,"t jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2612,6245182,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1110,43," jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2613,6245489,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1110,0,"",shellscript,selection_mouse +2614,6245489,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1103,7,"project",shellscript,selection_mouse +2615,6245643,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1097,22," --project jafar \\n",shellscript,selection_mouse +2616,6245849,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1071,48," --entity instant-uv \\n --project jafar \\n",shellscript,selection_mouse +2617,6245866,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",935,184," --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n",shellscript,selection_mouse +2618,6245918,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",889,230," --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n",shellscript,selection_mouse +2619,6245920,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",818,301," --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n",shellscript,selection_mouse +2620,6245921,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",763,356," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 
\\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n",shellscript,selection_mouse +2621,6245974,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",740,379," --warmup_steps=1 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n",shellscript,selection_mouse +2622,6246117,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",719,400," --num_steps=30 \\n --warmup_steps=1 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n",shellscript,selection_mouse +2623,6246652,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",720,0,"",shellscript,selection_mouse +2624,6246652,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",719,4," ",shellscript,selection_mouse +2625,6246786,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",719,21," --num_steps=30 \\n",shellscript,selection_mouse +2626,6246974,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",719,44," --num_steps=30 \\n --warmup_steps=1 \\n",shellscript,selection_mouse +2627,6246987,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",719,77," --num_steps=30 \\n --warmup_steps=1 \\n --ckpt_dir $CHECKPOINT_DIR \\n",shellscript,selection_mouse +2628,6247045,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",719,116," --num_steps=30 \\n --warmup_steps=1 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n",shellscript,selection_mouse +2629,6247045,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",719,138," --num_steps=30 \\n --warmup_steps=1 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n",shellscript,selection_mouse +2630,6247046,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",719,170," --num_steps=30 \\n --warmup_steps=1 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n",shellscript,selection_mouse +2631,6247053,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",719,204," --num_steps=30 \\n --warmup_steps=1 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n",shellscript,selection_mouse +2632,6247070,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",719,216," --num_steps=30 \\n --warmup_steps=1 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n",shellscript,selection_mouse +2633,6247124,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",719,287," --num_steps=30 \\n --warmup_steps=1 \\n --ckpt_dir 
$CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n",shellscript,selection_mouse +2634,6247125,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",719,352," --num_steps=30 \\n --warmup_steps=1 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n",shellscript,selection_mouse +2635,6247136,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",719,378," --num_steps=30 \\n --warmup_steps=1 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n",shellscript,selection_mouse +2636,6247188,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",719,400," --num_steps=30 \\n --warmup_steps=1 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n",shellscript,selection_mouse +2637,6247407,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1118,0,"",shellscript,selection_mouse +2638,6255607,"TERMINAL",0,0,"ls",,terminal_command +2639,6261950,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1153,0,"",shellscript,selection_mouse +2640,6262257,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",922,231,"\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2641,6262258,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",685,468,"\nsrun python train_tokenizer.py \\n --num_steps=30 \\n --warmup_steps=1 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2642,6262262,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",107,1046,"\n# array_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords/10fps_160x90\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared'\n\njob_name=""debug""\nslurm_job_id=""mihir-debug-grain-checkpointing-old""\n\n# CHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/ckpt_test/$job_name/$slurm_job_id\nmkdir -p 
$CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --num_steps=30 \\n --warmup_steps=1 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2643,6262279,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,1153,"#!/usr/bin/env bash\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\n# array_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords/10fps_160x90\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared'\n\njob_name=""debug""\nslurm_job_id=""mihir-debug-grain-checkpointing-old""\n\n# CHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/ckpt_test/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --num_steps=30 \\n --warmup_steps=1 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.4e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=2 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-sqrt-lr-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node sqrt-lr-scaling \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir\n",shellscript,selection_mouse +2644,6311256,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",795,0,"",shellscript,selection_mouse +2645,6311769,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",762,0,"",shellscript,selection_mouse +2646,6312058,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",762,0,"\n ",shellscript,content +2647,6312309,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",767,0,"--save_ckpt",shellscript,content +2648,6312790,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",778,0," ",shellscript,content +2649,6312790,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",779,0,"",shellscript,selection_keyboard +2650,6312955,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",779,0,"\",shellscript,content +2651,6312956,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",780,0,"",shellscript,selection_keyboard +2652,6314214,"train_tokenizer.py",0,0,"",python,tab +2653,6320209,"train_tokenizer.py",1080,0,"",python,selection_mouse +2654,6322183,"train_tokenizer.py",1130,0,"",python,selection_mouse +2655,6325414,"train_tokenizer.py",804,0,"",python,selection_mouse +2656,6325562,"train_tokenizer.py",802,9,"save_ckpt",python,selection_mouse +2657,6327662,"TERMINAL",0,0,"srun",,terminal_focus +2658,6328731,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",,terminal_output +2659,6331464,"train_tokenizer.py",0,0,"",python,tab +2660,6332658,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +2661,6333542,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",413,0,"",shellscript,selection_mouse 
+2662,6334364,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",546,0,"",shellscript,selection_mouse +2663,6336462,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",545,0,"",shellscript,selection_command +2664,6336673,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",606,0,"",shellscript,selection_command +2665,6337046,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",555,87,"",shellscript,content +2666,6337103,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",494,0,"",shellscript,selection_command +2667,6337834,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",494,1,"",shellscript,content +2668,6337966,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",494,1,"",shellscript,content +2669,6339342,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",539,0,"",shellscript,selection_mouse +2670,6339485,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",538,2,"/$",shellscript,selection_mouse +2671,6339710,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",538,2,"/$",shellscript,selection_mouse +2672,6339711,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",530,10,"job_name/$",shellscript,selection_mouse +2673,6339766,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",529,11,"$job_name/$",shellscript,selection_mouse +2674,6339767,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",517,23,"checkpoints/$job_name/$",shellscript,selection_mouse +2675,6340078,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",524,0,"",shellscript,selection_mouse +2676,6340079,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",517,11,"checkpoints",shellscript,selection_mouse +2677,6340591,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",507,0,"",shellscript,selection_mouse +2678,6341723,"TERMINAL",0,0,"bash",,terminal_focus +2679,6342873,"TERMINAL",0,0,"cd ..",,terminal_command +2680,6343518,"TERMINAL",0,0,"cd ..",,terminal_command +2681,6343847,"TERMINAL",0,0,"ls",,terminal_command +2682,6343870,"TERMINAL",0,0,"]633;E;2025-07-10 12:11:57 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;Cdebug\r\n]0;tum_cte0515@hkn1990:~/ckpt_test]633;D;0",,terminal_output +2683,6344737,"TERMINAL",0,0,"cd ..",,terminal_command +2684,6345298,"TERMINAL",0,0,"ls",,terminal_command +2685,6349187,"TERMINAL",0,0,"rm -rf ckpt_test/",,terminal_command +2686,6349197,"TERMINAL",0,0,"]633;E;2025-07-10 12:12:03 rm -rf ckpt_test/;46e99295-9803-4583-b36f-e400fb9e619d]633;C]0;tum_cte0515@hkn1990:~]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515",,terminal_output +2687,6473325,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1082,0,"",shellscript,selection_mouse +2688,6473885,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",863,0,"",shellscript,selection_mouse +2689,6473887,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",862,0,"",shellscript,selection_command +2690,6474322,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1081,0,"",shellscript,selection_mouse +2691,6474324,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1080,0,"",shellscript,selection_command +2692,6486222,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",1082,0,"",shellscript,selection_mouse 
+2693,6486958,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",851,0,"",shellscript,selection_mouse +2694,6486959,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",850,0,"",shellscript,selection_command +2695,6489012,"TERMINAL",0,0,"srun",,terminal_focus +2696,6492562,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +2697,6494153,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",255,0,"",shellscript,selection_mouse +2698,6494281,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",255,1,"/",shellscript,selection_mouse +2699,6494290,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",255,3,"/hk",shellscript,selection_mouse +2700,6494323,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",255,10,"/hkfs/work",shellscript,selection_mouse +2701,6494336,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",255,20,"/hkfs/work/workspace",shellscript,selection_mouse +2702,6494350,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",255,37,"/hkfs/work/workspace/scratch/tum_ind3",shellscript,selection_mouse +2703,6494373,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",178,77,"hared/data_new/open_ai_minecraft_arrayrecords/10fps_160x90\narray_records_dir=",shellscript,selection_mouse +2704,6494392,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",107,148,"\n# array_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords/10fps_160x90\narray_records_dir=",shellscript,selection_mouse +2705,6494474,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",221,34,"ds/10fps_160x90\narray_records_dir=",shellscript,selection_mouse +2706,6494475,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",226,29,"fps_160x90\narray_records_dir=",shellscript,selection_mouse +2707,6494476,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",255,103,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked",shellscript,selection_mouse +2708,6495183,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",358,0,"",shellscript,selection_mouse +2709,6495189,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",357,0,"",shellscript,selection_command +2710,6496691,"TERMINAL",0,0,"bash",,terminal_focus +2711,6514570,"TERMINAL",0,0,"cd /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared//checkpoints/debug/mihir-debug-grain-checkpointing-old",,terminal_command +2712,6514577,"TERMINAL",0,0,"]633;E;2025-07-10 12:14:48 cd /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared//checkpoints/debug/mihir-debug-grain-checkpointing-old;46e99295-9803-4583-b36f-e400fb9e619d]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug/mihir-debug-grain-checkpointing-old]633;D;0",,terminal_output +2713,6515595,"TERMINAL",0,0,"ls",,terminal_command +2714,6516549,"TERMINAL",0,0,"ls",,terminal_command +2715,6518872,"TERMINAL",0,0,"srun",,terminal_focus +2716,6519683,"TERMINAL",0,0,"\rpwd",,terminal_output +2717,6520068,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",,terminal_output +2718,6520350,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output 
+2719,6520553,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=409404\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0719\r\nSLURM_JOB_START_TIME=1752136462\r\nSLURM_STEP_NODELIST=hkn0719\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1752179662\r\nSLURM_PMI2_SRUN_PORT=43929\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=2\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3334543\r\nSLURM_PTY_PORT=44699\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=48\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=8\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0719\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=228\r\nSLURM_NODELIST=hkn[0719,0806]\r\nSLURM_SRUN_COMM_PORT=38371\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=8\r\nSLURM_NNODES=2\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3334543\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0719\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38371\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0719,0806]\r\n",,terminal_output +2720,6520689,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +2721,6561078,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. 
Use `wandb login --relogin` to force relogin\r\n",,terminal_output +2722,6561921,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_121534-it66oef1\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run tokenizer-batch-size-scaling-2-node-sqrt-lr-mihir-debug-grain-checkpointing-old\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/it66oef1\r\n",,terminal_output +2723,6636548,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2724,6636891,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\nRunning on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2725,6636973,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2726,6641119,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2727,6641312,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2728,6645412,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2729,6645584,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2730,6645933,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2731,6646042,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2732,6646528,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2733,6646864,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2734,6649208,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2735,6649423,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2736,6771361,"TERMINAL",0,0,"bash",,terminal_focus 
+2737,6772119,"TERMINAL",0,0,"ls",,terminal_command +2738,6772145,"TERMINAL",0,0,"]633;E;2025-07-10 12:19:05 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C000008 000010 000012\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug/mihir-debug-grain-checkpointing-old]633;D;0",,terminal_output +2739,6781886,"TERMINAL",0,0,"ls",,terminal_command +2740,6781896,"TERMINAL",0,0,"]633;E;2025-07-10 12:19:15 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C000010 000012 000014 000016.orbax-checkpoint-tmp-21\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug/mihir-debug-grain-checkpointing-old]633;D;0",,terminal_output +2741,6783234,"TERMINAL",0,0,"ls",,terminal_command +2742,6784130,"TERMINAL",0,0,"ls",,terminal_command +2743,6784739,"TERMINAL",0,0,"ls",,terminal_command +2744,6785276,"TERMINAL",0,0,"ls",,terminal_command +2745,6785851,"TERMINAL",0,0,"ls",,terminal_command +2746,6803084,"train_tokenizer.py",0,0,"",python,tab +2747,6804161,"train_tokenizer.py",5114,0,"",python,selection_mouse +2748,6804163,"train_tokenizer.py",5113,0,"",python,selection_command +2749,6806305,"train_tokenizer.py",7010,0,"",python,selection_mouse +2750,6806308,"train_tokenizer.py",7009,0,"",python,selection_command +2751,6806840,"train_tokenizer.py",6931,0,"",python,selection_mouse +2752,6806859,"train_tokenizer.py",6930,0,"",python,selection_command +2753,6807735,"train_tokenizer.py",6794,0,"",python,selection_mouse +2754,6807735,"train_tokenizer.py",6793,0,"",python,selection_command +2755,6808172,"train_tokenizer.py",6758,0,"",python,selection_mouse +2756,6808176,"train_tokenizer.py",6757,0,"",python,selection_command +2757,6819385,"TERMINAL",0,0,"Step 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nSaved checkpoint at step 2\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.15378260612487793\r\nSaved checkpoint at step 4\r\nStep 4, loss: 0.13490994274616241\r\nStep 5, loss: 0.1231343150138855\r\nSaved checkpoint at step 6\r\nStep 6, loss: 0.11425687372684479\r\nStep 7, loss: 0.11625244468450546\r\nSaved checkpoint at step 8\r\nStep 8, loss: 0.11065095663070679\r\nStep 9, loss: 0.11035721749067307\r\nSaved checkpoint at step 10\r\nStep 10, loss: 0.11024822294712067\r\nStep 11, loss: 0.12256664037704468\r\nSaved checkpoint at step 12\r\nStep 12, loss: 0.11280044168233871\r\nStep 13, loss: 0.10648010671138763\r\nSaved checkpoint at step 14\r\nStep 14, loss: 0.10374370962381363\r\nStep 15, loss: 0.11526947468519211\r\nSaved checkpoint at step 16\r\nStep 16, loss: 0.09977377951145172\r\nStep 17, loss: 0.10807716846466064\r\nSaved checkpoint at step 18\r\nStep 18, loss: 0.09569589793682098\r\nStep 19, loss: 0.10040535777807236\r\nSaved checkpoint at step 20\r\nStep 20, loss: 0.1111653596162796\r\nStep 21, loss: 0.10894852876663208\r\nSaved checkpoint at step 22\r\nStep 22, loss: 0.10860054939985275\r\nStep 23, loss: 0.1017841249704361\r\nSaved checkpoint at step 24\r\nStep 24, loss: 0.0967213362455368\r\nStep 25, loss: 0.09758710861206055\r\nSaved checkpoint at step 26\r\nStep 26, loss: 0.09159968048334122\r\nStep 27, loss: 0.10436154156923294\r\nSaved checkpoint at step 28\r\nStep 28, loss: 0.09572086483240128\r\nStep 29, loss: 0.1064358577132225\r\nSaved checkpoint at step 30\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nSaved checkpoint at step 2\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.15378260612487793\r\nSaved checkpoint at step 4\r\nStep 4, loss: 
0.13490994274616241\r\n[... identical Step 0-29 loss/checkpoint log repeated verbatim by the remaining worker processes; tail of the final repeat follows ...]\r\nStep 25, loss: 
0.09758710861206055\r\nSaved checkpoint at step 26\r\nStep 26, loss: 0.09159968048334122\r\nStep 27, loss: 0.10436154156923294\r\nSaved checkpoint at step 28\r\nStep 28, loss: 0.09572086483240128\r\nStep 29, loss: 0.1064358577132225\r\nSaved checkpoint at step 30\r\n",,terminal_output +2758,6819597,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2759,6819706,"TERMINAL",0,0,"Filtering out episode with length 15, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2760,6819901,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 8, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2761,6819963,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 9, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 7, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2762,6820042,"TERMINAL",0,0,"Filtering out episode with length 3, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 8, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2763,6820312,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 5, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 11, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 2, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2764,6820363,"TERMINAL",0,0,"Filtering out episode with length 10, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 4, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 4, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2765,6821319,"TERMINAL",0,0,"Filtering out episode with length 12, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 14, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2766,6821390,"train_tokenizer.py",6758,0,"",python,selection_mouse +2767,6821402,"train_tokenizer.py",6757,0,"",python,selection_command +2768,6821528,"TERMINAL",0,0,"Filtering out episode with length 6, which is shorter than the requested sequence length 16.\r\nFiltering out 
episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2769,6821753,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2770,6822071,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2771,6822266,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 14, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2772,6822570,"train_tokenizer.py",6758,0,"\n ",python,content +2773,6822692,"train_tokenizer.py",6767,0," keep_period=20,",python,content +2774,6823012,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run tokenizer-batch-size-scaling-2-node-sqrt-lr-mihir-debug-grain-checkpointing-old at: https://wandb.ai/instant-uv/jafar/runs/it66oef1\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_121534-it66oef1/logs\r\n",,terminal_output +2775,6823848,"train_tokenizer.py",6771,0,"",python,selection_mouse +2776,6824057,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d 
'\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +2777,6824206,"train_tokenizer.py",6767,4,"",python,content +2778,6825020,"train_tokenizer.py",6780,0,"",python,selection_mouse +2779,6825028,"TERMINAL",0,0,"]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +2780,6825311,"train_tokenizer.py",6779,2,"20",python,selection_mouse +2781,6826023,"train_tokenizer.py",6779,2,"1",python,content +2782,6826024,"train_tokenizer.py",6780,0,"",python,selection_keyboard +2783,6826067,"train_tokenizer.py",6780,0,"0",python,content +2784,6826068,"train_tokenizer.py",6781,0,"",python,selection_keyboard +2785,6826422,"train_tokenizer.py",6780,1,"",python,content +2786,6826558,"train_tokenizer.py",6779,1,"",python,content +2787,6838712,"train_tokenizer.py",6779,0,"1",python,content +2788,6838713,"train_tokenizer.py",6780,0,"",python,selection_keyboard +2789,6838790,"train_tokenizer.py",6780,0,"0",python,content +2790,6838791,"train_tokenizer.py",6781,0,"",python,selection_keyboard +2791,6839609,"train_tokenizer.py",6780,0,"",python,selection_command +2792,6840349,"train_tokenizer.py",7124,0,"",python,selection_mouse +2793,6840360,"train_tokenizer.py",7123,0,"",python,selection_command +2794,6843323,"TERMINAL",0,0,"srun",,terminal_focus +2795,6844731,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",0,0,"",shellscript,tab +2796,6847192,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",491,0,"",shellscript,selection_mouse +2797,6848269,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",490,1,"",shellscript,content +2798,6848413,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",489,1,"",shellscript,content +2799,6848634,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",488,1,"",shellscript,content +2800,6848865,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",488,0,"n",shellscript,content +2801,6848866,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",489,0,"",shellscript,selection_keyboard +2802,6848925,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",489,0,"e",shellscript,content +2803,6848926,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",490,0,"",shellscript,selection_keyboard +2804,6849043,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",490,0,"w",shellscript,content +2805,6849044,"slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",491,0,"",shellscript,selection_keyboard +2806,6851552,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/batchsize_scaling/tokenizer/sqrt_lr/tester.sh",,terminal_output +2807,6852544,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output 
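The "Step N, loss: X" / "Saved checkpoint at step N" lines that recur throughout this recording come from a per-step training loop. A self-contained sketch reproducing exactly that log pattern (every name and the dummy loss here are stand-ins; the real loop lives in train_tokenizer.py):

save_interval_steps = 2  # inferred from the even-numbered saves in this run

def train_step(step):
    return 0.33 / (1 + step)  # dummy loss; the real update is jitted JAX code

for step in range(30):
    loss = train_step(step)
    print(f"Step {step}, loss: {loss}")
    if (step + 1) % save_interval_steps == 0:
        print(f"Saved checkpoint at step {step + 1}")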
+2808,6852704,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=409404\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0719\r\nSLURM_JOB_START_TIME=1752136462\r\nSLURM_STEP_NODELIST=hkn0719\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1752179662\r\nSLURM_PMI2_SRUN_PORT=43929\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=2\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3334543\r\nSLURM_PTY_PORT=44699\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=48\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=8\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0719\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=228\r\nSLURM_NODELIST=hkn[0719,0806]\r\nSLURM_SRUN_COMM_PORT=38371\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=8\r\nSLURM_NNODES=2\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3334543\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0719\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38371\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0719,0806]\r\n",,terminal_output +2809,6852848,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +2810,6853274,"TERMINAL",0,0,"bash",,terminal_focus +2811,6854998,"TERMINAL",0,0,"ls",,terminal_command +2812,6855001,"TERMINAL",0,0,"]633;E;2025-07-10 12:20:28 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C000026 000028 000030\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug/mihir-debug-grain-checkpointing-old]633;D;0",,terminal_output +2813,6857347,"TERMINAL",0,0,"cd ..",,terminal_command +2814,6857357,"TERMINAL",0,0,"]633;E;2025-07-10 12:20:31 cd ..;46e99295-9803-4583-b36f-e400fb9e619d]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug]633;D;0",,terminal_output +2815,6857649,"TERMINAL",0,0,"ls",,terminal_command +2816,6857659,"TERMINAL",0,0,"]633;E;2025-07-10 12:20:31 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C0000 debug-mihir mihir-debug-grain-checkpointing-new mihir-debug-grain-checkpointing-old\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug]633;D;0",,terminal_output +2817,6887765,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. 
Use `wandb login --relogin` to force relogin\r\n",,terminal_output +2818,6888382,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_122101-skb2m6bd\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run tokenizer-batch-size-scaling-2-node-sqrt-lr-mihir-debug-grain-checkpointing-new\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/skb2m6bd\r\n",,terminal_output +2819,6949776,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2820,6949832,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2821,6950152,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\nRunning on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\nRunning on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\nRunning on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2822,6977473,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2823,6977833,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2824,6977892,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2825,6982463,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2826,6982907,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2827,6985201,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 8).\r\n",,terminal_output +2828,6985569,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nStarting training from step 0...\r\n",,terminal_output +2829,7002388,"TERMINAL",0,0,"cd mihir-debug-grain-checkpointing-new/",,terminal_command +2830,7002720,"TERMINAL",0,0,"ls",,terminal_command +2831,7010118,"TERMINAL",0,0,"watch -n1 ""ls""",,terminal_command 
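The "Parameter counts" lines above report per-component totals that do sum correctly (18978432 + 32768 + 18978416 = 37989616). A generic way to compute such counts over a pytree of arrays, sketched here with placeholder arrays rather than the repo's count_parameters_by_component:

import numpy as np
import jax

def count_params(pytree) -> int:
    # Total element count across every array leaf of a parameter pytree.
    return sum(leaf.size for leaf in jax.tree_util.tree_leaves(pytree))

# Placeholder arrays sized to this run's components, just to verify the sums.
components = {
    "encoder": np.zeros(18978432, dtype=np.int8),
    "vq": np.zeros(32768, dtype=np.int8),
    "decoder": np.zeros(18978416, dtype=np.int8),
}
counts = {name: count_params(p) for name, p in components.items()}
counts["total"] = sum(counts.values())
print(counts)  # {'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}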
+2832,7010169,"TERMINAL",0,0,"]633;E;2025-07-10 12:23:03 watch -n1 ""ls"";46e99295-9803-4583-b36f-e400fb9e619d]633;C",,terminal_output +2833,7010235,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: lshkn1990.localdomain: Thu Jul 10 12:23:04 2025",,terminal_output +2834,7011229,"TERMINAL",0,0,"5",,terminal_output +2835,7012259,"TERMINAL",0,0,"6",,terminal_output +2836,7013259,"TERMINAL",0,0,"7",,terminal_output +2837,7014320,"TERMINAL",0,0,"8",,terminal_output +2838,7015292,"TERMINAL",0,0,"9",,terminal_output +2839,7016308,"TERMINAL",0,0,"10",,terminal_output +2840,7017323,"TERMINAL",0,0,"1",,terminal_output +2841,7018341,"TERMINAL",0,0,"2",,terminal_output +2842,7019352,"TERMINAL",0,0,"3",,terminal_output +2843,7020408,"TERMINAL",0,0,"4",,terminal_output +2844,7021389,"TERMINAL",0,0,"5",,terminal_output +2845,7022544,"TERMINAL",0,0,"6",,terminal_output +2846,7023563,"TERMINAL",0,0,"7",,terminal_output +2847,7024437,"TERMINAL",0,0,"8",,terminal_output +2848,7025523,"TERMINAL",0,0,"9",,terminal_output +2849,7026547,"TERMINAL",0,0,"20",,terminal_output +2850,7027482,"TERMINAL",0,0,"1",,terminal_output +2851,7028582,"TERMINAL",0,0,"2",,terminal_output +2852,7029514,"TERMINAL",0,0,"3",,terminal_output +2853,7030569,"TERMINAL",0,0,"4",,terminal_output +2854,7031544,"TERMINAL",0,0,"5",,terminal_output +2855,7032573,"TERMINAL",0,0,"6",,terminal_output +2856,7033595,"TERMINAL",0,0,"7",,terminal_output +2857,7034599,"TERMINAL",0,0,"8",,terminal_output +2858,7035618,"TERMINAL",0,0,"9",,terminal_output +2859,7036628,"TERMINAL",0,0,"30",,terminal_output +2860,7037693,"TERMINAL",0,0,"1",,terminal_output +2861,7038660,"TERMINAL",0,0,"2",,terminal_output +2862,7039675,"TERMINAL",0,0,"3",,terminal_output +2863,7040696,"TERMINAL",0,0,"4",,terminal_output +2864,7041789,"TERMINAL",0,0,"5",,terminal_output +2865,7042813,"TERMINAL",0,0,"6",,terminal_output +2866,7043745,"TERMINAL",0,0,"7",,terminal_output +2867,7044859,"TERMINAL",0,0,"8",,terminal_output +2868,7045883,"TERMINAL",0,0,"9",,terminal_output +2869,7046792,"TERMINAL",0,0,"40",,terminal_output +2870,7047808,"TERMINAL",0,0,"1",,terminal_output +2871,7048827,"TERMINAL",0,0,"2",,terminal_output +2872,7049882,"TERMINAL",0,0,"3",,terminal_output +2873,7050904,"TERMINAL",0,0,"4",,terminal_output +2874,7051873,"TERMINAL",0,0,"5",,terminal_output +2875,7052896,"TERMINAL",0,0,"6",,terminal_output +2876,7053905,"TERMINAL",0,0,"7",,terminal_output +2877,7054922,"TERMINAL",0,0,"8",,terminal_output +2878,7056021,"TERMINAL",0,0,"9",,terminal_output +2879,7056960,"TERMINAL",0,0,"50",,terminal_output +2880,7057969,"TERMINAL",0,0,"1",,terminal_output +2881,7059005,"TERMINAL",0,0,"2",,terminal_output +2882,7060011,"TERMINAL",0,0,"3",,terminal_output +2883,7061037,"TERMINAL",0,0,"4",,terminal_output +2884,7062038,"TERMINAL",0,0,"5",,terminal_output +2885,7063086,"TERMINAL",0,0,"6",,terminal_output +2886,7064073,"TERMINAL",0,0,"7",,terminal_output +2887,7065138,"TERMINAL",0,0,"8",,terminal_output +2888,7066108,"TERMINAL",0,0,"9",,terminal_output +2889,7067136,"TERMINAL",0,0,"4:00",,terminal_output +2890,7067840,"TERMINAL",0,0,"srun",,terminal_focus +2891,7068211,"TERMINAL",0,0,"1",,terminal_output +2892,7069157,"TERMINAL",0,0,"2",,terminal_output +2893,7070177,"TERMINAL",0,0,"3",,terminal_output +2894,7071196,"TERMINAL",0,0,"5",,terminal_output +2895,7072210,"TERMINAL",0,0,"6",,terminal_output +2896,7073226,"TERMINAL",0,0,"7",,terminal_output +2897,7074244,"TERMINAL",0,0,"8",,terminal_output +2898,7075261,"TERMINAL",0,0,"9",,terminal_output 
+2899,7076280,"TERMINAL",0,0,"10\r000002.orbax-checkpoint-tmp-0",,terminal_output +2900,7077330,"TERMINAL",0,0,"1",,terminal_output +2901,7078314,"TERMINAL",0,0,"2",,terminal_output +2902,7079339,"TERMINAL",0,0,"3",,terminal_output +2903,7080353,"TERMINAL",0,0,"4",,terminal_output +2904,7081368,"TERMINAL",0,0,"5",,terminal_output +2905,7082442,"TERMINAL",0,0,"6\r000004.orbax-checkpoint-tmp-3",,terminal_output +2906,7083485,"TERMINAL",0,0,"7",,terminal_output +2907,7084484,"TERMINAL",0,0,"8",,terminal_output +2908,7085457,"TERMINAL",0,0,"9",,terminal_output +2909,7086470,"TERMINAL",0,0,"20",,terminal_output +2910,7087490,"TERMINAL",0,0,"1\r000006.orbax-checkpoint-tmp-6",,terminal_output +2911,7088584,"TERMINAL",0,0,"2",,terminal_output +2912,7089535,"TERMINAL",0,0,"3",,terminal_output +2913,7090630,"TERMINAL",0,0,"4",,terminal_output +2914,7091562,"TERMINAL",0,0,"5",,terminal_output +2915,7092654,"TERMINAL",0,0,"6\r000008.orbax-checkpoint-tmp-9",,terminal_output +2916,7093706,"TERMINAL",0,0,"7",,terminal_output +2917,7094692,"TERMINAL",0,0,"8",,terminal_output +2918,7095726,"TERMINAL",0,0,"\r9",,terminal_output +2919,7096775,"TERMINAL",0,0,"30",,terminal_output +2920,7097805,"TERMINAL",0,0,"1",,terminal_output +2921,7098802,"TERMINAL",0,0,"2\r000010.orbax-checkpoint-tmp-12",,terminal_output +2922,7099858,"TERMINAL",0,0,"3",,terminal_output +2923,7100874,"TERMINAL",0,0,"\r4",,terminal_output +2924,7101865,"TERMINAL",0,0,"5",,terminal_output +2925,7102923,"TERMINAL",0,0,"6",,terminal_output +2926,7104053,"TERMINAL",0,0,"7\r000012.orbax-checkpoint-tmp-15",,terminal_output +2927,7104923,"TERMINAL",0,0,"8",,terminal_output +2928,7105992,"TERMINAL",0,0,"\r9",,terminal_output +2929,7106958,"TERMINAL",0,0,"40",,terminal_output +2930,7107966,"TERMINAL",0,0,"1",,terminal_output +2931,7108878,"train_tokenizer.py",0,0,"",python,tab +2932,7109005,"TERMINAL",0,0,"2\r000014.orbax-checkpoint-tmp-18",,terminal_output +2933,7110061,"TERMINAL",0,0,"3",,terminal_output +2934,7111037,"TERMINAL",0,0,"4",,terminal_output +2935,7112081,"TERMINAL",0,0,"\r5",,terminal_output +2936,7113075,"TERMINAL",0,0,"6",,terminal_output +2937,7114187,"TERMINAL",0,0,"7",,terminal_output +2938,7115123,"TERMINAL",0,0,"8\r000016.orbax-checkpoint-tmp-21",,terminal_output +2939,7116131,"TERMINAL",0,0,"9",,terminal_output +2940,7117175,"TERMINAL",0,0,"50",,terminal_output +2941,7118173,"TERMINAL",0,0,"1",,terminal_output +2942,7119207,"TERMINAL",0,0,"2\r000018.orbax-checkpoint-tmp-24",,terminal_output +2943,7120227,"TERMINAL",0,0,"4",,terminal_output +2944,7121474,"TERMINAL",0,0,"5",,terminal_output +2945,7122292,"TERMINAL",0,0,"\r6",,terminal_output +2946,7123407,"TERMINAL",0,0,"7",,terminal_output +2947,7124281,"TERMINAL",0,0,"8",,terminal_output +2948,7125440,"TERMINAL",0,0,"9\r000020.orbax-checkpoint-tmp-27",,terminal_output +2949,7126375,"TERMINAL",0,0,"5:00",,terminal_output +2950,7127360,"TERMINAL",0,0,"\r1",,terminal_output +2951,7128387,"TERMINAL",0,0,"2",,terminal_output +2952,7129493,"TERMINAL",0,0,"3",,terminal_output +2953,7130778,"TERMINAL",0,0,"4\r000022.orbax-checkpoint-tmp-30",,terminal_output +2954,7131455,"TERMINAL",0,0,"5",,terminal_output +2955,7132548,"TERMINAL",0,0,"\r6",,terminal_output +2956,7133468,"TERMINAL",0,0,"7",,terminal_output +2957,7134492,"TERMINAL",0,0,"8",,terminal_output +2958,7135691,"TERMINAL",0,0,"9\r000024.orbax-checkpoint-tmp-33",,terminal_output +2959,7136570,"TERMINAL",0,0,"10",,terminal_output +2960,7137653,"TERMINAL",0,0,"1",,terminal_output 
+2961,7139074,"TERMINAL",0,0,"\r2",,terminal_output +2962,7139628,"TERMINAL",0,0,"3",,terminal_output +2963,7140703,"TERMINAL",0,0,"4",,terminal_output +2964,7141634,"TERMINAL",0,0,"5\r000026.orbax-checkpoint-tmp-36",,terminal_output +2965,7142656,"TERMINAL",0,0,"6",,terminal_output +2966,7143680,"TERMINAL",0,0,"7",,terminal_output +2967,7144723,"TERMINAL",0,0,"8",,terminal_output +2968,7146427,"TERMINAL",0,0,"9",,terminal_output +2969,7146861,"TERMINAL",0,0,"20\r000028.orbax-checkpoint-tmp-39",,terminal_output +2970,7147865,"TERMINAL",0,0,"1",,terminal_output +2971,7148798,"TERMINAL",0,0,"\r2",,terminal_output +2972,7149822,"TERMINAL",0,0,"3",,terminal_output +2973,7151152,"TERMINAL",0,0,"4",,terminal_output +2974,7151869,"TERMINAL",0,0,"5\r000030.orbax-checkpoint-tmp-42",,terminal_output +2975,7153121,"TERMINAL",0,0,"6",,terminal_output +2976,7153449,"TERMINAL",0,0,"Step 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nSaved checkpoint at step 2\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.15378320217132568\r\nSaved checkpoint at step 4\r\nStep 4, loss: 0.13490693271160126\r\nStep 5, loss: 0.12313315272331238\r\nSaved checkpoint at step 6\r\nStep 6, loss: 0.11425269395112991\r\nStep 7, loss: 0.11629357188940048\r\nSaved checkpoint at step 8\r\nStep 8, loss: 0.11066249758005142\r\nStep 9, loss: 0.11040424555540085\r\nSaved checkpoint at step 10\r\nStep 10, loss: 0.11020518839359283\r\nStep 11, loss: 0.12242428213357925\r\nSaved checkpoint at step 12\r\nStep 12, loss: 0.11282980442047119\r\nStep 13, loss: 0.10651187598705292\r\nSaved checkpoint at step 14\r\nStep 14, loss: 0.10383078455924988\r\nStep 15, loss: 0.11540766060352325\r\nSaved checkpoint at step 16\r\nStep 16, loss: 0.10047846287488937\r\nStep 17, loss: 0.10804067552089691\r\nSaved checkpoint at step 18\r\nStep 18, loss: 0.09622284024953842\r\nStep 19, loss: 0.09978687763214111\r\nSaved checkpoint at step 20\r\nStep 20, loss: 0.11120744794607162\r\nStep 21, loss: 0.10859648138284683\r\nSaved checkpoint at step 22\r\nStep 22, loss: 0.10887590050697327\r\nStep 23, loss: 0.1018231213092804\r\nSaved checkpoint at step 24\r\nStep 24, loss: 0.09663458168506622\r\nStep 25, loss: 0.09739810973405838\r\nSaved checkpoint at step 26\r\nStep 26, loss: 0.09140750020742416\r\nStep 27, loss: 0.10390116274356842\r\nSaved checkpoint at step 28\r\nStep 28, loss: 0.09546089172363281\r\nStep 29, loss: 0.10609658062458038\r\nSaved checkpoint at step 30\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nSaved checkpoint at step 2\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.15378320217132568\r\nSaved checkpoint at step 4\r\nStep 4, loss: 0.13490693271160126\r\nStep 5, loss: 0.12313315272331238\r\nSaved checkpoint at step 6\r\nStep 6, loss: 0.11425269395112991\r\nStep 7, loss: 0.11629357188940048\r\nSaved checkpoint at step 8\r\nStep 8, loss: 0.11066249758005142\r\nStep 9, loss: 0.11040424555540085\r\nSaved checkpoint at step 10\r\nStep 10, loss: 0.11020518839359283\r\nStep 11, loss: 0.12242428213357925\r\nSaved checkpoint at step 12\r\nStep 12, loss: 0.11282980442047119\r\nStep 13, loss: 0.10651187598705292\r\nSaved checkpoint at step 14\r\nStep 14, loss: 0.10383078455924988\r\nStep 15, loss: 0.11540766060352325\r\nSaved checkpoint at step 16\r\nStep 16, loss: 0.10047846287488937\r\nStep 17, loss: 0.10804067552089691\r\nSaved checkpoint at step 18\r\nStep 18, loss: 0.09622284024953842\r\nStep 19, loss: 0.09978687763214111\r\nSaved checkpoint at step 20\r\nStep 20, loss: 
0.11120744794607162\r\nStep 21, loss: 0.10859648138284683\r\nSaved checkpoint at step 22\r\nStep 22, loss: 0.10887590050697327\r\nStep 23, loss: 0.1018231213092804\r\nSaved checkpoint at step 24\r\nStep 24, loss: 0.09663458168506622\r\nStep 25, loss: 0.09739810973405838\r\nSaved checkpoint at step 26\r\nStep 26, loss: 0.09140750020742416\r\nStep 27, loss: 0.10390116274356842\r\nSaved checkpoint at step 28\r\nStep 28, loss: 0.09546089172363281\r\nStep 29, loss: 0.10609658062458038\r\nSaved checkpoint at step 30\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nSaved checkpoint at step 2\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.15378320217132568\r\nSaved checkpoint at step 4\r\nStep 4, loss: 0.13490693271160126\r\nStep 5, loss: 0.12313315272331238\r\nSaved checkpoint at step 6\r\nStep 6, loss: 0.11425269395112991\r\nStep 7, loss: 0.11629357188940048\r\nSaved checkpoint at step 8\r\nStep 8, loss: 0.11066249758005142\r\nStep 9, loss: 0.11040424555540085\r\nSaved checkpoint at step 10\r\nStep 10, loss: 0.11020518839359283\r\nStep 11, loss: 0.12242428213357925\r\nSaved checkpoint at step 12\r\nStep 12, loss: 0.11282980442047119\r\nStep 13, loss: 0.10651187598705292\r\nSaved checkpoint at step 14\r\nStep 14, loss: 0.10383078455924988\r\nStep 15, loss: 0.11540766060352325\r\nSaved checkpoint at step 16\r\nStep 16, loss: 0.10047846287488937\r\nStep 17, loss: 0.10804067552089691\r\nSaved checkpoint at step 18\r\nStep 18, loss: 0.09622284024953842\r\nStep 19, loss: 0.09978687763214111\r\nSaved checkpoint at step 20\r\nStep 20, loss: 0.11120744794607162\r\nStep 21, loss: 0.10859648138284683\r\nSaved checkpoint at step 22\r\nStep 22, loss: 0.10887590050697327\r\nStep 23, loss: 0.1018231213092804\r\nSaved checkpoint at step 24\r\nStep 24, loss: 0.09663458168506622\r\nStep 25, loss: 0.09739810973405838\r\nSaved checkpoint at step 26\r\nStep 26, loss: 0.09140750020742416\r\nStep 27, loss: 0.10390116274356842\r\nSaved checkpoint at step 28\r\nStep 28, loss: 0.09546089172363281\r\nStep 29, loss: 0.10609658062458038\r\nSaved checkpoint at step 30\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nSaved checkpoint at step 2\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.15378320217132568\r\nSaved checkpoint at step 4\r\nStep 4, loss: 0.13490693271160126\r\nStep 5, loss: 0.12313315272331238\r\nSaved checkpoint at step 6\r\nStep 6, loss: 0.11425269395112991\r\nStep 7, loss: 0.11629357188940048\r\nSaved checkpoint at step 8\r\nStep 8, loss: 0.11066249758005142\r\nStep 9, loss: 0.11040424555540085\r\nSaved checkpoint at step 10\r\nStep 10, loss: 0.11020518839359283\r\nStep 11, loss: 0.12242428213357925\r\nSaved checkpoint at step 12\r\nStep 12, loss: 0.11282980442047119\r\nStep 13, loss: 0.10651187598705292\r\nSaved checkpoint at step 14\r\nStep 14, loss: 0.10383078455924988\r\nStep 15, loss: 0.11540766060352325\r\nSaved checkpoint at step 16\r\nStep 16, loss: 0.10047846287488937\r\nStep 17, loss: 0.10804067552089691\r\nSaved checkpoint at step 18\r\nStep 18, loss: 0.09622284024953842\r\nStep 19, loss: 0.09978687763214111\r\nSaved checkpoint at step 20\r\nStep 20, loss: 0.11120744794607162\r\nStep 21, loss: 0.10859648138284683\r\nSaved checkpoint at step 22\r\nStep 22, loss: 0.10887590050697327\r\nStep 23, loss: 0.1018231213092804\r\nSaved checkpoint at step 24\r\nStep 24, loss: 0.09663458168506622\r\nStep 25, loss: 0.09739810973405838\r\nSaved checkpoint at step 26\r\nStep 26, loss: 0.09140750020742416\r\nStep 27, loss: 
0.10390116274356842\r\nSaved checkpoint at step 28\r\nStep 28, loss: 0.09546089172363281\r\nStep 29, loss: 0.10609658062458038\r\nSaved checkpoint at step 30\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nSaved checkpoint at step 2\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.15378320217132568\r\nSaved checkpoint at step 4\r\nStep 4, loss: 0.13490693271160126\r\nStep 5, loss: 0.12313315272331238\r\nSaved checkpoint at step 6\r\nStep 6, loss: 0.11425269395112991\r\nStep 7, loss: 0.11629357188940048\r\nSaved checkpoint at step 8\r\nStep 8, loss: 0.11066249758005142\r\nStep 9, loss: 0.11040424555540085\r\nSaved checkpoint at step 10\r\nStep 10, loss: 0.11020518839359283\r\nStep 11, loss: 0.12242428213357925\r\nSaved checkpoint at step 12\r\nStep 12, loss: 0.11282980442047119\r\nStep 13, loss: 0.10651187598705292\r\nSaved checkpoint at step 14\r\nStep 14, loss: 0.10383078455924988\r\nStep 15, loss: 0.11540766060352325\r\nSaved checkpoint at step 16\r\nStep 16, loss: 0.10047846287488937\r\nStep 17, loss: 0.10804067552089691\r\nSaved checkpoint at step 18\r\nStep 18, loss: 0.09622284024953842\r\nStep 19, loss: 0.09978687763214111\r\nSaved checkpoint at step 20\r\nStep 20, loss: 0.11120744794607162\r\nStep 21, loss: 0.10859648138284683\r\nSaved checkpoint at step 22\r\nStep 22, loss: 0.10887590050697327\r\nStep 23, loss: 0.1018231213092804\r\nSaved checkpoint at step 24\r\nStep 24, loss: 0.09663458168506622\r\nStep 25, loss: 0.09739810973405838\r\nSaved checkpoint at step 26\r\nStep 26, loss: 0.09140750020742416\r\nStep 27, loss: 0.10390116274356842\r\nSaved checkpoint at step 28\r\nStep 28, loss: 0.09546089172363281\r\nStep 29, loss: 0.10609658062458038\r\nSaved checkpoint at step 30\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nSaved checkpoint at step 2\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.15378320217132568\r\nSaved checkpoint at step 4\r\nStep 4, loss: 0.13490693271160126\r\nStep 5, loss: 0.12313315272331238\r\nSaved checkpoint at step 6\r\nStep 6, loss: 0.11425269395112991\r\nStep 7, loss: 0.11629357188940048\r\nSaved checkpoint at step 8\r\nStep 8, loss: 0.11066249758005142\r\nStep 9, loss: 0.11040424555540085\r\nSaved checkpoint at step 10\r\nStep 10, loss: 0.11020518839359283\r\nStep 11, loss: 0.12242428213357925\r\nSaved checkpoint at step 12\r\nStep 12, loss: 0.11282980442047119\r\nStep 13, loss: 0.10651187598705292\r\nSaved checkpoint at step 14\r\nStep 14, loss: 0.10383078455924988\r\nStep 15, loss: 0.11540766060352325\r\nSaved checkpoint at step 16\r\nStep 16, loss: 0.10047846287488937\r\nStep 17, loss: 0.10804067552089691\r\nSaved checkpoint at step 18\r\nStep 18, loss: 0.09622284024953842\r\nStep 19, loss: 0.09978687763214111\r\nSaved checkpoint at step 20\r\nStep 20, loss: 0.11120744794607162\r\nStep 21, loss: 0.10859648138284683\r\nSaved checkpoint at step 22\r\nStep 22, loss: 0.10887590050697327\r\nStep 23, loss: 0.1018231213092804\r\nSaved checkpoint at step 24\r\nStep 24, loss: 0.09663458168506622\r\nStep 25, loss: 0.09739810973405838\r\nSaved checkpoint at step 26\r\nStep 26, loss: 0.09140750020742416\r\nStep 27, loss: 0.10390116274356842\r\nSaved checkpoint at step 28\r\nStep 28, loss: 0.09546089172363281\r\nStep 29, loss: 0.10609658062458038\r\nSaved checkpoint at step 30\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nSaved checkpoint at step 2\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.15378320217132568\r\nSaved checkpoint at step 4\r\nStep 
4, loss: 0.13490693271160126\r\nStep 5, loss: 0.12313315272331238\r\nSaved checkpoint at step 6\r\nStep 6, loss: 0.11425269395112991\r\nStep 7, loss: 0.11629357188940048\r\nSaved checkpoint at step 8\r\nStep 8, loss: 0.11066249758005142\r\nStep 9, loss: 0.11040424555540085\r\nSaved checkpoint at step 10\r\nStep 10, loss: 0.11020518839359283\r\nStep 11, loss: 0.12242428213357925\r\nSaved checkpoint at step 12\r\nStep 12, loss: 0.11282980442047119\r\nStep 13, loss: 0.10651187598705292\r\nSaved checkpoint at step 14\r\nStep 14, loss: 0.10383078455924988\r\nStep 15, loss: 0.11540766060352325\r\nSaved checkpoint at step 16\r\nStep 16, loss: 0.10047846287488937\r\nStep 17, loss: 0.10804067552089691\r\nSaved checkpoint at step 18\r\nStep 18, loss: 0.09622284024953842\r\nStep 19, loss: 0.09978687763214111\r\nSaved checkpoint at step 20\r\nStep 20, loss: 0.11120744794607162\r\nStep 21, loss: 0.10859648138284683\r\nSaved checkpoint at step 22\r\nStep 22, loss: 0.10887590050697327\r\nStep 23, loss: 0.1018231213092804\r\nSaved checkpoint at step 24\r\nStep 24, loss: 0.09663458168506622\r\nStep 25, loss: 0.09739810973405838\r\nSaved checkpoint at step 26\r\nStep 26, loss: 0.09140750020742416\r\nStep 27, loss: 0.10390116274356842\r\nSaved checkpoint at step 28\r\nStep 28, loss: 0.09546089172363281\r\nStep 29, loss: 0.10609658062458038\r\nSaved checkpoint at step 30\r\nStep 0, loss: 0.33292484283447266\r\nStep 1, loss: 0.33973613381385803\r\nSaved checkpoint at step 2\r\nStep 2, loss: 0.21907024085521698\r\nStep 3, loss: 0.15378320217132568\r\nSaved checkpoint at step 4\r\nStep 4, loss: 0.13490693271160126\r\nStep 5, loss: 0.12313315272331238\r\nSaved checkpoint at step 6\r\nStep 6, loss: 0.11425269395112991\r\nStep 7, loss: 0.11629357188940048\r\nSaved checkpoint at step 8\r\nStep 8, loss: 0.11066249758005142\r\nStep 9, loss: 0.11040424555540085\r\nSaved checkpoint at step 10\r\nStep 10, loss: 0.11020518839359283\r\nStep 11, loss: 0.12242428213357925\r\nSaved checkpoint at step 12\r\nStep 12, loss: 0.11282980442047119\r\nStep 13, loss: 0.10651187598705292\r\nSaved checkpoint at step 14\r\nStep 14, loss: 0.10383078455924988\r\nStep 15, loss: 0.11540766060352325\r\nSaved checkpoint at step 16\r\nStep 16, loss: 0.10047846287488937\r\nStep 17, loss: 0.10804067552089691\r\nSaved checkpoint at step 18\r\nStep 18, loss: 0.09622284024953842\r\nStep 19, loss: 0.09978687763214111\r\nSaved checkpoint at step 20\r\nStep 20, loss: 0.11120744794607162\r\nStep 21, loss: 0.10859648138284683\r\nSaved checkpoint at step 22\r\nStep 22, loss: 0.10887590050697327\r\nStep 23, loss: 0.1018231213092804\r\nSaved checkpoint at step 24\r\nStep 24, loss: 0.09663458168506622\r\nStep 25, loss: 0.09739810973405838\r\nSaved checkpoint at step 26\r\nStep 26, loss: 0.09140750020742416\r\nStep 27, loss: 0.10390116274356842\r\nSaved checkpoint at step 28\r\nStep 28, loss: 0.09546089172363281\r\nStep 29, loss: 0.10609658062458038\r\nSaved checkpoint at step 30\r\n",,terminal_output +2977,7153712,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2978,7153855,"TERMINAL",0,0,"\r7",,terminal_output +2979,7153869,"TERMINAL",0,0,"Filtering out episode with length 8, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 3, which is shorter 
than the requested sequence length 16.\r\nFiltering out episode with length 8, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2980,7153936,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 7, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2981,7154003,"TERMINAL",0,0,"Filtering out episode with length 2, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 15, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2982,7154130,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 9, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 10, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2983,7154322,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2984,7154391,"TERMINAL",0,0,"Filtering out episode with length 5, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 11, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2985,7154440,"TERMINAL",0,0,"Filtering out episode with length 4, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 4, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2986,7154944,"TERMINAL",0,0,"8",,terminal_output +2987,7155254,"TERMINAL",0,0,"Filtering out episode with length 12, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 14, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2988,7155660,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 14, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2989,7155944,"TERMINAL",0,0,"Filtering out episode with length 6, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2990,7155944,"TERMINAL",0,0,"9",,terminal_output +2991,7156104,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2992,7156930,"TERMINAL",0,0,"30",,terminal_output +2993,7157528,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run tokenizer-batch-size-scaling-2-node-sqrt-lr-mihir-debug-grain-checkpointing-new at: 
https://wandb.ai/instant-uv/jafar/runs/skb2m6bd\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250710_122101-skb2m6bd/logs\r\n",,terminal_output +2994,7158021,"TERMINAL",0,0,"1",,terminal_output +2995,7158465,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +2996,7158523,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +2997,7158585,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +2998,7158687,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +2999,7158795,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +3000,7158853,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +3001,7158958,"TERMINAL",0,0,"2",,terminal_output +3002,7159650,"TERMINAL",0,0,"]0;tum_cte0515@hkn0719:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0719 jafar]$ ",,terminal_output +3003,7160059,"TERMINAL",0,0,"3",,terminal_output +3004,7160970,"TERMINAL",0,0,"4",,terminal_output +3005,7161993,"TERMINAL",0,0,"5",,terminal_output +3006,7163019,"TERMINAL",0,0,"6",,terminal_output +3007,7164026,"TERMINAL",0,0,"7",,terminal_output 
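The repeated "Filtering out episode with length N ..." warnings mean the data pipeline drops any episode shorter than the requested sequence length (16 here), since it cannot yield even one training window. A self-contained sketch of that predicate (the real filter lives in the repo's grain pipeline; the names here are assumptions):

def keep_episode(episode_len: int, seq_len: int = 16) -> bool:
    # Mirrors the dataloader's warning: episodes too short to yield a
    # single window of seq_len frames are dropped.
    if episode_len < seq_len:
        print(
            f"Filtering out episode with length {episode_len}, which is "
            f"shorter than the requested sequence length {seq_len}."
        )
        return False
    return True

episode_lengths = [1, 15, 8, 20, 16]
usable = [n for n in episode_lengths if keep_episode(n)]  # -> [20, 16]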
+3008,7165059,"TERMINAL",0,0,"8",,terminal_output +3009,7167033,"TERMINAL",0,0,"9",,terminal_output +3010,7167086,"TERMINAL",0,0,"40",,terminal_output +3011,7168238,"train_tokenizer.py",0,0,"",python,tab +3012,7168262,"TERMINAL",0,0,"1",,terminal_output +3013,7169184,"TERMINAL",0,0,"2",,terminal_output +3014,7170122,"TERMINAL",0,0,"3",,terminal_output +3015,7170757,"train_tokenizer.py",6742,0,"",python,selection_mouse +3016,7171138,"TERMINAL",0,0,"4",,terminal_output +3017,7171263,"train_tokenizer.py",6773,0,"",python,selection_mouse +3018,7171399,"train_tokenizer.py",6767,11,"keep_period",python,selection_mouse +3019,7172152,"TERMINAL",0,0,"5",,terminal_output +3020,7173188,"TERMINAL",0,0,"6",,terminal_output +3021,7174184,"TERMINAL",0,0,"7",,terminal_output +3022,7175216,"TERMINAL",0,0,"9",,terminal_output +3023,7176215,"TERMINAL",0,0,"50",,terminal_output +3024,7177233,"TERMINAL",0,0,"1",,terminal_output +3025,7178247,"TERMINAL",0,0,"2",,terminal_output +3026,7179263,"TERMINAL",0,0,"3",,terminal_output +3027,7180282,"TERMINAL",0,0,"4",,terminal_output +3028,7181298,"TERMINAL",0,0,"5",,terminal_output +3029,7182312,"TERMINAL",0,0,"6",,terminal_output +3030,7183331,"TERMINAL",0,0,"7",,terminal_output +3031,7184347,"TERMINAL",0,0,"8",,terminal_output +3032,7185361,"TERMINAL",0,0,"9",,terminal_output +3033,7186483,"TERMINAL",0,0,"6:00",,terminal_output +3034,7187504,"TERMINAL",0,0,"1",,terminal_output +3035,7188426,"TERMINAL",0,0,"2",,terminal_output +3036,7189426,"TERMINAL",0,0,"3",,terminal_output +3037,7190483,"TERMINAL",0,0,"4",,terminal_output +3038,7191498,"TERMINAL",0,0,"5",,terminal_output +3039,7192524,"TERMINAL",0,0,"6",,terminal_output +3040,7193493,"TERMINAL",0,0,"7",,terminal_output +3041,7194510,"TERMINAL",0,0,"8",,terminal_output +3042,7195551,"TERMINAL",0,0,"9",,terminal_output +3043,7196623,"TERMINAL",0,0,"10",,terminal_output +3044,7197560,"TERMINAL",0,0,"1",,terminal_output +3045,7198667,"TERMINAL",0,0,"2",,terminal_output +3046,7199587,"TERMINAL",0,0,"3",,terminal_output +3047,7200624,"TERMINAL",0,0,"4",,terminal_output +3048,7201637,"TERMINAL",0,0,"5",,terminal_output +3049,7202660,"TERMINAL",0,0,"6",,terminal_output +3050,7203683,"TERMINAL",0,0,"7",,terminal_output +3051,7204756,"TERMINAL",0,0,"8",,terminal_output +3052,7205733,"TERMINAL",0,0,"9",,terminal_output +3053,7206858,"TERMINAL",0,0,"20",,terminal_output +3054,7207715,"TERMINAL",0,0,"1",,terminal_output +3055,7208730,"TERMINAL",0,0,"2",,terminal_output +3056,7209746,"TERMINAL",0,0,"3",,terminal_output +3057,7210853,"TERMINAL",0,0,"4",,terminal_output +3058,7211776,"TERMINAL",0,0,"5",,terminal_output +3059,7212795,"TERMINAL",0,0,"6",,terminal_output +3060,7213822,"TERMINAL",0,0,"7",,terminal_output +3061,7214850,"TERMINAL",0,0,"8",,terminal_output +3062,7215883,"TERMINAL",0,0,"9",,terminal_output +3063,7216943,"TERMINAL",0,0,"30",,terminal_output +3064,7217876,"TERMINAL",0,0,"1",,terminal_output +3065,7218941,"TERMINAL",0,0,"2",,terminal_output +3066,7219964,"TERMINAL",0,0,"3",,terminal_output +3067,7220989,"TERMINAL",0,0,"4",,terminal_output +3068,7221965,"TERMINAL",0,0,"5",,terminal_output +3069,7222956,"TERMINAL",0,0,"6",,terminal_output +3070,7224063,"TERMINAL",0,0,"7",,terminal_output +3071,7225097,"TERMINAL",0,0,"[1;223H8",,terminal_output +3072,7226007,"TERMINAL",0,0,"9",,terminal_output +3073,7227031,"TERMINAL",0,0,"40",,terminal_output +3074,7228052,"TERMINAL",0,0,"1",,terminal_output +3075,7229080,"TERMINAL",0,0,"2",,terminal_output +3076,7230127,"TERMINAL",0,0,"3",,terminal_output 
+3077,7231131,"TERMINAL",0,0,"4",,terminal_output +3078,7232110,"TERMINAL",0,0,"5",,terminal_output +3079,7233128,"TERMINAL",0,0,"6",,terminal_output +3080,7234207,"TERMINAL",0,0,"7",,terminal_output +3081,7235232,"TERMINAL",0,0,"8",,terminal_output +3082,7236175,"TERMINAL",0,0,"9",,terminal_output +3083,7237201,"TERMINAL",0,0,"50",,terminal_output +3084,7238202,"TERMINAL",0,0,"2",,terminal_output +3085,7239321,"TERMINAL",0,0,"3",,terminal_output +3086,7240245,"TERMINAL",0,0,"4",,terminal_output +3087,7241262,"TERMINAL",0,0,"5",,terminal_output +3088,7242281,"TERMINAL",0,0,"6",,terminal_output +3089,7243303,"TERMINAL",0,0,"7",,terminal_output +3090,7244316,"TERMINAL",0,0,"8",,terminal_output +3091,7245341,"TERMINAL",0,0,"9",,terminal_output +3092,7246358,"TERMINAL",0,0,"7:00",,terminal_output +3093,7247376,"TERMINAL",0,0,"1",,terminal_output +3094,7248396,"TERMINAL",0,0,"2",,terminal_output +3095,7249415,"TERMINAL",0,0,"3",,terminal_output +3096,7250446,"TERMINAL",0,0,"4",,terminal_output +3097,7251525,"TERMINAL",0,0,"5",,terminal_output +3098,7252545,"TERMINAL",0,0,"6",,terminal_output +3099,7253540,"TERMINAL",0,0,"7",,terminal_output +3100,7254529,"TERMINAL",0,0,"8",,terminal_output +3101,7255528,"TERMINAL",0,0,"9",,terminal_output +3102,7256625,"TERMINAL",0,0,"10",,terminal_output +3103,7257562,"TERMINAL",0,0,"1",,terminal_output +3104,7258576,"TERMINAL",0,0,"2",,terminal_output +3105,7259596,"TERMINAL",0,0,"3",,terminal_output +3106,7260618,"TERMINAL",0,0,"4",,terminal_output +3107,7261642,"TERMINAL",0,0,"5",,terminal_output +3108,7262668,"TERMINAL",0,0,"6",,terminal_output +3109,7263654,"TERMINAL",0,0,"7",,terminal_output +3110,7264673,"TERMINAL",0,0,"8",,terminal_output +3111,7265738,"TERMINAL",0,0,"9",,terminal_output +3112,7266702,"TERMINAL",0,0,"20",,terminal_output +3113,7267785,"TERMINAL",0,0,"1",,terminal_output +3114,7268809,"TERMINAL",0,0,"2",,terminal_output +3115,7269751,"TERMINAL",0,0,"3",,terminal_output +3116,7270860,"TERMINAL",0,0,"4",,terminal_output +3117,7271881,"TERMINAL",0,0,"5",,terminal_output +3118,7272805,"TERMINAL",0,0,"6",,terminal_output +3119,7273820,"TERMINAL",0,0,"7",,terminal_output +3120,7274832,"TERMINAL",0,0,"8",,terminal_output +3121,7275847,"TERMINAL",0,0,"9",,terminal_output +3122,7276873,"TERMINAL",0,0,"30",,terminal_output +3123,7277930,"TERMINAL",0,0,"1",,terminal_output +3124,7278897,"TERMINAL",0,0,"2",,terminal_output +3125,7279912,"TERMINAL",0,0,"3",,terminal_output +3126,7280999,"TERMINAL",0,0,"4",,terminal_output +3127,7281945,"TERMINAL",0,0,"5",,terminal_output +3128,7282964,"TERMINAL",0,0,"6",,terminal_output +3129,7284073,"TERMINAL",0,0,"7",,terminal_output +3130,7285093,"TERMINAL",0,0,"8",,terminal_output +3131,7286012,"TERMINAL",0,0,"9",,terminal_output +3132,7287027,"TERMINAL",0,0,"40",,terminal_output +3133,7288064,"TERMINAL",0,0,"1",,terminal_output +3134,7289059,"TERMINAL",0,0,"2",,terminal_output +3135,7290111,"TERMINAL",0,0,"3",,terminal_output +3136,7291135,"TERMINAL",0,0,"4",,terminal_output +3137,7292162,"TERMINAL",0,0,"5",,terminal_output +3138,7293195,"TERMINAL",0,0,"6",,terminal_output +3139,7294209,"TERMINAL",0,0,"7",,terminal_output +3140,7295156,"TERMINAL",0,0,"8",,terminal_output +3141,7296275,"TERMINAL",0,0,"9",,terminal_output +3142,7297278,"TERMINAL",0,0,"51",,terminal_output +3143,7298226,"TERMINAL",0,0,"2",,terminal_output +3144,7299244,"TERMINAL",0,0,"3",,terminal_output +3145,7300248,"TERMINAL",0,0,"4",,terminal_output +3146,7301271,"TERMINAL",0,0,"5",,terminal_output 
+3147,7302269,"TERMINAL",0,0,"6",,terminal_output +3148,7303285,"TERMINAL",0,0,"7",,terminal_output +3149,7304321,"TERMINAL",0,0,"8",,terminal_output +3150,7305318,"TERMINAL",0,0,"9",,terminal_output +3151,7306340,"TERMINAL",0,0,"8:00",,terminal_output +3152,7307356,"TERMINAL",0,0,"1",,terminal_output +3153,7308365,"TERMINAL",0,0,"2",,terminal_output +3154,7309383,"TERMINAL",0,0,"3",,terminal_output +3155,7310396,"TERMINAL",0,0,"4",,terminal_output +3156,7311414,"TERMINAL",0,0,"5",,terminal_output +3157,7312534,"TERMINAL",0,0,"6",,terminal_output +3158,7313461,"TERMINAL",0,0,"7",,terminal_output +3159,7314486,"TERMINAL",0,0,"8",,terminal_output +3160,7315475,"TERMINAL",0,0,"9",,terminal_output +3161,7316530,"TERMINAL",0,0,"10",,terminal_output +3162,7317506,"TERMINAL",0,0,"1",,terminal_output +3163,7318581,"TERMINAL",0,0,"2",,terminal_output +3164,7319537,"TERMINAL",0,0,"3",,terminal_output +3165,7320627,"TERMINAL",0,0,"4",,terminal_output +3166,7321652,"TERMINAL",0,0,"5",,terminal_output +3167,7322587,"TERMINAL",0,0,"6",,terminal_output +3168,7323597,"TERMINAL",0,0,"7",,terminal_output +3169,7324766,"TERMINAL",0,0,"8",,terminal_output +3170,7325631,"TERMINAL",0,0,"9",,terminal_output +3171,7326670,"TERMINAL",0,0,"20",,terminal_output +3172,7327694,"TERMINAL",0,0,"1",,terminal_output +3173,7328721,"TERMINAL",0,0,"2",,terminal_output +3174,7329696,"TERMINAL",0,0,"3",,terminal_output +3175,7330762,"TERMINAL",0,0,"4",,terminal_output +3176,7331786,"TERMINAL",0,0,"5",,terminal_output +3177,7332749,"TERMINAL",0,0,"6",,terminal_output +3178,7333761,"TERMINAL",0,0,"7",,terminal_output +3179,7334779,"TERMINAL",0,0,"8",,terminal_output +3180,7335887,"TERMINAL",0,0,"9",,terminal_output +3181,7336908,"TERMINAL",0,0,"30",,terminal_output +3182,7337842,"TERMINAL",0,0,"1",,terminal_output +3183,7338840,"TERMINAL",0,0,"2",,terminal_output +3184,7339855,"TERMINAL",0,0,"3",,terminal_output +3185,7340870,"TERMINAL",0,0,"4",,terminal_output +3186,7341968,"TERMINAL",0,0,"5",,terminal_output +3187,7342903,"TERMINAL",0,0,"6",,terminal_output +3188,7343920,"TERMINAL",0,0,"7",,terminal_output +3189,7345000,"TERMINAL",0,0,"8",,terminal_output +3190,7345952,"TERMINAL",0,0,"9",,terminal_output +3191,7346970,"TERMINAL",0,0,"40",,terminal_output +3192,7348071,"TERMINAL",0,0,"1",,terminal_output +3193,7349092,"TERMINAL",0,0,"2",,terminal_output +3194,7350022,"TERMINAL",0,0,"3",,terminal_output +3195,7351039,"TERMINAL",0,0,"4",,terminal_output +3196,7352048,"TERMINAL",0,0,"5",,terminal_output +3197,7353064,"TERMINAL",0,0,"6",,terminal_output +3198,7354084,"TERMINAL",0,0,"7",,terminal_output +3199,7355100,"TERMINAL",0,0,"8",,terminal_output +3200,7356160,"TERMINAL",0,0,"9",,terminal_output +3201,7357185,"TERMINAL",0,0,"50",,terminal_output +3202,7358207,"TERMINAL",0,0,"1",,terminal_output +3203,7359232,"TERMINAL",0,0,"2",,terminal_output +3204,7360190,"TERMINAL",0,0,"3",,terminal_output +3205,7361289,"TERMINAL",0,0,"5",,terminal_output +3206,7362309,"TERMINAL",0,0,"6",,terminal_output +3207,7363258,"TERMINAL",0,0,"7",,terminal_output +3208,7364270,"TERMINAL",0,0,"8",,terminal_output +3209,7365294,"TERMINAL",0,0,"9",,terminal_output +3210,7366317,"TERMINAL",0,0,"9:00",,terminal_output +3211,7367332,"TERMINAL",0,0,"1",,terminal_output +3212,7368346,"TERMINAL",0,0,"2",,terminal_output +3213,7369358,"TERMINAL",0,0,"3",,terminal_output +3214,7370374,"TERMINAL",0,0,"4",,terminal_output +3215,7371391,"TERMINAL",0,0,"5",,terminal_output +3216,7372409,"TERMINAL",0,0,"6",,terminal_output 
+3217,7373424,"TERMINAL",0,0,"7",,terminal_output +3218,7374439,"TERMINAL",0,0,"8",,terminal_output +3219,7375457,"TERMINAL",0,0,"9",,terminal_output +3220,7376536,"TERMINAL",0,0,"10",,terminal_output +3221,7377485,"TERMINAL",0,0,"1",,terminal_output +3222,7378502,"TERMINAL",0,0,"2",,terminal_output +3223,7379555,"TERMINAL",0,0,"3",,terminal_output +3224,7380635,"TERMINAL",0,0,"4",,terminal_output +3225,7381657,"TERMINAL",0,0,"5",,terminal_output +3226,7382578,"TERMINAL",0,0,"6",,terminal_output +3227,7383583,"TERMINAL",0,0,"7",,terminal_output +3228,7384627,"TERMINAL",0,0,"8",,terminal_output +3229,7385604,"TERMINAL",0,0,"9",,terminal_output +3230,7386682,"TERMINAL",0,0,"20",,terminal_output +3231,7387638,"TERMINAL",0,0,"1",,terminal_output +3232,7388652,"TERMINAL",0,0,"2",,terminal_output +3233,7389666,"TERMINAL",0,0,"3",,terminal_output +3234,7390773,"TERMINAL",0,0,"4",,terminal_output +3235,7391792,"TERMINAL",0,0,"5",,terminal_output +3236,7391955,"train_tokenizer.py",6862,0,"",python,selection_mouse +3237,7391966,"train_tokenizer.py",6861,0,"",python,selection_command +3238,7392066,"train_tokenizer.py",6861,1,")",python,selection_mouse +3239,7392068,"train_tokenizer.py",6862,0,"",python,selection_command +3240,7392099,"train_tokenizer.py",6844,18,"tories=True,\n )",python,selection_mouse +3241,7392114,"train_tokenizer.py",6835,27,"tmp_directories=True,\n )",python,selection_mouse +3242,7392171,"train_tokenizer.py",6829,33,"eanup_tmp_directories=True,\n )",python,selection_mouse +3243,7392171,"train_tokenizer.py",6825,37," cleanup_tmp_directories=True,\n )",python,selection_mouse +3244,7392172,"train_tokenizer.py",6821,41," cleanup_tmp_directories=True,\n )",python,selection_mouse +3245,7392180,"train_tokenizer.py",6783,79," step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )",python,selection_mouse +3246,7392228,"train_tokenizer.py",6759,103," keep_period=10,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )",python,selection_mouse +3247,7392291,"train_tokenizer.py",6736,126," max_to_keep=3,\n keep_period=10,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )",python,selection_mouse +3248,7392347,"train_tokenizer.py",6678,184," save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=10,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )",python,selection_mouse +3249,7392404,"train_tokenizer.py",6623,239," checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=10,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )",python,selection_mouse +3250,7392715,"TERMINAL",0,0,"6",,terminal_output +3251,7393728,"TERMINAL",0,0,"7",,terminal_output +3252,7394762,"TERMINAL",0,0,"8",,terminal_output +3253,7395767,"TERMINAL",0,0,"9",,terminal_output +3254,7396778,"TERMINAL",0,0,"30",,terminal_output +3255,7397833,"TERMINAL",0,0,"1",,terminal_output +3256,7398812,"TERMINAL",0,0,"2",,terminal_output +3257,7399884,"TERMINAL",0,0,"3",,terminal_output +3258,7400907,"TERMINAL",0,0,"4",,terminal_output +3259,7401883,"TERMINAL",0,0,"5",,terminal_output +3260,7402881,"TERMINAL",0,0,"6",,terminal_output +3261,7403893,"TERMINAL",0,0,"7",,terminal_output +3262,7405008,"TERMINAL",0,0,"8",,terminal_output +3263,7405928,"TERMINAL",0,0,"9",,terminal_output +3264,7406950,"TERMINAL",0,0,"40",,terminal_output +3265,7407964,"TERMINAL",0,0,"1",,terminal_output +3266,7409001,"TERMINAL",0,0,"2",,terminal_output 
+3267,7410002,"TERMINAL",0,0,"3",,terminal_output +3268,7411045,"TERMINAL",0,0,"4",,terminal_output +3269,7412034,"TERMINAL",0,0,"5",,terminal_output +3270,7413095,"TERMINAL",0,0,"6",,terminal_output +3271,7414116,"TERMINAL",0,0,"7",,terminal_output +3272,7415085,"TERMINAL",0,0,"8",,terminal_output +3273,7416166,"TERMINAL",0,0,"9",,terminal_output +3274,7417189,"TERMINAL",0,0,"50",,terminal_output +3275,7418215,"TERMINAL",0,0,"1",,terminal_output +3276,7419240,"TERMINAL",0,0,"2",,terminal_output +3277,7420273,"TERMINAL",0,0,"3",,terminal_output +3278,7421291,"TERMINAL",0,0,"4",,terminal_output +3279,7422314,"TERMINAL",0,0,"6",,terminal_output +3280,7423334,"TERMINAL",0,0,"7",,terminal_output +3281,7424335,"TERMINAL",0,0,"8",,terminal_output +3282,7425281,"TERMINAL",0,0,"9",,terminal_output +3283,7426308,"TERMINAL",0,0,"30:00",,terminal_output +3284,7427299,"TERMINAL",0,0,"1",,terminal_output +3285,7428316,"TERMINAL",0,0,"2",,terminal_output +3286,7429332,"TERMINAL",0,0,"3",,terminal_output +3287,7430348,"TERMINAL",0,0,"4",,terminal_output +3288,7431365,"TERMINAL",0,0,"5",,terminal_output +3289,7432381,"TERMINAL",0,0,"6",,terminal_output +3290,7433396,"TERMINAL",0,0,"7",,terminal_output +3291,7434413,"TERMINAL",0,0,"8",,terminal_output +3292,7435428,"TERMINAL",0,0,"9",,terminal_output +3293,7436436,"TERMINAL",0,0,"10",,terminal_output +3294,7437469,"TERMINAL",0,0,"1",,terminal_output +3295,7438490,"TERMINAL",0,0,"2",,terminal_output +3296,7439513,"TERMINAL",0,0,"3",,terminal_output +3297,7440537,"TERMINAL",0,0,"4",,terminal_output +3298,7441560,"TERMINAL",0,0,"5",,terminal_output +3299,7442531,"TERMINAL",0,0,"6",,terminal_output +3300,7443608,"TERMINAL",0,0,"7",,terminal_output +3301,7444562,"TERMINAL",0,0,"8",,terminal_output +3302,7445658,"TERMINAL",0,0,"9",,terminal_output +3303,7446687,"TERMINAL",0,0,"20",,terminal_output +3304,7447705,"TERMINAL",0,0,"1",,terminal_output +3305,7448649,"TERMINAL",0,0,"2",,terminal_output +3306,7449093,"TERMINAL",0,0,"watch",,terminal_focus +3307,7449657,"TERMINAL",0,0,"3",,terminal_output +3308,7450790,"TERMINAL",0,0,"4",,terminal_output +3309,7451687,"TERMINAL",0,0,"5",,terminal_output +3310,7452692,"TERMINAL",0,0,"6",,terminal_output +3311,7453749,"TERMINAL",0,0,"7",,terminal_output +3312,7454722,"TERMINAL",0,0,"8",,terminal_output +3313,7455097,"train_tokenizer.py",6867,0,"",python,selection_mouse +3314,7455108,"train_tokenizer.py",6866,0,"",python,selection_command +3315,7455624,"train_tokenizer.py",6843,0,"",python,selection_mouse +3316,7455752,"TERMINAL",0,0,"9",,terminal_output +3317,7456757,"TERMINAL",0,0,"30",,terminal_output +3318,7457848,"TERMINAL",0,0,"1",,terminal_output +3319,7458784,"TERMINAL",0,0,"2",,terminal_output +3320,7459804,"TERMINAL",0,0,"3",,terminal_output +3321,7460817,"TERMINAL",0,0,"4",,terminal_output +3322,7461837,"TERMINAL",0,0,"5",,terminal_output +3323,7462851,"TERMINAL",0,0,"6",,terminal_output +3324,7463866,"TERMINAL",0,0,"7",,terminal_output +3325,7464910,"TERMINAL",0,0,"8",,terminal_output +3326,7465933,"TERMINAL",0,0,"9",,terminal_output +3327,7466911,"TERMINAL",0,0,"40",,terminal_output +3328,7467927,"TERMINAL",0,0,"1",,terminal_output +3329,7468563,"train_tokenizer.py",6818,0,"",python,selection_mouse +3330,7468564,"train_tokenizer.py",6817,0,"",python,selection_command +3331,7468945,"TERMINAL",0,0,"2",,terminal_output +3332,7470029,"TERMINAL",0,0,"3",,terminal_output +3333,7470975,"TERMINAL",0,0,"4",,terminal_output +3334,7471994,"TERMINAL",0,0,"5",,terminal_output 
+3335,7473018,"TERMINAL",0,0,"6",,terminal_output +3336,7474025,"TERMINAL",0,0,"7",,terminal_output +3337,7475055,"train_tokenizer.py",10934,0,"",python,selection_command +3338,7475057,"TERMINAL",0,0,"8",,terminal_output +3339,7476057,"TERMINAL",0,0,"9",,terminal_output +3340,7477094,"TERMINAL",0,0,"50",,terminal_output +3341,7478123,"TERMINAL",0,0,"1",,terminal_output +3342,7479146,"TERMINAL",0,0,"2",,terminal_output +3343,7480119,"TERMINAL",0,0,"3",,terminal_output +3344,7481135,"TERMINAL",0,0,"4",,terminal_output +3345,7482216,"TERMINAL",0,0,"5",,terminal_output +3346,7483170,"TERMINAL",0,0,"6",,terminal_output +3347,7484262,"TERMINAL",0,0,"7",,terminal_output +3348,7485203,"TERMINAL",0,0,"9",,terminal_output +3349,7486215,"TERMINAL",0,0,"1:00",,terminal_output +3350,7487239,"TERMINAL",0,0,"1",,terminal_output +3351,7488247,"TERMINAL",0,0,"2",,terminal_output +3352,7489267,"TERMINAL",0,0,"3",,terminal_output +3353,7489461,"train_tokenizer.py",11060,0,"",python,selection_mouse +3354,7489473,"train_tokenizer.py",11059,0,"",python,selection_command +3355,7490278,"TERMINAL",0,0,"4",,terminal_output +3356,7490875,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug/mihir-debug-grain-checkpointing-new]633;D;0",,terminal_output +3357,7492487,"TERMINAL",0,0,"cd ..",,terminal_command +3358,7493629,"TERMINAL",0,0,"ls",,terminal_command +3359,7493655,"TERMINAL",0,0,"]633;E;2025-07-10 12:31:07 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C0000 debug-mihir mihir-debug-grain-checkpointing-new mihir-debug-grain-checkpointing-old\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/debug]633;D;0",,terminal_output +3360,7497115,"TERMINAL",0,0,"ls",,terminal_command +3361,7499182,"TERMINAL",0,0,"cd ..",,terminal_command +3362,7499474,"TERMINAL",0,0,"ls",,terminal_command +3363,7499524,"TERMINAL",0,0,"]633;E;2025-07-10 12:31:13 ls;46e99295-9803-4583-b36f-e400fb9e619d]633;C",,terminal_output +3364,7499717,"TERMINAL",0,0,"0000 3290367 3292213 3292331 3292337 3294603 3296575 3297582 3297727 3299068 3300233 3301026 3307618 3310437 3313565 dyn tokenizer\r\n3290283 3290391 3292221 3292332 3292338 3296502 3297569 3297586 3299016 3299069 3300290 3301027 3307619 3311671 3313570 dynamics_ckpt_dir tokenizer_ckpt_dir\r\n3290284 3290392 3292258 3292333 3292339 3296540 3297575 3297606 3299062 3299258 3300658 3301029 3309662 3311672 3313571 lam train_lam_minecraft_overfit_sample\r\n3290295 3290439 3292328 3292334 3294600 3296571 3297576 3297671 3299063 3299259 3300663 3301030 3309663 3313562 3313572 lam-1-action train_tokenizer_batch_size_scaling_16_node\r\n3290296 3290440 3292329 3292335 3294601 3296573 3297577 3297693 3299065 3299272 3300672 3301031 3309699 3313563 3316022 lam_ckpt_dir train_tokenizer_minecraft_overfit_sample\r\n3290366 3291405 3292330 3292336 3294602 3296574 3297578 3297706 3299066 3299579 3301025 3306801 3310436 3313564 debug lam_main_test\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints]633;D;0",,terminal_output +3365,7502430,"TERMINAL",0,0,"srun",,terminal_focus diff --git a/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-54e098d1-2492-47f1-a955-80881c3022861757959318496-2025_09_15-20.02.18.163/source.csv b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-54e098d1-2492-47f1-a955-80881c3022861757959318496-2025_09_15-20.02.18.163/source.csv new file mode 100644 index 
0000000000000000000000000000000000000000..45e76f853888093d86ad3946d641df624e321aa5 --- /dev/null +++ b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-54e098d1-2492-47f1-a955-80881c3022861757959318496-2025_09_15-20.02.18.163/source.csv @@ -0,0 +1,536 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +1,9,"train_dynamics.py",0,0,"import os\n\n\nos.environ.setdefault(""XLA_PYTHON_CLIENT_MEM_FRACTION"", ""0.98"")\n\nfrom dataclasses import dataclass, field\nimport itertools\nfrom typing import cast, Optional\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.train_utils import (\n get_lr_schedule,\n count_parameters_by_component,\n print_mem_stats,\n print_compiled_memory_stats,\n print_compiled_cost_analysis,\n)\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_type: str = ""maskgit"" # supported options: maskgit, causal\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n val_data_dir: str = """"\n val_interval: int = 20_000\n val_steps: int = 50\n wandb_id: str = """"\n\n\ndef build_model(args: Args, rng: jax.Array) -> tuple[Genie, jax.Array]:\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n 
latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n decode=False,\n rngs=rngs,\n )\n del genie.lam.decoder\n return genie, rng\n\n\ndef build_optimizer(genie: Genie, args: Args) -> tuple[nnx.Optimizer, optax.Schedule]:\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.param_dtype, # moments in full precision\n )\n optimizer = nnx.Optimizer(genie, tx)\n return optimizer, lr_schedule\n\n\ndef build_mesh_and_sharding(\n num_devices: int,\n) -> tuple[Mesh, NamedSharding, NamedSharding]:\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n return mesh, replicated_sharding, videos_sharding\n\n\ndef shard_optimizer_states(\n optimizer: nnx.Optimizer, replicated_sharding: NamedSharding\n) -> None:\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n\ndef build_dataloader(args: Args, data_dir: str) -> grain.DataLoaderIterator:\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(data_dir, x)\n for x in os.listdir(data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n return grain_iterator\n\n\ndef build_checkpoint_manager(args: Args) -> ocp.CheckpointManager:\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""train_dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""train_dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n if args.val_data_dir:\n handler_registry.add(\n 
""val_dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""val_dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n return checkpoint_manager\n\n\ndef restore_or_initialize_components(\n args: Args,\n checkpoint_manager: ocp.CheckpointManager,\n optimizer: nnx.Optimizer,\n train_iterator: grain.DataLoaderIterator,\n rng: jax.Array,\n replicated_sharding: NamedSharding,\n val_iterator: Optional[grain.DataLoaderIterator] = None,\n restore_step: Optional[int] = None,\n) -> tuple[int, nnx.Optimizer, grain.DataLoaderIterator, grain.DataLoaderIterator, jax.Array]:\n step = 0\n if restore_step is None:\n restore_step = checkpoint_manager.latest_step()\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n if args.val_data_dir:\n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n val_dataloader_state=grain.checkpoint.CheckpointRestore(val_iterator), # type: ignore\n )\n else: \n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=restore_args\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n train_iterator = restored[""train_dataloader_state""]\n if args.val_data_dir:\n val_iterator = restored[""val_dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n rng, _rng = jax.random.split(rng)\n optimizer = restore_genie_components(optimizer, replicated_sharding, _rng, args)\n # NOTE: We have to remove the (unused) tokenizer vq dropout due flax.nnx lazily initializing modules.\n # Specifically, the first dynamics model checkpoint will contain the vq dropout module,\n # but the first full restore will fail due to nnx not initializing the module when\n # dropout is set to 0.0.\n del optimizer.model.tokenizer.vq.drop\n return step, optimizer, train_iterator, val_iterator, rng\n\n\ndef main(args: Args) -> None:\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n genie, rng = build_model(args, rng)\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log 
and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n optimizer, lr_schedule = build_optimizer(genie, args)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n _, replicated_sharding, videos_sharding = build_mesh_and_sharding(num_devices)\n\n shard_optimizer_states(optimizer, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n checkpoint_manager = build_checkpoint_manager(args)\n\n # --- Create DataLoaderIterator from dataloader ---\n train_iterator = build_dataloader(args, args.data_dir)\n val_iterator = None\n if args.val_data_dir:\n val_iterator = build_dataloader(args, args.val_data_dir)\n \n # --- Restore checkpoint ---\n if val_iterator:\n step, optimizer, train_iterator, val_iterator, rng = restore_or_initialize_components(\n args, checkpoint_manager, optimizer, train_iterator, rng, replicated_sharding, val_iterator\n )\n else:\n step, optimizer, train_iterator, _, rng = restore_or_initialize_components(\n args, checkpoint_manager, optimizer, train_iterator, rng, replicated_sharding\n )\n\n # --- Define loss and train step (close over args) ---\n def dynamics_loss_fn(\n model: Genie, inputs: dict, training: bool = False, pred_full_frame: bool = False\n ) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n outputs = model(inputs, training=training, pred_full_frame=pred_full_frame)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt_val = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt_val, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt_val, recon)).mean()\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]),\n size=args.num_latent_actions,\n fill_value=0,\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]),\n size=args.num_patch_latents,\n fill_value=0,\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n @nnx.jit(donate_argnums=0)\n def train_step(\n optimizer: nnx.Optimizer, inputs: dict\n ) -> tuple[jax.Array, jax.Array, dict]:\n def 
loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n model.train()\n return dynamics_loss_fn(model, inputs, training=True)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(\n optimizer.model\n )\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n @nnx.jit\n def val_step(genie: Genie, inputs: dict) -> tuple[jax.Array, jax.Array, dict, jax.Array, jax.Array, dict]:\n """"""Evaluate model and compute metrics""""""\n genie.eval()\n (loss, (recon, metrics)) = dynamics_loss_fn(genie, inputs, training=False, pred_full_frame=False)\n (loss_full_frame, (recon_full_frame, metrics_full_frame)) = dynamics_loss_fn(genie, inputs, training=False, pred_full_frame=True)\n return loss, recon, metrics, loss_full_frame, recon_full_frame, metrics_full_frame\n\n\n def calculate_validation_metrics(val_dataloader, genie, rng):\n step = 0\n loss_per_step = []\n metrics_per_step = []\n loss_full_frame_per_step = []\n metrics_full_frame_per_step = []\n inputs = None\n recon = None\n recon_full_frame = None\n for videos in val_dataloader:\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics, loss_full_frame, recon_full_frame, metrics_full_frame = val_step(genie, inputs)\n loss_per_step.append(loss)\n metrics_per_step.append(metrics)\n loss_full_frame_per_step.append(loss_full_frame)\n metrics_full_frame_per_step.append(metrics_full_frame)\n step += 1\n if step > args.val_steps:\n break\n\n if step < args.val_steps:\n print(f""Warning: Your validation dataset is too small to make val_steps many steps. Made {step} steps, expected {args.val_steps}"")\n\n val_metrics = {\n f""val_{key}"": np.mean([float(m[key]) for m in metrics_per_step])\n for key in metrics_per_step[0].keys()\n }\n val_metrics_full_frame = {\n f""val_full_frame_{key}"": np.mean([float(m[key]) for m in metrics_full_frame_per_step])\n for key in metrics_full_frame_per_step[0].keys()\n }\n val_losses = {\n ""val_loss"": np.mean(loss_per_step),\n ""val_loss_full_frame"": np.mean(loss_full_frame_per_step)\n }\n val_metrics.update(val_metrics_full_frame)\n val_metrics.update(val_losses)\n return val_metrics, inputs, recon, recon_full_frame\n\n # --- TRAIN LOOP ---\n dataloader_train = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in train_iterator\n )\n dataloader_val = None\n if val_iterator:\n dataloader_val = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in val_iterator\n )\n if jax.process_index() == 0:\n first_videos = next(dataloader_train)\n sample_inputs = dict(videos=first_videos, mask_rng=rng)\n compiled = train_step.lower(optimizer, sample_inputs).compile()\n print_compiled_memory_stats(compiled.memory_analysis())\n print_compiled_cost_analysis(compiled.cost_analysis())\n # Do not skip the first batch during training\n dataloader_train = itertools.chain([first_videos], dataloader_train)\n print(f""Starting training from step {step}..."")\n first_step = step\n while step < args.num_steps:\n for videos in dataloader_train:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer, inputs)\n if step == first_step:\n print_mem_stats(""After params initialized"")\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n 
step += 1\n\n # --- Validation loss ---\n val_results = {}\n if dataloader_val and step % args.val_interval == 0:\n rng, _rng_mask_val = jax.random.split(rng, 2)\n print(""Calculating validation metrics..."")\n val_metrics, val_gt_batch, val_recon, val_full_frame = calculate_validation_metrics(dataloader_val, optimizer.model, _rng_mask_val)\n print(f""Step {step}, validation loss: {val_metrics['val_loss']}"")\n val_results = {\n ""metrics"": val_metrics,\n ""gt_batch"": val_gt_batch,\n ""recon"": val_recon,\n ""full_frame"": val_full_frame,\n }\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n log_dict = {\n ""loss"": loss,\n ""step"": step,\n **metrics\n }\n if val_results:\n log_dict.update(val_results[""metrics""])\n wandb.log(log_dict)\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if val_results:\n val_results[""gt_seq_val""] = val_results[""gt_batch""][""videos""][0].astype(jnp.float32) / 255.0\n val_results[""recon_seq_val""] = val_results[""recon""].clip(0, 1)\n val_comparison_seq = jnp.concatenate((val_results[""gt_seq_val""], val_results[""recon_seq_val""]), axis=1)\n val_results[""val_comparison_seq""] = einops.rearrange(\n val_comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n val_results[""full_frame_seq_val""] = val_results[""full_frame""][0].clip(0, 1)\n val_results[""val_full_frame_comparison_seq""] = jnp.concatenate((val_results[""gt_seq_val""], val_results[""full_frame_seq_val""]), axis=1)\n val_results[""val_full_frame_comparison_seq""] = einops.rearrange(\n val_results[""val_full_frame_comparison_seq""] * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n if val_results:\n log_images.update(\n dict(\n val_image=wandb.Image(np.asarray(val_results[""gt_seq_val""][args.seq_len - 1])),\n val_recon=wandb.Image(np.asarray(val_results[""recon_seq_val""][args.seq_len - 1])),\n val_true_vs_recon=wandb.Image(\n np.asarray(val_results[""val_comparison_seq""].astype(np.uint8))\n ),\n val_full_frame=wandb.Image(np.asarray(val_results[""full_frame_seq_val""][args.seq_len - 1])),\n val_true_vs_full_frame=wandb.Image(\n np.asarray(val_results[""val_full_frame_comparison_seq""].astype(np.uint8))\n )\n )\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n if args.val_data_dir:\n ckpt_manager_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n ),\n val_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n val_iterator # type: ignore\n )\n )\n else: \n ckpt_manager_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n 
train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n )\n )\n checkpoint_manager.save(\n step,\n args=ckpt_manager_args\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n\n\nif __name__ == ""__main__"":\n args = tyro.cli(Args)\n main(args)\n",python,tab +2,991,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"8:02:18 PM [info] Activating crowd-code\n8:02:18 PM [info] Recording started\n8:02:18 PM [info] Initializing git provider using file system watchers...\n",Log,tab +3,1146,"extension-output-pdoom-org.crowd-code-#1-crowd-code",150,0,"8:02:18 PM [info] Git repository found\n8:02:18 PM [info] Git provider initialized successfully\n8:02:19 PM [info] Initial git state: [object Object]\n",Log,content +4,2499,"train_dynamics.py",0,0,"",python,tab +5,7859,"train_dynamics.py",21570,0,"",python,selection_mouse +6,7861,"train_dynamics.py",21557,18,"val_comparison_seq",python,selection_mouse +7,8924,"train_dynamics.py",21568,0,"",python,selection_mouse +8,9049,"train_dynamics.py",21557,18,"val_comparison_seq",python,selection_mouse +9,14886,"train_dynamics.py",21631,0,"",python,selection_mouse +10,14923,"train_dynamics.py",21630,0,"",python,selection_command +11,30284,"train_dynamics.py",22140,0,"",python,selection_mouse +12,31916,"train_dynamics.py",22413,0,"",python,selection_mouse +13,36277,"train_dynamics.py",22202,0,"",python,selection_mouse +14,45357,"train_dynamics.py",25157,0,"",python,selection_mouse +15,61046,"train_dynamics.py",17266,0,"",python,selection_mouse +16,102708,"train_dynamics.py",10631,0,"",python,selection_command +17,104109,"train_dynamics.py",10661,0,"",python,selection_command +18,105548,"train_dynamics.py",10693,0,"",python,selection_command +19,108196,"train_dynamics.py",10726,0,"",python,selection_command +20,108392,"train_dynamics.py",10755,0,"",python,selection_command +21,108540,"train_dynamics.py",10788,0,"",python,selection_command +22,108947,"train_dynamics.py",10755,0,"",python,selection_command +23,109429,"train_dynamics.py",10726,0,"",python,selection_command +24,109468,"train_dynamics.py",10693,0,"",python,selection_command +25,109513,"train_dynamics.py",10660,0,"",python,selection_command +26,109548,"train_dynamics.py",10631,0,"",python,selection_command +27,109629,"train_dynamics.py",10630,0,"",python,selection_command +28,109630,"train_dynamics.py",10599,0,"",python,selection_command +29,109723,"train_dynamics.py",10553,0,"",python,selection_command +30,109724,"train_dynamics.py",10520,0,"",python,selection_command +31,109816,"train_dynamics.py",10429,0,"",python,selection_command +32,109818,"train_dynamics.py",10333,0,"",python,selection_command +33,109818,"train_dynamics.py",10223,0,"",python,selection_command +34,109905,"train_dynamics.py",10134,0,"",python,selection_command +35,109906,"train_dynamics.py",10092,0,"",python,selection_command +36,109907,"train_dynamics.py",10037,0,"",python,selection_command +37,109998,"train_dynamics.py",10004,0,"",python,selection_command +38,109999,"train_dynamics.py",9956,0,"",python,selection_command +39,110000,"train_dynamics.py",9903,0,"",python,selection_command +40,110083,"train_dynamics.py",9843,0,"",python,selection_command +41,110085,"train_dynamics.py",9810,0,"",python,selection_command +42,110086,"train_dynamics.py",9753,0,"",python,selection_command +43,110156,"train_dynamics.py",9697,0,"",python,selection_command +44,110157,"train_dynamics.py",9638,0,"",python,selection_command 
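The train_dynamics.py source recorded above pairs Orbax with grain so that model state and the exact dataloader position are checkpointed together and a resumed run continues from the batch it stopped at. Condensed to its core (names follow the recording; `step`, `optimizer_state`, `abstract_state`, and `train_iterator` are placeholders for the objects the script builds, so this is a sketch rather than a standalone program):

import grain
import orbax.checkpoint as ocp

registry = ocp.handlers.DefaultCheckpointHandlerRegistry()
# One handler per (item name, args type): a PyTree handler for the model,
# grain's handler for the iterator, each registered for save and restore.
registry.add("model_state", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler)
registry.add("model_state", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler)
registry.add("train_dataloader_state", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler)
registry.add("train_dataloader_state", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler)
manager = ocp.CheckpointManager("/abs/ckpt_dir", handler_registry=registry)

# Save both items atomically under a single step:
manager.save(step, args=ocp.args.Composite(
    model_state=ocp.args.PyTreeSave(optimizer_state),
    train_dataloader_state=grain.checkpoint.CheckpointSave(train_iterator),
))
# Restore symmetrically; the returned iterator resumes mid-epoch:
restored = manager.restore(manager.latest_step(), args=ocp.args.Composite(
    model_state=ocp.args.PyTreeRestore(abstract_state),
    train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator),
))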
+45,110157,"train_dynamics.py",9605,0,"",python,selection_command +46,110182,"train_dynamics.py",9595,0,"",python,selection_command +47,110327,"train_dynamics.py",9552,0,"",python,selection_command +48,110491,"train_dynamics.py",9505,0,"",python,selection_command +49,110654,"train_dynamics.py",9472,0,"",python,selection_command +50,110827,"train_dynamics.py",9376,0,"",python,selection_command +51,110953,"train_dynamics.py",9278,0,"",python,selection_command +52,112493,"train_dynamics.py",9231,0,"",python,selection_command +53,114841,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab +54,121869,"train_dynamics.py",0,0,"",python,tab +55,131681,"models/dynamics.py",0,0,"from typing import Dict\n\nimport jax\nimport jax.numpy as jnp\nimport flax.nnx as nnx\nimport einops\n\nfrom utils.nn import STTransformer, Transformer\n\n\nclass DynamicsMaskGIT(nnx.Module):\n """"""\n MaskGIT dynamics model\n\n Dimension keys:\n B: batch size\n T: sequence length\n N: number of patches per frame\n L: latent dimension\n V: vocabulary size (number of latents)\n """"""\n\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_latents: int,\n latent_action_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n mask_limit: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_latents = num_latents\n self.latent_action_dim = latent_action_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.mask_limit = mask_limit\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.transformer = STTransformer(\n self.model_dim,\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=rngs)\n self.mask_token = nnx.Param(\n nnx.initializers.lecun_uniform()(rngs.params(), (1, 1, 1, self.model_dim))\n )\n self.action_up = nnx.Linear(\n self.latent_action_dim,\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(\n self, batch: Dict[str, jax.Array], training: bool = True, pred_full_frame: bool = False,\n ) -> tuple[jax.Array, jax.Array | None]:\n assert not (training and pred_full_frame), ""Cannot evaluate full frame prediction during training.""\n # --- Mask videos ---\n video_tokens_BTN = batch[""video_tokens""]\n latent_actions_BTm11L = batch[""latent_actions""]\n vid_embed_BTNM = self.patch_embed(video_tokens_BTN)\n if training:\n batch_size = vid_embed_BTNM.shape[0]\n _rng_prob, *_rngs_mask = jax.random.split(batch[""mask_rng""], batch_size + 1)\n mask_prob = jax.random.uniform(\n _rng_prob, shape=(batch_size,), minval=self.mask_limit\n )\n per_sample_shape = vid_embed_BTNM.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(jnp.asarray(_rngs_mask), mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed_BTNM = jnp.where(\n jnp.expand_dims(mask, -1), self.mask_token.value, vid_embed_BTNM\n )\n elif pred_full_frame:\n mask = jnp.zeros_like(video_tokens_BTN)\n mask = mask.at[:, -1].set(True)\n vid_embed_BTNM = jnp.where(\n jnp.expand_dims(mask, -1), self.mask_token.value, vid_embed_BTNM\n )\n else:\n mask = jnp.ones_like(video_tokens_BTN)\n\n 
# --- Predict transition ---\n act_embed_BTm11M = self.action_up(latent_actions_BTm11L)\n padded_act_embed_BT1M = jnp.pad(\n act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0))\n )\n padded_act_embed_BTNM = jnp.broadcast_to(\n padded_act_embed_BT1M, vid_embed_BTNM.shape\n )\n vid_embed_BTNM += padded_act_embed_BTNM\n logits_BTNV = self.transformer(vid_embed_BTNM)\n return logits_BTNV, mask\n\n\nclass DynamicsCausal(nnx.Module):\n """"""Causal dynamics model""""""\n\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_latents: int,\n latent_action_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_latents = num_latents\n self.latent_action_dim = latent_action_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.decode = decode\n\n self.transformer = Transformer(\n self.model_dim,\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=self.decode,\n rngs=rngs,\n )\n self.patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=rngs)\n self.action_up = nnx.Linear(\n self.latent_action_dim,\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(\n self, batch: Dict[str, jax.Array], training: bool = True, pred_full_frame: bool = False,\n ) -> tuple[jax.Array, jax.Array | None]:\n assert not (training and pred_full_frame), ""Cannot evaluate full frame prediction during training.""\n video_tokens_BTN = batch[""video_tokens""]\n latent_actions_BTm11L = batch[""latent_actions""]\n if pred_full_frame:\n # --- Extract submodule states ---\n patch_embed_state = nnx.state(self.patch_embed)\n action_up_state = nnx.state(self.action_up)\n transformer_state = nnx.state(self.transformer)\n\n def _pred_full_frame(carry, step_n):\n video_tokens_BTN, final_logits_BTNV = carry\n # We need to reconstruct submodules inside scan body to prevent trace context mismatches\n patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=nnx.Rngs(0))\n nnx.update(patch_embed, patch_embed_state)\n action_up = nnx.Linear(\n self.latent_action_dim, self.model_dim, param_dtype=self.param_dtype, dtype=self.dtype, rngs=nnx.Rngs(0)\n )\n nnx.update(action_up, action_up_state)\n transformer = Transformer(\n self.model_dim, self.model_dim, self.ffn_dim, self.num_latents, self.num_blocks, self.num_heads,\n self.dropout, self.param_dtype, self.dtype, use_flash_attention=self.use_flash_attention,\n decode=self.decode, rngs=nnx.Rngs(0)\n )\n nnx.update(transformer, transformer_state)\n\n vid_embed_BTNM = patch_embed(video_tokens_BTN)\n act_embed_BTm11M = action_up(latent_actions_BTm11L)\n padded_act_embed_BT1M = jnp.pad(\n act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0))\n )\n vid_embed_BTNp1M = jnp.concatenate(\n [padded_act_embed_BT1M, vid_embed_BTNM], axis=2\n )\n step_logits_BTNp1V = transformer(vid_embed_BTNp1M)\n step_logits_BV = step_logits_BTNp1V[:, -1, step_n, :]\n final_logits_BTNV = final_logits_BTNV.at[:, -1, step_n].set(step_logits_BV)\n sampled_token_idxs_B = jnp.argmax(step_logits_BV, axis=-1)\n video_tokens_BTN = video_tokens_BTN.at[:, -1, step_n].set(\n sampled_token_idxs_B\n 
)\n return (video_tokens_BTN, final_logits_BTNV), None\n\n (_, final_logits_BTNV), _ = jax.lax.scan(\n _pred_full_frame,\n (video_tokens_BTN, jnp.zeros((\n video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens_BTN.shape[2],\n self.num_latents))),\n jnp.arange(video_tokens_BTN.shape[2])\n )\n mask_out = jnp.zeros_like(video_tokens_BTN)\n mask_out = mask_out.at[:, -1].set(True)\n return final_logits_BTNV, mask_out\n else:\n vid_embed_BTNM = self.patch_embed(video_tokens_BTN)\n act_embed_BTm11M = self.action_up(latent_actions_BTm11L)\n padded_act_embed_BT1M = jnp.pad(\n act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0))\n )\n vid_embed_BTNp1M = jnp.concatenate(\n [padded_act_embed_BT1M, vid_embed_BTNM], axis=2\n )\n logits_BTNp1V = self.transformer(vid_embed_BTNp1M)\n logits_BTNV = logits_BTNp1V[:, :, :-1]\n return logits_BTNV, jnp.ones_like(video_tokens_BTN)\n",python,tab +56,137521,"models/dynamics.py",8329,0,"",python,selection_mouse +57,137530,"models/dynamics.py",8328,0,"",python,selection_command +58,138582,"models/dynamics.py",8209,0,"",python,selection_mouse +59,138823,"models/dynamics.py",8209,2,"vi",python,selection_mouse +60,138824,"models/dynamics.py",8209,50,"video_tokens_BTN.shape[0],\n vid",python,selection_mouse +61,138826,"models/dynamics.py",8209,52,"video_tokens_BTN.shape[0],\n video",python,selection_mouse +62,138888,"models/dynamics.py",8209,53,"video_tokens_BTN.shape[0],\n video_",python,selection_mouse +63,138889,"models/dynamics.py",8209,54,"video_tokens_BTN.shape[0],\n video_t",python,selection_mouse +64,138973,"models/dynamics.py",8209,55,"video_tokens_BTN.shape[0],\n video_to",python,selection_mouse +65,139022,"models/dynamics.py",8209,103,"video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tok",python,selection_mouse +66,139078,"models/dynamics.py",8209,104,"video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_toke",python,selection_mouse +67,139208,"models/dynamics.py",8209,105,"video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_token",python,selection_mouse +68,139289,"models/dynamics.py",8209,106,"video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens",python,selection_mouse +69,139344,"models/dynamics.py",8209,107,"video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens_",python,selection_mouse +70,139399,"models/dynamics.py",8209,108,"video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens_B",python,selection_mouse +71,139441,"models/dynamics.py",8209,109,"video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens_BT",python,selection_mouse +72,139488,"models/dynamics.py",8209,110,"video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens_BTN",python,selection_mouse +73,139527,"models/dynamics.py",8209,111,"video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens_BTN.",python,selection_mouse +74,139559,"models/dynamics.py",8209,112,"video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens_BTN.s",python,selection_mouse +75,139579,"models/dynamics.py",8209,113,"video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens_BTN.sh",python,selection_mouse +76,139632,"models/dynamics.py",8209,114,"video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens_BTN.sha",python,selection_mouse +77,139663,"models/dynamics.py",8209,115,"video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens_BTN.shap",python,selection_mouse 
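The edit captured in the rows above (and committed shortly after as "use list unpacking for shape") replaces three explicit `video_tokens_BTN.shape[i]` lines with iterable unpacking of the whole shape tuple. The idiom in a tiny runnable form (toy shapes, not the recording's):

import jax.numpy as jnp

video_tokens_BTN = jnp.zeros((2, 16, 90), dtype=jnp.int32)
num_latents = 1024
# Before: spell out each leading axis by index.
logits_a = jnp.zeros((video_tokens_BTN.shape[0],
                      video_tokens_BTN.shape[1],
                      video_tokens_BTN.shape[2],
                      num_latents))
# After: unpack the full shape tuple with a leading `*`.
logits_b = jnp.zeros((*video_tokens_BTN.shape, num_latents))
assert logits_a.shape == logits_b.shape == (2, 16, 90, 1024)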
+78,139685,"models/dynamics.py",8209,116,"video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens_BTN.shape",python,selection_mouse +79,139775,"models/dynamics.py",8209,117,"video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens_BTN.shape[",python,selection_mouse +80,139798,"models/dynamics.py",8209,118,"video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens_BTN.shape[2",python,selection_mouse +81,139862,"models/dynamics.py",8209,119,"video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens_BTN.shape[2]",python,selection_mouse +82,142231,"models/dynamics.py",8327,0,"",python,selection_command +83,142759,"models/dynamics.py",8281,0,"",python,selection_command +84,143169,"models/dynamics.py",8328,0,"",python,selection_command +85,144801,"models/dynamics.py",8283,46," video_tokens_BTN.shape[2],",python,selection_command +86,145022,"models/dynamics.py",8236,93," video_tokens_BTN.shape[1],\n video_tokens_BTN.shape[2],",python,selection_command +87,145177,"models/dynamics.py",8189,140," video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens_BTN.shape[2],",python,selection_command +88,145711,"models/dynamics.py",8235,0,"",python,selection_command +89,146298,"models/dynamics.py",8327,2,"",python,content +90,146299,"models/dynamics.py",8280,2,"",python,content +91,146299,"models/dynamics.py",8233,2,"",python,content +92,147160,"models/dynamics.py",8322,1,"",python,content +93,147160,"models/dynamics.py",8277,1,"",python,content +94,147161,"models/dynamics.py",8232,1,"",python,content +95,147586,"models/dynamics.py",8319,1,"",python,content +96,147588,"models/dynamics.py",8275,1,"",python,content +97,147588,"models/dynamics.py",8231,1,"",python,content +98,149974,"models/dynamics.py",8230,0,"",python,selection_command +99,150366,"models/dynamics.py",8273,0,"",python,selection_command +100,150786,"models/dynamics.py",8232,86,"",python,content +101,150832,"models/dynamics.py",8252,0,"",python,selection_command +102,151122,"models/dynamics.py",8230,0,"",python,selection_command +103,151488,"models/dynamics.py",8231,0,"",python,selection_command +104,153678,"models/dynamics.py",8231,0,",",python,content +105,153681,"models/dynamics.py",8232,0,"",python,selection_keyboard +106,153834,"models/dynamics.py",8231,0,"",python,selection_command +107,154365,"models/dynamics.py",8209,0,"",python,selection_command +108,155605,"models/dynamics.py",8209,0,"*",python,content +109,155607,"models/dynamics.py",8210,0,"",python,selection_keyboard +110,155736,"models/dynamics.py",8210,0,"*",python,content +111,155737,"models/dynamics.py",8211,0,"",python,selection_keyboard +112,155963,"models/dynamics.py",8210,0,"",python,selection_command +113,158081,"models/dynamics.py",8420,0,"",python,selection_mouse +114,160572,"models/dynamics.py",8311,0,"",python,selection_mouse +115,183815,"models/dynamics.py",3920,0,"",python,selection_mouse +116,203479,"models/dynamics.py",72,0,"",python,selection_mouse +117,204077,"models/dynamics.py",94,0,"",python,selection_mouse +118,205127,"models/dynamics.py",83,14,"",python,content +119,211347,"models/dynamics.py",5527,0,"",python,selection_mouse +120,248355,"models/dynamics.py",0,0,"",python,tab +121,249233,"models/dynamics.py",8712,0,"",python,selection_mouse +122,249825,"models/dynamics.py",8929,0,"",python,selection_mouse +123,257875,"TERMINAL",0,0,"git diff",,terminal_command +124,258002,"TERMINAL",0,0,"]633;C[?1h=\rdiff --git a/models/dynamics.py b/models/dynamics.py\r\nindex 3df9f41..74fde10 100644\r\n--- 
a/models/dynamics.py\r\n+++ b/models/dynamics.py\r\n@@ -3,7 +3,6 @@ from typing import Dict\r\n import jax\r\n import jax.numpy as jnp\r\n import flax.nnx as nnx\r\n-import einops\r\n \r\n from utils.nn import STTransformer, Transformer\r\n:",,terminal_output +125,258848,"TERMINAL",0,0,"\r \r\n:",,terminal_output +126,259057,"TERMINAL",0,0,"\r@@ -218,9 +217,7 @@ class DynamicsCausal(nnx.Module):\r\n:",,terminal_output +127,259182,"TERMINAL",0,0,"\r (_, final_logits_BTNV), _ = jax.lax.scan(\r\n:",,terminal_output +128,259331,"TERMINAL",0,0,"\r _pred_full_frame,\r\n:",,terminal_output +129,259443,"TERMINAL",0,0,"\r (video_tokens_BTN, jnp.zeros((\r\n:",,terminal_output +130,259585,"TERMINAL",0,0,"\r- video_tokens_BTN.shape[0],\r\n:",,terminal_output +131,259717,"TERMINAL",0,0,"\r- video_tokens_BTN.shape[1],\r\n:",,terminal_output +132,259859,"TERMINAL",0,0,"\r- video_tokens_BTN.shape[2],\r\n:",,terminal_output +133,260033,"TERMINAL",0,0,"\r+ **video_tokens_BTN.shape,\r\n:",,terminal_output +134,260172,"TERMINAL",0,0,"\r self.num_latents))),\r\n:",,terminal_output +135,260303,"TERMINAL",0,0,"\r jnp.arange(video_tokens_BTN.shape[2])\r\n:",,terminal_output +136,260442,"TERMINAL",0,0,"\r )\r\n:",,terminal_output +137,260553,"TERMINAL",0,0,"\r\r(END)",,terminal_output +138,260736,"TERMINAL",0,0,"\r\r(END)",,terminal_output +139,260911,"TERMINAL",0,0,"\r\r(END)",,terminal_output +140,261016,"TERMINAL",0,0,"\r\r(END)",,terminal_output +141,261737,"TERMINAL",0,0,"\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output +142,268310,"TERMINAL",0,0,"git commit -am ""use list unpacking for shape""",,terminal_command +143,268336,"TERMINAL",0,0,"]633;C",,terminal_output +144,268458,"TERMINAL",0,0,"g",,terminal_output +145,268551,"TERMINAL",0,0,"i[val-loss 52dcd52] use list unpacking for shape\r\n 1 file changed, 1 insertion(+), 4 deletions(-)\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output +146,269408,"TERMINAL",0,0,"git push",,terminal_command +147,269470,"TERMINAL",0,0,"]633;C",,terminal_output +148,270803,"TERMINAL",0,0,"Enumerating objects: 7, done.\r\nCounting objects: 14% (1/7)\rCounting objects: 28% (2/7)\rCounting objects: 42% (3/7)\rCounting objects: 57% (4/7)\rCounting objects: 71% (5/7)\rCounting objects: 85% (6/7)\rCounting objects: 100% (7/7)\rCounting objects: 100% (7/7), done.\r\nDelta compression using up to 152 threads\r\nCompressing objects: 25% (1/4)\rCompressing objects: 50% (2/4)\rCompressing objects: 75% (3/4)\rCompressing objects: 100% (4/4)\rCompressing objects: 100% (4/4), done.\r\nWriting objects: 25% (1/4)\rWriting objects: 50% (2/4)\rWriting objects: 75% (3/4)\rWriting objects: 100% (4/4)\rWriting objects: 100% (4/4), 452 bytes | 452.00 KiB/s, done.\r\nTotal 4 (delta 2), reused 0 (delta 0), pack-reused 0\r\n",,terminal_output +149,270897,"TERMINAL",0,0,"remote: Resolving deltas: 0% (0/2)\rremote: Resolving deltas: 50% (1/2)\rremote: Resolving deltas: 100% (2/2)\rremote: Resolving deltas: 100% (2/2), completed with 2 local objects.\r\n",,terminal_output +150,271127,"TERMINAL",0,0,"To github.com:p-doom/jasmine.git\r\n ca0bd15..52dcd52 val-loss -> val-loss\r\n",,terminal_output +151,271260,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output +152,358613,"TERMINAL",0,0,"precommit-install",,terminal_command +153,358675,"TERMINAL",0,0,"]633;Cbash: precommit-install: command not found...\r\n",,terminal_output +154,359909,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output +155,363658,"TERMINAL",0,0,"precommit 
install",,terminal_command +156,363705,"TERMINAL",0,0,"]633;Cbash: precommit: command not found...\r\n",,terminal_output +157,363809,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output +158,368658,"TERMINAL",0,0,"pre-commit install",,terminal_command +159,368724,"TERMINAL",0,0,"]633;Cbash: pre-commit: command not found...\r\n",,terminal_output +160,368803,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output +161,372748,"TERMINAL",0,0,"pr^C",,terminal_command +162,375921,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command +163,377289,"TERMINAL",0,0,"pre-commit install",,terminal_command +164,377354,"TERMINAL",0,0,"]633;C",,terminal_output +165,380443,"TERMINAL",0,0,"pre-commit installed at .git/hooks/pre-commit\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output +166,473223,"models/dynamics.py",8498,0,"",python,selection_mouse +167,473264,"models/dynamics.py",8497,0,"",python,selection_command +168,475871,"TERMINAL",0,0,"git status",,terminal_command +169,475912,"TERMINAL",0,0,"]633;COn branch val-loss\r\nYour branch is up to date with 'origin/val-loss'.\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tdata/\r\n\tdiff.diff\r\n\tkiller.sh\r\n\tkiller_partition.sh\r\n\tlog.log\r\n\tlogs/\r\n\toverfit_dir.zip\r\n\trequirements-franz.txt\r\n\tsamples/\r\n\tscripts_cremers/\r\n\tslurm/\r\n\tutils/visualizer.py\r\n\r\nnothing added to commit but untracked files present (use ""git add"" to track)\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output +170,553461,"models/dynamics.py",6718,0,"",python,selection_mouse +171,625022,"TERMINAL",0,0,"git pull",,terminal_command +172,625075,"TERMINAL",0,0,"]633;C",,terminal_output +173,628256,"TERMINAL",0,0,"remote: Enumerating objects: 16, done.\r\nremote: Counting objects: 6% (1/16)\rremote: Counting objects: 12% (2/16)\rremote: Counting objects: 18% (3/16)\rremote: Counting objects: 25% (4/16)\rremote: Counting objects: 31% (5/16)\rremote: Counting objects: 37% (6/16)\rremote: Counting objects: 43% (7/16)\rremote: Counting objects: 50% (8/16)\rremote: Counting objects: 56% (9/16)\rremote: Counting objects: 62% (10/16)\rremote: Counting objects: 68% (11/16)\rremote: Counting objects: 75% (12/16)\rremote: Counting objects: 81% (13/16)\rremote: Counting objects: 87% (14/16)\rremote: Counting objects: 93% (15/16)\rremote: Counting objects: 100% (16/16)\rremote: Counting objects: 100% (16/16), done.\r\nremote: Compressing objects: 10% (1/10)\rremote: Compressing objects: 20% (2/10)\rremote: Compressing objects: 30% (3/10)\rremote: Compressing objects: 40% (4/10)\rremote: Compressing objects: 50% (5/10)\rremote: Compressing objects: 60% (6/10)\rremote: Compressing objects: 70% (7/10)\rremote: Compressing objects: 80% (8/10)\rremote: Compressing objects: 90% (9/10)\rremote: Compressing objects: 100% (10/10)\rremote: Compressing objects: 100% (10/10), done.\r\nremote: Total 16 (delta 5), reused 15 (delta 5), pack-reused 0 (from 0)\r\nUnpacking objects: 6% (1/16)\rUnpacking objects: 12% (2/16)\rUnpacking objects: 18% (3/16)\rUnpacking objects: 25% (4/16)\rUnpacking objects: 31% (5/16)\rUnpacking objects: 37% (6/16)\rUnpacking objects: 43% (7/16)\r",,terminal_output +174,628386,"TERMINAL",0,0,"Unpacking objects: 50% (8/16)\rUnpacking objects: 56% (9/16)\rUnpacking objects: 62% (10/16)\rUnpacking objects: 68% (11/16)\rUnpacking objects: 75% (12/16)\rUnpacking objects: 81% (13/16)\rUnpacking objects: 87% (14/16)\rUnpacking objects: 93% 
(15/16)\rUnpacking objects: 100% (16/16)\rUnpacking objects: 100% (16/16), 11.61 KiB | 58.00 KiB/s, done.\r\n",,terminal_output +175,628716,"TERMINAL",0,0,"From github.com:p-doom/jasmine\r\n * [new branch] dqn-replay-preprocessing -> origin/dqn-replay-preprocessing\r\n 804c77e..7323b66 huggingface-download-in-folder -> origin/huggingface-download-in-folder\r\n 9ed9449..ca0cb23 main -> origin/main\r\n * [new branch] remove-restore-branching -> origin/remove-restore-branching\r\n",,terminal_output +176,628731,"TERMINAL",0,0,"Already up to date.\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output +177,638286,"TERMINAL",0,0,"git checkout remove-restore-branching",,terminal_command +178,638337,"TERMINAL",0,0,"]633;C",,terminal_output +179,638518,"TERMINAL",0,0,"branch 'remove-restore-branching' set up to track 'origin/remove-restore-branching'.\r\nSwitched to a new branch 'remove-restore-branching'\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output +180,638783,"models/dynamics.py",83,8138,"import einops\n\nfrom utils.nn import STTransformer, Transformer\n\n\nclass DynamicsMaskGIT(nnx.Module):\n """"""\n MaskGIT dynamics model\n\n Dimension keys:\n B: batch size\n T: sequence length\n N: number of patches per frame\n L: latent dimension\n V: vocabulary size (number of latents)\n """"""\n\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_latents: int,\n latent_action_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n mask_limit: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_latents = num_latents\n self.latent_action_dim = latent_action_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.mask_limit = mask_limit\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.transformer = STTransformer(\n self.model_dim,\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=rngs)\n self.mask_token = nnx.Param(\n nnx.initializers.lecun_uniform()(rngs.params(), (1, 1, 1, self.model_dim))\n )\n self.action_up = nnx.Linear(\n self.latent_action_dim,\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(\n self, batch: Dict[str, jax.Array], training: bool = True, pred_full_frame: bool = False,\n ) -> tuple[jax.Array, jax.Array | None]:\n assert not (training and pred_full_frame), ""Cannot evaluate full frame prediction during training.""\n # --- Mask videos ---\n video_tokens_BTN = batch[""video_tokens""]\n latent_actions_BTm11L = batch[""latent_actions""]\n vid_embed_BTNM = self.patch_embed(video_tokens_BTN)\n if training:\n batch_size = vid_embed_BTNM.shape[0]\n _rng_prob, *_rngs_mask = jax.random.split(batch[""mask_rng""], batch_size + 1)\n mask_prob = jax.random.uniform(\n _rng_prob, shape=(batch_size,), minval=self.mask_limit\n )\n per_sample_shape = vid_embed_BTNM.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(jnp.asarray(_rngs_mask), mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed_BTNM = jnp.where(\n jnp.expand_dims(mask, -1), self.mask_token.value, vid_embed_BTNM\n )\n elif 
pred_full_frame:\n mask = jnp.zeros_like(video_tokens_BTN)\n mask = mask.at[:, -1].set(True)\n vid_embed_BTNM = jnp.where(\n jnp.expand_dims(mask, -1), self.mask_token.value, vid_embed_BTNM\n )\n else:\n mask = jnp.ones_like(video_tokens_BTN)\n\n # --- Predict transition ---\n act_embed_BTm11M = self.action_up(latent_actions_BTm11L)\n padded_act_embed_BT1M = jnp.pad(\n act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0))\n )\n padded_act_embed_BTNM = jnp.broadcast_to(\n padded_act_embed_BT1M, vid_embed_BTNM.shape\n )\n vid_embed_BTNM += padded_act_embed_BTNM\n logits_BTNV = self.transformer(vid_embed_BTNM)\n return logits_BTNV, mask\n\n\nclass DynamicsCausal(nnx.Module):\n """"""Causal dynamics model""""""\n\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_latents: int,\n latent_action_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_latents = num_latents\n self.latent_action_dim = latent_action_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.decode = decode\n\n self.transformer = Transformer(\n self.model_dim,\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=self.decode,\n rngs=rngs,\n )\n self.patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=rngs)\n self.action_up = nnx.Linear(\n self.latent_action_dim,\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(\n self, batch: Dict[str, jax.Array], training: bool = True, pred_full_frame: bool = False,\n ) -> tuple[jax.Array, jax.Array | None]:\n assert not (training and pred_full_frame), ""Cannot evaluate full frame prediction during training.""\n video_tokens_BTN = batch[""video_tokens""]\n latent_actions_BTm11L = batch[""latent_actions""]\n if pred_full_frame:\n # --- Extract submodule states ---\n patch_embed_state = nnx.state(self.patch_embed)\n action_up_state = nnx.state(self.action_up)\n transformer_state = nnx.state(self.transformer)\n\n def _pred_full_frame(carry, step_n):\n video_tokens_BTN, final_logits_BTNV = carry\n # We need to reconstruct submodules inside scan body to prevent trace context mismatches\n patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=nnx.Rngs(0))\n nnx.update(patch_embed, patch_embed_state)\n action_up = nnx.Linear(\n self.latent_action_dim, self.model_dim, param_dtype=self.param_dtype, dtype=self.dtype, rngs=nnx.Rngs(0)\n )\n nnx.update(action_up, action_up_state)\n transformer = Transformer(\n self.model_dim, self.model_dim, self.ffn_dim, self.num_latents, self.num_blocks, self.num_heads,\n self.dropout, self.param_dtype, self.dtype, use_flash_attention=self.use_flash_attention,\n decode=self.decode, rngs=nnx.Rngs(0)\n )\n nnx.update(transformer, transformer_state)\n\n vid_embed_BTNM = patch_embed(video_tokens_BTN)\n act_embed_BTm11M = action_up(latent_actions_BTm11L)\n padded_act_embed_BT1M = jnp.pad(\n act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0))\n )\n vid_embed_BTNp1M = jnp.concatenate(\n [padded_act_embed_BT1M, vid_embed_BTNM], axis=2\n )\n step_logits_BTNp1V = transformer(vid_embed_BTNp1M)\n step_logits_BV = 
step_logits_BTNp1V[:, -1, step_n, :]\n final_logits_BTNV = final_logits_BTNV.at[:, -1, step_n].set(step_logits_BV)\n sampled_token_idxs_B = jnp.argmax(step_logits_BV, axis=-1)\n video_tokens_BTN = video_tokens_BTN.at[:, -1, step_n].set(\n sampled_token_idxs_B\n )\n return (video_tokens_BTN, final_logits_BTNV), None\n\n (_, final_logits_BTNV), _ = jax.lax.scan(\n _pred_full_frame,\n (video_tokens_BTN, jnp.zeros((\n video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens_BTN.shape[2],\n",python,content +181,640688,"models/dynamics.py",0,0,"Switched from branch 'val-loss' to 'remove-restore-branching'",python,git_branch_checkout +182,649591,"TERMINAL",0,0,"",,terminal_focus +183,651971,"TERMINAL",0,0,"source /home/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/bin/activate",,terminal_command +184,652053,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output +185,668669,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=accelerated --nodes=1 --gres=gpu:1 --cpus-per-task=8",,terminal_command +186,668734,"TERMINAL",0,0,"]633;Csalloc: Pending job allocation 3497225\r\nsalloc: job 3497225 queued and waiting for resources\r\n",,terminal_output +187,678935,"TERMINAL",0,0,"",,terminal_focus +188,681126,"TERMINAL",0,0,"source /home/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/bin/activate",,terminal_command +189,681201,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output +190,714794,"TERMINAL",0,0,"idling",,terminal_command +191,714892,"TERMINAL",0,0,"]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1990.localdomain: Mon Sep 15 20:14:12 2025Partition dev_cpuonly: 11 nodes idle\rPartition cpuonly: 13 nodes idle\rPartition dev_accelerated:\t 1 nodes idle\rPartition accelerated:\t 3 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 5 nodes idle\rPartition accelerated-h200:\t 0 nodes idle",,terminal_output +192,715947,"TERMINAL",0,0,"3",,terminal_output +193,716915,"TERMINAL",0,0,"4",,terminal_output +194,717319,"TERMINAL",0,0,"salloc",,terminal_focus +195,717986,"TERMINAL",0,0,"5",,terminal_output +196,718566,"TERMINAL",0,0,"watch",,terminal_focus +197,718994,"TERMINAL",0,0,"7",,terminal_output +198,720144,"TERMINAL",0,0,"8",,terminal_output +199,721071,"TERMINAL",0,0,"94",,terminal_output +200,721451,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output +201,730564,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --gres=gpu:1 --cpus-per-task=8",,terminal_command +202,730691,"TERMINAL",0,0,"]633;Csalloc: Granted job allocation 3497227\r\n",,terminal_output +203,730807,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output +204,741373,"TERMINAL",0,0,"salloc",,terminal_focus +205,742356,"TERMINAL",0,0,"^Csalloc: Job allocation 3497225 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output +206,744026,"TERMINAL",0,0,"salloc",,terminal_focus +207,759164,"TERMINAL",0,0,"salloc: Prolog hung on node hkn0403\r\n",,terminal_output +208,764603,"TERMINAL",0,0,"salloc: Nodes hkn0403 are ready for job\r\n",,terminal_output +209,765675,"TERMINAL",0,0,"]0;tum_cte0515@hkn0403:~/Projects/jasmine[?2004h[tum_cte0515@hkn0403 jasmine]$ ",,terminal_output +210,774098,"TERMINAL",0,0,"s",,terminal_output +211,774529,"TERMINAL",0,0,"o",,terminal_output +212,774627,"TERMINAL",0,0,"u",,terminal_output 
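The `_pred_full_frame` body restored above decodes the final frame autoregressively inside `jax.lax.scan`: each iteration predicts logits for one patch position of the last frame, writes them into an output buffer, and feeds the argmax token back into the grid before predicting the next position. A toy, self-contained sketch of that control flow (a stand-in "transformer" and toy sizes, not the recorded model):

import jax
import jax.numpy as jnp

B, T, N, V = 2, 4, 5, 7  # batch, time, patches per frame, vocab (toy sizes)

def toy_logits(tokens_BTN):
    # Stand-in for the transformer: any deterministic map tokens -> logits.
    return jnp.ones((B, T, N, V)) * tokens_BTN[..., None]

def step(carry, n):
    tokens_BTN, logits_BTNV = carry
    step_logits_BV = toy_logits(tokens_BTN)[:, -1, n, :]
    logits_BTNV = logits_BTNV.at[:, -1, n].set(step_logits_BV)
    sampled_B = jnp.argmax(step_logits_BV, axis=-1)   # greedy token choice
    tokens_BTN = tokens_BTN.at[:, -1, n].set(sampled_B)  # feed back for next slot
    return (tokens_BTN, logits_BTNV), None

tokens = jnp.zeros((B, T, N), dtype=jnp.int32)
init_logits = jnp.zeros((B, T, N, V))
(tokens, final_logits), _ = jax.lax.scan(step, (tokens, init_logits), jnp.arange(N))
assert final_logits.shape == (B, T, N, V)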
+213,774911,"TERMINAL",0,0,"r",,terminal_output +214,775109,"TERMINAL",0,0,"c",,terminal_output +215,775167,"TERMINAL",0,0,"e",,terminal_output +216,775265,"TERMINAL",0,0," ",,terminal_output +217,775373,"TERMINAL",0,0,",",,terminal_output +218,775813,"TERMINAL",0,0,"",,terminal_output +219,776119,"TERMINAL",0,0,"v",,terminal_output +220,776802,"TERMINAL",0,0,"",,terminal_output +221,777054,"TERMINAL",0,0,".",,terminal_output +222,777206,"TERMINAL",0,0,"v",,terminal_output +223,777317,"TERMINAL",0,0,"e",,terminal_output +224,777590,"TERMINAL",0,0,"nv/",,terminal_output +225,777794,"TERMINAL",0,0,"b",,terminal_output +226,777932,"TERMINAL",0,0,"in/",,terminal_output +227,778259,"TERMINAL",0,0,"ac",,terminal_output +228,778444,"TERMINAL",0,0,"tivate",,terminal_output +229,778602,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0403:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0403 jasmine]$ ",,terminal_output +230,787591,"TERMINAL",0,0,"\r(reverse-i-search)`': ",,terminal_output +231,787790,"TERMINAL",0,0,"s': source .venv/bin/activate\rh': . ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/cli/servers/Stable-2f2737de9aa376933d975ae30290447c910fdf40/server/out/vs/workbench/contrib/terminal/common/scripts/shellIntegration-bash.sh""",,terminal_output +232,789709,"TERMINAL",0,0,"\r ': sh slurm/jobs/mihir/horeka/preprocessing/preprocess_atari.sbatch > log.log\r\n\r",,terminal_output +233,791354,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0403:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0403 jasmine]$ ",,terminal_output +234,804127,"slurm/jobs/mihir/horeka/coinrun/train_dyn_single_gpu.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=00:20:00\n#SBATCH --partition=dev_accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/lam/%x_%j.log\n#SBATCH --job-name=train_lam_1e-4\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\n# array_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_test\narray_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m_split/train\n# array_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m_chunked\narray_records_dir_val=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m_split/val\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/dyn/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nlam_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/lam/interactive/3483568\ntokenizer_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/interactive/3483568\n\nenv | grep SLURM\n\nexport PYTHONUNBUFFERED=1\n\nsrun python train_dynamics.py \\n --save_ckpt \\n --image_height=64 \\n --image_width=64 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=110 \\n --init_lr=0 \\n --max_lr=1e-4 \\n --log_image_interval=250 \\n --log_checkpoint_interval=250 \\n 
--dyna_type=maskgit \\n --log \\n --name=coinrun-dyn-dev-$slurm_job_id \\n --tags dyn coinrun dev \\n --entity instant-uv \\n --project jafar \\n --warmup_steps 0 \\n --wsd_decay_steps 0 \\n --num_steps 50 \\n --data_dir $array_records_dir_train \\n --tokenizer_checkpoint $tokenizer_checkpoint \\n --val_data_dir $array_records_dir_val \\n --val_interval 10 \\n --val_steps 5\n",shellscript,tab +235,811290,"slurm/jobs/mihir/horeka/coinrun/train_dyn_single_gpu.sh",2190,0,"",shellscript,selection_mouse +236,817190,"TERMINAL",0,0,"sh",,terminal_output +237,817191,"TERMINAL",0,0," ",,terminal_output +238,817444,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/coinrun/train_dyn_single_gpu.sh",,terminal_output +239,817711,"TERMINAL",0,0,"\rslurm/jobs/mihir/horeka/coinrun/train_dyn_single_gpu.sh\r\n[?2004l\r#!/usr/bin/env bash\r\n\r\n#SBATCH --nodes=1\r\n#SBATCH --ntasks-per-node=4\r\n#SBATCH --time=00:20:00\r\n#SBATCH --partition=dev_accelerated\r\n#SBATCH --cpus-per-task=5\r\n#SBATCH --gres=gpu:1\r\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/lam/%x_%j.log\r\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/lam/%x_%j.log\r\n#SBATCH --job-name=train_lam_1e-4\r\n#SBATCH --requeue\r\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\r\n\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\n# array_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_test\r\narray_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m_split/train\r\n# array_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m_chunked\r\narray_records_dir_val=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m_split/val\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/dyn/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\nlam_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/lam/interactive/3483568\r\ntokenizer_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/interactive/3483568\r\n\r\nenv | grep SLURM\r\n\r\nexport PYTHONUNBUFFERED=1\r\n\r\nsrun python train_dynamics.py \\r\n --save_ckpt \\r\n --image_height=64 \\r\n --image_width=64 \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=110 \\r\n --init_lr=0 \\r\n --max_lr=1e-4 \\r\n --log_image_interval=250 \\r\n --log_checkpoint_interval=250 \\r\n --dyna_type=maskgit \\r\n --log \\r\n --name=coinrun-dyn-dev-$slurm_job_id \\r\n --tags dyn coinrun dev \\r\n --entity instant-uv \\r\n --project jafar \\r\n --warmup_steps 0 \\r\n --wsd_decay_steps 0 \\r\n --num_steps 50 \\r\n --data_dir $array_records_dir_train \\r\n --tokenizer_checkpoint $tokenizer_checkpoint \\r\n --val_data_dir $array_records_dir_val \\r\n --val_interval 10 \\r\n --val_steps 5\r\n",,terminal_output 
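The sbatch script echoed above configures `train_dynamics.py` entirely through CLI flags (`--batch_size=110`, `--max_lr=1e-4`, `--tags dyn coinrun dev`, ...); the script's `args = tyro.cli(Args)` entry point, visible later in this recording, maps those flags onto a dataclass. A minimal sketch of that mechanism, assuming only that `tyro` is installed and using a trimmed-down stand-in for the real `Args`:

from dataclasses import dataclass, field

import tyro

@dataclass
class Args:
    # A few representative fields from the much larger Args in train_dynamics.py.
    batch_size: int = 36
    max_lr: float = 3e-5
    num_steps: int = 200_000
    tags: list[str] = field(default_factory=lambda: ["dynamics"])

if __name__ == "__main__":
    # e.g.: python demo.py --batch_size=110 --max_lr=1e-4 --num_steps 50 --tags dyn coinrun dev
    args = tyro.cli(Args)
    print(args)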
+240,817855,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=3957988\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine\r\nSLURMD_NODENAME=hkn0403\r\nSLURM_JOB_START_TIME=1757960068\r\nSLURM_STEP_NODELIST=hkn0403\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1757963668\r\nSLURM_PMI2_SRUN_PORT=44065\r\nSLURM_CPUS_ON_NODE=8\r\nSLURM_JOB_CPUS_PER_NODE=8\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=dev_accelerated\r\nSLURM_TRES_PER_TASK=cpu=8\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3497227\r\nSLURM_PTY_PORT=36975\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=33\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=8\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e11.hkn0403\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=181\r\nSLURM_NODELIST=hkn0403\r\nSLURM_SRUN_COMM_PORT=41927\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3497227\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0403\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_STEP_LAUNCHER_PORT=41927\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0403\r\n",,terminal_output +241,817968,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +242,837097,"TERMINAL",0,0,"Running on 1 devices.\r\n",,terminal_output +243,844157,"TERMINAL",0,0,"Counting all components: ['dynamics', 'lam', 'tokenizer']\r\n",,terminal_output +244,844580,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +245,845460,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.21.3\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/wandb/run-20250915_201622-c19jp68p\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run coinrun-dyn-dev-3497227\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/c19jp68p\r\n",,terminal_output +246,846202,"TERMINAL",0,0,"Parameter counts:\r\n{'dynamics': 26555904, 'lam': 17640416, 'tokenizer': 33750256, 'total': 77946576}\r\n",,terminal_output +247,851245,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1269: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +248,872626,"TERMINAL",0,0,"2025-09-15 20:16:50.619036: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-09-15 20:16:50.620710: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +249,922007,"TERMINAL",0,0,"Total memory size: 25.5 GB, Output size: 0.9 GB, Temp size: 24.6 GB, Argument size: 0.9 GB, Host temp size: 0.0 GB.\r\nFLOPs: 5.213e+12, Bytes: 7.365e+11 (686.0 GB), Intensity: 7.1 FLOPs/byte\r\nStarting training from step 0...\r\n",,terminal_output +250,922381,"TERMINAL",0,0,"\r\nMemstats: After params initialized:\r\n\tUsing (GB) 1.14 / 38.7 (2.945736%) on cuda:0\r\n",,terminal_output +251,924243,"TERMINAL",0,0,"Step 0, loss: 17.954784393310547\r\n",,terminal_output +252,953318,"TERMINAL",0,0,"bash",,terminal_focus +253,953960,"TERMINAL",0,0,"Step 1, loss: 6.460620403289795\r\n",,terminal_output +254,955704,"TERMINAL",0,0,"Step 2, loss: 14.903913497924805\r\n",,terminal_output +255,957515,"TERMINAL",0,0,"Step 3, loss: 9.082958221435547\r\n",,terminal_output +256,957532,"TERMINAL",0,0,"python",,terminal_command +257,957654,"TERMINAL",0,0,"]633;CPython 3.10.18 (main, Jun 4 2025, 17:36:27) [Clang 20.1.4 ] on linux\r\nType ""help"", ""copyright"", ""credits"" or ""license"" for more information.\r\n",,terminal_output +258,957859,"TERMINAL",0,0,">>> ",,terminal_output +259,959347,"TERMINAL",0,0,"Step 4, loss: 11.08510971069336\r\n",,terminal_output +260,959709,"TERMINAL",0,0,"a",,terminal_output +261,959920,"TERMINAL",0,0," ",,terminal_output +262,960118,"TERMINAL",0,0,"=",,terminal_output +263,960174,"TERMINAL",0,0," ",,terminal_output +264,960667,"TERMINAL",0,0,"[",,terminal_output +265,960840,"TERMINAL",0,0,"1",,terminal_output +266,961048,"TERMINAL",0,0,",",,terminal_output +267,961135,"TERMINAL",0,0,"Step 5, loss: 9.383418083190918\r\n",,terminal_output +268,961376,"TERMINAL",0,0,"2,",,terminal_output +269,961587,"TERMINAL",0,0,"3",,terminal_output +270,962433,"TERMINAL",0,0,"]",,terminal_output +271,962584,"TERMINAL",0,0,"\r\n>>> ",,terminal_output +272,962965,"TERMINAL",0,0,"Step 6, loss: 9.63429069519043\r\n",,terminal_output +273,965132,"TERMINAL",0,0,"p",,terminal_output +274,965319,"TERMINAL",0,0,"Step 7, loss: 7.117089748382568\r\n",,terminal_output +275,965319,"TERMINAL",0,0,"ri",,terminal_output +276,965444,"TERMINAL",0,0,"n",,terminal_output +277,965560,"TERMINAL",0,0,"t",,terminal_output +278,965844,"TERMINAL",0,0,"(",,terminal_output +279,966710,"TERMINAL",0,0,"*",,terminal_output +280,966863,"TERMINAL",0,0,"*",,terminal_output +281,966970,"TERMINAL",0,0,"a",,terminal_output +282,967090,"TERMINAL",0,0,"Step 8, loss: 3.8622961044311523\r\n",,terminal_output +283,968409,"TERMINAL",0,0,"",,terminal_output +284,968595,"TERMINAL",0,0,"",,terminal_output +285,968951,"TERMINAL",0,0,"",,terminal_output +286,968960,"TERMINAL",0,0,"Step 9, loss: 4.703470706939697\r\nCalculating validation metrics...\r\n",,terminal_output +287,969698,"TERMINAL",0,0,"\r(**a",,terminal_output +288,970320,"TERMINAL",0,0,"*",,terminal_output +289,970443,"TERMINAL",0,0,"*",,terminal_output +290,970629,"TERMINAL",0,0,"a",,terminal_output +291,971351,"TERMINAL",0,0,",",,terminal_output +292,971460,"TERMINAL",0,0," ",,terminal_output +293,973018,"TERMINAL",0,0,"4",,terminal_output 
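The interactive Python session interleaved with the training log around this point probes tuple unpacking: `print((**a, 4))` raises the SyntaxError captured just below, while the corrected `print((*a, 4))` later yields `(1, 2, 3, 4)`. A minimal sketch of the distinction:

a = [1, 2, 3]

# *iterable unpacking is legal inside a tuple display:
print((*a, 4))               # (1, 2, 3, 4)

# **mapping unpacking is only legal in function calls and dict displays,
# so (**a, 4) is invalid syntax -- exactly the traceback recorded below.
print({**{"k": 1}, "j": 2})  # {'k': 1, 'j': 2}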
+294,973753,"TERMINAL",0,0,")",,terminal_output +295,974375,"TERMINAL",0,0,"\r\n File """", line 1\r\n print((**a, 4)\r\n ^^\r\nSyntaxError: invalid syntax\r\n>>> ",,terminal_output +296,975616,"TERMINAL",0,0,"\r>>> print((**a, 4)",,terminal_output +297,976259,"TERMINAL",0,0,"\r>>> a = [1,2,3]",,terminal_output +298,977935,"TERMINAL",0,0,"\r\n>>> ",,terminal_output +299,978295,"TERMINAL",0,0,"\r>>> a = [1,2,3]",,terminal_output +300,978734,"TERMINAL",0,0,"\r>>> print((**a, 4)",,terminal_output +301,979269,"TERMINAL",0,0,"",,terminal_output +302,979734,"TERMINAL",0,0,"",,terminal_output +303,979929,"TERMINAL",0,0,"",,terminal_output +304,980072,"TERMINAL",0,0,"",,terminal_output +305,980580,"TERMINAL",0,0,"\ra",,terminal_output +306,980893,"TERMINAL",0,0,"\r\n... ",,terminal_output +307,982495,"TERMINAL",0,0,"2025-09-15 20:18:40.501289: E external/xla/xla/service/slow_operation_alarm.cc:73] Constant folding an instruction is taking > 1s:\r\n\r\n %reduce.11381 = s32[] reduce(%broadcast.2238, %constant.526), dimensions={1,0,2}, to_apply=%region_296.11380, metadata={op_name=""jit(val_step)/jit(main)/reduce_sum"" source_file=""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/train_dynamics.py"" source_line=398}\r\n\r\nThis isn't necessarily a bug; constant-folding is inherently a trade-off between compilation time and speed at runtime. XLA has some guards that attempt to keep constant folding from taking too long, but fundamentally you'll always be able to come up with an input program that takes a long time.\r\n\r\nIf you'd like to file a bug, run with envvar XLA_FLAGS=--xla_dump_to=/tmp/foo and attach the results.\r\n",,terminal_output +308,983226,"TERMINAL",0,0,"\r\nKeyboardInterrupt\r\n>>> ",,terminal_output +309,983344,"TERMINAL",0,0,"2025-09-15 20:18:41.371020: E external/xla/xla/service/slow_operation_alarm.cc:140] The operation took 1.869828652s\r\nConstant folding an instruction is taking > 1s:\r\n\r\n %reduce.11381 = s32[] reduce(%broadcast.2238, %constant.526), dimensions={1,0,2}, to_apply=%region_296.11380, metadata={op_name=""jit(val_step)/jit(main)/reduce_sum"" source_file=""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/train_dynamics.py"" source_line=398}\r\n\r\nThis isn't necessarily a bug; constant-folding is inherently a trade-off between compilation time and speed at runtime. XLA has some guards that attempt to keep constant folding from taking too long, but fundamentally you'll always be able to come up with an input program that takes a long time.\r\n\r\nIf you'd like to file a bug, run with envvar XLA_FLAGS=--xla_dump_to=/tmp/foo and attach the results.\r\n",,terminal_output +310,984133,"TERMINAL",0,0,"2025-09-15 20:18:42.159490: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +311,985533,"TERMINAL",0,0,"\r>>> print((*a, 4)",,terminal_output +312,986333,"TERMINAL",0,0,"\r>>> a = [1,2,3]",,terminal_output +313,986719,"TERMINAL",0,0,"\r\n>>> ",,terminal_output +314,987029,"TERMINAL",0,0,"\r>>> a = [1,2,3]",,terminal_output +315,987671,"TERMINAL",0,0,"\r>>> print((*a, 4)",,terminal_output +316,988137,"TERMINAL",0,0,"",,terminal_output +317,988994,"TERMINAL",0,0,"\r)",,terminal_output +318,989524,"TERMINAL",0,0,"\r\n(1, 2, 3, 4)\r\n>>> ",,terminal_output +319,1014649,"TERMINAL",0,0,"Step 10, validation loss: 5.508398056030273\r\n",,terminal_output +320,1027579,"TERMINAL",0,0,"srun",,terminal_focus +321,1043020,"TERMINAL",0,0,"Step 10, loss: 4.491197109222412\r\n",,terminal_output +322,1044961,"TERMINAL",0,0,"Step 11, loss: 4.127790451049805\r\n",,terminal_output +323,1046706,"TERMINAL",0,0,"Step 12, loss: 3.5996522903442383\r\n",,terminal_output +324,1048465,"TERMINAL",0,0,"Step 13, loss: 2.9497897624969482\r\n",,terminal_output +325,1050749,"TERMINAL",0,0,"Step 14, loss: 2.6980764865875244\r\n",,terminal_output +326,1052600,"TERMINAL",0,0,"Step 15, loss: 3.1382486820220947\r\n",,terminal_output +327,1054488,"TERMINAL",0,0,"Step 16, loss: 3.53959059715271\r\n",,terminal_output +328,1056277,"TERMINAL",0,0,"Step 17, loss: 3.54298734664917\r\n",,terminal_output +329,1058177,"TERMINAL",0,0,"Step 18, loss: 2.5865678787231445\r\n",,terminal_output +330,1059938,"TERMINAL",0,0,"Step 19, loss: 2.674994468688965\r\nCalculating validation metrics...\r\n",,terminal_output +331,1068515,"TERMINAL",0,0,"Step 20, validation loss: 2.8861100673675537\r\n",,terminal_output +332,1070352,"TERMINAL",0,0,"Step 20, loss: 2.6039786338806152\r\n",,terminal_output +333,1072197,"TERMINAL",0,0,"Step 21, loss: 2.2568001747131348\r\n",,terminal_output +334,1073943,"TERMINAL",0,0,"Step 22, loss: 2.388590097427368\r\n",,terminal_output +335,1076267,"TERMINAL",0,0,"Step 23, loss: 1.8975921869277954\r\n",,terminal_output +336,1078133,"TERMINAL",0,0,"Step 24, loss: 1.7999199628829956\r\n",,terminal_output +337,1079880,"TERMINAL",0,0,"Step 25, loss: 1.5867562294006348\r\n",,terminal_output +338,1081729,"TERMINAL",0,0,"Step 26, loss: 1.6156861782073975\r\n",,terminal_output +339,1083563,"TERMINAL",0,0,"Step 27, loss: 1.7478469610214233\r\n",,terminal_output +340,1085404,"TERMINAL",0,0,"Step 28, loss: 1.4027135372161865\r\n",,terminal_output +341,1087478,"TERMINAL",0,0,"Step 29, loss: 1.3588135242462158\r\nCalculating validation metrics...\r\n",,terminal_output +342,1095710,"TERMINAL",0,0,"Step 30, validation loss: 1.795782446861267\r\n",,terminal_output +343,1097658,"TERMINAL",0,0,"Step 30, loss: 1.261796474456787\r\n",,terminal_output +344,1099446,"TERMINAL",0,0,"Step 31, loss: 1.4518359899520874\r\n",,terminal_output +345,1101296,"TERMINAL",0,0,"Step 32, loss: 1.1440304517745972\r\n",,terminal_output +346,1103660,"TERMINAL",0,0,"Step 33, loss: 1.2935168743133545\r\n",,terminal_output +347,1105377,"TERMINAL",0,0,"Step 34, loss: 1.1508052349090576\r\n",,terminal_output +348,1107223,"TERMINAL",0,0,"Step 35, loss: 0.9338809847831726\r\n",,terminal_output +349,1109058,"TERMINAL",0,0,"Step 36, loss: 0.9636817574501038\r\n",,terminal_output +350,1110922,"TERMINAL",0,0,"Step 37, loss: 1.0422073602676392\r\n",,terminal_output +351,1112749,"TERMINAL",0,0,"Step 38, loss: 1.0963596105575562\r\n",,terminal_output +352,1114532,"TERMINAL",0,0,"Step 39, loss: 
1.031375765800476\r\nCalculating validation metrics...\r\n",,terminal_output +353,1123094,"TERMINAL",0,0,"Step 40, validation loss: 1.2927380800247192\r\n",,terminal_output +354,1124895,"TERMINAL",0,0,"Step 40, loss: 0.901832640171051\r\n",,terminal_output +355,1126776,"TERMINAL",0,0,"Step 41, loss: 1.0297459363937378\r\n",,terminal_output +356,1128561,"TERMINAL",0,0,"Step 42, loss: 0.9613402485847473\r\n",,terminal_output +357,1130451,"TERMINAL",0,0,"Step 43, loss: 0.9845135807991028\r\n",,terminal_output +358,1132311,"TERMINAL",0,0,"Step 44, loss: 1.085848331451416\r\n",,terminal_output +359,1134149,"TERMINAL",0,0,"Step 45, loss: 0.8057553172111511\r\n",,terminal_output +360,1135866,"TERMINAL",0,0,"Step 46, loss: 0.9998927116394043\r\n",,terminal_output +361,1137731,"TERMINAL",0,0,"Step 47, loss: 0.8952344059944153\r\n",,terminal_output +362,1139493,"TERMINAL",0,0,"Step 48, loss: 0.8718449473381042\r\n",,terminal_output +363,1141350,"TERMINAL",0,0,"Step 49, loss: 0.9575175046920776\r\nCalculating validation metrics...\r\n",,terminal_output +364,1149893,"TERMINAL",0,0,"Step 50, validation loss: 1.4611374139785767\r\n",,terminal_output +365,1152760,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run coinrun-dyn-dev-3497227 at: https://wandb.ai/instant-uv/jafar/runs/c19jp68p\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/wandb/run-20250915_201622-c19jp68p/logs\r\n",,terminal_output +366,1154741,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 5 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +367,1155160,"TERMINAL",0,0,"]0;tum_cte0515@hkn0403:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0403 jasmine]$ ",,terminal_output +368,1283359,"TERMINAL",0,0,"gi",,terminal_output +369,1283498,"TERMINAL",0,0,"t",,terminal_output +370,1283581,"TERMINAL",0,0," ",,terminal_output +371,1283718,"TERMINAL",0,0,"c",,terminal_output +372,1283805,"TERMINAL",0,0,"h",,terminal_output +373,1283903,"TERMINAL",0,0,"e",,terminal_output +374,1283963,"TERMINAL",0,0,"c",,terminal_output +375,1284057,"TERMINAL",0,0,"k",,terminal_output +376,1284181,"TERMINAL",0,0,"o",,terminal_output +377,1284331,"TERMINAL",0,0,"u",,terminal_output +378,1284409,"TERMINAL",0,0,"t",,terminal_output +379,1284457,"TERMINAL",0,0," ",,terminal_output +380,1284599,"TERMINAL",0,0,"v",,terminal_output +381,1284736,"TERMINAL",0,0,"a",,terminal_output +382,1284822,"TERMINAL",0,0,"l",,terminal_output +383,1284986,"TERMINAL",0,0,"-",,terminal_output +384,1285209,"TERMINAL",0,0,"l",,terminal_output +385,1285396,"TERMINAL",0,0,"o",,terminal_output +386,1285486,"TERMINAL",0,0,"s",,terminal_output +387,1285639,"TERMINAL",0,0,"\r\n[?2004l\rerror: pathspec 'val-los' did not match any file(s) known to git\r\n]0;tum_cte0515@hkn0403:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0403 jasmine]$ ",,terminal_output +388,1286505,"TERMINAL",0,0,"gi",,terminal_output +389,1286648,"TERMINAL",0,0," t",,terminal_output +390,1286791,"TERMINAL",0,0,"p",,terminal_output +391,1287201,"TERMINAL",0,0,"",,terminal_output +392,1287628,"TERMINAL",0,0,"",,terminal_output +393,1287768,"TERMINAL",0,0,"",,terminal_output +394,1287905,"TERMINAL",0,0,"",,terminal_output +395,1288092,"TERMINAL",0,0,"",,terminal_output +396,1288316,"TERMINAL",0,0,"git checkout 
val-los",,terminal_output +397,1288880,"TERMINAL",0,0,"",,terminal_output +398,1289170,"TERMINAL",0,0,"",,terminal_output +399,1289310,"TERMINAL",0,0,"s",,terminal_output +400,1289493,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +401,1290081,"TERMINAL",0,0,"Switched to branch 'val-loss'\r\nYour branch is up to date with 'origin/val-loss'.\r\n]0;tum_cte0515@hkn0403:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0403 jasmine]$ ",,terminal_output +402,1290608,"TERMINAL",0,0,"g",,terminal_output +403,1290721,"",0,0,"Switched from branch 'remove-restore-branching' to 'val-loss'",,git_branch_checkout +404,1290769,"TERMINAL",0,0,"i",,terminal_output +405,1290873,"TERMINAL",0,0,"t ",,terminal_output +406,1290988,"TERMINAL",0,0,"p",,terminal_output +407,1291178,"TERMINAL",0,0,"u",,terminal_output +408,1291357,"TERMINAL",0,0,"l",,terminal_output +409,1291493,"TERMINAL",0,0,"l",,terminal_output +410,1291590,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +411,1297150,"train_dynamics.py",0,0,"import os\n\n\nos.environ.setdefault(""XLA_PYTHON_CLIENT_MEM_FRACTION"", ""0.98"")\n\nfrom dataclasses import dataclass, field\nimport itertools\nfrom typing import cast, Optional\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.train_utils import (\n get_lr_schedule,\n count_parameters_by_component,\n print_mem_stats,\n print_compiled_memory_stats,\n print_compiled_cost_analysis,\n)\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_type: str = ""maskgit"" # supported options: maskgit, causal\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n val_data_dir: str = """"\n val_interval: int = 
20_000\n val_steps: int = 50\n wandb_id: str = """"\n\n\ndef build_model(args: Args, rng: jax.Array) -> tuple[Genie, jax.Array]:\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n decode=False,\n rngs=rngs,\n )\n del genie.lam.decoder\n return genie, rng\n\n\ndef build_optimizer(genie: Genie, args: Args) -> tuple[nnx.Optimizer, optax.Schedule]:\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.param_dtype, # moments in full precision\n )\n optimizer = nnx.Optimizer(genie, tx)\n return optimizer, lr_schedule\n\n\ndef build_mesh_and_sharding(\n num_devices: int,\n) -> tuple[Mesh, NamedSharding, NamedSharding]:\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n return mesh, replicated_sharding, videos_sharding\n\n\ndef shard_optimizer_states(\n optimizer: nnx.Optimizer, replicated_sharding: NamedSharding\n) -> None:\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n\ndef build_dataloader(args: Args, data_dir: str) -> grain.DataLoaderIterator:\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(data_dir, x)\n for x in os.listdir(data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n return grain_iterator\n\n\ndef build_checkpoint_manager(args: Args) -> ocp.CheckpointManager:\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n 
handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""train_dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""train_dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n if args.val_data_dir:\n handler_registry.add(\n ""val_dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""val_dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n return checkpoint_manager\n\n\ndef restore_or_initialize_components(\n args: Args,\n checkpoint_manager: ocp.CheckpointManager,\n optimizer: nnx.Optimizer,\n train_iterator: grain.DataLoaderIterator,\n rng: jax.Array,\n replicated_sharding: NamedSharding,\n val_iterator: Optional[grain.DataLoaderIterator],\n restore_step: Optional[int] = None,\n) -> tuple[\n int, nnx.Optimizer, grain.DataLoaderIterator, grain.DataLoaderIterator, jax.Array\n]:\n step = 0\n if restore_step is None:\n restore_step = checkpoint_manager.latest_step()\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n if val_iterator:\n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n val_dataloader_state=grain.checkpoint.CheckpointRestore(val_iterator), # type: ignore\n )\n else:\n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(), args=restore_args\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n train_iterator = restored[""train_dataloader_state""]\n if val_iterator:\n val_iterator = restored[""val_dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n rng, _rng = jax.random.split(rng)\n optimizer = restore_genie_components(optimizer, replicated_sharding, _rng, args)\n # NOTE: We have to remove the (unused) tokenizer vq dropout due flax.nnx lazily initializing modules.\n # Specifically, the first dynamics model checkpoint will contain the vq dropout module,\n # but the first full restore will fail due to nnx not initializing the module when\n # dropout is set to 0.0.\n del optimizer.model.tokenizer.vq.drop\n return step, optimizer, train_iterator, val_iterator, rng\n\n\ndef main(args: Args) -> 
None:\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n genie, rng = build_model(args, rng)\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n optimizer, lr_schedule = build_optimizer(genie, args)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n _, replicated_sharding, videos_sharding = build_mesh_and_sharding(num_devices)\n\n shard_optimizer_states(optimizer, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n checkpoint_manager = build_checkpoint_manager(args)\n\n # --- Create DataLoaderIterator from dataloader ---\n train_iterator = build_dataloader(args, args.data_dir)\n val_iterator = None\n if args.val_data_dir:\n val_iterator = build_dataloader(args, args.val_data_dir)\n\n # --- Restore checkpoint ---\n step, optimizer, train_iterator, val_iterator, rng = (\n restore_or_initialize_components(\n args,\n checkpoint_manager,\n optimizer,\n train_iterator,\n rng,\n replicated_sharding,\n val_iterator,\n )\n )\n\n # --- Define loss and train step (close over args) ---\n def dynamics_loss_fn(\n model: Genie,\n inputs: dict,\n training: bool = False,\n pred_full_frame: bool = False,\n ) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n outputs = model(inputs, training=training, pred_full_frame=pred_full_frame)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt_val = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt_val, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt_val, recon)).mean()\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]),\n size=args.num_latent_actions,\n fill_value=0,\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]),\n size=args.num_patch_latents,\n fill_value=0,\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n 
select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n @nnx.jit(donate_argnums=0)\n def train_step(\n optimizer: nnx.Optimizer, inputs: dict\n ) -> tuple[jax.Array, jax.Array, dict]:\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n model.train()\n return dynamics_loss_fn(model, inputs, training=True)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(\n optimizer.model\n )\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n @nnx.jit\n def val_step(\n genie: Genie, inputs: dict\n ) -> tuple[jax.Array, jax.Array, dict, jax.Array, jax.Array, dict]:\n """"""Evaluate model and compute metrics""""""\n genie.eval()\n (loss, (recon, metrics)) = dynamics_loss_fn(\n genie, inputs, training=False, pred_full_frame=False\n )\n (loss_full_frame, (recon_full_frame, metrics_full_frame)) = dynamics_loss_fn(\n genie, inputs, training=False, pred_full_frame=True\n )\n return (\n loss,\n recon,\n metrics,\n loss_full_frame,\n recon_full_frame,\n metrics_full_frame,\n )\n\n def calculate_validation_metrics(val_dataloader, genie, rng):\n step = 0\n loss_per_step = []\n metrics_per_step = []\n loss_full_frame_per_step = []\n metrics_full_frame_per_step = []\n inputs = None\n recon = None\n recon_full_frame = None\n for videos in val_dataloader:\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n (\n loss,\n recon,\n metrics,\n loss_full_frame,\n recon_full_frame,\n metrics_full_frame,\n ) = val_step(genie, inputs)\n loss_per_step.append(loss)\n metrics_per_step.append(metrics)\n loss_full_frame_per_step.append(loss_full_frame)\n metrics_full_frame_per_step.append(metrics_full_frame)\n step += 1\n if step > args.val_steps:\n break\n\n if step < args.val_steps:\n print(\n f""Warning: Your validation dataset is too small to make val_steps many steps. 
Made {step} steps, expected {args.val_steps}""\n )\n\n val_metrics = {\n f""val_{key}"": np.mean([float(m[key]) for m in metrics_per_step])\n for key in metrics_per_step[0].keys()\n }\n val_metrics_full_frame = {\n f""val_full_frame_{key}"": np.mean(\n [float(m[key]) for m in metrics_full_frame_per_step]\n )\n for key in metrics_full_frame_per_step[0].keys()\n }\n val_losses = {\n ""val_loss"": np.mean(loss_per_step),\n ""val_loss_full_frame"": np.mean(loss_full_frame_per_step),\n }\n val_metrics.update(val_metrics_full_frame)\n val_metrics.update(val_losses)\n return val_metrics, inputs, recon, recon_full_frame\n\n # --- TRAIN LOOP ---\n dataloader_train = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in train_iterator\n )\n dataloader_val = None\n if val_iterator:\n dataloader_val = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in val_iterator\n )\n if jax.process_index() == 0:\n first_videos = next(dataloader_train)\n sample_inputs = dict(videos=first_videos, mask_rng=rng)\n compiled = train_step.lower(optimizer, sample_inputs).compile()\n print_compiled_memory_stats(compiled.memory_analysis())\n print_compiled_cost_analysis(compiled.cost_analysis())\n # Do not skip the first batch during training\n dataloader_train = itertools.chain([first_videos], dataloader_train)\n print(f""Starting training from step {step}..."")\n first_step = step\n while step < args.num_steps:\n for videos in dataloader_train:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer, inputs)\n if step == first_step:\n print_mem_stats(""After params initialized"")\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Validation loss ---\n val_results = {}\n if dataloader_val and step % args.val_interval == 0:\n rng, _rng_mask_val = jax.random.split(rng, 2)\n print(""Calculating validation metrics..."")\n val_metrics, val_gt_batch, val_recon, val_full_frame = (\n calculate_validation_metrics(\n dataloader_val, optimizer.model, _rng_mask_val\n )\n )\n print(f""Step {step}, validation loss: {val_metrics['val_loss']}"")\n val_results = {\n ""metrics"": val_metrics,\n ""gt_batch"": val_gt_batch,\n ""recon"": val_recon,\n ""full_frame"": val_full_frame,\n }\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n log_dict = {""loss"": loss, ""step"": step, **metrics}\n if val_results:\n log_dict.update(val_results[""metrics""])\n wandb.log(log_dict)\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if val_results:\n val_results[""gt_seq_val""] = (\n val_results[""gt_batch""][""videos""][0].astype(jnp.float32)\n / 255.0\n )\n val_results[""recon_seq_val""] = val_results[""recon""].clip(0, 1)\n val_comparison_seq = jnp.concatenate(\n (val_results[""gt_seq_val""], val_results[""recon_seq_val""]),\n axis=1,\n )\n val_results[""val_comparison_seq""] = einops.rearrange(\n val_comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n val_results[""full_frame_seq_val""] = val_results[""full_frame""][\n 0\n ].clip(0, 1)\n val_results[""val_full_frame_comparison_seq""] = jnp.concatenate(\n (\n val_results[""gt_seq_val""],\n 
val_results[""full_frame_seq_val""],\n ),\n axis=1,\n )\n val_results[""val_full_frame_comparison_seq""] = einops.rearrange(\n val_results[""val_full_frame_comparison_seq""] * 255,\n ""t h w c -> h (t w) c"",\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n if val_results:\n log_images.update(\n dict(\n val_image=wandb.Image(\n np.asarray(\n val_results[""gt_seq_val""][args.seq_len - 1]\n )\n ),\n val_recon=wandb.Image(\n np.asarray(\n val_results[""recon_seq_val""][\n args.seq_len - 1\n ]\n )\n ),\n val_true_vs_recon=wandb.Image(\n np.asarray(\n val_results[""val_comparison_seq""].astype(\n np.uint8\n )\n )\n ),\n val_full_frame=wandb.Image(\n np.asarray(\n val_results[""full_frame_seq_val""][\n args.seq_len - 1\n ]\n )\n ),\n val_true_vs_full_frame=wandb.Image(\n np.asarray(\n val_results[\n ""val_full_frame_comparison_seq""\n ].astype(np.uint8)\n )\n ),\n )\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n if args.val_data_dir:\n ckpt_manager_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n ),\n val_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n val_iterator # type: ignore\n ),\n )\n else:\n ckpt_manager_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n ),\n )\n checkpoint_manager.save(step, args=ckpt_manager_args)\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n\n\nif __name__ == ""__main__"":\n args = tyro.cli(Args)\n main(args)\n",python,tab +412,1297588,"train_dynamics.py",8326,18301," val_iterator: Optional[grain.DataLoaderIterator] = None,\n restore_step: Optional[int] = None,\n) -> tuple[int, nnx.Optimizer, grain.DataLoaderIterator, grain.DataLoaderIterator, jax.Array]:\n step = 0\n if restore_step is None:\n restore_step = checkpoint_manager.latest_step()\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n if args.val_data_dir:\n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n val_dataloader_state=grain.checkpoint.CheckpointRestore(val_iterator), # type: ignore\n )\n else: \n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=restore_args\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n train_iterator = restored[""train_dataloader_state""]\n if 
args.val_data_dir:\n val_iterator = restored[""val_dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n rng, _rng = jax.random.split(rng)\n optimizer = restore_genie_components(optimizer, replicated_sharding, _rng, args)\n # NOTE: We have to remove the (unused) tokenizer vq dropout due flax.nnx lazily initializing modules.\n # Specifically, the first dynamics model checkpoint will contain the vq dropout module,\n # but the first full restore will fail due to nnx not initializing the module when\n # dropout is set to 0.0.\n del optimizer.model.tokenizer.vq.drop\n return step, optimizer, train_iterator, val_iterator, rng\n\n\ndef main(args: Args) -> None:\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n genie, rng = build_model(args, rng)\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n optimizer, lr_schedule = build_optimizer(genie, args)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n _, replicated_sharding, videos_sharding = build_mesh_and_sharding(num_devices)\n\n shard_optimizer_states(optimizer, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n checkpoint_manager = build_checkpoint_manager(args)\n\n # --- Create DataLoaderIterator from dataloader ---\n train_iterator = build_dataloader(args, args.data_dir)\n val_iterator = None\n if args.val_data_dir:\n val_iterator = build_dataloader(args, args.val_data_dir)\n \n # --- Restore checkpoint ---\n if val_iterator:\n step, optimizer, train_iterator, val_iterator, rng = restore_or_initialize_components(\n args, checkpoint_manager, optimizer, train_iterator, rng, replicated_sharding, val_iterator\n )\n else:\n step, optimizer, train_iterator, _, rng = restore_or_initialize_components(\n args, checkpoint_manager, optimizer, train_iterator, rng, replicated_sharding\n )\n\n # --- Define loss and train step (close over args) ---\n def dynamics_loss_fn(\n model: Genie, inputs: dict, training: bool = False, pred_full_frame: bool = False\n ) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n outputs = model(inputs, training=training, pred_full_frame=pred_full_frame)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * 
ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt_val = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt_val, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt_val, recon)).mean()\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]),\n size=args.num_latent_actions,\n fill_value=0,\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]),\n size=args.num_patch_latents,\n fill_value=0,\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n @nnx.jit(donate_argnums=0)\n def train_step(\n optimizer: nnx.Optimizer, inputs: dict\n ) -> tuple[jax.Array, jax.Array, dict]:\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n model.train()\n return dynamics_loss_fn(model, inputs, training=True)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(\n optimizer.model\n )\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n @nnx.jit\n def val_step(genie: Genie, inputs: dict) -> tuple[jax.Array, jax.Array, dict, jax.Array, jax.Array, dict]:\n """"""Evaluate model and compute metrics""""""\n genie.eval()\n (loss, (recon, metrics)) = dynamics_loss_fn(genie, inputs, training=False, pred_full_frame=False)\n (loss_full_frame, (recon_full_frame, metrics_full_frame)) = dynamics_loss_fn(genie, inputs, training=False, pred_full_frame=True)\n return loss, recon, metrics, loss_full_frame, recon_full_frame, metrics_full_frame\n\n\n def calculate_validation_metrics(val_dataloader, genie, rng):\n step = 0\n loss_per_step = []\n metrics_per_step = []\n loss_full_frame_per_step = []\n metrics_full_frame_per_step = []\n inputs = None\n recon = None\n recon_full_frame = None\n for videos in val_dataloader:\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics, loss_full_frame, recon_full_frame, metrics_full_frame = val_step(genie, inputs)\n loss_per_step.append(loss)\n metrics_per_step.append(metrics)\n loss_full_frame_per_step.append(loss_full_frame)\n metrics_full_frame_per_step.append(metrics_full_frame)\n step += 1\n if step > args.val_steps:\n break\n\n if step < args.val_steps:\n print(f""Warning: Your validation dataset is too small to make val_steps many steps. 
Made {step} steps, expected {args.val_steps}"")\n\n val_metrics = {\n f""val_{key}"": np.mean([float(m[key]) for m in metrics_per_step])\n for key in metrics_per_step[0].keys()\n }\n val_metrics_full_frame = {\n f""val_full_frame_{key}"": np.mean([float(m[key]) for m in metrics_full_frame_per_step])\n for key in metrics_full_frame_per_step[0].keys()\n }\n val_losses = {\n ""val_loss"": np.mean(loss_per_step),\n ""val_loss_full_frame"": np.mean(loss_full_frame_per_step)\n }\n val_metrics.update(val_metrics_full_frame)\n val_metrics.update(val_losses)\n return val_metrics, inputs, recon, recon_full_frame\n\n # --- TRAIN LOOP ---\n dataloader_train = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in train_iterator\n )\n dataloader_val = None\n if val_iterator:\n dataloader_val = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in val_iterator\n )\n if jax.process_index() == 0:\n first_videos = next(dataloader_train)\n sample_inputs = dict(videos=first_videos, mask_rng=rng)\n compiled = train_step.lower(optimizer, sample_inputs).compile()\n print_compiled_memory_stats(compiled.memory_analysis())\n print_compiled_cost_analysis(compiled.cost_analysis())\n # Do not skip the first batch during training\n dataloader_train = itertools.chain([first_videos], dataloader_train)\n print(f""Starting training from step {step}..."")\n first_step = step\n while step < args.num_steps:\n for videos in dataloader_train:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer, inputs)\n if step == first_step:\n print_mem_stats(""After params initialized"")\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Validation loss ---\n val_results = {}\n if dataloader_val and step % args.val_interval == 0:\n rng, _rng_mask_val = jax.random.split(rng, 2)\n print(""Calculating validation metrics..."")\n val_metrics, val_gt_batch, val_recon, val_full_frame = calculate_validation_metrics(dataloader_val, optimizer.model, _rng_mask_val)\n print(f""Step {step}, validation loss: {val_metrics['val_loss']}"")\n val_results = {\n ""metrics"": val_metrics,\n ""gt_batch"": val_gt_batch,\n ""recon"": val_recon,\n ""full_frame"": val_full_frame,\n }\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n log_dict = {\n ""loss"": loss,\n ""step"": step,\n **metrics\n }\n if val_results:\n log_dict.update(val_results[""metrics""])\n wandb.log(log_dict)\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if val_results:\n val_results[""gt_seq_val""] = val_results[""gt_batch""][""videos""][0].astype(jnp.float32) / 255.0\n val_results[""recon_seq_val""] = val_results[""recon""].clip(0, 1)\n val_comparison_seq = jnp.concatenate((val_results[""gt_seq_val""], val_results[""recon_seq_val""]), axis=1)\n val_results[""val_comparison_seq""] = einops.rearrange(\n val_comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n val_results[""full_frame_seq_val""] = val_results[""full_frame""][0].clip(0, 1)\n val_results[""val_full_frame_comparison_seq""] = jnp.concatenate((val_results[""gt_seq_val""], val_results[""full_frame_seq_val""]), axis=1)\n 
val_results[""val_full_frame_comparison_seq""] = einops.rearrange(\n val_results[""val_full_frame_comparison_seq""] * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n if val_results:\n log_images.update(\n dict(\n val_image=wandb.Image(np.asarray(val_results[""gt_seq_val""][args.seq_len - 1])),\n val_recon=wandb.Image(np.asarray(val_results[""recon_seq_val""][args.seq_len - 1])),\n val_true_vs_recon=wandb.Image(\n np.asarray(val_results[""val_comparison_seq""].astype(np.uint8))\n ),\n val_full_frame=wandb.Image(np.asarray(val_results[""full_frame_seq_val""][args.seq_len - 1])),\n val_true_vs_full_frame=wandb.Image(\n np.asarray(val_results[""val_full_frame_comparison_seq""].astype(np.uint8))\n )\n )\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n if args.val_data_dir:\n ckpt_manager_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n ),\n val_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n val_iterator # type: ignore\n )\n )\n else: \n ckpt_manager_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n )\n )\n checkpoint_manager.save(\n step,\n args=ckpt_manager_args\n )\n",python,content +413,1298711,"TERMINAL",0,0,"remote: Enumerating objects: 2, done.\r\nremote: Counting objects: 50% (1/2)\rremote: Counting objects: 100% (2/2)\rremote: Counting objects: 100% (2/2), done.\r\nremote: Compressing objects: 50% (1/2)\rremote: Compressing objects: 100% (2/2)\rremote: Compressing objects: 100% (2/2), done.\r\nremote: Total 2 (delta 0), reused 0 (delta 0), pack-reused 0 (from 0)\r\nUnpacking objects: 50% (1/2)\rUnpacking objects: 100% (2/2)\rUnpacking objects: 100% (2/2), 1.35 KiB | 115.00 KiB/s, done.\r\n",,terminal_output +414,1298814,"TERMINAL",0,0,"From github.com:p-doom/jasmine\r\n 52dcd52..a9f9ec1 val-loss -> origin/val-loss\r\n",,terminal_output +415,1298976,"TERMINAL",0,0,"Updating 52dcd52..a9f9ec1\r\nFast-forward\r\n",,terminal_output +416,1299068,"TERMINAL",0,0," train_dynamics.py | 178 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------------------------------\r\n 1 file changed, 122 insertions(+), 56 deletions(-)\r\n]0;tum_cte0515@hkn0403:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0403 jasmine]$ ",,terminal_output +417,1299738,"models/dynamics.py",0,0,"from typing import Dict\n\nimport jax\nimport jax.numpy as jnp\nimport flax.nnx as nnx\nimport einops\n\nfrom utils.nn import STTransformer, Transformer\n\n\nclass DynamicsMaskGIT(nnx.Module):\n """"""\n MaskGIT dynamics model\n\n Dimension keys:\n B: batch size\n T: sequence length\n N: number of patches per frame\n L: latent dimension\n V: vocabulary size (number of latents)\n """"""\n\n def __init__(\n 
self,\n model_dim: int,\n ffn_dim: int,\n num_latents: int,\n latent_action_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n mask_limit: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_latents = num_latents\n self.latent_action_dim = latent_action_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.mask_limit = mask_limit\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.transformer = STTransformer(\n self.model_dim,\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=rngs)\n self.mask_token = nnx.Param(\n nnx.initializers.lecun_uniform()(rngs.params(), (1, 1, 1, self.model_dim))\n )\n self.action_up = nnx.Linear(\n self.latent_action_dim,\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(\n self, batch: Dict[str, jax.Array], training: bool = True, pred_full_frame: bool = False,\n ) -> tuple[jax.Array, jax.Array | None]:\n assert not (training and pred_full_frame), ""Cannot evaluate full frame prediction during training.""\n # --- Mask videos ---\n video_tokens_BTN = batch[""video_tokens""]\n latent_actions_BTm11L = batch[""latent_actions""]\n vid_embed_BTNM = self.patch_embed(video_tokens_BTN)\n if training:\n batch_size = vid_embed_BTNM.shape[0]\n _rng_prob, *_rngs_mask = jax.random.split(batch[""mask_rng""], batch_size + 1)\n mask_prob = jax.random.uniform(\n _rng_prob, shape=(batch_size,), minval=self.mask_limit\n )\n per_sample_shape = vid_embed_BTNM.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(jnp.asarray(_rngs_mask), mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed_BTNM = jnp.where(\n jnp.expand_dims(mask, -1), self.mask_token.value, vid_embed_BTNM\n )\n elif pred_full_frame:\n mask = jnp.zeros_like(video_tokens_BTN)\n mask = mask.at[:, -1].set(True)\n vid_embed_BTNM = jnp.where(\n jnp.expand_dims(mask, -1), self.mask_token.value, vid_embed_BTNM\n )\n else:\n mask = jnp.ones_like(video_tokens_BTN)\n\n # --- Predict transition ---\n act_embed_BTm11M = self.action_up(latent_actions_BTm11L)\n padded_act_embed_BT1M = jnp.pad(\n act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0))\n )\n padded_act_embed_BTNM = jnp.broadcast_to(\n padded_act_embed_BT1M, vid_embed_BTNM.shape\n )\n vid_embed_BTNM += padded_act_embed_BTNM\n logits_BTNV = self.transformer(vid_embed_BTNM)\n return logits_BTNV, mask\n\n\nclass DynamicsCausal(nnx.Module):\n """"""Causal dynamics model""""""\n\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_latents: int,\n latent_action_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_latents = num_latents\n self.latent_action_dim = latent_action_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.decode = decode\n\n 
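The `__call__` recorded above conditions the dynamics on latent actions: actions for the T-1 transitions are projected to model width, a zero "action" is padded in front for the first frame, and the per-frame action is broadcast over every patch before being added to the patch embeddings. A sketch of just that conditioning step, under assumed toy shapes (B/T/N/L/M mirror the docstring's dimension keys):

```python
# Sketch of the action-conditioning step in DynamicsMaskGIT.__call__ above.
# B=batch, T=frames, N=patches/frame, M=model dim (illustrative sizes).
import jax.numpy as jnp

B, T, N, M = 2, 5, 16, 32
vid_embed_BTNM = jnp.zeros((B, T, N, M))        # patch embeddings
act_embed_BTm11M = jnp.ones((B, T - 1, 1, M))   # projected latent actions, one per transition

# Pad one zero "action" in front so frame 0 carries no action signal: (B, T, 1, M).
padded_act_BT1M = jnp.pad(act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0)))

# Broadcast each frame's action over all N patches and add it in.
vid_embed_BTNM = vid_embed_BTNM + jnp.broadcast_to(padded_act_BT1M, vid_embed_BTNM.shape)
assert (vid_embed_BTNM[:, 0] == 0).all()        # first frame stays unconditioned
```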
self.transformer = Transformer(\n self.model_dim,\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=self.decode,\n rngs=rngs,\n )\n self.patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=rngs)\n self.action_up = nnx.Linear(\n self.latent_action_dim,\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(\n self, batch: Dict[str, jax.Array], training: bool = True, pred_full_frame: bool = False,\n ) -> tuple[jax.Array, jax.Array | None]:\n assert not (training and pred_full_frame), ""Cannot evaluate full frame prediction during training.""\n video_tokens_BTN = batch[""video_tokens""]\n latent_actions_BTm11L = batch[""latent_actions""]\n if pred_full_frame:\n # --- Extract submodule states ---\n patch_embed_state = nnx.state(self.patch_embed)\n action_up_state = nnx.state(self.action_up)\n transformer_state = nnx.state(self.transformer)\n\n def _pred_full_frame(carry, step_n):\n video_tokens_BTN, final_logits_BTNV = carry\n # We need to reconstruct submodules inside scan body to prevent trace context mismatches\n patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=nnx.Rngs(0))\n nnx.update(patch_embed, patch_embed_state)\n action_up = nnx.Linear(\n self.latent_action_dim, self.model_dim, param_dtype=self.param_dtype, dtype=self.dtype, rngs=nnx.Rngs(0)\n )\n nnx.update(action_up, action_up_state)\n transformer = Transformer(\n self.model_dim, self.model_dim, self.ffn_dim, self.num_latents, self.num_blocks, self.num_heads,\n self.dropout, self.param_dtype, self.dtype, use_flash_attention=self.use_flash_attention,\n decode=self.decode, rngs=nnx.Rngs(0)\n )\n nnx.update(transformer, transformer_state)\n\n vid_embed_BTNM = patch_embed(video_tokens_BTN)\n act_embed_BTm11M = action_up(latent_actions_BTm11L)\n padded_act_embed_BT1M = jnp.pad(\n act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0))\n )\n vid_embed_BTNp1M = jnp.concatenate(\n [padded_act_embed_BT1M, vid_embed_BTNM], axis=2\n )\n step_logits_BTNp1V = transformer(vid_embed_BTNp1M)\n step_logits_BV = step_logits_BTNp1V[:, -1, step_n, :]\n final_logits_BTNV = final_logits_BTNV.at[:, -1, step_n].set(step_logits_BV)\n sampled_token_idxs_B = jnp.argmax(step_logits_BV, axis=-1)\n video_tokens_BTN = video_tokens_BTN.at[:, -1, step_n].set(\n sampled_token_idxs_B\n )\n return (video_tokens_BTN, final_logits_BTNV), None\n\n (_, final_logits_BTNV), _ = jax.lax.scan(\n _pred_full_frame,\n (video_tokens_BTN, jnp.zeros((\n video_tokens_BTN.shape[0],\n video_tokens_BTN.shape[1],\n video_tokens_BTN.shape[2],\n self.num_latents))),\n jnp.arange(video_tokens_BTN.shape[2])\n )\n mask_out = jnp.zeros_like(video_tokens_BTN)\n mask_out = mask_out.at[:, -1].set(True)\n return final_logits_BTNV, mask_out\n else:\n vid_embed_BTNM = self.patch_embed(video_tokens_BTN)\n act_embed_BTm11M = self.action_up(latent_actions_BTm11L)\n padded_act_embed_BT1M = jnp.pad(\n act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0))\n )\n vid_embed_BTNp1M = jnp.concatenate(\n [padded_act_embed_BT1M, vid_embed_BTNM], axis=2\n )\n logits_BTNp1V = self.transformer(vid_embed_BTNp1M)\n logits_BTNV = logits_BTNp1V[:, :, :-1]\n return logits_BTNV, jnp.ones_like(video_tokens_BTN)\n",python,tab +418,1299979,"models/dynamics.py",83,8247,"\nfrom utils.nn import STTransformer, Transformer\n\n\nclass DynamicsMaskGIT(nnx.Module):\n """"""\n MaskGIT dynamics model\n\n Dimension keys:\n 
B: batch size\n T: sequence length\n N: number of patches per frame\n L: latent dimension\n V: vocabulary size (number of latents)\n """"""\n\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_latents: int,\n latent_action_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n mask_limit: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_latents = num_latents\n self.latent_action_dim = latent_action_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.mask_limit = mask_limit\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.transformer = STTransformer(\n self.model_dim,\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=rngs)\n self.mask_token = nnx.Param(\n nnx.initializers.lecun_uniform()(rngs.params(), (1, 1, 1, self.model_dim))\n )\n self.action_up = nnx.Linear(\n self.latent_action_dim,\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(\n self, batch: Dict[str, jax.Array], training: bool = True, pred_full_frame: bool = False,\n ) -> tuple[jax.Array, jax.Array | None]:\n assert not (training and pred_full_frame), ""Cannot evaluate full frame prediction during training.""\n # --- Mask videos ---\n video_tokens_BTN = batch[""video_tokens""]\n latent_actions_BTm11L = batch[""latent_actions""]\n vid_embed_BTNM = self.patch_embed(video_tokens_BTN)\n if training:\n batch_size = vid_embed_BTNM.shape[0]\n _rng_prob, *_rngs_mask = jax.random.split(batch[""mask_rng""], batch_size + 1)\n mask_prob = jax.random.uniform(\n _rng_prob, shape=(batch_size,), minval=self.mask_limit\n )\n per_sample_shape = vid_embed_BTNM.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(jnp.asarray(_rngs_mask), mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed_BTNM = jnp.where(\n jnp.expand_dims(mask, -1), self.mask_token.value, vid_embed_BTNM\n )\n elif pred_full_frame:\n mask = jnp.zeros_like(video_tokens_BTN)\n mask = mask.at[:, -1].set(True)\n vid_embed_BTNM = jnp.where(\n jnp.expand_dims(mask, -1), self.mask_token.value, vid_embed_BTNM\n )\n else:\n mask = jnp.ones_like(video_tokens_BTN)\n\n # --- Predict transition ---\n act_embed_BTm11M = self.action_up(latent_actions_BTm11L)\n padded_act_embed_BT1M = jnp.pad(\n act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0))\n )\n padded_act_embed_BTNM = jnp.broadcast_to(\n padded_act_embed_BT1M, vid_embed_BTNM.shape\n )\n vid_embed_BTNM += padded_act_embed_BTNM\n logits_BTNV = self.transformer(vid_embed_BTNM)\n return logits_BTNV, mask\n\n\nclass DynamicsCausal(nnx.Module):\n """"""Causal dynamics model""""""\n\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_latents: int,\n latent_action_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_latents = num_latents\n self.latent_action_dim = latent_action_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n 
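The training branch of the MaskGIT `__call__` above draws a different mask rate per sample and vmaps a Bernoulli draw over per-sample RNG keys, then swaps masked patch embeddings for the learned mask token. A minimal sketch of that per-sample masking, assuming toy shapes and a zero stand-in for the mask token:

```python
# Sketch of MaskGIT-style per-sample random masking, as in the training branch above.
import jax
import jax.numpy as jnp

B, T, N, M = 2, 4, 16, 32
mask_limit = 0.5
vid_embed = jnp.ones((B, T, N, M))
mask_token = jnp.zeros((1, 1, 1, M))    # stand-in for the learned nnx.Param

keys = jax.random.split(jax.random.key(0), B + 1)
rng_prob, rngs_mask = keys[0], keys[1:]

# One mask rate per sample, bounded below by mask_limit.
mask_prob = jax.random.uniform(rng_prob, shape=(B,), minval=mask_limit)

# vmap a Bernoulli draw over (key, rate) pairs -> boolean mask of shape (B, T, N).
mask = jax.vmap(
    lambda key, p: jax.random.bernoulli(key, p, (T, N))
)(rngs_mask, mask_prob)
mask = mask.at[:, 0].set(False)         # never mask the first frame

# Replace masked patches with the mask token.
vid_embed = jnp.where(mask[..., None], mask_token, vid_embed)
```

Keeping the first frame unmasked matches the loss above, which normalizes by `mask.sum()` and so only scores positions the model actually had to predict.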
self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.decode = decode\n\n self.transformer = Transformer(\n self.model_dim,\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=self.decode,\n rngs=rngs,\n )\n self.patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=rngs)\n self.action_up = nnx.Linear(\n self.latent_action_dim,\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(\n self, batch: Dict[str, jax.Array], training: bool = True, pred_full_frame: bool = False,\n ) -> tuple[jax.Array, jax.Array | None]:\n assert not (training and pred_full_frame), ""Cannot evaluate full frame prediction during training.""\n video_tokens_BTN = batch[""video_tokens""]\n latent_actions_BTm11L = batch[""latent_actions""]\n if pred_full_frame:\n # --- Extract submodule states ---\n patch_embed_state = nnx.state(self.patch_embed)\n action_up_state = nnx.state(self.action_up)\n transformer_state = nnx.state(self.transformer)\n\n def _pred_full_frame(carry, step_n):\n video_tokens_BTN, final_logits_BTNV = carry\n # We need to reconstruct submodules inside scan body to prevent trace context mismatches\n patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=nnx.Rngs(0))\n nnx.update(patch_embed, patch_embed_state)\n action_up = nnx.Linear(\n self.latent_action_dim, self.model_dim, param_dtype=self.param_dtype, dtype=self.dtype, rngs=nnx.Rngs(0)\n )\n nnx.update(action_up, action_up_state)\n transformer = Transformer(\n self.model_dim, self.model_dim, self.ffn_dim, self.num_latents, self.num_blocks, self.num_heads,\n self.dropout, self.param_dtype, self.dtype, use_flash_attention=self.use_flash_attention,\n decode=self.decode, rngs=nnx.Rngs(0)\n )\n nnx.update(transformer, transformer_state)\n\n vid_embed_BTNM = patch_embed(video_tokens_BTN)\n act_embed_BTm11M = action_up(latent_actions_BTm11L)\n padded_act_embed_BT1M = jnp.pad(\n act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0))\n )\n vid_embed_BTNp1M = jnp.concatenate(\n [padded_act_embed_BT1M, vid_embed_BTNM], axis=2\n )\n step_logits_BTNp1V = transformer(vid_embed_BTNp1M)\n step_logits_BV = step_logits_BTNp1V[:, -1, step_n, :]\n final_logits_BTNV = final_logits_BTNV.at[:, -1, step_n].set(step_logits_BV)\n sampled_token_idxs_B = jnp.argmax(step_logits_BV, axis=-1)\n video_tokens_BTN = video_tokens_BTN.at[:, -1, step_n].set(\n sampled_token_idxs_B\n )\n return (video_tokens_BTN, final_logits_BTNV), None\n\n (_, final_logits_BTNV), _ = jax.lax.scan(\n _pred_full_frame,\n (video_tokens_BTN, jnp.zeros((\n **video_tokens_BTN.shape,\n",python,content +419,1305131,"models/dynamics.py",8329,0,"",python,selection_mouse +420,1305148,"models/dynamics.py",8328,0,"",python,selection_command +421,1305757,"models/dynamics.py",8196,0,"",python,selection_mouse +422,1306086,"models/dynamics.py",8196,1,"",python,content +423,1308506,"TERMINAL",0,0,"bash",,terminal_focus +424,1309256,"TERMINAL",0,0,"srun",,terminal_focus +425,1310952,"TERMINAL",0,0,"git pull",,terminal_output +426,1311641,"TERMINAL",0,0,"checkout val-loss",,terminal_output +427,1313036,"TERMINAL",0,0,"",,terminal_output +428,1313516,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/coinrun/train_dyn_single_gpu.sh",,terminal_output +429,1314543,"TERMINAL",0,0,"\r\n[?2004l\r#!/usr/bin/env bash\r\n\r\n#SBATCH 
--nodes=1\r\n#SBATCH --ntasks-per-node=4\r\n#SBATCH --time=00:20:00\r\n#SBATCH --partition=dev_accelerated\r\n#SBATCH --cpus-per-task=5\r\n#SBATCH --gres=gpu:1\r\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/lam/%x_%j.log\r\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/lam/%x_%j.log\r\n#SBATCH --job-name=train_lam_1e-4\r\n#SBATCH --requeue\r\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\r\n\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\n# array_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_test\r\narray_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m_split/train\r\n# array_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m_chunked\r\narray_records_dir_val=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m_split/val\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/dyn/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\nlam_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/lam/interactive/3483568\r\ntokenizer_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/interactive/3483568\r\n\r\nenv | grep SLURM\r\n\r\nexport PYTHONUNBUFFERED=1\r\n\r\nsrun python train_dynamics.py \\r\n --save_ckpt \\r\n --image_height=64 \\r\n --image_width=64 \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=110 \\r\n --init_lr=0 \\r\n --max_lr=1e-4 \\r\n --log_image_interval=250 \\r\n --log_checkpoint_interval=250 \\r\n --dyna_type=maskgit \\r\n --log \\r\n --name=coinrun-dyn-dev-$slurm_job_id \\r\n --tags dyn coinrun dev \\r\n --entity instant-uv \\r\n --project jafar \\r\n --warmup_steps 0 \\r\n --wsd_decay_steps 0 \\r\n --num_steps 50 \\r\n --data_dir $array_records_dir_train \\r\n --tokenizer_checkpoint $tokenizer_checkpoint \\r\n --val_data_dir $array_records_dir_val \\r\n --val_interval 10 \\r\n --val_steps 
5\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=3957988\r\nSLURM_JOB_GPUS=0\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine\r\nSLURMD_NODENAME=hkn0403\r\nSLURM_JOB_START_TIME=1757960068\r\nSLURM_STEP_NODELIST=hkn0403\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1757963668\r\nSLURM_PMI2_SRUN_PORT=44065\r\nSLURM_CPUS_ON_NODE=8\r\nSLURM_JOB_CPUS_PER_NODE=8\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=dev_accelerated\r\nSLURM_TRES_PER_TASK=cpu=8\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3497227\r\nSLURM_PTY_PORT=36975\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=33\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=8\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e11.hkn0403\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=181\r\nSLURM_NODELIST=hkn0403\r\nSLURM_SRUN_COMM_PORT=41927\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3497227\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0403\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_STEP_LAUNCHER_PORT=41927\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0403\r\n",,terminal_output +430,1314659,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +431,1315734,"models/dynamics.py",0,0,"",python,tab +432,1317196,"TERMINAL",0,0,"Running on 1 devices.\r\n",,terminal_output +433,1323119,"models/dynamics.py",8436,0,"",python,selection_mouse +434,1323156,"models/dynamics.py",8435,0,"",python,selection_command +435,1323839,"models/dynamics.py",8675,0,"",python,selection_mouse +436,1323851,"models/dynamics.py",8674,0,"",python,selection_command +437,1323968,"TERMINAL",0,0,"Counting all components: ['dynamics', 'lam', 'tokenizer']\r\n",,terminal_output +438,1324350,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +439,1325236,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.21.3\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/wandb/run-20250915_202422-vbquohe9\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run coinrun-dyn-dev-3497227\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/vbquohe9\r\nParameter counts:\r\n{'dynamics': 26555904, 'lam': 17640416, 'tokenizer': 33750256, 'total': 77946576}\r\n",,terminal_output +440,1329548,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1269: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +441,1351158,"TERMINAL",0,0,"2025-09-15 20:24:49.217609: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-09-15 20:24:49.219270: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +442,1365014,"train_dynamics.py",0,0,"",python,tab +443,1365457,"train_dynamics.py",8326,16747," val_iterator: Optional[grain.DataLoaderIterator],\n restore_step: Optional[int] = None,\n) -> tuple[\n int, nnx.Optimizer, grain.DataLoaderIterator, grain.DataLoaderIterator, jax.Array\n]:\n step = 0\n if restore_step is None:\n restore_step = checkpoint_manager.latest_step()\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n if val_iterator:\n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n val_dataloader_state=grain.checkpoint.CheckpointRestore(val_iterator), # type: ignore\n )\n else:\n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(), args=restore_args\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n train_iterator = restored[""train_dataloader_state""]\n if val_iterator:\n val_iterator = restored[""val_dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n rng, _rng = jax.random.split(rng)\n optimizer = restore_genie_components(optimizer, replicated_sharding, _rng, args)\n # NOTE: We have to remove the (unused) tokenizer vq dropout due flax.nnx lazily initializing modules.\n # Specifically, the first dynamics model checkpoint will contain the vq dropout module,\n # but the first full restore will fail due to nnx not initializing the module when\n # dropout is set to 0.0.\n del optimizer.model.tokenizer.vq.drop\n return step, optimizer, train_iterator, val_iterator, rng\n\n\ndef main(args: Args) -> None:\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n genie, rng = build_model(args, rng)\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() 
== 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n optimizer, lr_schedule = build_optimizer(genie, args)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n _, replicated_sharding, videos_sharding = build_mesh_and_sharding(num_devices)\n\n shard_optimizer_states(optimizer, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n checkpoint_manager = build_checkpoint_manager(args)\n\n # --- Create DataLoaderIterator from dataloader ---\n train_iterator = build_dataloader(args, args.data_dir)\n val_iterator = None\n if args.val_data_dir:\n val_iterator = build_dataloader(args, args.val_data_dir)\n\n # --- Restore checkpoint ---\n step, optimizer, train_iterator, val_iterator, rng = (\n restore_or_initialize_components(\n args,\n checkpoint_manager,\n optimizer,\n train_iterator,\n rng,\n replicated_sharding,\n val_iterator,\n )\n )\n\n # --- Define loss and train step (close over args) ---\n def dynamics_loss_fn(\n model: Genie,\n inputs: dict,\n training: bool = False,\n pred_full_frame: bool = False,\n ) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n outputs = model(inputs, training=training, pred_full_frame=pred_full_frame)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt_val = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt_val, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt_val, recon)).mean()\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]),\n size=args.num_latent_actions,\n fill_value=0,\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]),\n size=args.num_patch_latents,\n fill_value=0,\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n @nnx.jit(donate_argnums=0)\n def train_step(\n optimizer: nnx.Optimizer, inputs: dict\n ) -> tuple[jax.Array, jax.Array, dict]:\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n model.train()\n return dynamics_loss_fn(model, inputs, training=True)\n\n (loss, (recon, metrics)), grads = 
nnx.value_and_grad(loss_fn, has_aux=True)(\n optimizer.model\n )\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n @nnx.jit\n def val_step(\n genie: Genie, inputs: dict\n ) -> tuple[jax.Array, jax.Array, dict, jax.Array, jax.Array, dict]:\n """"""Evaluate model and compute metrics""""""\n genie.eval()\n (loss, (recon, metrics)) = dynamics_loss_fn(\n genie, inputs, training=False, pred_full_frame=False\n )\n (loss_full_frame, (recon_full_frame, metrics_full_frame)) = dynamics_loss_fn(\n genie, inputs, training=False, pred_full_frame=True\n )\n return (\n loss,\n recon,\n metrics,\n loss_full_frame,\n recon_full_frame,\n metrics_full_frame,\n )\n\n def calculate_validation_metrics(val_dataloader, genie, rng):\n step = 0\n loss_per_step = []\n metrics_per_step = []\n loss_full_frame_per_step = []\n metrics_full_frame_per_step = []\n inputs = None\n recon = None\n recon_full_frame = None\n for videos in val_dataloader:\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n (\n loss,\n recon,\n metrics,\n loss_full_frame,\n recon_full_frame,\n metrics_full_frame,\n ) = val_step(genie, inputs)\n loss_per_step.append(loss)\n metrics_per_step.append(metrics)\n loss_full_frame_per_step.append(loss_full_frame)\n metrics_full_frame_per_step.append(metrics_full_frame)\n step += 1\n if step > args.val_steps:\n break\n\n if step < args.val_steps:\n print(\n f""Warning: Your validation dataset is too small to make val_steps many steps. Made {step} steps, expected {args.val_steps}""\n )\n\n val_metrics = {\n f""val_{key}"": np.mean([float(m[key]) for m in metrics_per_step])\n for key in metrics_per_step[0].keys()\n }\n val_metrics_full_frame = {\n f""val_full_frame_{key}"": np.mean(\n [float(m[key]) for m in metrics_full_frame_per_step]\n )\n for key in metrics_full_frame_per_step[0].keys()\n }\n val_losses = {\n ""val_loss"": np.mean(loss_per_step),\n ""val_loss_full_frame"": np.mean(loss_full_frame_per_step),\n }\n val_metrics.update(val_metrics_full_frame)\n val_metrics.update(val_losses)\n return val_metrics, inputs, recon, recon_full_frame\n\n # --- TRAIN LOOP ---\n dataloader_train = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in train_iterator\n )\n dataloader_val = None\n if val_iterator:\n dataloader_val = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in val_iterator\n )\n if jax.process_index() == 0:\n first_videos = next(dataloader_train)\n sample_inputs = dict(videos=first_videos, mask_rng=rng)\n compiled = train_step.lower(optimizer, sample_inputs).compile()\n print_compiled_memory_stats(compiled.memory_analysis())\n print_compiled_cost_analysis(compiled.cost_analysis())\n # Do not skip the first batch during training\n dataloader_train = itertools.chain([first_videos], dataloader_train)\n print(f""Starting training from step {step}..."")\n first_step = step\n while step < args.num_steps:\n for videos in dataloader_train:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer, inputs)\n if step == first_step:\n print_mem_stats(""After params initialized"")\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Validation loss ---\n val_results = {}\n if dataloader_val and step % args.val_interval == 
0:\n rng, _rng_mask_val = jax.random.split(rng, 2)\n print(""Calculating validation metrics..."")\n val_metrics, val_gt_batch, val_recon, val_full_frame = (\n calculate_validation_metrics(\n dataloader_val, optimizer.model, _rng_mask_val\n )\n )\n print(f""Step {step}, validation loss: {val_metrics['val_loss']}"")\n val_results = {\n ""metrics"": val_metrics,\n ""gt_batch"": val_gt_batch,\n ""recon"": val_recon,\n ""full_frame"": val_full_frame,\n }\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n log_dict = {""loss"": loss, ""step"": step, **metrics}\n if val_results:\n log_dict.update(val_results[""metrics""])\n wandb.log(log_dict)\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if val_results:\n val_results[""gt_seq_val""] = (\n val_results[""gt_batch""][""videos""][0].astype(jnp.float32)\n / 255.0\n )\n val_results[""recon_seq_val""] = val_results[""recon""].clip(0, 1)\n val_comparison_seq = jnp.concatenate(\n (val_results[""gt_seq_val""], val_results[""recon_seq_val""]),\n axis=1,\n )\n val_results[""val_comparison_seq""] = einops.rearrange(\n val_comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n val_results[""full_frame_seq_val""] = val_results[""full_frame""][\n 0\n ].clip(0, 1)\n val_results[""val_full_frame_comparison_seq""] = jnp.concatenate(\n (\n val_results[""gt_seq_val""],\n val_results[""full_frame_seq_val""],\n ),\n axis=1,\n )\n val_results[""val_full_frame_comparison_seq""] = einops.rearrange(\n val_results[""val_full_frame_comparison_seq""] * 255,\n ""t h w c -> h (t w) c"",\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n if val_results:\n log_images.update(\n dict(\n val_image=wandb.Image(\n np.asarray(\n val_results[""gt_seq_val""][args.seq_len - 1]\n )\n ),\n val_recon=wandb.Image(\n np.asarray(\n val_results[""recon_seq_val""][\n args.seq_len - 1\n ]\n )\n ),\n val_true_vs_recon=wandb.Image(\n np.asarray(\n val_results[""val_comparison_seq""].astype(\n np.uint8\n )\n )\n ),\n val_full_frame=wandb.Image(\n np.asarray(\n val_results[""full_frame_seq_val""][\n args.seq_len - 1\n ]\n )\n ),\n val_true_vs_full_frame=wandb.Image(\n np.asarray(\n val_results[\n ""val_full_frame_comparison_seq""\n ].astype(np.uint8)\n )\n ),\n )\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n if args.val_data_dir:\n ckpt_manager_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n ),\n val_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n val_iterator # type: ignore\n ),\n )\n else:\n ckpt_manager_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n 
train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n ),\n )\n checkpoint_manager.save(step, args=ckpt_manager_args)\n",python,content +444,1375978,"train_dynamics.py",9716,0,"",python,selection_mouse +445,1387035,"train_dynamics.py",9302,0,"",python,selection_command +446,1388432,"train_dynamics.py",9301,0,"",python,selection_mouse +447,1389456,"train_dynamics.py",9301,1,"",python,content +448,1395274,"train_dynamics.py",9535,0,"",python,selection_command +449,1398311,"train_dynamics.py",9391,0,"",python,selection_command +450,1399723,"TERMINAL",0,0,"Total memory size: 25.5 GB, Output size: 0.9 GB, Temp size: 24.6 GB, Argument size: 0.9 GB, Host temp size: 0.0 GB.\r\nFLOPs: 1.198e+12, Bytes: 7.213e+11 (671.8 GB), Intensity: 1.7 FLOPs/byte\r\nStarting training from step 0...\r\n",,terminal_output +451,1399950,"train_dynamics.py",9558,0,"",python,selection_mouse +452,1399955,"train_dynamics.py",9557,0,"",python,selection_command +453,1400093,"TERMINAL",0,0,"\r\nMemstats: After params initialized:\r\n\tUsing (GB) 1.14 / 38.7 (2.945736%) on cuda:0\r\n",,terminal_output +454,1401174,"train_dynamics.py",9301,0," ",python,content +455,1401808,"TERMINAL",0,0,"Step 0, loss: 17.954784393310547\r\n",,terminal_output +456,1402011,"train_dynamics.py",9300,0,"",python,selection_command +457,1409024,"TERMINAL",0,0,"python",,terminal_focus +458,1409779,"TERMINAL",0,0,"^D\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output +459,1428777,"TERMINAL",0,0,"srun",,terminal_focus +460,1431527,"TERMINAL",0,0,"Step 1, loss: 6.460620403289795\r\n",,terminal_output +461,1433553,"TERMINAL",0,0,"Step 2, loss: 14.903913497924805\r\n",,terminal_output +462,1435211,"TERMINAL",0,0,"Step 3, loss: 9.082958221435547\r\n",,terminal_output +463,1437159,"TERMINAL",0,0,"Step 4, loss: 11.08510971069336\r\n",,terminal_output +464,1439103,"TERMINAL",0,0,"Step 5, loss: 9.383418083190918\r\n",,terminal_output +465,1440939,"TERMINAL",0,0,"Step 6, loss: 9.63429069519043\r\n",,terminal_output +466,1442801,"TERMINAL",0,0,"Step 7, loss: 7.117089748382568\r\n",,terminal_output +467,1444636,"TERMINAL",0,0,"Step 8, loss: 3.8622961044311523\r\n",,terminal_output +468,1446373,"TERMINAL",0,0,"Step 9, loss: 4.703470706939697\r\nCalculating validation metrics...\r\n",,terminal_output +469,1459796,"TERMINAL",0,0,"2025-09-15 20:26:37.787885: E external/xla/xla/service/slow_operation_alarm.cc:73] Constant folding an instruction is taking > 1s:\r\n\r\n %reduce.11381 = s32[] reduce(%broadcast.2238, %constant.526), dimensions={1,0,2}, to_apply=%region_296.11380, metadata={op_name=""jit(val_step)/jit(main)/reduce_sum"" source_file=""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/train_dynamics.py"" source_line=398}\r\n\r\nThis isn't necessarily a bug; constant-folding is inherently a trade-off between compilation time and speed at runtime. 
XLA has some guards that attempt to keep constant folding from taking too long, but fundamentally you'll always be able to come up with an input program that takes a long time.\r\n\r\nIf you'd like to file a bug, run with envvar XLA_FLAGS=--xla_dump_to=/tmp/foo and attach the results.\r\n",,terminal_output +470,1460813,"TERMINAL",0,0,"2025-09-15 20:26:38.762508: E external/xla/xla/service/slow_operation_alarm.cc:140] The operation took 1.974720008s\r\nConstant folding an instruction is taking > 1s:\r\n\r\n %reduce.11381 = s32[] reduce(%broadcast.2238, %constant.526), dimensions={1,0,2}, to_apply=%region_296.11380, metadata={op_name=""jit(val_step)/jit(main)/reduce_sum"" source_file=""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/train_dynamics.py"" source_line=398}\r\n\r\nThis isn't necessarily a bug; constant-folding is inherently a trade-off between compilation time and speed at runtime. XLA has some guards that attempt to keep constant folding from taking too long, but fundamentally you'll always be able to come up with an input program that takes a long time.\r\n\r\nIf you'd like to file a bug, run with envvar XLA_FLAGS=--xla_dump_to=/tmp/foo and attach the results.\r\n",,terminal_output +471,1461633,"TERMINAL",0,0,"2025-09-15 20:26:39.585289: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +472,1477518,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/coinrun/train_dyn_single_gpu.sh",,terminal_output +473,1486324,"TERMINAL",0,0," ",,terminal_output +474,1486788,"TERMINAL",0,0," ",,terminal_output +475,1487505,"TERMINAL",0,0,"          ",,terminal_output +476,1487642,"TERMINAL",0,0,"     ",,terminal_output +477,1488356,"TERMINAL",0,0,"                        ",,terminal_output +478,1488639,"TERMINAL",0,0,"       ",,terminal_output +479,1488842,"TERMINAL",0,0,"       ",,terminal_output +480,1491289,"TERMINAL",0,0,"bash",,terminal_focus +481,1492150,"TERMINAL",0,0,"Step 10, validation loss: 5.508398056030273\r\n",,terminal_output +482,1516687,"TERMINAL",0,0,"git commit -am ""fix typo in list unpacking"" && git push",,terminal_command +483,1516753,"TERMINAL",0,0,"]633;C",,terminal_output +484,1519776,"TERMINAL",0,0,"[INFO] Initializing environment for https://github.com/psf/black.\r\n",,terminal_output +485,1520415,"TERMINAL",0,0,"Step 10, loss: 4.491197109222412\r\n",,terminal_output +486,1522257,"TERMINAL",0,0,"Step 11, loss: 4.127790451049805\r\n",,terminal_output +487,1522474,"TERMINAL",0,0,"[INFO] Installing environment for https://github.com/psf/black.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\n",,terminal_output +488,1523999,"TERMINAL",0,0,"Step 12, loss: 3.6005430221557617\r\n",,terminal_output +489,1526354,"TERMINAL",0,0,"Step 13, loss: 2.945164442062378\r\n",,terminal_output +490,1528189,"TERMINAL",0,0,"Step 14, loss: 2.6941239833831787\r\n",,terminal_output +491,1529982,"TERMINAL",0,0,"Step 15, loss: 3.138007879257202\r\n",,terminal_output +492,1531876,"TERMINAL",0,0,"Step 16, loss: 3.5381312370300293\r\n",,terminal_output +493,1533730,"TERMINAL",0,0,"Step 17, loss: 3.5429773330688477\r\n",,terminal_output +494,1535522,"TERMINAL",0,0,"Step 18, loss: 2.5871028900146484\r\n",,terminal_output +495,1537297,"TERMINAL",0,0,"Step 19, loss: 
2.674548625946045\r\nCalculating validation metrics...\r\n",,terminal_output +496,1545936,"TERMINAL",0,0,"Step 20, validation loss: 2.8841030597686768\r\n",,terminal_output +497,1547746,"TERMINAL",0,0,"Step 20, loss: 2.60347056388855\r\n",,terminal_output +498,1549486,"TERMINAL",0,0,"Step 21, loss: 2.256122350692749\r\n",,terminal_output +499,1549585,"TERMINAL",0,0,"black....................................................................",,terminal_output +500,1550321,"TERMINAL",0,0,"Failed\r\n- hook id: black\r\n- files were modified by this hook\r\n\r\nreformatted models/dynamics.py\r\n\r\nAll done! ✨ 🍰 ✨\r\n1 file reformatted.\r\n\r\n",,terminal_output +501,1550348,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine",,terminal_output +502,1551742,"TERMINAL",0,0,"Step 22, loss: 2.387242317199707\r\n",,terminal_output +503,1553585,"TERMINAL",0,0,"Step 23, loss: 1.8972651958465576\r\n",,terminal_output +504,1555379,"TERMINAL",0,0,"Step 24, loss: 1.7989380359649658\r\n",,terminal_output +505,1557216,"TERMINAL",0,0,"Step 25, loss: 1.5865707397460938\r\n",,terminal_output +506,1559232,"TERMINAL",0,0,"Step 26, loss: 1.6155534982681274\r\n",,terminal_output +507,1560875,"TERMINAL",0,0,"Step 27, loss: 1.7484979629516602\r\n",,terminal_output +508,1562798,"TERMINAL",0,0,"Step 28, loss: 1.402345061302185\r\n",,terminal_output +509,1564649,"TERMINAL",0,0,"Step 29, loss: 1.3592332601547241\r\nCalculating validation metrics...\r\n",,terminal_output +510,1573140,"TERMINAL",0,0,"Step 30, validation loss: 1.7966995239257812\r\n",,terminal_output +511,1575407,"TERMINAL",0,0,"Step 30, loss: 1.2616956233978271\r\n",,terminal_output +512,1577240,"TERMINAL",0,0,"Step 31, loss: 1.450427532196045\r\n",,terminal_output +513,1579190,"TERMINAL",0,0,"Step 32, loss: 1.1439793109893799\r\n",,terminal_output +514,1580848,"TERMINAL",0,0,"Step 33, loss: 1.2927297353744507\r\n",,terminal_output +515,1582893,"TERMINAL",0,0,"Step 34, loss: 1.1497232913970947\r\n",,terminal_output +516,1584608,"TERMINAL",0,0,"Step 35, loss: 0.9348832964897156\r\n",,terminal_output +517,1586448,"TERMINAL",0,0,"Step 36, loss: 0.9615083336830139\r\n",,terminal_output +518,1588195,"TERMINAL",0,0,"Step 37, loss: 1.0419937372207642\r\n",,terminal_output +519,1590047,"TERMINAL",0,0,"Step 38, loss: 1.0958672761917114\r\n",,terminal_output +520,1591887,"TERMINAL",0,0,"Step 39, loss: 1.0305054187774658\r\nCalculating validation metrics...\r\n",,terminal_output +521,1600391,"TERMINAL",0,0,"Step 40, validation loss: 1.2961922883987427\r\n",,terminal_output +522,1602207,"TERMINAL",0,0,"Step 40, loss: 0.9020775556564331\r\n",,terminal_output +523,1604035,"TERMINAL",0,0,"Step 41, loss: 1.0280429124832153\r\n",,terminal_output +524,1605869,"TERMINAL",0,0,"Step 42, loss: 0.9608640074729919\r\n",,terminal_output +525,1607726,"TERMINAL",0,0,"Step 43, loss: 0.9851916432380676\r\n",,terminal_output +526,1609700,"TERMINAL",0,0,"Step 44, loss: 1.0829013586044312\r\n",,terminal_output +527,1611427,"TERMINAL",0,0,"Step 45, loss: 0.8061583042144775\r\n",,terminal_output +528,1613283,"TERMINAL",0,0,"Step 46, loss: 1.0005710124969482\r\n",,terminal_output +529,1615139,"TERMINAL",0,0,"Step 47, loss: 0.8929821252822876\r\n",,terminal_output +530,1616879,"TERMINAL",0,0,"Step 48, loss: 0.8713186383247375\r\n",,terminal_output +531,1618908,"TERMINAL",0,0,"Step 49, loss: 0.9584136605262756\r\nCalculating validation metrics...\r\n",,terminal_output +532,1627822,"TERMINAL",0,0,"Step 50, validation loss: 1.4749484062194824\r\n",,terminal_output 
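The validation steps logged here exercise `pred_full_frame`, which `DynamicsCausal` implements as a greedy token-by-token decode of the last frame inside `jax.lax.scan`; the XLA constant-folding warnings earlier in this session come from that scanned `val_step`. A simplified sketch of the scan loop, with a toy `logits_fn` standing in for the transformer (names and shapes are illustrative):

```python
# Simplified sketch of the greedy last-frame decode run under jax.lax.scan
# in DynamicsCausal's pred_full_frame branch. logits_fn is a toy stand-in.
import jax
import jax.numpy as jnp

B, T, N, V = 2, 3, 4, 7
tokens = jnp.zeros((B, T, N), dtype=jnp.int32)

def logits_fn(tokens):
    # Placeholder for the transformer: tokens -> per-position logits (B, T, N, V).
    return jnp.ones((B, T, N, V)) + tokens[..., None]

def decode_step(carry, n):
    tokens, logits_out = carry
    step_logits = logits_fn(tokens)[:, -1, n, :]     # logits for patch n of the last frame
    logits_out = logits_out.at[:, -1, n].set(step_logits)
    sampled = jnp.argmax(step_logits, axis=-1)       # greedy choice
    tokens = tokens.at[:, -1, n].set(sampled)        # feed the prediction back in
    return (tokens, logits_out), None

init = (tokens, jnp.zeros((B, T, N, V)))
(tokens, logits_out), _ = jax.lax.scan(decode_step, init, jnp.arange(N))
```

Each scan iteration re-runs the full forward pass, which is why full-frame validation is markedly slower than the single-pass teacher-forced evaluation.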
+533,1630536,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run coinrun-dyn-dev-3497227 at: https://wandb.ai/instant-uv/jafar/runs/vbquohe9\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/wandb/run-20250915_202422-vbquohe9/logs\r\n",,terminal_output +534,1631982,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 5 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +535,1632343,"TERMINAL",0,0,"]0;tum_cte0515@hkn0403:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0403 jasmine]$ ",,terminal_output diff --git a/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-60e09318-8e92-415d-8aa8-e2e7a22c37501750853311441-2025_06_25-14.09.13.696/source.csv b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-60e09318-8e92-415d-8aa8-e2e7a22c37501750853311441-2025_06_25-14.09.13.696/source.csv new file mode 100644 index 0000000000000000000000000000000000000000..91e39851c0231e164bc07360e77b4fd4176ff419 --- /dev/null +++ b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-60e09318-8e92-415d-8aa8-e2e7a22c37501750853311441-2025_06_25-14.09.13.696/source.csv @@ -0,0 +1,229 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +2,306,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"2:09:13 PM [info] Activating crowd-code\n2:09:13 PM [info] Recording started\n2:09:13 PM [info] Initializing git provider using file system watchers...\n2:09:13 PM [info] Git repository found\n2:09:13 PM [info] Git provider initialized successfully\n",Log,tab +3,378,"extension-output-pdoom-org.crowd-code-#1-crowd-code",245,0,"2:09:13 PM [info] Initial git state: [object Object]\n",Log,content +4,3777,"TERMINAL",0,0,"/bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt",,terminal_command +5,3828,"TERMINAL",0,0,"]633;E;2025-06-25 14:09:17 /bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt;3c9c3e49-ccb4-4916-b6c3-a5a58281974f]633;C",,terminal_output +6,3887,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:/hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash]633;D;0",,terminal_output +7,16698,"train_tokenizer.py",0,0,"from dataclasses import dataclass\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom 
utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(entity=args.entity, project=args.project, group=""debug"", config=args)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n 
num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer__\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n # for videos in dataloader:\n # npy_path = ""overfit_dir/single_sample_corner.npy""\n npy_path = ""overfit_dir/single_batch_12_elems.npy""\n videos = np.load(npy_path)\n print(""batch shape: "", videos.shape)\n while(True):\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log and jax.process_index() == 0:\n if step % args.log_interval == 0:\n wandb.log({""loss"": loss, ""step"": step, **metrics})\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= 
args.num_steps:\n break\n",python,tab +8,54597,"train_tokenizer.py",1174,0,"",python,selection_mouse +9,54730,"train_tokenizer.py",1170,16,"codebook_dropout",python,selection_mouse +10,56327,"train_tokenizer.py",1102,0,"",python,selection_mouse +11,57219,"train_tokenizer.py",1149,0,"",python,selection_mouse +12,57343,"train_tokenizer.py",1145,7,"dropout",python,selection_mouse +13,58088,"train_tokenizer.py",1104,0,"",python,selection_mouse +14,58221,"train_tokenizer.py",1098,10,"num_blocks",python,selection_mouse +15,59033,"train_tokenizer.py",1150,0,"",python,selection_mouse +16,59168,"train_tokenizer.py",1145,7,"dropout",python,selection_mouse +17,71946,"train_tokenizer.py",808,0,"",python,selection_mouse +18,72595,"train_tokenizer.py",732,0,"",python,selection_mouse +19,73311,"train_tokenizer.py",659,0,"",python,selection_mouse +20,73789,"train_tokenizer.py",678,0,"",python,selection_mouse +21,74398,"train_tokenizer.py",654,0,"",python,selection_mouse +22,75237,"train_tokenizer.py",655,0,"",python,selection_mouse +23,76163,"train_tokenizer.py",678,0,"",python,selection_mouse +24,76767,"train_tokenizer.py",705,0,"",python,selection_mouse +25,78023,"train_tokenizer.py",732,0,"",python,selection_command +26,78162,"train_tokenizer.py",759,0,"",python,selection_command +27,78405,"train_tokenizer.py",804,0,"",python,selection_command +28,78627,"train_tokenizer.py",759,0,"",python,selection_command +29,78813,"train_tokenizer.py",732,0,"",python,selection_command +30,78975,"train_tokenizer.py",705,0,"",python,selection_command +31,79122,"train_tokenizer.py",677,0,"",python,selection_command +32,79276,"train_tokenizer.py",655,0,"",python,selection_command +33,79671,"train_tokenizer.py",637,0,"",python,selection_command +34,79982,"train_tokenizer.py",655,0,"",python,selection_command +35,80236,"train_tokenizer.py",637,0,"",python,selection_command +36,80440,"train_tokenizer.py",608,0,"",python,selection_command +37,80806,"train_tokenizer.py",637,0,"",python,selection_command +38,81103,"train_tokenizer.py",608,0,"",python,selection_command +39,81302,"train_tokenizer.py",591,0,"",python,selection_command +40,85692,"train_lam.py",0,0,"from dataclasses import dataclass\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_resolution: int = 64\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n batch_size: int = 36\n vq_beta: float = 0.25\n min_lr: float = 3e-6\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n vq_reset_thresh: int = 50\n # LAM\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 6\n patch_size: int = 16\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.0\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n\n\nargs = 
tyro.cli(Args)\n\n\ndef lam_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n gt_future_frames = inputs[""videos""][:, 1:]\n mse = jnp.square(gt_future_frames - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = gt_future_frames.clip(0, 1).reshape(-1, *gt_future_frames.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n count_fn = jax.vmap(lambda i: (outputs[""indices""] == i).sum())\n index_counts = count_fn(jnp.arange(args.num_latents))\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=(index_counts != 0).mean(),\n )\n return loss, (outputs[""recon""], index_counts, metrics)\n\n\n@jax.jit\ndef train_step(state, inputs, action_last_active):\n # --- Update model ---\n rng, inputs[""rng""] = jax.random.split(inputs[""rng""])\n grad_fn = jax.value_and_grad(lam_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, idx_counts, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n\n # --- Reset inactive latent actions ---\n codebook = state.params[""params""][""vq""][""codebook""]\n num_codes = len(codebook)\n active_codes = idx_counts != 0.0\n action_last_active = jnp.where(active_codes, 0, action_last_active + 1)\n p_code = active_codes / active_codes.sum()\n reset_idxs = jax.random.choice(rng, num_codes, shape=(num_codes,), p=p_code)\n do_reset = action_last_active >= args.vq_reset_thresh\n new_codebook = jnp.where(\n jnp.expand_dims(do_reset, -1), codebook[reset_idxs], codebook\n )\n state.params[""params""][""vq""][""codebook""] = new_codebook\n action_last_active = jnp.where(do_reset, 0, action_last_active)\n return state, loss, recon, action_last_active, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n \n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(entity=args.entity, project=args.project, group=""debug"", config=args)\n\n # --- Initialize model ---\n lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n # Track when each action was last sampled\n action_last_active = jnp.zeros(args.num_latents)\n image_shape = (args.image_resolution, args.image_resolution, args.image_channels)\n rng, _rng = jax.random.split(rng)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape), dtype=jnp.float32\n ),\n rng=_rng,\n )\n rng, _rng = 
jax.random.split(rng)\n init_params = lam.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=lam.apply, params=init_params, tx=tx)\n \n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=('data',))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n action_last_active = jax.device_put(action_last_active, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer().restore(args.checkpoint, item=restore_target, restore_args=restore_args)[""model""].params[""params""]\n )\n # Assume checkpoint is of the form tokenizer__\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files, args.seq_len, args.batch_size, *image_shape\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n # for videos in dataloader:\n # npy_path = ""overfit_dir/single_sample_corner.npy""\n npy_path = ""overfit_dir/single_batch_12_elems.npy""\n videos = np.load(npy_path)\n print(""batch shape: "", videos.shape)\n while(True):\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n \n videos_sharding = NamedSharding(mesh, PartitionSpec('data', None, None, None, None))\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n \n inputs = dict(videos=videos, rng=_rng)\n train_state, loss, recon, action_last_active, metrics = train_step(\n train_state, inputs, action_last_active\n )\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log and jax.process_index() == 0:\n if step % args.log_interval == 0:\n wandb.log({""loss"": loss, ""step"": step, **metrics})\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0][1:]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""lam_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +41,89131,"train_lam.py",889,0,"",python,selection_mouse +42,89262,"train_lam.py",889,1," ",python,selection_mouse +43,89513,"train_lam.py",889,3," 3e",python,selection_mouse 
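Both training scripts captured above optimize the same VQ-VAE objective (reconstruction MSE plus a codebook term and a vq_beta-weighted commitment term, built with stop-gradients), but train_lam.py additionally resets latent-action codes that fall out of use. Below is a minimal, self-contained sketch of that reset rule with assumed shapes and hypothetical names, not the recorded train_lam.py code: a code that has been inactive for vq_reset_thresh consecutive steps is overwritten by a code resampled from the currently active entries.

import jax
import jax.numpy as jnp

def reset_inactive_codes(rng, codebook, idx_counts, last_active, reset_thresh):
    # Track how many steps each code has gone unsampled.
    active = idx_counts != 0
    last_active = jnp.where(active, 0, last_active + 1)
    # Draw replacement rows uniformly from the currently active codes.
    num_codes = codebook.shape[0]
    p = active / active.sum()
    reset_idxs = jax.random.choice(rng, num_codes, shape=(num_codes,), p=p)
    do_reset = last_active >= reset_thresh
    new_codebook = jnp.where(do_reset[:, None], codebook[reset_idxs], codebook)
    return new_codebook, jnp.where(do_reset, 0, last_active)

rng = jax.random.PRNGKey(0)
codebook = jax.random.normal(rng, (6, 32))     # num_latents x latent_dim
idx_counts = jnp.array([10, 0, 3, 0, 5, 1])    # per-code usage this step
last_active = jnp.array([0, 49, 0, 12, 0, 0])  # code 1 is about to cross 50
codebook, last_active = reset_inactive_codes(
    rng, codebook, idx_counts, last_active, reset_thresh=50
)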
+44,89513,"train_lam.py",889,28," 3e-6\n max_lr: float = 3e",python,selection_mouse +45,89514,"train_lam.py",889,29," 3e-6\n max_lr: float = 3e-",python,selection_mouse +46,89516,"train_lam.py",889,30," 3e-6\n max_lr: float = 3e-5",python,selection_mouse +47,89977,"train_lam.py",919,0,"",python,selection_mouse +48,89990,"train_lam.py",918,0,"",python,selection_command +49,90207,"train_lam.py",918,1,"5",python,selection_mouse +50,90209,"train_lam.py",919,0,"",python,selection_command +51,90357,"train_lam.py",917,2,"-5",python,selection_mouse +52,90391,"train_lam.py",915,4,"3e-5",python,selection_mouse +53,90391,"train_lam.py",913,6,"= 3e-5",python,selection_mouse +54,90415,"train_lam.py",912,7," = 3e-5",python,selection_mouse +55,90416,"train_lam.py",907,12,"float = 3e-5",python,selection_mouse +56,90484,"train_lam.py",905,14,": float = 3e-5",python,selection_mouse +57,90485,"train_lam.py",919,17,"\n warmup_steps",python,selection_mouse +58,90502,"train_lam.py",919,3,"\n ",python,selection_mouse +59,90560,"train_lam.py",919,2,"\n ",python,selection_mouse +60,90561,"train_lam.py",919,1,"\n",python,selection_mouse +61,90692,"train_lam.py",895,24," max_lr: float = 3e-5",python,selection_mouse +62,91101,"train_lam.py",870,49," min_lr: float = 3e-6\n max_lr: float = 3e-5",python,selection_mouse +63,93646,"train_lam.py",908,0,"",python,selection_mouse +64,94468,"train_lam.py",919,0,"",python,selection_mouse +65,94484,"train_lam.py",918,0,"",python,selection_command +66,94920,"train_lam.py",919,0,"",python,selection_mouse +67,94934,"train_lam.py",918,0,"",python,selection_command +68,95123,"train_lam.py",918,1,"5",python,selection_mouse +69,95124,"train_lam.py",919,0,"",python,selection_command +70,95217,"train_lam.py",894,25,"\n max_lr: float = 3e-5",python,selection_mouse +71,95709,"train_lam.py",894,0,"",python,selection_mouse +72,95744,"train_lam.py",893,0,"",python,selection_command +73,96986,"train_tokenizer.py",0,0,"",python,tab +74,98278,"train_tokenizer.py",945,0,"",python,selection_mouse +75,98293,"train_tokenizer.py",944,0,"",python,selection_command +76,99392,"train_tokenizer.py",927,0,"",python,selection_mouse +77,100515,"train_lam.py",0,0,"",python,tab +78,101259,"train_lam.py",906,0,"",python,selection_mouse +79,101894,"train_lam.py",902,0,"",python,selection_mouse +80,103155,"train_tokenizer.py",0,0,"",python,tab +81,104768,"train_lam.py",0,0,"",python,tab +82,107673,"train_tokenizer.py",0,0,"",python,tab +83,112089,"train_tokenizer.py",942,0,"",python,selection_mouse +84,112664,"train_tokenizer.py",937,0,"",python,selection_command +85,115484,"train_tokenizer.py",930,0,"",python,selection_mouse +86,116924,"TERMINAL",0,0,"bash",,terminal_focus +87,118725,"train_lam.py",0,0,"",python,tab +88,139385,"train_lam.py",741,0,"",python,selection_mouse +89,140018,"train_lam.py",712,0,"",python,selection_mouse +90,169801,"train_tokenizer.py",0,0,"",python,tab +91,171256,"train_lam.py",0,0,"",python,tab +92,193569,"train_lam.py",1101,0,"",python,selection_mouse +93,194202,"train_lam.py",1049,0,"",python,selection_mouse +94,194931,"train_lam.py",1074,0,"",python,selection_mouse +95,198107,"train_lam.py",1160,0,"",python,selection_mouse +96,198120,"train_lam.py",1159,0,"",python,selection_command +97,204117,"train_lam.py",1177,0,"",python,selection_mouse +98,204826,"train_lam.py",1250,0,"",python,selection_mouse +99,209364,"genie.py",0,0,"from typing import Dict, Any\n\nimport optax\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nfrom jax import NamedSharding\nfrom 
flax.training.train_state import TrainState\nfrom flax.training import orbax_utils\nfrom orbax.checkpoint import PyTreeCheckpointer\n\nfrom models.dynamics import DynamicsMaskGIT\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\n\nclass Genie(nn.Module):\n """"""Genie model""""""\n\n # --- Tokenizer ---\n in_dim: int\n tokenizer_dim: int\n latent_patch_dim: int\n num_patch_latents: int\n patch_size: int\n tokenizer_num_blocks: int\n tokenizer_num_heads: int\n # --- LAM ---\n lam_dim: int\n latent_action_dim: int\n num_latent_actions: int\n lam_patch_size: int\n lam_num_blocks: int\n lam_num_heads: int\n # --- Dynamics ---\n dyna_dim: int\n dyna_num_blocks: int\n dyna_num_heads: int\n dropout: float = 0.0\n mask_limit: float = 0.0\n\n def setup(self):\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n )\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n num_latents=self.num_patch_latents,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\n lam_outputs = self.lam.vq_encode(batch[""videos""], training=False)\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(tokenizer_outputs[""indices""]),\n latent_actions=jax.lax.stop_gradient(lam_outputs[""z_q""]),\n )\n outputs[""mask_rng""] = batch[""mask_rng""]\n dyna_outputs = self.dynamics(outputs, training)\n outputs.update(dyna_outputs)\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )\n return outputs\n\n @nn.compact\n def sample(\n self,\n batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def 
vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None\n\n\ndef restore_genie_components(train_state: TrainState, sharding: NamedSharding, inputs: Dict[str, jax.Array], rng: jax.Array, args):\n """"""Restore pre-trained Genie components""""""\n rng, _rng = jax.random.split(rng)\n \n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n num_blocks=args.tokenizer_num_blocks,\n num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n )\n dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n )\n tokenizer_init_params = dummy_tokenizer.init(_rng, inputs)\n lam_init_params = dummy_lam.init(_rng, inputs)\n\n # dummy values since we only use tx to initialize the dummy train states\n dummy_tx = optax.adamw(learning_rate=optax.constant_schedule(args.max_lr), b1=0.9, b2=0.9, weight_decay=1e-4)\n\n dummy_tokenizer_train_state = TrainState.create(apply_fn=dummy_tokenizer.apply, 
params=tokenizer_init_params, tx=dummy_tx)\n    dummy_lam_train_state = TrainState.create(apply_fn=dummy_lam.apply, params=lam_init_params, tx=dummy_tx)\n\n    def create_abstract_sharded_pytree(pytree_template, sharding_spec):\n        """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n        def map_fn(leaf_template):\n            if hasattr(leaf_template, 'shape') and hasattr(leaf_template, 'dtype'):\n                return jax.ShapeDtypeStruct(leaf_template.shape, leaf_template.dtype, sharding=sharding_spec)\n            return leaf_template\n        return jax.tree_util.tree_map(map_fn, pytree_template)\n\n    abstract_sharded_tokenizer_state = create_abstract_sharded_pytree(\n        dummy_tokenizer_train_state, sharding\n    )\n    abstract_sharded_lam_state = create_abstract_sharded_pytree(\n        dummy_lam_train_state, sharding\n    )\n\n    tokenizer_restore_target = {""model"": abstract_sharded_tokenizer_state}\n    lam_restore_target = {""model"": abstract_sharded_lam_state}\n\n    tokenizer_restore_args = orbax_utils.restore_args_from_target(tokenizer_restore_target)\n    lam_restore_args = orbax_utils.restore_args_from_target(lam_restore_target)\n\n    restored_tokenizer_params = PyTreeCheckpointer().restore(args.tokenizer_checkpoint, item=tokenizer_restore_target, restore_args=tokenizer_restore_args)[""model""].params[""params""]\n    restored_lam_params = PyTreeCheckpointer().restore(args.lam_checkpoint, item=lam_restore_target, restore_args=lam_restore_args)[""model""].params[""params""]\n    # Genie does not initialize all LAM modules, thus we omit those extra modules during restoration\n    # (f.srambical) FIXME: Currently, this is a small HBM memory crunch since the LAM's decoder is loaded into HBM and immediately discarded.\n    # A workaround would be to restore to host memory first, and only move the weights to HBM after pruning the decoder\n    restored_lam_params = {k: v for k, v in restored_lam_params.items() if k in train_state.params[""params""][""lam""]}\n    \n    train_state.params[""params""][""tokenizer""].update(\n        restored_tokenizer_params\n    )\n    train_state.params[""params""][""lam""].update(\n        restored_lam_params\n    )\n\n    return train_state",python,tab +100,224128,"train_tokenizer.py",0,0,"",python,tab +101,232884,"train_tokenizer.py",593,0,"",python,selection_mouse +102,233954,"train_tokenizer.py",610,0,"",python,selection_command +103,234109,"train_tokenizer.py",609,0,"",python,selection_command +104,234852,"train_tokenizer.py",638,0,"",python,selection_command +105,235504,"train_tokenizer.py",637,0,"",python,selection_command +106,236114,"train_tokenizer.py",655,0,"",python,selection_command +107,238529,"train_tokenizer.py",677,0,"",python,selection_command +108,239969,"train_tokenizer.py",705,0,"",python,selection_command +109,240418,"train_tokenizer.py",732,0,"",python,selection_command +110,241430,"train_tokenizer.py",759,0,"",python,selection_command +111,242624,"train_tokenizer.py",804,0,"",python,selection_command +112,242872,"train_tokenizer.py",829,0,"",python,selection_command +113,243177,"train_tokenizer.py",848,0,"",python,selection_command +114,243434,"train_tokenizer.py",874,0,"",python,selection_command +115,244074,"train_tokenizer.py",899,0,"",python,selection_command +116,245012,"train_tokenizer.py",924,0,"",python,selection_command +117,245894,"train_tokenizer.py",949,0,"",python,selection_command +118,246633,"train_tokenizer.py",979,0,"",python,selection_command +119,246887,"train_tokenizer.py",995,0,"",python,selection_command +120,249427,"train_tokenizer.py",1020,0,"",python,selection_command
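The MaskGITStep module recorded above drives both the per-step temperature and the token mask from one cosine term. An illustrative sketch (names assumed) of the schedule it implies: at decoding step s of S, cos(pi * (s + 1) / (2 * S)) is the fraction of the N frame tokens still treated as masked, so the number of committed tokens grows monotonically and reaches N at the final step.

import jax.numpy as jnp

def tokens_committed(step, steps, num_tokens):
    # Mirrors unmasked_ratio / num_unmasked_tokens in MaskGITStep.
    masked_ratio = jnp.cos(jnp.pi * (step + 1) / (steps * 2))
    return jnp.round(num_tokens * (1.0 - masked_ratio)).astype(int)

steps, num_tokens = 25, 360
print([int(tokens_committed(s, steps, num_tokens)) for s in range(steps)])
# masked_ratio hits 0 at the last step, so every token is committed.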
+121,249671,"train_tokenizer.py",1045,0,"",python,selection_command +122,250012,"train_tokenizer.py",1073,0,"",python,selection_command +123,250318,"train_tokenizer.py",1097,0,"",python,selection_command +124,251636,"train_tokenizer.py",1121,0,"",python,selection_command +125,251878,"train_tokenizer.py",1144,0,"",python,selection_command +126,252114,"train_tokenizer.py",1169,0,"",python,selection_command +127,253209,"train_tokenizer.py",1204,0,"",python,selection_command +128,253505,"train_tokenizer.py",1218,0,"",python,selection_command +129,253655,"train_tokenizer.py",1240,0,"",python,selection_command +130,254003,"train_tokenizer.py",1261,0,"",python,selection_command +131,254218,"train_tokenizer.py",1283,0,"",python,selection_command +132,254422,"train_tokenizer.py",1309,0,"",python,selection_command +133,254636,"train_tokenizer.py",1343,0,"",python,selection_command +134,254949,"train_tokenizer.py",1366,0,"",python,selection_command +135,255266,"train_tokenizer.py",1407,0,"",python,selection_command +136,256755,"train_lam.py",0,0,"",python,tab +137,258846,"train_lam.py",605,0,"",python,selection_mouse +138,259533,"train_lam.py",634,0,"",python,selection_command +139,260346,"train_lam.py",652,0,"",python,selection_command +140,260606,"train_lam.py",674,0,"",python,selection_command +141,262386,"train_lam.py",702,0,"",python,selection_command +142,262596,"train_lam.py",733,0,"",python,selection_command +143,262769,"train_lam.py",778,0,"",python,selection_command +144,262942,"train_lam.py",803,0,"",python,selection_command +145,263159,"train_lam.py",822,0,"",python,selection_command +146,263946,"train_lam.py",847,0,"",python,selection_command +147,268607,"train_lam.py",873,0,"",python,selection_command +148,269540,"train_lam.py",898,0,"",python,selection_command +149,269709,"train_lam.py",923,0,"",python,selection_command +150,269898,"train_lam.py",952,0,"",python,selection_command +151,270136,"train_lam.py",982,0,"",python,selection_command +152,270811,"train_lam.py",992,0,"",python,selection_command +153,270966,"train_lam.py",1017,0,"",python,selection_command +154,271109,"train_lam.py",1042,0,"",python,selection_command +155,271338,"train_lam.py",1067,0,"",python,selection_command +156,271860,"train_lam.py",1042,0,"",python,selection_command +157,272018,"train_lam.py",1017,0,"",python,selection_command +158,272182,"train_lam.py",992,0,"",python,selection_command +159,272554,"train_lam.py",982,0,"",python,selection_command +160,272808,"train_lam.py",992,0,"",python,selection_command +161,272986,"train_lam.py",1017,0,"",python,selection_command +162,273136,"train_lam.py",1042,0,"",python,selection_command +163,273803,"train_lam.py",1067,0,"",python,selection_command +164,274441,"train_lam.py",1092,0,"",python,selection_command +165,274983,"train_lam.py",1116,0,"",python,selection_command +166,275481,"train_lam.py",1139,0,"",python,selection_command +167,276203,"train_lam.py",1164,0,"",python,selection_command +168,276521,"train_lam.py",1198,0,"",python,selection_command +169,276748,"train_lam.py",1212,0,"",python,selection_command +170,276928,"train_lam.py",1234,0,"",python,selection_command +171,277100,"train_lam.py",1255,0,"",python,selection_command +172,277373,"train_lam.py",1277,0,"",python,selection_command +173,277589,"train_lam.py",1303,0,"",python,selection_command +174,277768,"train_lam.py",1337,0,"",python,selection_command +175,277955,"train_lam.py",1360,0,"",python,selection_command +176,280006,"train_dynamics.py",0,0,"from dataclasses import dataclass\nimport os\nimport 
time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom genie import Genie, restore_genie_components\nfrom models.tokenizer import TokenizerVQVAE\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_resolution: int = 64\n data_dir: str = ""data_tfrecords/coinrun""\n # Optimization\n batch_size: int = 36\n min_lr: float = 3e-6\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""dropout_rng""]}\n )\n mask = outputs[""mask""]\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n \n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log 
and jax.process_index() == 0:\n wandb.init(entity=args.entity, project=args.project, group=""debug"", config=args)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_resolution, args.image_resolution, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape), dtype=jnp.float32\n ),\n action=jnp.zeros((per_device_batch_size_for_init, args.seq_len), dtype=jnp.float32),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=('data',))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Restore checkpoint ---\n train_state = restore_genie_components(train_state, replicated_sharding, dummy_inputs, rng, args)\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files, args.seq_len, args.batch_size, *image_shape\n )\n step = 0\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _mask_rng = jax.random.split(rng, 3)\n \n videos_sharding = NamedSharding(mesh, PartitionSpec('data', None, None, None, None))\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n \n inputs = dict(\n videos=videos,\n dropout_rng=_rng,\n mask_rng=_mask_rng,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log and jax.process_index() == 0:\n if step % args.log_interval == 0:\n wandb.log({""loss"": loss, ""step"": step, **metrics})\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len-1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len-1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step 
% args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""genie_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +177,282124,"train_dynamics.py",678,0,"",python,selection_mouse +178,283542,"train_dynamics.py",707,0,"",python,selection_command +179,283845,"train_dynamics.py",725,0,"",python,selection_command +180,284159,"train_dynamics.py",747,0,"",python,selection_command +181,284432,"train_dynamics.py",775,0,"",python,selection_command +182,284673,"train_dynamics.py",806,0,"",python,selection_command +183,284942,"train_dynamics.py",851,0,"",python,selection_command +184,288617,"train_dynamics.py",870,0,"",python,selection_command +185,288964,"train_dynamics.py",895,0,"",python,selection_command +186,289260,"train_dynamics.py",920,0,"",python,selection_command +187,289581,"train_dynamics.py",945,0,"",python,selection_command +188,289928,"train_dynamics.py",974,0,"",python,selection_command +189,290265,"train_dynamics.py",990,0,"",python,selection_command +190,290548,"train_dynamics.py",1019,0,"",python,selection_command +191,290768,"train_dynamics.py",1050,0,"",python,selection_command +192,290986,"train_dynamics.py",1084,0,"",python,selection_command +193,291200,"train_dynamics.py",1108,0,"",python,selection_command +194,291486,"train_dynamics.py",1142,0,"",python,selection_command +195,291807,"train_dynamics.py",1175,0,"",python,selection_command +196,292115,"train_dynamics.py",1210,0,"",python,selection_command +197,292440,"train_dynamics.py",1220,0,"",python,selection_command +198,292779,"train_dynamics.py",1243,0,"",python,selection_command +199,293023,"train_dynamics.py",1275,0,"",python,selection_command +200,293493,"train_dynamics.py",1307,0,"",python,selection_command +201,293723,"train_dynamics.py",1336,0,"",python,selection_command +202,293942,"train_dynamics.py",1364,0,"",python,selection_command +203,294104,"train_dynamics.py",1391,0,"",python,selection_command +204,294534,"train_dynamics.py",1420,0,"",python,selection_command +205,295131,"train_dynamics.py",1435,0,"",python,selection_command +206,295399,"train_dynamics.py",1459,0,"",python,selection_command +207,295775,"train_dynamics.py",1489,0,"",python,selection_command +208,296068,"train_dynamics.py",1517,0,"",python,selection_command +209,296297,"train_dynamics.py",1542,0,"",python,selection_command +210,296520,"train_dynamics.py",1570,0,"",python,selection_command +211,296786,"train_dynamics.py",1584,0,"",python,selection_command +212,297101,"train_dynamics.py",1606,0,"",python,selection_command +213,297752,"train_dynamics.py",1627,0,"",python,selection_command +214,298091,"train_dynamics.py",1649,0,"",python,selection_command +215,298577,"train_dynamics.py",1675,0,"",python,selection_command +216,298881,"train_dynamics.py",1709,0,"",python,selection_command +217,299238,"train_dynamics.py",1732,0,"",python,selection_command +218,299535,"train_dynamics.py",1773,0,"",python,selection_command +219,300824,"train_dynamics.py",1732,0,"",python,selection_command +220,301213,"train_dynamics.py",1773,0,"",python,selection_command +221,373163,"train_dynamics.py",696,0,"",python,selection_mouse +222,373316,"train_dynamics.py",696,7,"200_000",python,selection_mouse +223,374057,"train_dynamics.py",703,0,"",python,selection_mouse 
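dynamics_loss_fn in train_dynamics.py above computes a cross-entropy for every token but averages it over the masked positions only, and measures accuracy the same way. A minimal sketch with assumed shapes (B, T, N, vocab):

import jax
import jax.numpy as jnp
import optax

rng = jax.random.PRNGKey(0)
logits = jax.random.normal(rng, (2, 4, 16, 1024))     # (B, T, N, vocab)
tokens = jax.random.randint(rng, (2, 4, 16), 0, 1024)
mask = jax.random.bernoulli(rng, 0.5, (2, 4, 16))     # which tokens were masked

ce = optax.softmax_cross_entropy_with_integer_labels(logits, tokens)
masked_ce = (mask * ce).sum() / mask.sum()            # loss on masked tokens only
masked_acc = (mask * (logits.argmax(-1) == tokens)).sum() / mask.sum()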
+224,374071,"train_dynamics.py",702,0,"",python,selection_command +225,374661,"train_dynamics.py",698,0,"",python,selection_mouse +226,374821,"train_dynamics.py",696,7,"200_000",python,selection_mouse +227,377217,"train_lam.py",0,0,"",python,tab +228,643666,"train_lam.py",978,0,"",python,selection_mouse +229,643667,"train_lam.py",977,0,"",python,selection_command diff --git a/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-62f06a9f-ec1c-4922-b992-72581ba3451c1751618040284-2025_07_04-10.34.35.357/source.csv b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-62f06a9f-ec1c-4922-b992-72581ba3451c1751618040284-2025_07_04-10.34.35.357/source.csv new file mode 100644 index 0000000000000000000000000000000000000000..942678f0898a4edaf9b57fab51e361e6d4d519cb --- /dev/null +++ b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-62f06a9f-ec1c-4922-b992-72581ba3451c1751618040284-2025_07_04-10.34.35.357/source.csv @@ -0,0 +1,4277 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +1,5,"scripts_horeka/batchsize_scaling/adjusted_lr/train_tokenizer_2_nodes.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=05:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=logs/logs_training/%x_%j.log\n#SBATCH --error=logs/logs_training/%x_%j.log\n#SBATCH --job-name=train_tokenizer_batch_size_scaling_2_node\n#SBATCH --mem=100G\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=250 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,tab +2,111,"tasks",0,0,"",Log,tab +3,115,"scripts_horeka/batchsize_scaling/adjusted_lr/train_tokenizer_2_nodes.sbatch",0,0,"",shellscript,tab +4,1467,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"10:34:35 AM [info] Activating crowd-code\n10:34:35 AM [info] Recording started\n10:34:35 AM [info] Initializing git provider using file system watchers...\n10:34:35 AM [info] Git repository found\n10:34:35 AM [info] Git provider initialized successfully\n10:34:35 AM [info] Initial git state: [object Object]\n",Log,tab +5,3886,"TERMINAL",0,0,"/bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt",,terminal_command +6,3935,"TERMINAL",0,0,"]633;E;2025-07-04 10:34:39 /bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py 
/hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt;5cb687c4-0916-47f4-812e-f9882b6b77aa]633;C",,terminal_output +7,3969,"TERMINAL",0,0,"]0;tum_cte0515@hkn1993:/hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash]633;D;0",,terminal_output +8,10514,"TERMINAL",0,0,"bash",,terminal_focus +9,10518,"Untitled-1",0,0,"",plaintext,tab +10,16264,"TERMINAL",0,0,"idling",,terminal_command +11,16345,"TERMINAL",0,0,"]633;E;2025-07-04 10:34:51 idling;dd830a57-5de1-42ee-9f5b-a467117add9f]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1993.localdomain: Fri Jul 4 10:34:51 2025Partition dev_cpuonly: 10 nodes idle\rPartition cpuonly: 32 nodes idle\rPartition dev_accelerated:\t 1 nodes idle\rPartition accelerated: 20 nodes idle\rPartition dev_accelerated-h100 :\t 1 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 7 nodes idle",,terminal_output +12,17381,"TERMINAL",0,0,"2",,terminal_output +13,17829,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar",,terminal_output +14,40561,"TERMINAL",0,0,"salloc --time=10:00:00 --partition=accelerated --nodes=4 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5 ",,terminal_command +15,40590,"TERMINAL",0,0,"]633;E;2025-07-04 10:35:15 salloc --time=10:00:00 --partition=accelerated --nodes=4 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5 ;dd830a57-5de1-42ee-9f5b-a467117add9f]633;C",,terminal_output +16,40640,"TERMINAL",0,0,"salloc: Granted job allocation 3316923\r\n",,terminal_output +17,40747,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output +18,47453,"TERMINAL",0,0,"",,terminal_focus +19,66668,"TERMINAL",0,0,"salloc --time=10:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5 ",,terminal_command +20,66725,"TERMINAL",0,0,"]633;E;2025-07-04 10:35:41 salloc --time=10:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5 ;db8148eb-0cb4-400c-a5f8-445792cd5508]633;Csalloc: Granted job allocation 3316924\r\n",,terminal_output +21,66835,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output +22,67790,"TERMINAL",0,0,"salloc: Nodes hkn[0625-0628] are ready for job\r\n",,terminal_output +23,69699,"TERMINAL",0,0,"srun",,terminal_focus +24,69983,"TERMINAL",0,0,"]0;tum_cte0515@hkn0625:~/Projects/jafar[?2004h[tum_cte0515@hkn0625 jafar]$ ",,terminal_output +25,71391,"TERMINAL",0,0,"[?25lso[?25h[?25lo[?25h",,terminal_output +26,71588,"TERMINAL",0,0,"[?25lu[?25h[?25lr[?25h",,terminal_output +27,71770,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +28,71950,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +29,72488,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +30,72601,"TERMINAL",0,0,"[?25le[?25h[?25l [?25h",,terminal_output +31,72837,"TERMINAL",0,0,"[?25l.v[?25h",,terminal_output +32,73250,"TERMINAL",0,0,"env/",,terminal_output +33,73515,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +34,73641,"TERMINAL",0,0,"in/",,terminal_output +35,73895,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +36,74009,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +37,74190,"TERMINAL",0,0,"tivate",,terminal_output +38,74481,"TERMINAL",0,0,"[?25l[?2004l\r]0;tum_cte0515@hkn0625:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0625 jafar]$ [?25h",,terminal_output 
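The 2-node sbatch script at the top of this session passes --min_lr=4.24e-4 and --max_lr=4.24e-4 for a global batch of 96, where the single-node train_tokenizer.py default is 3e-4 at batch 48. That value is consistent with square-root learning-rate scaling in the batch size, though the scripts themselves never state the rule:

import math
print(3e-4 * math.sqrt(96 / 48))  # 4.2426e-04, i.e. the script's 4.24e-4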
+39,93891,"TERMINAL",0,0,"salloc: Nodes hkn0733 are ready for job\r\n",,terminal_output +40,94930,"TERMINAL",0,0,"]0;tum_cte0515@hkn0733:~/Projects/jafar[?2004h[tum_cte0515@hkn0733 jafar]$ ",,terminal_output +41,710869,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_output +42,711146,"TERMINAL",0,0,"",,terminal_output +43,749976,"scripts_horeka/batchsize_scaling/adjusted_lr/train_tokenizer_2_nodes.sbatch",0,0,"",shellscript,tab +44,749978,"scripts_horeka/batchsize_scaling/adjusted_lr/train_tokenizer_2_nodes.sbatch",990,0,"",shellscript,selection_mouse +45,750070,"scripts_horeka/batchsize_scaling/adjusted_lr/train_tokenizer_2_nodes.sbatch",989,0,"",shellscript,selection_command +46,767669,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,0,"\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,tab +47,769697,"scripts_horeka/modelsize_scaling/lam/tester.sh",0,0,"\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,tab +48,770745,"scripts_horeka/modelsize_scaling/lam/tester.sh",828,0,"",shellscript,selection_mouse +49,771865,"scripts_horeka/modelsize_scaling/lam/tester.sh",456,0,"",shellscript,selection_mouse +50,772053,"scripts_horeka/modelsize_scaling/lam/tester.sh",456,1,"t",shellscript,selection_mouse +51,772139,"scripts_horeka/modelsize_scaling/lam/tester.sh",456,4,"toke",shellscript,selection_mouse +52,772140,"scripts_horeka/modelsize_scaling/lam/tester.sh",456,7,"tokeniz",shellscript,selection_mouse +53,772140,"scripts_horeka/modelsize_scaling/lam/tester.sh",456,9,"tokenizer",shellscript,selection_mouse +54,772185,"scripts_horeka/modelsize_scaling/lam/tester.sh",456,10,"tokenizer.",shellscript,selection_mouse +55,772539,"scripts_horeka/modelsize_scaling/lam/tester.sh",456,9,"tokenizer",shellscript,selection_mouse +56,772587,"scripts_horeka/modelsize_scaling/lam/tester.sh",456,8,"tokenize",shellscript,selection_mouse +57,773834,"scripts_horeka/modelsize_scaling/lam/tester.sh",456,9,"tokenizer",shellscript,selection_command 
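These sbatch/tester scripts drive the trainers through tyro, so every --flag they pass corresponds to a field on the trainer's Args dataclass. A minimal sketch of that mapping with a hypothetical two-field Args, not the recorded one:

from dataclasses import dataclass
import tyro

@dataclass
class Args:
    batch_size: int = 48
    min_lr: float = 3e-4

# Invoked as in the scripts, e.g.:
#   python train_tokenizer.py --batch_size=96 --min_lr=4.24e-4
args = tyro.cli(Args)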
+58,774533,"scripts_horeka/modelsize_scaling/lam/tester.sh",456,9,"",shellscript,content +59,775401,"scripts_horeka/modelsize_scaling/lam/tester.sh",456,0,"l",shellscript,content +60,775402,"scripts_horeka/modelsize_scaling/lam/tester.sh",457,0,"",shellscript,selection_keyboard +61,775511,"scripts_horeka/modelsize_scaling/lam/tester.sh",457,0,"a",shellscript,content +62,775512,"scripts_horeka/modelsize_scaling/lam/tester.sh",458,0,"",shellscript,selection_keyboard +63,775595,"scripts_horeka/modelsize_scaling/lam/tester.sh",458,0,"m",shellscript,content +64,775596,"scripts_horeka/modelsize_scaling/lam/tester.sh",459,0,"",shellscript,selection_keyboard +65,775869,"scripts_horeka/modelsize_scaling/lam/tester.sh",458,0,"",shellscript,selection_command +66,776646,"scripts_horeka/modelsize_scaling/lam/tester.sh",485,0,"",shellscript,selection_command +67,776814,"scripts_horeka/modelsize_scaling/lam/tester.sh",518,0,"",shellscript,selection_command +68,782301,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,tab +69,787735,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,0,"",shellscript,tab +70,788491,"scripts_horeka/modelsize_scaling/lam/tester.sh",0,0,"",shellscript,tab +71,790567,"scripts_horeka/modelsize_scaling/lam/tester.sh",790,0,"",shellscript,selection_mouse +72,790569,"scripts_horeka/modelsize_scaling/lam/tester.sh",789,0,"",shellscript,selection_command +73,809659,"scripts_horeka/modelsize_scaling/lam/tester.sh",768,0,"",shellscript,selection_mouse +74,809673,"scripts_horeka/modelsize_scaling/lam/tester.sh",767,0,"",shellscript,selection_command +75,810222,"scripts_horeka/modelsize_scaling/lam/tester.sh",768,0,"",shellscript,selection_mouse +76,810237,"scripts_horeka/modelsize_scaling/lam/tester.sh",767,0,"",shellscript,selection_command +77,810763,"scripts_horeka/modelsize_scaling/lam/tester.sh",790,0,"",shellscript,selection_mouse +78,810777,"scripts_horeka/modelsize_scaling/lam/tester.sh",789,0,"",shellscript,selection_command +79,811402,"scripts_horeka/modelsize_scaling/lam/tester.sh",682,0,"",shellscript,selection_mouse +80,812224,"scripts_horeka/modelsize_scaling/lam/tester.sh",649,0,"",shellscript,selection_mouse 
+81,813576,"scripts_horeka/modelsize_scaling/lam/tester.sh",648,0,"",shellscript,selection_command +82,814510,"scripts_horeka/modelsize_scaling/lam/tester.sh",648,1,"t",shellscript,selection_command +83,814739,"scripts_horeka/modelsize_scaling/lam/tester.sh",648,1,"t",shellscript,selection_command +84,815017,"scripts_horeka/modelsize_scaling/lam/tester.sh",648,2,"to",shellscript,selection_command +85,815541,"scripts_horeka/modelsize_scaling/lam/tester.sh",648,3,"tok",shellscript,selection_command +86,815546,"scripts_horeka/modelsize_scaling/lam/tester.sh",648,4,"toke",shellscript,selection_command +87,815617,"scripts_horeka/modelsize_scaling/lam/tester.sh",648,5,"token",shellscript,selection_command +88,815633,"scripts_horeka/modelsize_scaling/lam/tester.sh",648,6,"tokeni",shellscript,selection_command +89,815654,"scripts_horeka/modelsize_scaling/lam/tester.sh",648,7,"tokeniz",shellscript,selection_command +90,815748,"scripts_horeka/modelsize_scaling/lam/tester.sh",648,8,"tokenize",shellscript,selection_command +91,816075,"scripts_horeka/modelsize_scaling/lam/tester.sh",648,9,"tokenizer",shellscript,selection_command +92,816802,"scripts_horeka/modelsize_scaling/lam/tester.sh",648,0,"",shellscript,selection_command +93,817747,"scripts_horeka/modelsize_scaling/lam/tester.sh",708,9,"",shellscript,content +94,817747,"scripts_horeka/modelsize_scaling/lam/tester.sh",648,9,"",shellscript,content +95,818259,"scripts_horeka/modelsize_scaling/lam/tester.sh",699,0,"l",shellscript,content +96,818260,"scripts_horeka/modelsize_scaling/lam/tester.sh",648,0,"l",shellscript,content +97,818261,"scripts_horeka/modelsize_scaling/lam/tester.sh",649,0,"",shellscript,selection_keyboard +98,818371,"scripts_horeka/modelsize_scaling/lam/tester.sh",701,0,"a",shellscript,content +99,818372,"scripts_horeka/modelsize_scaling/lam/tester.sh",649,0,"a",shellscript,content +100,818372,"scripts_horeka/modelsize_scaling/lam/tester.sh",650,0,"",shellscript,selection_keyboard +101,818440,"scripts_horeka/modelsize_scaling/lam/tester.sh",703,0,"m",shellscript,content +102,818441,"scripts_horeka/modelsize_scaling/lam/tester.sh",650,0,"m",shellscript,content +103,818442,"scripts_horeka/modelsize_scaling/lam/tester.sh",651,0,"",shellscript,selection_keyboard +104,818784,"scripts_horeka/modelsize_scaling/lam/tester.sh",650,0,"",shellscript,selection_command +105,819526,"scripts_horeka/modelsize_scaling/lam/tester.sh",778,0,"",shellscript,selection_mouse +106,819540,"scripts_horeka/modelsize_scaling/lam/tester.sh",777,0,"",shellscript,selection_command +107,820111,"scripts_horeka/modelsize_scaling/lam/tester.sh",756,0,"",shellscript,selection_mouse +108,820121,"scripts_horeka/modelsize_scaling/lam/tester.sh",755,0,"",shellscript,selection_command +109,820660,"scripts_horeka/modelsize_scaling/lam/tester.sh",718,0,"",shellscript,selection_mouse +110,821241,"scripts_horeka/modelsize_scaling/lam/tester.sh",636,0,"",shellscript,selection_mouse +111,821262,"scripts_horeka/modelsize_scaling/lam/tester.sh",635,0,"",shellscript,selection_command +112,822492,"scripts_horeka/modelsize_scaling/lam/tester.sh",578,0,"",shellscript,selection_mouse +113,822650,"scripts_horeka/modelsize_scaling/lam/tester.sh",566,18,"log_image_interval",shellscript,selection_mouse +114,823179,"scripts_horeka/modelsize_scaling/lam/tester.sh",555,0,"",shellscript,selection_mouse +115,823306,"scripts_horeka/modelsize_scaling/lam/tester.sh",553,2,"1e",shellscript,selection_mouse 
+116,823863,"scripts_horeka/modelsize_scaling/lam/tester.sh",533,0,"",shellscript,selection_mouse
+117,824000,"scripts_horeka/modelsize_scaling/lam/tester.sh",533,2,"1e",shellscript,selection_mouse
+118,824220,"scripts_horeka/modelsize_scaling/lam/tester.sh",533,3,"1e-",shellscript,selection_mouse
+119,824248,"scripts_horeka/modelsize_scaling/lam/tester.sh",533,4,"1e-4",shellscript,selection_mouse
+120,824639,"scripts_horeka/modelsize_scaling/lam/tester.sh",537,0,"",shellscript,selection_mouse
+121,829129,"scripts_horeka/modelsize_scaling/lam/tester.sh",334,0,"",shellscript,selection_mouse
+122,829748,"scripts_horeka/modelsize_scaling/lam/tester.sh",361,0,"",shellscript,selection_mouse
+123,829907,"scripts_horeka/modelsize_scaling/lam/tester.sh",358,11,"checkpoints",shellscript,selection_mouse
+124,830754,"scripts_horeka/modelsize_scaling/lam/tester.sh",495,0,"",shellscript,selection_mouse
+125,835279,"scripts_horeka/modelsize_scaling/lam/tester.sh",464,0,"",shellscript,selection_mouse
+126,835307,"scripts_horeka/modelsize_scaling/lam/tester.sh",463,0,"",shellscript,selection_command
+127,835829,"scripts_horeka/modelsize_scaling/lam/tester.sh",459,0,"",shellscript,selection_mouse
+128,836602,"scripts_horeka/modelsize_scaling/lam/tester.sh",718,0,"",shellscript,selection_mouse
+129,837220,"scripts_horeka/modelsize_scaling/lam/tester.sh",810,0,"",shellscript,selection_mouse
+130,845145,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",0,0,"# Genie 1 - Model Sizes and their configs\n\n## Tokenizer model: sizes\n\ndefault: \n| Model | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|-------|-----------|------------|-----------|------------|-------------|-------------|\n| default | 512 | 8 | 8 | 32 | 1024 | ~38M |\n\n### scaling up \n#### (not tested yet - TODO @mihir)\n\n| Model | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|-------|-----------|------------|-----------|------------|-------------|-------------|\n| L1 | 768 | 12 | 12 | 64 | 2048 | ~80M |\n| L2 | 1024 | 12 | 16 | 128 | 2048 | ~140M |\n| L3 | 1152 | 16 | 16 | 128 | 4096 | ~200M |\n| L4 | 896 | 16 | 14 | 96 | 4096 | ~120M |\n| L5 | 1536 | 12 | 24 | 256 | 2048 | ~190M |\n\n\n### tiny models\n| Model | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|-------|-----------|------------|-----------|------------|-------------|-------------|\n| S1 | 128 | 2 | 2 | 8 | 128 | ~0.6M |\n| S2 | 192 | 2 | 3 | 16 | 128 | ~1.3M |\n| S3 | 256 | 3 | 4 | 16 | 256 | ~3.6M |\n| S4 | 320 | 4 | 5 | 24 | 256 | ~7.4M |\n| S5 | 384 | 4 | 6 | 32 | 512 | ~10M |\n\n\n## Latent Action model: sizes\ndefault: \n| Model | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|-------|-----------|------------|-----------|------------|-------------|-------------|\n| default | 512 | 8 | 8 | 32 | 6 | ~39M |\n\n### scaling up \n#### (not tested yet - TODO @mihir)\n\n| Name | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|--------------|-----------|------------|-----------|------------|-------------|-------------|\n| XL | 1024 | 12 | 16 | 64 | 12 | ~200M |\n| L | 896 | 12 | 14 | 48 | 8 | ~150M |\n| M+ | 768 | 10 | 12 | 48 | 8 | ~100M |\n| M | 640 | 10 | 10 | 32 | 8 | ~70M |\n| Base+ | 512 | 12 | 8 | 32 | 8 | ~55M |\n\n\n### tiny models\n| Name | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|--------------|-----------|------------|-----------|------------|-------------|-------------|\n| XS | 128 | 2 | 2 | 8 | 4 | ~0.9M |\n| S | 160 | 2 | 2 | 8 | 4 | ~1.3M |\n| S+ | 192 | 3 | 3 | 8 | 4 | ~2.4M |\n| M- | 256 | 4 | 4 | 16 | 6 | ~5.4M |\n| M | 320 | 6 | 4 | 16 | 6 | ~12M |\n\n\n## Dynamics model: sizes \n\n| Config | dyna_dim | dyna_num_blocks | dyna_num_heads | Approx. Params |\n|--------|----------|-----------------|---------------|----------------|\n| 1 | 512 | 12 | 8 | ~36M |\n| 2 | 768 | 16 | 12 | ~110M |\n| 3 | 1024 | 16 | 16 | ~180M |\n| 4 | 1024 | 24 | 16 | ~270M |\n| 5 | 1536 | 24 | 24 | ~500M |\n\n\n### tiny models\n| Config | dyna_dim | dyna_num_blocks | dyna_num_heads | Approx. Params |\n|--------|----------|-----------------|---------------|----------------|\n| A | 128 | 2 | 4 | ~1.5M |\n| B | 256 | 2 | 4 | ~3.5M |\n| C | 256 | 4 | 4 | ~6M |\n| D | 384 | 4 | 6 | ~12M |\n| E | 512 | 4 | 8 | ~18M |",markdown,tab
+131,847402,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1402,0,"",markdown,selection_mouse
+132,847413,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1401,0,"",markdown,selection_command
+133,847696,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1401,1,"|",markdown,selection_mouse
+134,847697,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1304,97,"--|-----------|------------|-------------|-------------|\n| default | 512 | 8 | 8 | 32 | 6 | ~39M ",markdown,selection_mouse
+135,847698,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1293,108,"|------------|-----------|------------|-------------|-------------|\n| default | 512 | 8 | 8 | 32 | 6 | ~39M ",markdown,selection_mouse
+136,847698,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1286,115,"-------|------------|-----------|------------|-------------|-------------|\n| default | 512 | 8 | 8 | 32 | 6 | ~39M ",markdown,selection_mouse
+137,847698,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1280,121,"-|-----------|------------|-----------|------------|-------------|-------------|\n| default | 512 | 8 | 8 | 32 | 6 | ~39M ",markdown,selection_mouse
+138,847699,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1189,212,"del | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|-------|-----------|------------|-----------|------------|-------------|-------------|\n| default | 512 | 8 | 8 | 32 | 6 | ~39M ",markdown,selection_mouse
+139,847699,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1187,214,"Model | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|-------|-----------|------------|-----------|------------|-------------|-------------|\n| default | 512 | 8 | 8 | 32 | 6 | ~39M ",markdown,selection_mouse
+140,847699,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1186,215," Model | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|-------|-----------|------------|-----------|------------|-------------|-------------|\n| default | 512 | 8 | 8 | 32 | 6 | ~39M ",markdown,selection_mouse
+141,847700,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1185,216,"| Model | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|-------|-----------|------------|-----------|------------|-------------|-------------|\n| default | 512 | 8 | 8 | 32 | 6 | ~39M ",markdown,selection_mouse
+142,847710,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1402,0,"",markdown,selection_command
+143,847754,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1185,217,"| Model | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|-------|-----------|------------|-----------|------------|-------------|-------------|\n| default | 512 | 8 | 8 | 32 | 6 | ~39M |",markdown,selection_mouse
+144,848375,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1273,129,"|-------|-----------|------------|-----------|------------|-------------|-------------|\n| default | 512 | 8 | 8 | 32 | 6 | ~39M |",markdown,selection_mouse
+145,848476,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1361,41,"| default | 512 | 8 | 8 | 32 | 6 | ~39M |",markdown,selection_mouse
+146,848842,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1273,129,"|-------|-----------|------------|-----------|------------|-------------|-------------|\n| default | 512 | 8 | 8 | 32 | 6 | ~39M |",markdown,selection_mouse
+147,849093,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1185,217,"| Model | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|-------|-----------|------------|-----------|------------|-------------|-------------|\n| default | 512 | 8 | 8 | 32 | 6 | ~39M |",markdown,selection_mouse
+148,849712,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",1175,227,"default: \n| Model | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|-------|-----------|------------|-----------|------------|-------------|-------------|\n| default | 512 | 8 | 8 | 32 | 6 | ~39M |",markdown,selection_mouse
+149,856496,"scripts_horeka/modelsize_scaling/lam/tester.sh",0,0,"",shellscript,tab
+150,857363,"scripts_horeka/modelsize_scaling/lam/tester.sh",809,0,"",shellscript,selection_mouse
+151,857382,"scripts_horeka/modelsize_scaling/lam/tester.sh",808,0,"",shellscript,selection_command
+152,857489,"scripts_horeka/modelsize_scaling/lam/tester.sh",808,1,"r",shellscript,selection_mouse
+153,857497,"scripts_horeka/modelsize_scaling/lam/tester.sh",809,0,"",shellscript,selection_command
+154,857637,"scripts_horeka/modelsize_scaling/lam/tester.sh",778,31,"\n --data_dir $tf_records_dir",shellscript,selection_mouse
+155,857638,"scripts_horeka/modelsize_scaling/lam/tester.sh",712,97,"size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse
+156,857638,"scripts_horeka/modelsize_scaling/lam/tester.sh",633,176,"g \\n --name=lam-model-size-scaling-38M-$slurm_job_id \\n --tags lam model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse
+157,857639,"scripts_horeka/modelsize_scaling/lam/tester.sh",593,216," --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-38M-$slurm_job_id \\n --tags lam model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse
+158,857639,"scripts_horeka/modelsize_scaling/lam/tester.sh",540,269," --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-38M-$slurm_job_id \\n --tags lam model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse
+159,857640,"scripts_horeka/modelsize_scaling/lam/tester.sh",520,289," --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-38M-$slurm_job_id \\n --tags lam model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse
+160,857640,"scripts_horeka/modelsize_scaling/lam/tester.sh",498,311," --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-38M-$slurm_job_id \\n --tags lam model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse
+161,857661,"scripts_horeka/modelsize_scaling/lam/tester.sh",465,344," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-38M-$slurm_job_id \\n --tags lam model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse
+162,857733,"scripts_horeka/modelsize_scaling/lam/tester.sh",438,371,"srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-38M-$slurm_job_id \\n --tags lam model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,selection_mouse
+163,860075,"scripts_horeka/modelsize_scaling/lam/tester.sh",810,0,"",shellscript,selection_mouse
+164,861374,"scripts_horeka/modelsize_scaling/lam/tester.sh",673,0,"",shellscript,selection_mouse
+165,862317,"scripts_horeka/modelsize_scaling/lam/tester.sh",672,0,"",shellscript,selection_command
+166,862962,"scripts_horeka/modelsize_scaling/lam/tester.sh",672,1,"8",shellscript,selection_command
+167,863220,"scripts_horeka/modelsize_scaling/lam/tester.sh",672,1,"8",shellscript,selection_command
+168,863788,"scripts_horeka/modelsize_scaling/lam/tester.sh",672,0,"",shellscript,selection_command
+169,864468,"scripts_horeka/modelsize_scaling/lam/tester.sh",726,1,"",shellscript,content
+170,864468,"scripts_horeka/modelsize_scaling/lam/tester.sh",672,1,"",shellscript,content
+171,865694,"scripts_horeka/modelsize_scaling/lam/tester.sh",725,0,"9",shellscript,content
+172,865695,"scripts_horeka/modelsize_scaling/lam/tester.sh",672,0,"9",shellscript,content
+173,865696,"scripts_horeka/modelsize_scaling/lam/tester.sh",673,0,"",shellscript,selection_keyboard
+174,868783,"scripts_horeka/modelsize_scaling/dynamics/model_sizes.md",0,0,"",markdown,tab
+175,875920,"scripts_horeka/modelsize_scaling/lam/tester.sh",0,0,"",shellscript,tab
+176,875922,"scripts_horeka/modelsize_scaling/lam/tester.sh",515,0,"",shellscript,selection_mouse
+177,875923,"scripts_horeka/modelsize_scaling/lam/tester.sh",515,2,"96",shellscript,selection_mouse
+178,876668,"scripts_horeka/modelsize_scaling/lam/tester.sh",517,0,"",shellscript,selection_mouse
+179,876947,"scripts_horeka/modelsize_scaling/lam/tester.sh",516,0,"",shellscript,selection_command
+180,883658,"TERMINAL",0,0,"[?25lsh[?25h[?25lh[?25h",,terminal_output
+181,883710,"TERMINAL",0,0,"[?25l [?25h",,terminal_output
+182,884179,"TERMINAL",0,0,"[?25ls[?25h[?25lc[?25h",,terminal_output
+183,884402,"TERMINAL",0,0,"ripts_",,terminal_output +184,885656,"TERMINAL",0,0,"[?25lh[?25horeka/",,terminal_output +185,885864,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +186,885980,"TERMINAL",0,0,"odelsize_scaling/",,terminal_output +187,887724,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +188,887862,"TERMINAL",0,0,"am/",,terminal_output +189,889810,"TERMINAL",0,0,"tester.sh ",,terminal_output +190,890873,"TERMINAL",0,0,"[?25l[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\r\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\r\n\r\njob_name=""debug""\r\nslurm_job_id=""debug-mihir""\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\nenv | grep SLURM\r\n\r\nsrun python train_lam.py \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=96 \\r\n --min_lr=1e-4 \\r\n --max_lr=1e-4 \\r\n --log_image_interval=500 \\r\n --log_checkpoint_interval=5 \\r\n --log \\r\n --name=lam-model-size-scaling-39M-$slurm_job_id \\r\n --tags lam model-size-scaling 39M \\r\n --entity instant-uv \\r\n --project jafar \\r\n --data_dir $tf_records_dir\r\n[?25h",,terminal_output +191,891236,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x4)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=3172857\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0625\r\nSLURM_JOB_START_TIME=1751618115\r\nSLURM_STEP_NODELIST=hkn0625\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751654115\r\nSLURM_PMI2_SRUN_PORT=38731\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x4)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=4\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3316923\r\nSLURM_PTY_PORT=39987\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=50\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=16\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e30.hkn0625\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=108\r\nSLURM_NODELIST=hkn[0625-0628]\r\nSLURM_SRUN_COMM_PORT=39403\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=16\r\nSLURM_NNODES=4\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3316923\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0625\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=39403\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0625-0628]\r\n",,terminal_output +192,891341,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +193,917555,"TERMINAL",0,0,"2025-07-04 10:49:52.814448: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 10:49:52.814483: E 
external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n[identical cuFFT registration error repeated by the remaining 14 tasks]\r\n",,terminal_output
+194,917641,"TERMINAL",0,0,"WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751618992.897423 3175549 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\n[identical WARNING/cuDNN pair repeated by the remaining 15 tasks]\r\nE0000 00:00:1751618992.904754 1539160 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\n[identical cuBLAS registration error repeated by the remaining 15 tasks]\r\n",,terminal_output
+195,917881,"TERMINAL",0,0,"W0000 00:00:1751618993.154473 1539159 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n[identical computation placer warning repeated 63 more times across the 16 tasks]\r\n",,terminal_output
+196,921503,"scripts_horeka/modelsize_scaling/lam/tester.sh",0,0,"",shellscript,tab
+197,921503,"scripts_horeka/modelsize_scaling/lam/tester.sh",672,0,"",shellscript,selection_mouse
+198,949812,"TERMINAL",0,0,"W0000 00:00:1751619025.066990 3175552 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n[identical dlopen warning and skip notice repeated by the remaining 15 tasks]\r\n",,terminal_output
+199,966778,"TERMINAL",0,0,"2025-07-04 10:50:42.061720: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n[identical autotuning warning repeated in events 200-208]\r\n",,terminal_output
+209,969077,"TERMINAL",0,0,"2025-07-04 10:50:44.349942: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-04 10:50:44.361379: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output
+210,982261,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. 
Use `wandb login --relogin` to force relogin\r\n",,terminal_output +211,983133,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250704_105057-xyb5evrv\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run lam-model-size-scaling-39M-debug-mihir\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/xyb5evrv\r\n",,terminal_output +212,1214637,"TERMINAL",0,0,"",,terminal_focus +213,1245641,"scripts_horeka/modelsize_scaling/lam/tester.sh",0,0,"",shellscript,tab +214,1245642,"scripts_horeka/modelsize_scaling/lam/tester.sh",439,0,"",shellscript,selection_mouse +215,1246902,"scripts_horeka/modelsize_scaling/lam/tester.sh",438,0,"",shellscript,selection_command +216,1247255,"scripts_horeka/modelsize_scaling/lam/tester.sh",438,0,"XLA_FLAGS=--xla_gpu_autotune_level=0",shellscript,content +217,1247830,"scripts_horeka/modelsize_scaling/lam/tester.sh",474,0," ",shellscript,content +218,1247832,"scripts_horeka/modelsize_scaling/lam/tester.sh",475,0,"",shellscript,selection_keyboard +219,1248128,"scripts_horeka/modelsize_scaling/lam/tester.sh",474,0,"",shellscript,selection_command +220,1264636,"TERMINAL",0,0,"srun",,terminal_focus +221,1265422,"TERMINAL",0,0,"\r\n",,terminal_output +222,1274225,"TERMINAL",0,0,"^Csrun: interrupt (one more within 1 sec to abort)\r\nsrun: StepId=3316923.0 tasks 0-15: running\r\n",,terminal_output +223,1274448,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3316923.0\r\nsrun: forcing job termination\r\nsrun: Job step aborted: Waiting up to 32 seconds for job step to finish.\r\nslurmstepd: error: *** STEP 3316923.0 ON hkn0625 CANCELLED AT 2025-07-04T10:55:49 ***\r\n",,terminal_output +224,1274926,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3316923.0\r\nsrun: job abort in progress\r\n",,terminal_output +225,1275856,"TERMINAL",0,0,"scripts_horeka/modelsize_scaling/lam/tester.sh: line 34: far: command not found\r\n]0;tum_cte0515@hkn0625:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0625 jafar]$ ",,terminal_output +226,1276372,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0625:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0625 jafar]$ ",,terminal_output +227,1276577,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0625:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0625 jafar]$ ",,terminal_output +228,1277390,"TERMINAL",0,0,"i",,terminal_output +229,1277789,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +230,1277844,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +231,1277917,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +232,1278034,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +233,1278096,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +234,1278218,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +235,1278286,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +236,1278351,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +237,1278415,"TERMINAL",0,0,"[?25lk[?25h",,terminal_output +238,1278557,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +239,1278695,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +240,1278803,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +241,1279263,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +242,1279360,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +243,1279480,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +244,1279686,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output 
+245,1279761,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output
+246,1279911,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output
+247,1279992,"TERMINAL",0,0,"[?25le[?25h[?25lr[?25h",,terminal_output
+248,1280110,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output
+249,1280463,"TERMINAL",0,0,"M\tgenie.py\r\nM\tsample.py\r\nSwitched to branch 'runner'\r\n]0;tum_cte0515@hkn0625:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0625 jafar]$ ",,terminal_output
+250,1280607,"",0,0,"Switched from branch 'fix-sampling' to 'runner'",,git_branch_checkout
+251,1283284,"TERMINAL",0,0,"git checkout runner",,terminal_output
+252,1283440,"TERMINAL",0,0,"sh scripts_horeka/modelsize_scaling/lam/tester.sh ",,terminal_output
+253,1285336,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\r\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\r\n\r\njob_name=""debug""\r\nslurm_job_id=""debug-mihir""\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\nenv | grep SLURM\r\n\r\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=96 \\r\n --min_lr=1e-4 \\r\n --max_lr=1e-4 \\r\n --log_image_interval=500 \\r\n --log_checkpoint_interval=5 \\r\n --log \\r\n --name=lam-model-size-scaling-39M-$slurm_job_id \\r\n --tags lam model-size-scaling 39M \\r\n --entity instant-uv \\r\n --project jafar \\r\n --data_dir $tf_records_dir\r\n",,terminal_output
+254,1285466,"TERMINAL",0,0,"[SLURM environment dump identical to the one printed for the first run above]\r\n",,terminal_output
+255,1285608,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +256,1291439,"TERMINAL",0,0,"2025-07-04 10:56:06.607951: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 10:56:06.608106: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 10:56:06.608150: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 10:56:06.608639: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751619366.620983 1386567 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751619366.621029 1386570 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751619366.621484 1386568 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751619366.621782 1386569 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751619366.625514 1386570 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751619366.625595 1386567 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751619366.626098 1386568 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751619366.626072 1386569 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1751619366.638366 1386569 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.638381 1386569 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.638383 1386569 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.638385 1386569 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.638366 1386570 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.638382 1386570 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.638383 1386570 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.638385 1386570 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.638566 1386567 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.638580 1386567 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.638582 1386567 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.638583 1386567 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.639782 1386568 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.639798 1386568 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.639799 1386568 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.639801 1386568 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +257,1291711,"TERMINAL",0,0,"2025-07-04 10:56:06.911218: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 10:56:06.911325: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 10:56:06.911247: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 10:56:06.911310: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751619366.924308 1019998 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751619366.924388 1020000 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751619366.924573 1019999 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751619366.924536 1020001 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751619366.928882 1019998 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751619366.928934 1020000 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751619366.929014 1019999 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751619366.929200 1020001 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\n2025-07-04 10:56:06.935491: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 10:56:06.935838: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 10:56:06.936051: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 10:56:06.936405: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to 
register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 10:56:06.942311: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 10:56:06.942312: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 10:56:06.942577: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nW0000 00:00:1751619366.942796 1019998 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.942814 1019998 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.942817 1019998 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.942818 1019998 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n2025-07-04 10:56:06.942842: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nW0000 00:00:1751619366.942800 1020000 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.942815 1020000 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.942817 1020000 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.942818 1020000 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.942927 1020001 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.942944 1020001 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.942945 1020001 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.942947 1020001 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.942966 1019999 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.942982 1019999 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.942983 1019999 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.942985 1019999 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751619366.948567 1542586 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751619366.948728 1542584 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751619366.949023 1542587 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751619366.949613 1542585 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751619366.952917 1542584 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751619366.953163 1542586 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751619366.953287 1542587 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751619366.953860 1542585 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751619366.955315 3179055 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751619366.955469 3179056 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751619366.955855 3179057 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751619366.955964 3179054 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751619366.959681 3179055 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751619366.960134 3179056 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751619366.960338 3179057 cuda_blas.cc:1407] Unable to 
register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751619366.960214 3179054 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1751619366.966169 1542584 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.966185 1542584 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.966187 1542584 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.966189 1542584 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.966402 1542586 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.966417 1542586 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.966418 1542586 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.966420 1542586 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.967220 1542587 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.967237 1542587 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.967239 1542587 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.967241 1542587 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.967544 1542585 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.967561 1542585 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.967563 1542585 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.967564 1542585 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.973626 3179055 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.973643 3179055 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.973644 3179055 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.973645 3179055 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.973857 3179054 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.973872 3179054 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.973874 3179054 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.973876 3179054 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.973846 3179056 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.973862 3179056 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.973864 3179056 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.973865 3179056 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.974047 3179057 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.974063 3179057 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.974065 3179057 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751619366.974067 3179057 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +258,1300426,"TERMINAL",0,0,"W0000 00:00:1751619375.683263 1019998 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751619375.683242 1019999 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751619375.683213 1020000 gpu_device.cc:2341] Cannot dlopen some GPU libraries. 
Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751619375.683234 1020001 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751619375.685865 1542584 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751619375.685861 1542586 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751619375.687304 1542585 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751619375.687466 1386567 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751619375.687306 1542587 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751619375.687459 1386569 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751619375.687466 1386570 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751619375.687897 1386568 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. 
Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751619375.688199 3179054 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751619375.688404 3179055 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751619375.688408 3179056 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751619375.690190 3179057 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +259,1310376,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"#!/usr/bin/env bash\n\n# Unload modules that may interfere\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n\n# Activate virtual environment\nsource .venv/bin/activate\n\n# Set workspace and checkpoint directory (update slurm_job_id as needed)\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared'\n# Replace the following with the actual job id/checkpoint you want to sample from\nslurm_job_id=3301029\n\n# job_name=train_dynamics_minecraft_overfit_sample_tiny\nCHECKPOINT_DIR=$ws_dir/checkpoints/${slurm_job_id}\n\n# Example: If you want to use a specific checkpoint, set it here\n# CHECKPOINT_PATH=$ws_dir/checkpoints/3299272/dynamics-tiny-overfit-big-lr-3299272_50000/\n# Or use the latest in the directory\n# CHECKPOINT_PATH=$(ls -d $CHECKPOINT_DIR/*/ | sort | tail -n 1)\nCHECKPOINT_PATH=$CHECKPOINT_DIR/genie_1751067601_200000/\n# CHECKPOINT_PATH=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/0000/genie_1751301068_2000/\n# CHECKPOINT_PATH=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/../checkpoints/3307618/genie_1751322003_15500/\n# CHECKPOINT_PATH=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3307619/genie_1751322003_200000/\nCHECKPOINT_PATH=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3309699/genie_1751384516_74500/\n\n\necho ""Sampling from checkpoint: $CHECKPOINT_PATH""\n\npython sample.py \\n --checkpoint ""$CHECKPOINT_PATH"" \\n --tokenizer_dim=384 \\n --latent_patch_dim=32 \\n --num_patch_latents=1024 \\n --patch_size=4 \\n --tokenizer_num_blocks=8 \\n --tokenizer_num_heads=8 \\n --lam_dim=384 \\n --latent_action_dim=32 \\n --lam_patch_size=16 \\n --lam_num_blocks=8 \\n --lam_num_heads=8 \\n --dyna_dim=128 \\n --dyna_num_blocks=2 \\n --dyna_num_heads=4 \\n 
--maskgit_steps=1 \\n --num_latent_actions=6 \\n --seq_len=16 \\n --start_frame=0\n\n# python sample.py \\n # --checkpoint ""$CHECKPOINT_PATH"" \\n # --data_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/coinrun_episodes\n",shellscript,tab
+260,1311311,"TERMINAL",0,0,"2025-07-04 10:56:26.595560: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output
+261,1311452,"TERMINAL",0,0,"[the same XLA dot_search_space autotuning warning repeated across all processes omitted]\r\n",,terminal_output
+265,1312245,"TERMINAL",0,0,"2025-07-04 10:56:27.530366: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +266,1315655,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), 
grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args,\n )\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer__\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n seed=args.seed,\n )\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in dataloader) # type: ignore\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n train_state, loss, recon, metrics = 
train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +267,1317933,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +268,1318653,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250704_105633-9susoo7a\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run lam-model-size-scaling-39M-debug-mihir\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/9susoo7a\r\n",,terminal_output +269,1328965,"TERMINAL",0,0,"bash",,terminal_focus +270,1331700,"TERMINAL",0,0,"git branch",,terminal_command +271,1331753,"TERMINAL",0,0,"]633;E;2025-07-04 10:56:46 git branch;7be71f88-07f8-498b-82c5-01f117a78f25]633;C[?1h=\r add-wandb-name-and-tags\r\n convert-to-jax-array-in-iter\r\n dont-let-tf-see-gpu\r\n feat/explicit-image-dims\r\n fix-sampling\r\n main\r\n preprocess_video\r\n revised-dataloader\r\n* runner\r\n tmp\r\n",,terminal_output +272,1331779,"TERMINAL",0,0,"\r[?1l>]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +273,1343294,"TERMINAL",0,0,"2025-07-04 10:56:58.575395: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-04 10:56:58.576484: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-04 10:56:58.576514: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output
+274,1343354,"TERMINAL",0,0,"[the same XLA dot_search_space autotuning warning repeated dozens of times by all processes over the next few seconds (rows 274-285) omitted]\r\n",,terminal_output
+286,1346983,"TERMINAL",0,0,"2025-07-04 10:57:02.269222: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +287,1368430,"TERMINAL",0,0,"srun",,terminal_focus +288,1528246,"TERMINAL",0,0,"Running on 16 devices.\r\nCounting all components: ['action_in', 'encoder', 'vq', 'action_up', 'patch_up', 'decoder']\r\nParameter counts:\r\n{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\r\nStarting training from step 0...\r\n
Step 0, loss: 0.3362254798412323\r\nStep 1, loss: 0.2750907838344574\r\nStep 2, loss: 0.23492641746997833\r\nStep 3, loss: 0.20701628923416138\r\nStep 4, loss: 0.18350884318351746\r\nStep 5, loss: 0.1659007966518402\r\nStep 6, loss: 0.150494784116745\r\nStep 7, loss: 0.14000122249126434\r\nStep 8, loss: 0.13274547457695007\r\nStep 9, loss: 0.12251953780651093\r\n
Step 10, loss: 0.1170879453420639\r\nStep 11, loss: 0.10762540251016617\r\nStep 12, loss: 0.10258731245994568\r\nStep 13, loss: 0.10352618992328644\r\nStep 14, loss: 0.10688018053770065\r\nStep 15, loss: 0.10075119882822037\r\nStep 16, loss: 0.09745762497186661\r\nStep 17, loss: 0.09457919001579285\r\nStep 18, loss: 0.08164741843938828\r\nStep 19, loss: 0.0888347178697586\r\n
Step 20, loss: 0.07998507469892502\r\nStep 21, loss: 0.07322828471660614\r\nStep 22, loss: 0.07593098282814026\r\nStep 23, loss: 0.07218942791223526\r\nStep 24, loss: 0.06460891664028168\r\nStep 25, loss: 0.07442361116409302\r\nStep 26, loss: 0.0644427239894867\r\nStep 27, loss: 0.06295198947191238\r\nStep 28, loss: 0.06914092600345612\r\nStep 29, loss: 0.06511834263801575\r\n
Step 30, loss: 0.061374422162771225\r\nStep 31, loss: 0.059483930468559265\r\nStep 32, loss: 0.06902221590280533\r\nStep 33, loss: 0.060859691351652145\r\nStep 34, loss: 0.05683210864663124\r\nStep 35, loss: 0.05632608011364937\r\nStep 36, loss: 0.058733828365802765\r\nStep 37, loss: 0.05144477263092995\r\nStep 38, loss: 0.05117009952664375\r\nStep 39, loss: 0.05097543075680733\r\n
Step 40, loss: 0.051942065358161926\r\nStep 41, loss: 0.046499162912368774\r\nStep 42, loss: 0.05159098282456398\r\nStep 43, loss: 0.04936894029378891\r\nStep 44, loss: 0.05515732616186142\r\nStep 45, loss: 0.04567846655845642\r\nStep 46, loss: 0.04675234481692314\r\nStep 47, loss: 0.04656634107232094\r\nStep 48, loss: 0.04983530566096306\r\nStep 49, loss: 0.049152761697769165\r\n
Step 50, loss: 0.04401770606637001\r\nStep 51, loss: 0.042400676757097244\r\nStep 52, loss: 0.0430758111178875\r\nStep 53, loss: 0.043048195540905\r\nStep 54, loss: 0.04316592216491699\r\nStep 55, loss: 0.04429558664560318\r\nStep 56, loss: 0.042991869151592255\r\nStep 57, loss: 0.04130374267697334\r\nStep 58, loss: 0.03836307302117348\r\nStep 59, loss: 0.03951887786388397\r\n
Step 60, loss: 0.03625255823135376\r\nStep 61, loss: 0.03791269659996033\r\nStep 62, loss: 0.03727186843752861\r\nStep 63, loss: 0.03937394171953201\r\nStep 64, loss: 0.036140769720077515\r\nStep 65, loss: 0.03697889298200607\r\nStep 66, loss: 0.03517203778028488\r\nStep 67, loss: 0.03560420870780945\r\nStep 68, loss: 0.037143804132938385\r\nStep 69, loss: 0.032461587339639664\r\n
Step 70, loss: 0.03468279168009758\r\nStep 71, loss: 0.03513737767934799\r\nStep 72, loss: 0.03565040975809097\r\nStep 73, loss: 0.0330132320523262\r\nStep 74, loss: 0.034020647406578064\r\nStep 75, loss: 0.031051885336637497\r\nStep 76, loss: 0.029657039791345596\r\nStep 77, loss: 0.029128598049283028\r\nStep 78, loss: 0.02778560295701027\r\nStep 79, loss: 0.03003678098320961\r\n
Step 80, loss: 0.030365584418177605\r\nStep 81, loss: 0.026201536878943443\r\nStep 82, loss: 0.028577476739883423\r\nStep 83, loss: 0.02874593995511532\r\nStep 84, loss: 0.026943983510136604\r\nStep 85, loss: 0.0281868614256382\r\nStep 86, loss: 0.027384648099541664\r\nStep 87, loss: 0.02767602913081646\r\nStep 88, loss: 0.028130633756518364\r\nStep 89, loss: 0.02517537586390972\r\n
Step 90, loss: 0.0255118440836668\r\nStep 91, loss: 0.028349988162517548\r\nStep 92, loss: 0.025830097496509552\r\nStep 93, loss: 0.02468285895884037\r\nStep 94, loss: 0.02751479484140873\r\nStep 95, loss: 0.023452816531062126\r\nStep 96, loss: 0.02339227870106697\r\nStep 97, loss: 0.023860404267907143\r\nStep 98, loss: 0.026367874816060066\r\nStep 99, loss: 0.02228216454386711\r\n
Step 100, loss: 0.023430615663528442\r\nStep 101, loss: 0.021609408780932426\r\nStep 102, loss: 0.022437402978539467\r\nStep 103, loss: 0.021542737260460854\r\nStep 104, loss: 0.018982194364070892\r\nStep 105, loss: 0.021596087142825127\r\nStep 106, loss: 0.02188090980052948\r\nStep 107, loss: 0.020319683477282524\r\nStep 108, loss: 0.02188311144709587\r\nStep 109, loss: 0.022975796833634377\r\n
Step 110, loss: 0.01976914331316948\r\nStep 111, loss: 0.02313579060137272\r\nStep 112, loss: 0.020566441118717194\r\nStep 113, loss: 0.019332319498062134\r\nStep 114, loss: 0.019270069897174835\r\nStep 115, loss: 0.021202340722084045\r\nStep 116, loss: 0.020022688433527946\r\nStep 117, loss: 0.01845189556479454\r\nStep 118, loss: 0.021814992651343346\r\nStep 119, loss: 0.01873568259179592\r\n
Step 120, loss: 0.019324379041790962\r\nStep 121, loss: 0.01696816086769104\r\nStep 122, loss: 0.01798570528626442\r\nStep 123, loss: 0.01626499742269516\r\nStep 124, loss: 0.017125030979514122\r\nStep 125, loss: 0.020170114934444427\r\nStep 126, loss: 0.017771216109395027\r\nStep 127, loss: 0.018780428916215897\r\nStep 128, loss: 0.017509611323475838\r\nStep 129, loss: 0.016392791643738747\r\n
Step 130, loss: 0.0161692313849926\r\nStep 131, loss: 0.016657739877700806\r\nStep 132, loss: 0.015444198623299599\r\nStep 133, loss: 0.016056101769208908\r\nStep 134, loss: 0.01770874857902527\r\nStep 135, loss: 0.01826588436961174\r\nStep 136, loss: 0.017154138535261154\r\nStep 137, loss: 0.016903536394238472\r\nStep 138, loss: 0.017067160457372665\r\nStep 139, loss: 0.014987957663834095\r\n
Step 140, loss: 0.014494623988866806\r\nStep 141, loss: 0.015896758064627647\r\nStep 142, loss: 0.015160825103521347\r\nStep 143, loss: 0.015809522941708565\r\nStep 144, loss: 0.015752172097563744\r\nStep 145, loss: 0.01583671011030674\r\nStep 146, loss: 0.013611181639134884\r\nStep 147, loss: 0.017301112413406372\r\nStep 148, loss: 0.013612732291221619\r\nStep 149, loss: 0.014683591201901436\r\n
Step 150, loss: 0.01602398231625557\r\nStep 151, loss: 0.016187656670808792\r\nStep 152, loss: 0.014394099824130535\r\nStep 153, loss: 0.0142692094668746\r\nStep 154, loss: 0.014798534102737904\r\nStep 155, loss: 0.013772721402347088\r\nStep 156, loss: 0.012673899531364441\r\nStep 157, loss: 0.013326121494174004\r\nStep 158, loss: 0.014826718717813492\r\nStep 159, loss: 0.013340544886887074\r\n
Step 160, loss: 0.013956693932414055\r\nStep 161, loss: 0.013649641536176205\r\nStep 162, loss: 0.01341031864285469\r\nStep 163, loss: 0.015223181806504726\r\nStep 164, loss: 0.013461416587233543\r\nStep 165, loss: 0.016614627093076706\r\nStep 166, loss: 0.014446724206209183\r\nStep 167, loss: 0.01382629293948412\r\nStep 168, loss: 0.013771926052868366\r\nStep 169, loss: 0.015074227936565876\r\n
Step 170, loss: 0.013416316360235214\r\nStep 171, loss: 0.014910782687366009\r\nStep 172, loss: 0.014735950157046318\r\nStep 173, loss: 0.01343217957764864\r\nStep 174, loss: 0.014639505185186863\r\nStep 175, loss: 0.015760699287056923\r\nStep 176, loss: 0.01325331348925829\r\nStep 177, loss: 0.012775450944900513\r\nStep 178, loss: 0.012347170151770115\r\nStep 179, loss: 0.01201647985726595\r\n
Step 180, loss: 0.011997772380709648\r\nStep 181, loss: 0.0127970976755023\r\nStep 182, loss: 0.012849961407482624\r\nStep 183, loss: 0.013519403524696827\r\nStep 184, loss: 0.013941296376287937\r\nStep 185, loss: 0.011854317039251328\r\nStep 186, loss: 0.012987681664526463\r\nStep 187, loss: 0.013691768981516361\r\nStep 188, loss: 0.01244765892624855\r\nStep 189, loss: 0.01361786387860775\r\n
Step 190, loss: 0.013386002741754055\r\nStep 191, loss: 0.013909702189266682\r\nStep 192, loss: 0.014463458210229874\r\nStep 193, loss: 0.013295559212565422\r\nStep 194, loss: 0.01352232601493597\r\nStep 195, loss: 0.01174916047602892\r\nStep 196, loss: 0.012223197147250175\r\nStep 197, loss: 0.011510714888572693\r\nStep 198, loss: 0.012651047669351101\r\nStep 199, loss: 0.010755637660622597\r\n
Step 200, loss: 0.013457592576742172\r\nStep 201, loss: 0.0138861034065485\r\nStep 202, loss: 0.012667758390307426\r\nStep 203, loss: 0.012279652059078217\r\nStep 204, loss: 0.012127796187996864\r\nStep 205, loss: 0.01303846761584282\r\nStep 206, loss: 0.012841872870922089\r\nStep 207, loss: 0.01342686451971531\r\nStep 208, loss: 0.011667580343782902\r\nStep 209, loss: 0.01000010035932064\r\n
Step 210, loss: 0.011521648615598679\r\nStep 211, loss: 0.01216675341129303\r\nStep 212, loss: 0.01002007070928812\r\nStep 213, loss: 0.01219736784696579\r\nStep 214, loss: 0.011972173117101192\r\nStep 215, loss: 0.010780243203043938\r\nStep 216, loss: 0.010104210115969181\r\nStep 217, loss: 0.012307766824960709\r\nStep 218, loss: 0.012908908538520336\r\n
Running on 16 devices.\r\nCounting all components: ['action_in', 'encoder', 'vq', 'action_up', 'patch_up', 'decoder']\r\nParameter counts:\r\n{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\r\nStarting training from step 0...\r\nStep 0, loss: 0.3362254798412323\r\nStep 1, loss: 0.2750907838344574\r\nStep 2, loss: 0.23492641746997833\r\nStep 3, loss: 0.20701628923416138\r\nStep 4, loss: 0.18350884318351746\r\nStep 5, loss: 0.1659007966518402\r\nStep 6, loss: 0.150494784116745\r\nStep 7, loss: 0.14000122249126434\r\nStep 8, loss: 0.13274547457695007\r\nStep 9, loss: 0.12251953780651093\r\nStep 10, loss: 
0.1170879453420639\r\nStep 11, loss: 0.10762540251016617\r\nStep 12, loss: 0.10258731245994568\r\nStep 13, loss: 0.10352618992328644\r\nStep 14, loss: 0.10688018053770065\r\nStep 15, loss: 0.10075119882822037\r\nStep 16, loss: 0.09745762497186661\r\nStep 17, loss: 0.09457919001579285\r\nStep 18, loss: 0.08164741843938828\r\nStep 19, loss: 0.0888347178697586\r\nStep 20, loss: 0.07998507469892502\r\nStep 189, loss: 0.01361786387860775\r\nStep 190, loss: 0.013386002741754055\r\nStep 191, loss: 0.013909702189266682\r\nStep 192, loss: 0.014463458210229874\r\nStep 193, loss: 0.013295559212565422\r\nStep 194, loss: 0.01352232601493597\r\nStep 195, loss: 0.01174916047602892\r\nStep 196, loss: 0.012223197147250175\r\nStep 197, loss: 0.011510714888572693\r\nStep 198, loss: 0.012651047669351101\r\nStep 199, loss: 0.010755637660622597\r\nStep 200, loss: 0.013457592576742172\r\nStep 201, loss: 0.0138861034065485\r\nStep 202, loss: 0.012667758390307426\r\nStep 203, loss: 0.012279652059078217\r\nStep 204, loss: 0.012127796187996864\r\nStep 205, loss: 0.01303846761584282\r\nStep 206, loss: 0.012841872870922089\r\nStep 207, loss: 0.01342686451971531\r\nStep 208, loss: 0.011667580343782902\r\nStep 209, loss: 0.01000010035932064\r\nStep 210, loss: 0.011521648615598679\r\nStep 211, loss: 0.01216675341129303\r\nStep 212, loss: 0.01002007070928812\r\nStep 213, loss: 0.01219736784696579\r\nStep 214, loss: 0.011972173117101192\r\nStep 215, loss: 0.010780243203043938\r\nStep 162, loss: 0.01341031864285469\r\nStep 163, loss: 0.015223181806504726\r\nStep 164, loss: 0.013461416587233543\r\nStep 165, loss: 0.016614627093076706\r\nStep 166, loss: 0.014446724206209183\r\nStep 167, loss: 0.01382629293948412\r\nStep 168, loss: 0.013771926052868366\r\nStep 169, loss: 0.015074227936565876\r\nStep 170, loss: 0.013416316360235214\r\nStep 171, loss: 0.014910782687366009\r\nStep 172, loss: 0.014735950157046318\r\nStep 173, loss: 0.01343217957764864\r\nStep 174, loss: 0.014639505185186863\r\nStep 175, loss: 0.015760699287056923\r\nStep 176, loss: 0.01325331348925829\r\nStep 177, loss: 0.012775450944900513\r\nStep 178, loss: 0.012347170151770115\r\nStep 179, loss: 0.01201647985726595\r\nStep 180, loss: 0.011997772380709648\r\nStep 181, loss: 0.0127970976755023\r\nStep 182, loss: 0.012849961407482624\r\nStep 183, loss: 0.013519403524696827\r\nStep 184, loss: 0.013941296376287937\r\nStep 185, loss: 0.011854317039251328\r\nStep 186, loss: 0.012987681664526463\r\nStep 187, loss: 0.013691768981516361\r\nStep 188, loss: 0.01244765892624855\r\nRunning on 16 devices.\r\nCounting all components: ['action_in', 'encoder', 'vq', 'action_up', 'patch_up', 'decoder']\r\nParameter counts:\r\n{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\r\nStarting training from step 0...\r\nStep 0, loss: 0.3362254798412323\r\nStep 1, loss: 0.2750907838344574\r\nStep 2, loss: 0.23492641746997833\r\nStep 3, loss: 0.20701628923416138\r\nStep 4, loss: 0.18350884318351746\r\nStep 5, loss: 0.1659007966518402\r\nStep 6, loss: 0.150494784116745\r\nStep 7, loss: 0.14000122249126434\r\nStep 8, loss: 0.13274547457695007\r\nStep 9, loss: 0.12251953780651093\r\nStep 10, loss: 0.1170879453420639\r\nStep 11, loss: 0.10762540251016617\r\nStep 12, loss: 0.10258731245994568\r\nStep 13, loss: 0.10352618992328644\r\nStep 14, loss: 0.10688018053770065\r\nStep 15, loss: 0.10075119882822037\r\nStep 16, loss: 0.09745762497186661\r\nStep 17, loss: 0.09457919001579285\r\nStep 18, loss: 
0.08164741843938828\r\nStep 19, loss: 0.0888347178697586\r\nStep 20, loss: 0.07998507469892502\r\nStep 21, loss: 0.07322828471660614\r\nStep 22, loss: 0.07593098282814026\r\nStep 23, loss: 0.07218942791223526\r\nStep 24, loss: 0.06460891664028168\r\nStep 25, loss: 0.07442361116409302\r\nStep 26, loss: 0.0644427239894867\r\nStep 27, loss: 0.06295198947191238\r\nStep 28, loss: 0.06914092600345612\r\nStep 29, loss: 0.06511834263801575\r\nStep 30, loss: 0.061374422162771225\r\nStep 31, loss: 0.059483930468559265\r\nStep 32, loss: 0.06902221590280533\r\nStep 33, loss: 0.060859691351652145\r\nStep 34, loss: 0.05683210864663124\r\nStep 35, loss: 0.05632608011364937\r\nStep 36, loss: 0.058733828365802765\r\nStep 37, loss: 0.05144477263092995\r\nStep 38, loss: 0.05117009952664375\r\nStep 39, loss: 0.05097543075680733\r\nStep 40, loss: 0.051942065358161926\r\nStep 41, loss: 0.046499162912368774\r\nStep 42, loss: 0.05159098282456398\r\nStep 43, loss: 0.04936894029378891\r\nStep 44, loss: 0.05515732616186142\r\nStep 45, loss: 0.04567846655845642\r\nStep 46, loss: 0.04675234481692314\r\nStep 47, loss: 0.04656634107232094\r\nStep 48, loss: 0.04983530566096306\r\nStep 49, loss: 0.049152761697769165\r\nStep 216, loss: 0.010104210115969181\r\nStep 217, loss: 0.012307766824960709\r\nStep 218, loss: 0.012908908538520336\r\nStep 189, loss: 0.01361786387860775\r\nStep 190, loss: 0.013386002741754055\r\nStep 191, loss: 0.013909702189266682\r\nStep 192, loss: 0.014463458210229874\r\nStep 193, loss: 0.013295559212565422\r\nStep 194, loss: 0.01352232601493597\r\nStep 195, loss: 0.01174916047602892\r\nStep 196, loss: 0.012223197147250175\r\nStep 197, loss: 0.011510714888572693\r\nStep 198, loss: 0.012651047669351101\r\nStep 199, loss: 0.010755637660622597\r\nStep 200, loss: 0.013457592576742172\r\nStep 201, loss: 0.0138861034065485\r\nStep 202, loss: 0.012667758390307426\r\nStep 203, loss: 0.012279652059078217\r\nStep 204, loss: 0.012127796187996864\r\nStep 205, loss: 0.01303846761584282\r\nStep 206, loss: 0.012841872870922089\r\nStep 207, loss: 0.01342686451971531\r\nStep 208, loss: 0.011667580343782902\r\nStep 209, loss: 0.01000010035932064\r\nStep 210, loss: 0.011521648615598679\r\nStep 211, loss: 0.01216675341129303\r\nStep 212, loss: 0.01002007070928812\r\nStep 213, loss: 0.01219736784696579\r\nStep 214, loss: 0.011972173117101192\r\nStep 215, loss: 0.010780243203043938\r\nStep 21, loss: 0.07322828471660614\r\nStep 22, loss: 0.07593098282814026\r\nStep 23, loss: 0.07218942791223526\r\nStep 24, loss: 0.06460891664028168\r\nStep 25, loss: 0.07442361116409302\r\nStep 26, loss: 0.0644427239894867\r\nStep 27, loss: 0.06295198947191238\r\nStep 28, loss: 0.06914092600345612\r\nStep 29, loss: 0.06511834263801575\r\nStep 30, loss: 0.061374422162771225\r\nStep 31, loss: 0.059483930468559265\r\nStep 32, loss: 0.06902221590280533\r\nStep 33, loss: 0.060859691351652145\r\nStep 34, loss: 0.05683210864663124\r\nStep 35, loss: 0.05632608011364937\r\nStep 36, loss: 0.058733828365802765\r\nStep 37, loss: 0.05144477263092995\r\nStep 38, loss: 0.05117009952664375\r\nStep 39, loss: 0.05097543075680733\r\nStep 40, loss: 0.051942065358161926\r\nStep 41, loss: 0.046499162912368774\r\nStep 42, loss: 0.05159098282456398\r\nStep 43, loss: 0.04936894029378891\r\nStep 44, loss: 0.05515732616186142\r\nStep 45, loss: 0.04567846655845642\r\nStep 46, loss: 0.04675234481692314\r\nStep 47, loss: 0.04656634107232094\r\nStep 48, loss: 0.04983530566096306\r\nStep 49, loss: 0.049152761697769165\r\nStep 50, loss: 0.04401770606637001\r\nStep 51, 
loss: 0.042400676757097244\r\nStep 52, loss: 0.0430758111178875\r\nStep 53, loss: 0.043048195540905\r\nStep 54, loss: 0.04316592216491699\r\nStep 55, loss: 0.04429558664560318\r\nStep 56, loss: 0.042991869151592255\r\nStep 57, loss: 0.04130374267697334\r\nStep 58, loss: 0.03836307302117348\r\nStep 59, loss: 0.03951887786388397\r\nStep 60, loss: 0.03625255823135376\r\nStep 61, loss: 0.03791269659996033\r\nStep 62, loss: 0.03727186843752861\r\nStep 63, loss: 0.03937394171953201\r\nStep 64, loss: 0.036140769720077515\r\nStep 65, loss: 0.03697889298200607\r\nStep 66, loss: 0.03517203778028488\r\nStep 67, loss: 0.03560420870780945\r\nStep 68, loss: 0.037143804132938385\r\nStep 69, loss: 0.032461587339639664\r\nStep 70, loss: 0.03468279168009758\r\nStep 71, loss: 0.03513737767934799\r\nStep 72, loss: 0.03565040975809097\r\nStep 73, loss: 0.0330132320523262\r\nStep 74, loss: 0.034020647406578064\r\nStep 75, loss: 0.031051885336637497\r\nStep 76, loss: 0.029657039791345596\r\nStep 77, loss: 0.029128598049283028\r\nStep 78, loss: 0.02778560295701027\r\nRunning on 16 devices.\r\nCounting all components: ['action_in', 'encoder', 'vq', 'action_up', 'patch_up', 'decoder']\r\nParameter counts:\r\n{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\r\nStarting training from step 0...\r\nStep 0, loss: 0.3362254798412323\r\nStep 1, loss: 0.2750907838344574\r\nStep 2, loss: 0.23492641746997833\r\nStep 3, loss: 0.20701628923416138\r\nStep 4, loss: 0.18350884318351746\r\nStep 5, loss: 0.1659007966518402\r\nStep 6, loss: 0.150494784116745\r\nStep 7, loss: 0.14000122249126434\r\nStep 8, loss: 0.13274547457695007\r\nStep 9, loss: 0.12251953780651093\r\nStep 10, loss: 0.1170879453420639\r\nStep 11, loss: 0.10762540251016617\r\nStep 12, loss: 0.10258731245994568\r\nStep 13, loss: 0.10352618992328644\r\nStep 14, loss: 0.10688018053770065\r\nStep 15, loss: 0.10075119882822037\r\nStep 16, loss: 0.09745762497186661\r\nStep 17, loss: 0.09457919001579285\r\nStep 18, loss: 0.08164741843938828\r\nStep 19, loss: 0.0888347178697586\r\nStep 20, loss: 0.07998507469892502\r\nStep 216, loss: 0.010104210115969181\r\nStep 217, loss: 0.012307766824960709\r\nStep 218, loss: 0.012908908538520336\r\nStep 50, loss: 0.04401770606637001\r\nStep 51, loss: 0.042400676757097244\r\nStep 52, loss: 0.0430758111178875\r\nStep 53, loss: 0.043048195540905\r\nStep 54, loss: 0.04316592216491699\r\nStep 55, loss: 0.04429558664560318\r\nStep 56, loss: 0.042991869151592255\r\nStep 57, loss: 0.04130374267697334\r\nStep 58, loss: 0.03836307302117348\r\nStep 59, loss: 0.03951887786388397\r\nStep 60, loss: 0.03625255823135376\r\nStep 61, loss: 0.03791269659996033\r\nStep 62, loss: 0.03727186843752861\r\nStep 63, loss: 0.03937394171953201\r\nStep 64, loss: 0.036140769720077515\r\nStep 65, loss: 0.03697889298200607\r\nStep 66, loss: 0.03517203778028488\r\nStep 67, loss: 0.03560420870780945\r\nStep 68, loss: 0.037143804132938385\r\nStep 69, loss: 0.032461587339639664\r\nStep 70, loss: 0.03468279168009758\r\nStep 71, loss: 0.03513737767934799\r\nStep 72, loss: 0.03565040975809097\r\nStep 73, loss: 0.0330132320523262\r\nStep 74, loss: 0.034020647406578064\r\nStep 75, loss: 0.031051885336637497\r\nStep 76, loss: 0.029657039791345596\r\nStep 77, loss: 0.029128598049283028\r\nStep 78, loss: 0.02778560295701027\r\nStep 79, loss: 0.03003678098320961\r\nStep 80, loss: 0.030365584418177605\r\nStep 81, loss: 0.026201536878943443\r\nStep 82, loss: 0.028577476739883423\r\nStep 83, loss: 
0.02874593995511532\r\nStep 84, loss: 0.026943983510136604\r\nStep 85, loss: 0.0281868614256382\r\nStep 86, loss: 0.027384648099541664\r\nStep 87, loss: 0.02767602913081646\r\nStep 88, loss: 0.028130633756518364\r\nStep 89, loss: 0.02517537586390972\r\nStep 90, loss: 0.0255118440836668\r\nStep 91, loss: 0.028349988162517548\r\nStep 92, loss: 0.025830097496509552\r\nStep 93, loss: 0.02468285895884037\r\nStep 94, loss: 0.02751479484140873\r\nStep 95, loss: 0.023452816531062126\r\nStep 96, loss: 0.02339227870106697\r\nStep 97, loss: 0.023860404267907143\r\nStep 98, loss: 0.026367874816060066\r\nStep 99, loss: 0.02228216454386711\r\nStep 100, loss: 0.023430615663528442\r\nStep 101, loss: 0.021609408780932426\r\nStep 102, loss: 0.022437402978539467\r\nStep 103, loss: 0.021542737260460854\r\nStep 104, loss: 0.018982194364070892\r\nStep 105, loss: 0.021596087142825127\r\nStep 106, loss: 0.02188090980052948\r\nStep 21, loss: 0.07322828471660614\r\nStep 22, loss: 0.07593098282814026\r\nStep 23, loss: 0.07218942791223526\r\nStep 24, loss: 0.06460891664028168\r\nStep 25, loss: 0.07442361116409302\r\nStep 26, loss: 0.0644427239894867\r\nStep 27, loss: 0.06295198947191238\r\nStep 28, loss: 0.06914092600345612\r\nStep 29, loss: 0.06511834263801575\r\nStep 30, loss: 0.061374422162771225\r\nStep 31, loss: 0.059483930468559265\r\nStep 32, loss: 0.06902221590280533\r\nStep 33, loss: 0.060859691351652145\r\nStep 34, loss: 0.05683210864663124\r\nStep 35, loss: 0.05632608011364937\r\nStep 36, loss: 0.058733828365802765\r\nStep 37, loss: 0.05144477263092995\r\nStep 38, loss: 0.05117009952664375\r\nStep 39, loss: 0.05097543075680733\r\nStep 40, loss: 0.051942065358161926\r\nStep 41, loss: 0.046499162912368774\r\nStep 42, loss: 0.05159098282456398\r\nStep 43, loss: 0.04936894029378891\r\nStep 44, loss: 0.05515732616186142\r\nStep 45, loss: 0.04567846655845642\r\nStep 46, loss: 0.04675234481692314\r\nStep 47, loss: 0.04656634107232094\r\nStep 48, loss: 0.04983530566096306\r\nStep 49, loss: 0.049152761697769165\r\nRunning on 16 devices.\r\nCounting all components: ['action_in', 'encoder', 'vq', 'action_up', 'patch_up', 'decoder']\r\nParameter counts:\r\n{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\r\nStarting training from step 0...\r\nStep 0, loss: 0.3362254798412323\r\nStep 1, loss: 0.2750907838344574\r\nStep 2, loss: 0.23492641746997833\r\nStep 3, loss: 0.20701628923416138\r\nStep 4, loss: 0.18350884318351746\r\nStep 5, loss: 0.1659007966518402\r\nStep 6, loss: 0.150494784116745\r\nStep 7, loss: 0.14000122249126434\r\nStep 8, loss: 0.13274547457695007\r\nStep 9, loss: 0.12251953780651093\r\nStep 10, loss: 0.1170879453420639\r\nStep 11, loss: 0.10762540251016617\r\nStep 12, loss: 0.10258731245994568\r\nStep 13, loss: 0.10352618992328644\r\nStep 14, loss: 0.10688018053770065\r\nStep 15, loss: 0.10075119882822037\r\nStep 16, loss: 0.09745762497186661\r\nStep 17, loss: 0.09457919001579285\r\nStep 18, loss: 0.08164741843938828\r\nStep 19, loss: 0.0888347178697586\r\nStep 20, loss: 0.07998507469892502\r\nStep 79, loss: 0.03003678098320961\r\nStep 80, loss: 0.030365584418177605\r\nStep 81, loss: 0.026201536878943443\r\nStep 82, loss: 0.028577476739883423\r\nStep 83, loss: 0.02874593995511532\r\nStep 84, loss: 0.026943983510136604\r\nStep 85, loss: 0.0281868614256382\r\nStep 86, loss: 0.027384648099541664\r\nStep 87, loss: 0.02767602913081646\r\nStep 88, loss: 0.028130633756518364\r\nStep 89, loss: 0.02517537586390972\r\nStep 90, loss: 
0.0255118440836668\r\nStep 91, loss: 0.028349988162517548\r\nStep 92, loss: 0.025830097496509552\r\nStep 93, loss: 0.02468285895884037\r\nStep 94, loss: 0.02751479484140873\r\nStep 95, loss: 0.023452816531062126\r\nStep 96, loss: 0.02339227870106697\r\nStep 97, loss: 0.023860404267907143\r\nStep 98, loss: 0.026367874816060066\r\nStep 99, loss: 0.02228216454386711\r\nStep 100, loss: 0.023430615663528442\r\nStep 101, loss: 0.021609408780932426\r\nStep 102, loss: 0.022437402978539467\r\nStep 103, loss: 0.021542737260460854\r\nStep 104, loss: 0.018982194364070892\r\nStep 105, loss: 0.021596087142825127\r\nStep 106, loss: 0.02188090980052948\r\nStep 107, loss: 0.020319683477282524\r\nStep 50, loss: 0.04401770606637001\r\nStep 51, loss: 0.042400676757097244\r\nStep 52, loss: 0.0430758111178875\r\nStep 53, loss: 0.043048195540905\r\nStep 54, loss: 0.04316592216491699\r\nStep 55, loss: 0.04429558664560318\r\nStep 56, loss: 0.042991869151592255\r\nStep 57, loss: 0.04130374267697334\r\nStep 58, loss: 0.03836307302117348\r\nStep 59, loss: 0.03951887786388397\r\nStep 60, loss: 0.03625255823135376\r\nStep 61, loss: 0.03791269659996033\r\nStep 62, loss: 0.03727186843752861\r\nStep 63, loss: 0.03937394171953201\r\nStep 64, loss: 0.036140769720077515\r\nStep 65, loss: 0.03697889298200607\r\nStep 66, loss: 0.03517203778028488\r\nStep 67, loss: 0.03560420870780945\r\nStep 68, loss: 0.037143804132938385\r\nStep 69, loss: 0.032461587339639664\r\nStep 70, loss: 0.03468279168009758\r\nStep 71, loss: 0.03513737767934799\r\nStep 72, loss: 0.03565040975809097\r\nStep 73, loss: 0.0330132320523262\r\nStep 74, loss: 0.034020647406578064\r\nStep 75, loss: 0.031051885336637497\r\nStep 76, loss: 0.029657039791345596\r\nStep 77, loss: 0.029128598049283028\r\nStep 78, loss: 0.02778560295701027\r\nStep 21, loss: 0.07322828471660614\r\nStep 22, loss: 0.07593098282814026\r\nStep 23, loss: 0.07218942791223526\r\nStep 24, loss: 0.06460891664028168\r\nStep 25, loss: 0.07442361116409302\r\nStep 26, loss: 0.0644427239894867\r\nStep 27, loss: 0.06295198947191238\r\nStep 28, loss: 0.06914092600345612\r\nStep 29, loss: 0.06511834263801575\r\nStep 30, loss: 0.061374422162771225\r\nStep 31, loss: 0.059483930468559265\r\nStep 32, loss: 0.06902221590280533\r\nStep 33, loss: 0.060859691351652145\r\nStep 34, loss: 0.05683210864663124\r\nStep 35, loss: 0.05632608011364937\r\nStep 36, loss: 0.058733828365802765\r\nStep 37, loss: 0.05144477263092995\r\nStep 38, loss: 0.05117009952664375\r\nStep 39, loss: 0.05097543075680733\r\nStep 40, loss: 0.051942065358161926\r\nStep 41, loss: 0.046499162912368774\r\nStep 42, loss: 0.05159098282456398\r\nStep 43, loss: 0.04936894029378891\r\nStep 44, loss: 0.05515732616186142\r\nStep 45, loss: 0.04567846655845642\r\nStep 46, loss: 0.04675234481692314\r\nStep 47, loss: 0.04656634107232094\r\nStep 48, loss: 0.04983530566096306\r\nStep 49, loss: 0.049152761697769165\r\nStep 107, loss: 0.020319683477282524\r\nRunning on 16 devices.\r\nCounting all components: ['action_in', 'encoder', 'vq', 'action_up', 'patch_up', 'decoder']\r\nParameter counts:\r\n{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\r\nStarting training from step 0...\r\nStep 0, loss: 0.3362254798412323\r\nStep 1, loss: 0.2750907838344574\r\nStep 2, loss: 0.23492641746997833\r\nStep 3, loss: 0.20701628923416138\r\nStep 4, loss: 0.18350884318351746\r\nStep 5, loss: 0.1659007966518402\r\nStep 6, loss: 0.150494784116745\r\nStep 7, loss: 0.14000122249126434\r\nStep 8, 
loss: 0.13274547457695007\r\nStep 9, loss: 0.12251953780651093\r\nStep 10, loss: 0.1170879453420639\r\nStep 11, loss: 0.10762540251016617\r\nStep 12, loss: 0.10258731245994568\r\nStep 13, loss: 0.10352618992328644\r\nStep 14, loss: 0.10688018053770065\r\nStep 15, loss: 0.10075119882822037\r\nStep 16, loss: 0.09745762497186661\r\nStep 17, loss: 0.09457919001579285\r\nStep 18, loss: 0.08164741843938828\r\nStep 19, loss: 0.0888347178697586\r\nStep 20, loss: 0.07998507469892502\r\nStep 79, loss: 0.03003678098320961\r\nStep 80, loss: 0.030365584418177605\r\nStep 81, loss: 0.026201536878943443\r\nStep 82, loss: 0.028577476739883423\r\nStep 83, loss: 0.02874593995511532\r\nStep 84, loss: 0.026943983510136604\r\nStep 85, loss: 0.0281868614256382\r\nStep 86, loss: 0.027384648099541664\r\nStep 87, loss: 0.02767602913081646\r\nStep 88, loss: 0.028130633756518364\r\nStep 89, loss: 0.02517537586390972\r\nStep 90, loss: 0.0255118440836668\r\nStep 91, loss: 0.028349988162517548\r\nStep 92, loss: 0.025830097496509552\r\nStep 93, loss: 0.02468285895884037\r\nStep 94, loss: 0.02751479484140873\r\nStep 95, loss: 0.023452816531062126\r\nStep 96, loss: 0.02339227870106697\r\nStep 97, loss: 0.023860404267907143\r\nStep 98, loss: 0.026367874816060066\r\nStep 99, loss: 0.02228216454386711\r\nStep 100, loss: 0.023430615663528442\r\nStep 101, loss: 0.021609408780932426\r\nStep 102, loss: 0.022437402978539467\r\nStep 103, loss: 0.021542737260460854\r\nStep 104, loss: 0.018982194364070892\r\nStep 105, loss: 0.021596087142825127\r\nStep 106, loss: 0.02188090980052948\r\nStep 50, loss: 0.04401770606637001\r\nStep 51, loss: 0.042400676757097244\r\nStep 52, loss: 0.0430758111178875\r\nStep 53, loss: 0.043048195540905\r\nStep 54, loss: 0.04316592216491699\r\nStep 55, loss: 0.04429558664560318\r\nStep 56, loss: 0.042991869151592255\r\nStep 57, loss: 0.04130374267697334\r\nStep 58, loss: 0.03836307302117348\r\nStep 59, loss: 0.03951887786388397\r\nStep 60, loss: 0.03625255823135376\r\nStep 61, loss: 0.03791269659996033\r\nStep 62, loss: 0.03727186843752861\r\nStep 63, loss: 0.03937394171953201\r\nStep 64, loss: 0.036140769720077515\r\nStep 65, loss: 0.03697889298200607\r\nStep 66, loss: 0.03517203778028488\r\nStep 67, loss: 0.03560420870780945\r\nStep 68, loss: 0.037143804132938385\r\nStep 69, loss: 0.032461587339639664\r\nStep 70, loss: 0.03468279168009758\r\nStep 71, loss: 0.03513737767934799\r\nStep 72, loss: 0.03565040975809097\r\nStep 73, loss: 0.0330132320523262\r\nStep 74, loss: 0.034020647406578064\r\nStep 75, loss: 0.031051885336637497\r\nStep 76, loss: 0.029657039791345596\r\nStep 77, loss: 0.029128598049283028\r\nStep 78, loss: 0.02778560295701027\r\nStep 108, loss: 0.02188311144709587\r\nStep 109, loss: 0.022975796833634377\r\nStep 110, loss: 0.01976914331316948\r\nStep 111, loss: 0.02313579060137272\r\nStep 112, loss: 0.020566441118717194\r\nStep 113, loss: 0.019332319498062134\r\nStep 114, loss: 0.019270069897174835\r\nStep 115, loss: 0.021202340722084045\r\nStep 116, loss: 0.020022688433527946\r\nStep 117, loss: 0.01845189556479454\r\nStep 118, loss: 0.021814992651343346\r\nStep 119, loss: 0.01873568259179592\r\nStep 120, loss: 0.019324379041790962\r\nStep 121, loss: 0.01696816086769104\r\nStep 122, loss: 0.01798570528626442\r\nStep 123, loss: 0.01626499742269516\r\nStep 124, loss: 0.017125030979514122\r\nStep 125, loss: 0.020170114934444427\r\nStep 126, loss: 0.017771216109395027\r\nStep 127, loss: 0.018780428916215897\r\nStep 128, loss: 0.017509611323475838\r\nStep 129, loss: 0.016392791643738747\r\nStep 
130, loss: 0.0161692313849926\r\nStep 131, loss: 0.016657739877700806\r\nStep 132, loss: 0.015444198623299599\r\nStep 133, loss: 0.016056101769208908\r\nStep 134, loss: 0.01770874857902527\r\nStep 21, loss: 0.07322828471660614\r\nStep 22, loss: 0.07593098282814026\r\nStep 23, loss: 0.07218942791223526\r\nStep 24, loss: 0.06460891664028168\r\nStep 25, loss: 0.07442361116409302\r\nStep 26, loss: 0.0644427239894867\r\nStep 27, loss: 0.06295198947191238\r\nStep 28, loss: 0.06914092600345612\r\nStep 29, loss: 0.06511834263801575\r\nStep 30, loss: 0.061374422162771225\r\nStep 31, loss: 0.059483930468559265\r\nStep 32, loss: 0.06902221590280533\r\nStep 33, loss: 0.060859691351652145\r\nStep 34, loss: 0.05683210864663124\r\nStep 35, loss: 0.05632608011364937\r\nStep 36, loss: 0.058733828365802765\r\nStep 37, loss: 0.05144477263092995\r\nStep 38, loss: 0.05117009952664375\r\nStep 39, loss: 0.05097543075680733\r\nStep 40, loss: 0.051942065358161926\r\nStep 41, loss: 0.046499162912368774\r\nStep 42, loss: 0.05159098282456398\r\nStep 43, loss: 0.04936894029378891\r\nStep 44, loss: 0.05515732616186142\r\nStep 45, loss: 0.04567846655845642\r\nStep 46, loss: 0.04675234481692314\r\nStep 47, loss: 0.04656634107232094\r\nStep 48, loss: 0.04983530566096306\r\nStep 49, loss: 0.049152761697769165\r\nStep 107, loss: 0.020319683477282524\r\nStep 79, loss: 0.03003678098320961\r\nStep 80, loss: 0.030365584418177605\r\nStep 81, loss: 0.026201536878943443\r\nStep 82, loss: 0.028577476739883423\r\nStep 83, loss: 0.02874593995511532\r\nStep 84, loss: 0.026943983510136604\r\nStep 85, loss: 0.0281868614256382\r\nStep 86, loss: 0.027384648099541664\r\nStep 87, loss: 0.02767602913081646\r\nStep 88, loss: 0.028130633756518364\r\nStep 89, loss: 0.02517537586390972\r\nStep 90, loss: 0.0255118440836668\r\nStep 91, loss: 0.028349988162517548\r\nStep 92, loss: 0.025830097496509552\r\nStep 93, loss: 0.02468285895884037\r\nStep 94, loss: 0.02751479484140873\r\nStep 95, loss: 0.023452816531062126\r\nStep 96, loss: 0.02339227870106697\r\nStep 97, loss: 0.023860404267907143\r\nStep 98, loss: 0.026367874816060066\r\nStep 99, loss: 0.02228216454386711\r\nStep 100, loss: 0.023430615663528442\r\nStep 101, loss: 0.021609408780932426\r\nStep 102, loss: 0.022437402978539467\r\nStep 103, loss: 0.021542737260460854\r\nStep 104, loss: 0.018982194364070892\r\nStep 105, loss: 0.021596087142825127\r\nStep 106, loss: 0.02188090980052948\r\nStep 135, loss: 0.01826588436961174\r\nStep 136, loss: 0.017154138535261154\r\nStep 137, loss: 0.016903536394238472\r\nStep 138, loss: 0.017067160457372665\r\nStep 139, loss: 0.014987957663834095\r\nStep 140, loss: 0.014494623988866806\r\nStep 141, loss: 0.015896758064627647\r\nStep 142, loss: 0.015160825103521347\r\nStep 143, loss: 0.015809522941708565\r\nStep 144, loss: 0.015752172097563744\r\nStep 145, loss: 0.01583671011030674\r\nStep 146, loss: 0.013611181639134884\r\nStep 147, loss: 0.017301112413406372\r\nStep 148, loss: 0.013612732291221619\r\nStep 149, loss: 0.014683591201901436\r\nStep 150, loss: 0.01602398231625557\r\nStep 151, loss: 0.016187656670808792\r\nStep 152, loss: 0.014394099824130535\r\nStep 153, loss: 0.0142692094668746\r\nStep 154, loss: 0.014798534102737904\r\nStep 155, loss: 0.013772721402347088\r\nStep 156, loss: 0.012673899531364441\r\nStep 157, loss: 0.013326121494174004\r\nStep 158, loss: 0.014826718717813492\r\nStep 159, loss: 0.013340544886887074\r\nStep 160, loss: 0.013956693932414055\r\nStep 161, loss: 0.013649641536176205\r\nStep 50, loss: 0.04401770606637001\r\nStep 51, loss: 
0.042400676757097244\r\nStep 52, loss: 0.0430758111178875\r\nStep 53, loss: 0.043048195540905\r\nStep 54, loss: 0.04316592216491699\r\nStep 55, loss: 0.04429558664560318\r\nStep 56, loss: 0.042991869151592255\r\nStep 57, loss: 0.04130374267697334\r\nStep 58, loss: 0.03836307302117348\r\nStep 59, loss: 0.03951887786388397\r\nStep 60, loss: 0.03625255823135376\r\nStep 61, loss: 0.03791269659996033\r\nStep 62, loss: 0.03727186843752861\r\nStep 63, loss: 0.03937394171953201\r\nStep 64, loss: 0.036140769720077515\r\nStep 65, loss: 0.03697889298200607\r\nStep 66, loss: 0.03517203778028488\r\nStep 67, loss: 0.03560420870780945\r\nStep 68, loss: 0.037143804132938385\r\nStep 69, loss: 0.032461587339639664\r\nStep 70, loss: 0.03468279168009758\r\nStep 71, loss: 0.03513737767934799\r\nStep 72, loss: 0.03565040975809097\r\nStep 73, loss: 0.0330132320523262\r\nStep 74, loss: 0.034020647406578064\r\nStep 75, loss: 0.031051885336637497\r\nStep 76, loss: 0.029657039791345596\r\nStep 77, loss: 0.029128598049283028\r\nStep 78, loss: 0.02778560295701027\r\nRunning on 16 devices.\r\nCounting all components: ['action_in', 'encoder', 'vq', 'action_up', 'patch_up', 'decoder']\r\nParameter counts:\r\n{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\r\nStarting training from step 0...\r\nStep 0, loss: 0.3362254798412323\r\nStep 1, loss: 0.2750907838344574\r\nStep 2, loss: 0.23492641746997833\r\nStep 3, loss: 0.20701628923416138\r\nStep 4, loss: 0.18350884318351746\r\nStep 5, loss: 0.1659007966518402\r\nStep 6, loss: 0.150494784116745\r\nStep 7, loss: 0.14000122249126434\r\nStep 8, loss: 0.13274547457695007\r\nStep 9, loss: 0.12251953780651093\r\nStep 10, loss: 0.1170879453420639\r\nStep 11, loss: 0.10762540251016617\r\nStep 12, loss: 0.10258731245994568\r\nStep 13, loss: 0.10352618992328644\r\nStep 14, loss: 0.10688018053770065\r\nStep 15, loss: 0.10075119882822037\r\nStep 16, loss: 0.09745762497186661\r\nStep 17, loss: 0.09457919001579285\r\nStep 18, loss: 0.08164741843938828\r\nStep 19, loss: 0.0888347178697586\r\nStep 20, loss: 0.07998507469892502\r\nStep 107, loss: 0.020319683477282524\r\nStep 162, loss: 0.01341031864285469\r\nStep 163, loss: 0.015223181806504726\r\nStep 164, loss: 0.013461416587233543\r\nStep 165, loss: 0.016614627093076706\r\nStep 166, loss: 0.014446724206209183\r\nStep 167, loss: 0.01382629293948412\r\nStep 168, loss: 0.013771926052868366\r\nStep 169, loss: 0.015074227936565876\r\nStep 170, loss: 0.013416316360235214\r\nStep 171, loss: 0.014910782687366009\r\nStep 172, loss: 0.014735950157046318\r\nStep 173, loss: 0.01343217957764864\r\nStep 174, loss: 0.014639505185186863\r\nStep 175, loss: 0.015760699287056923\r\nStep 176, loss: 0.01325331348925829\r\nStep 177, loss: 0.012775450944900513\r\nStep 178, loss: 0.012347170151770115\r\nStep 179, loss: 0.01201647985726595\r\nStep 180, loss: 0.011997772380709648\r\nStep 181, loss: 0.0127970976755023\r\nStep 182, loss: 0.012849961407482624\r\nStep 183, loss: 0.013519403524696827\r\nStep 184, loss: 0.013941296376287937\r\nStep 185, loss: 0.011854317039251328\r\nStep 186, loss: 0.012987681664526463\r\nStep 187, loss: 0.013691768981516361\r\nStep 188, loss: 0.01244765892624855\r\nStep 79, loss: 0.03003678098320961\r\nStep 80, loss: 0.030365584418177605\r\nStep 81, loss: 0.026201536878943443\r\nStep 82, loss: 0.028577476739883423\r\nStep 83, loss: 0.02874593995511532\r\nStep 84, loss: 0.026943983510136604\r\nStep 85, loss: 0.0281868614256382\r\nStep 86, loss: 
0.027384648099541664\r\nStep 87, loss: 0.02767602913081646\r\nStep 88, loss: 0.028130633756518364\r\nStep 89, loss: 0.02517537586390972\r\nStep 90, loss: 0.0255118440836668\r\nStep 91, loss: 0.028349988162517548\r\nStep 92, loss: 0.025830097496509552\r\nStep 93, loss: 0.02468285895884037\r\nStep 94, loss: 0.02751479484140873\r\nStep 95, loss: 0.023452816531062126\r\nStep 96, loss: 0.02339227870106697\r\nStep 97, loss: 0.023860404267907143\r\nStep 98, loss: 0.026367874816060066\r\nStep 99, loss: 0.02228216454386711\r\nStep 100, loss: 0.023430615663528442\r\nStep 101, loss: 0.021609408780932426\r\nStep 102, loss: 0.022437402978539467\r\nStep 103, loss: 0.021542737260460854\r\nStep 104, loss: 0.018982194364070892\r\nStep 105, loss: 0.021596087142825127\r\nStep 106, loss: 0.02188090980052948\r\nStep 21, loss: 0.07322828471660614\r\nStep 22, loss: 0.07593098282814026\r\nStep 23, loss: 0.07218942791223526\r\nStep 24, loss: 0.06460891664028168\r\nStep 25, loss: 0.07442361116409302\r\nStep 26, loss: 0.0644427239894867\r\nStep 27, loss: 0.06295198947191238\r\nStep 28, loss: 0.06914092600345612\r\nStep 29, loss: 0.06511834263801575\r\nStep 30, loss: 0.061374422162771225\r\nStep 31, loss: 0.059483930468559265\r\nStep 32, loss: 0.06902221590280533\r\nStep 33, loss: 0.060859691351652145\r\nStep 34, loss: 0.05683210864663124\r\nStep 35, loss: 0.05632608011364937\r\nStep 36, loss: 0.058733828365802765\r\nStep 37, loss: 0.05144477263092995\r\nStep 38, loss: 0.05117009952664375\r\nStep 39, loss: 0.05097543075680733\r\nStep 40, loss: 0.051942065358161926\r\nStep 41, loss: 0.046499162912368774\r\nStep 42, loss: 0.05159098282456398\r\nStep 43, loss: 0.04936894029378891\r\nStep 44, loss: 0.05515732616186142\r\nStep 45, loss: 0.04567846655845642\r\nStep 46, loss: 0.04675234481692314\r\nStep 47, loss: 0.04656634107232094\r\nStep 48, loss: 0.04983530566096306\r\nStep 49, loss: 0.049152761697769165\r\nStep 108, loss: 0.02188311144709587\r\nStep 109, loss: 0.022975796833634377\r\nStep 110, loss: 0.01976914331316948\r\nStep 111, loss: 0.02313579060137272\r\nStep 112, loss: 0.020566441118717194\r\nStep 113, loss: 0.019332319498062134\r\nStep 114, loss: 0.019270069897174835\r\nStep 115, loss: 0.021202340722084045\r\nStep 116, loss: 0.020022688433527946\r\nStep 117, loss: 0.01845189556479454\r\nStep 118, loss: 0.021814992651343346\r\nStep 119, loss: 0.01873568259179592\r\nStep 120, loss: 0.019324379041790962\r\nStep 121, loss: 0.01696816086769104\r\nStep 122, loss: 0.01798570528626442\r\nStep 123, loss: 0.01626499742269516\r\nStep 124, loss: 0.017125030979514122\r\nStep 125, loss: 0.020170114934444427\r\nStep 126, loss: 0.017771216109395027\r\nStep 127, loss: 0.018780428916215897\r\nStep 128, loss: 0.017509611323475838\r\nStep 129, loss: 0.016392791643738747\r\nStep 130, loss: 0.0161692313849926\r\nStep 131, loss: 0.016657739877700806\r\nStep 132, loss: 0.015444198623299599\r\nStep 133, loss: 0.016056101769208908\r\nStep 134, loss: 0.01770874857902527\r\nStep 189, loss: 0.01361786387860775\r\nStep 190, loss: 0.013386002741754055\r\nStep 191, loss: 0.013909702189266682\r\nStep 192, loss: 0.014463458210229874\r\nStep 193, loss: 0.013295559212565422\r\nStep 194, loss: 0.01352232601493597\r\nStep 195, loss: 0.01174916047602892\r\nStep 196, loss: 0.012223197147250175\r\nStep 197, loss: 0.011510714888572693\r\nStep 198, loss: 0.012651047669351101\r\nStep 199, loss: 0.010755637660622597\r\nStep 200, loss: 0.013457592576742172\r\nStep 201, loss: 0.0138861034065485\r\nStep 202, loss: 0.012667758390307426\r\nStep 203, loss: 
0.012279652059078217\r\nStep 204, loss: 0.012127796187996864\r\nStep 205, loss: 0.01303846761584282\r\nStep 206, loss: 0.012841872870922089\r\nStep 207, loss: 0.01342686451971531\r\nStep 208, loss: 0.011667580343782902\r\nStep 209, loss: 0.01000010035932064\r\nStep 210, loss: 0.011521648615598679\r\nStep 211, loss: 0.01216675341129303\r\nStep 212, loss: 0.01002007070928812\r\nStep 213, loss: 0.01219736784696579\r\nStep 214, loss: 0.011972173117101192\r\nStep 215, loss: 0.010780243203043938\r\nStep 107, loss: 0.020319683477282524\r\nStep 50, loss: 0.04401770606637001\r\nStep 51, loss: 0.042400676757097244\r\nStep 52, loss: 0.0430758111178875\r\nStep 53, loss: 0.043048195540905\r\nStep 54, loss: 0.04316592216491699\r\nStep 55, loss: 0.04429558664560318\r\nStep 56, loss: 0.042991869151592255\r\nStep 57, loss: 0.04130374267697334\r\nStep 58, loss: 0.03836307302117348\r\nStep 59, loss: 0.03951887786388397\r\nStep 60, loss: 0.03625255823135376\r\nStep 61, loss: 0.03791269659996033\r\nStep 62, loss: 0.03727186843752861\r\nStep 63, loss: 0.03937394171953201\r\nStep 64, loss: 0.036140769720077515\r\nStep 65, loss: 0.03697889298200607\r\nStep 66, loss: 0.03517203778028488\r\nStep 67, loss: 0.03560420870780945\r\nStep 68, loss: 0.037143804132938385\r\nStep 69, loss: 0.032461587339639664\r\nStep 70, loss: 0.03468279168009758\r\nStep 71, loss: 0.03513737767934799\r\nStep 72, loss: 0.03565040975809097\r\nStep 73, loss: 0.0330132320523262\r\nStep 74, loss: 0.034020647406578064\r\nStep 75, loss: 0.031051885336637497\r\nStep 76, loss: 0.029657039791345596\r\nStep 77, loss: 0.029128598049283028\r\nStep 78, loss: 0.02778560295701027\r\nStep 135, loss: 0.01826588436961174\r\nStep 136, loss: 0.017154138535261154\r\nStep 137, loss: 0.016903536394238472\r\nStep 138, loss: 0.017067160457372665\r\nStep 139, loss: 0.014987957663834095\r\nStep 140, loss: 0.014494623988866806\r\nStep 141, loss: 0.015896758064627647\r\nStep 142, loss: 0.015160825103521347\r\nStep 143, loss: 0.015809522941708565\r\nStep 144, loss: 0.015752172097563744\r\nStep 145, loss: 0.01583671011030674\r\nStep 146, loss: 0.013611181639134884\r\nStep 147, loss: 0.017301112413406372\r\nStep 148, loss: 0.013612732291221619\r\nStep 149, loss: 0.014683591201901436\r\nStep 150, loss: 0.01602398231625557\r\nStep 151, loss: 0.016187656670808792\r\nStep 152, loss: 0.014394099824130535\r\nStep 153, loss: 0.0142692094668746\r\nStep 154, loss: 0.014798534102737904\r\nStep 155, loss: 0.013772721402347088\r\nStep 156, loss: 0.012673899531364441\r\nStep 157, loss: 0.013326121494174004\r\nStep 158, loss: 0.014826718717813492\r\nStep 159, loss: 0.013340544886887074\r\nStep 160, loss: 0.013956693932414055\r\nStep 161, loss: 0.013649641536176205\r\nStep 216, loss: 0.010104210115969181\r\nStep 217, loss: 0.012307766824960709\r\nStep 218, loss: 0.012908908538520336\r\nStep 108, loss: 0.02188311144709587\r\nStep 109, loss: 0.022975796833634377\r\nStep 110, loss: 0.01976914331316948\r\nStep 111, loss: 0.02313579060137272\r\nStep 112, loss: 0.020566441118717194\r\nStep 113, loss: 0.019332319498062134\r\nStep 114, loss: 0.019270069897174835\r\nStep 115, loss: 0.021202340722084045\r\nStep 116, loss: 0.020022688433527946\r\nStep 117, loss: 0.01845189556479454\r\nStep 118, loss: 0.021814992651343346\r\nStep 119, loss: 0.01873568259179592\r\nStep 120, loss: 0.019324379041790962\r\nStep 121, loss: 0.01696816086769104\r\nStep 122, loss: 0.01798570528626442\r\nStep 123, loss: 0.01626499742269516\r\nStep 124, loss: 0.017125030979514122\r\nStep 125, loss: 
0.020170114934444427\r\nStep 126, loss: 0.017771216109395027\r\nStep 127, loss: 0.018780428916215897\r\nStep 128, loss: 0.017509611323475838\r\nStep 129, loss: 0.016392791643738747\r\nStep 130, loss: 0.0161692313849926\r\nStep 131, loss: 0.016657739877700806\r\nStep 132, loss: 0.015444198623299599\r\nStep 133, loss: 0.016056101769208908\r\nStep 134, loss: 0.01770874857902527\r\nStep 79, loss: 0.03003678098320961\r\nStep 80, loss: 0.030365584418177605\r\nStep 81, loss: 0.026201536878943443\r\nStep 82, loss: 0.028577476739883423\r\nStep 83, loss: 0.02874593995511532\r\nStep 84, loss: 0.026943983510136604\r\nStep 85, loss: 0.0281868614256382\r\nStep 86, loss: 0.027384648099541664\r\nStep 87, loss: 0.02767602913081646\r\nStep 88, loss: 0.028130633756518364\r\nStep 89, loss: 0.02517537586390972\r\nStep 90, loss: 0.0255118440836668\r\nStep 91, loss: 0.028349988162517548\r\nStep 92, loss: 0.025830097496509552\r\nStep 93, loss: 0.02468285895884037\r\nStep 94, loss: 0.02751479484140873\r\nStep 95, loss: 0.023452816531062126\r\nStep 96, loss: 0.02339227870106697\r\nStep 97, loss: 0.023860404267907143\r\nStep 98, loss: 0.026367874816060066\r\nStep 99, loss: 0.02228216454386711\r\nStep 100, loss: 0.023430615663528442\r\nStep 101, loss: 0.021609408780932426\r\nStep 102, loss: 0.022437402978539467\r\nStep 103, loss: 0.021542737260460854\r\nStep 104, loss: 0.018982194364070892\r\nStep 105, loss: 0.021596087142825127\r\nStep 106, loss: 0.02188090980052948\r\nStep 162, loss: 0.01341031864285469\r\nStep 163, loss: 0.015223181806504726\r\nStep 164, loss: 0.013461416587233543\r\nStep 165, loss: 0.016614627093076706\r\nStep 166, loss: 0.014446724206209183\r\nStep 167, loss: 0.01382629293948412\r\nStep 168, loss: 0.013771926052868366\r\nStep 169, loss: 0.015074227936565876\r\nStep 170, loss: 0.013416316360235214\r\nStep 171, loss: 0.014910782687366009\r\nStep 172, loss: 0.014735950157046318\r\nStep 173, loss: 0.01343217957764864\r\nStep 174, loss: 0.014639505185186863\r\nStep 175, loss: 0.015760699287056923\r\nStep 176, loss: 0.01325331348925829\r\nStep 177, loss: 0.012775450944900513\r\nStep 178, loss: 0.012347170151770115\r\nStep 179, loss: 0.01201647985726595\r\nStep 180, loss: 0.011997772380709648\r\nStep 181, loss: 0.0127970976755023\r\nStep 182, loss: 0.012849961407482624\r\nStep 183, loss: 0.013519403524696827\r\nStep 184, loss: 0.013941296376287937\r\nStep 185, loss: 0.011854317039251328\r\nStep 186, loss: 0.012987681664526463\r\nStep 187, loss: 0.013691768981516361\r\nStep 188, loss: 0.01244765892624855\r\nStep 108, loss: 0.02188311144709587\r\nStep 109, loss: 0.022975796833634377\r\nStep 110, loss: 0.01976914331316948\r\nStep 111, loss: 0.02313579060137272\r\nStep 112, loss: 0.020566441118717194\r\nStep 113, loss: 0.019332319498062134\r\nStep 114, loss: 0.019270069897174835\r\nStep 115, loss: 0.021202340722084045\r\nStep 116, loss: 0.020022688433527946\r\nStep 117, loss: 0.01845189556479454\r\nStep 118, loss: 0.021814992651343346\r\nStep 119, loss: 0.01873568259179592\r\nStep 120, loss: 0.019324379041790962\r\nStep 121, loss: 0.01696816086769104\r\nStep 122, loss: 0.01798570528626442\r\nStep 123, loss: 0.01626499742269516\r\nStep 124, loss: 0.017125030979514122\r\nStep 125, loss: 0.020170114934444427\r\nStep 126, loss: 0.017771216109395027\r\nStep 127, loss: 0.018780428916215897\r\nStep 128, loss: 0.017509611323475838\r\nStep 129, loss: 0.016392791643738747\r\nStep 130, loss: 0.0161692313849926\r\nStep 131, loss: 0.016657739877700806\r\nStep 132, loss: 0.015444198623299599\r\nStep 133, loss: 
0.016056101769208908\r\nStep 134, loss: 0.01770874857902527\r\nStep 135, loss: 0.01826588436961174\r\nStep 136, loss: 0.017154138535261154\r\nStep 137, loss: 0.016903536394238472\r\nStep 138, loss: 0.017067160457372665\r\nStep 139, loss: 0.014987957663834095\r\nStep 140, loss: 0.014494623988866806\r\nStep 141, loss: 0.015896758064627647\r\nStep 142, loss: 0.015160825103521347\r\nStep 143, loss: 0.015809522941708565\r\nStep 144, loss: 0.015752172097563744\r\nStep 145, loss: 0.01583671011030674\r\nStep 146, loss: 0.013611181639134884\r\nStep 147, loss: 0.017301112413406372\r\nStep 148, loss: 0.013612732291221619\r\nStep 149, loss: 0.014683591201901436\r\nStep 150, loss: 0.01602398231625557\r\nStep 151, loss: 0.016187656670808792\r\nStep 152, loss: 0.014394099824130535\r\nStep 153, loss: 0.0142692094668746\r\nStep 154, loss: 0.014798534102737904\r\nStep 155, loss: 0.013772721402347088\r\nStep 156, loss: 0.012673899531364441\r\nStep 157, loss: 0.013326121494174004\r\nStep 158, loss: 0.014826718717813492\r\nStep 159, loss: 0.013340544886887074\r\nStep 160, loss: 0.013956693932414055\r\nStep 161, loss: 0.013649641536176205\r\nStep 107, loss: 0.020319683477282524\r\nStep 189, loss: 0.01361786387860775\r\nStep 190, loss: 0.013386002741754055\r\nStep 191, loss: 0.013909702189266682\r\nStep 192, loss: 0.014463458210229874\r\nStep 193, loss: 0.013295559212565422\r\nStep 194, loss: 0.01352232601493597\r\nStep 195, loss: 0.01174916047602892\r\nStep 196, loss: 0.012223197147250175\r\nStep 197, loss: 0.011510714888572693\r\nStep 198, loss: 0.012651047669351101\r\nStep 199, loss: 0.010755637660622597\r\nStep 200, loss: 0.013457592576742172\r\nStep 201, loss: 0.0138861034065485\r\nStep 202, loss: 0.012667758390307426\r\nStep 203, loss: 0.012279652059078217\r\nStep 204, loss: 0.012127796187996864\r\nStep 205, loss: 0.01303846761584282\r\nStep 206, loss: 0.012841872870922089\r\nStep 207, loss: 0.01342686451971531\r\nStep 208, loss: 0.011667580343782902\r\nStep 209, loss: 0.01000010035932064\r\nStep 210, loss: 0.011521648615598679\r\nStep 211, loss: 0.01216675341129303\r\nStep 212, loss: 0.01002007070928812\r\nStep 213, loss: 0.01219736784696579\r\nStep 214, loss: 0.011972173117101192\r\nStep 215, loss: 0.010780243203043938\r\nStep 135, loss: 0.01826588436961174\r\nStep 136, loss: 0.017154138535261154\r\nStep 137, loss: 0.016903536394238472\r\nStep 138, loss: 0.017067160457372665\r\nStep 139, loss: 0.014987957663834095\r\nStep 140, loss: 0.014494623988866806\r\nStep 141, loss: 0.015896758064627647\r\nStep 142, loss: 0.015160825103521347\r\nStep 143, loss: 0.015809522941708565\r\nStep 144, loss: 0.015752172097563744\r\nStep 145, loss: 0.01583671011030674\r\nStep 146, loss: 0.013611181639134884\r\nStep 147, loss: 0.017301112413406372\r\nStep 148, loss: 0.013612732291221619\r\nStep 149, loss: 0.014683591201901436\r\nStep 150, loss: 0.01602398231625557\r\nStep 151, loss: 0.016187656670808792\r\nStep 152, loss: 0.014394099824130535\r\nStep 153, loss: 0.0142692094668746\r\nStep 154, loss: 0.014798534102737904\r\nStep 155, loss: 0.013772721402347088\r\nStep 156, loss: 0.012673899531364441\r\nStep 157, loss: 0.013326121494174004\r\nStep 158, loss: 0.014826718717813492\r\nStep 159, loss: 0.013340544886887074\r\nStep 160, loss: 0.013956693932414055\r\nStep 161, loss: 0.013649641536176205\r\nStep 162, loss: 0.01341031864285469\r\nStep 163, loss: 0.015223181806504726\r\nStep 164, loss: 0.013461416587233543\r\nStep 165, loss: 0.016614627093076706\r\nStep 166, loss: 0.014446724206209183\r\nStep 167, loss: 
0.01382629293948412\r\nStep 168, loss: 0.013771926052868366\r\nStep 169, loss: 0.015074227936565876\r\nStep 170, loss: 0.013416316360235214\r\nStep 171, loss: 0.014910782687366009\r\nStep 172, loss: 0.014735950157046318\r\nStep 173, loss: 0.01343217957764864\r\nStep 174, loss: 0.014639505185186863\r\nStep 175, loss: 0.015760699287056923\r\nStep 176, loss: 0.01325331348925829\r\nStep 177, loss: 0.012775450944900513\r\nStep 178, loss: 0.012347170151770115\r\nStep 179, loss: 0.01201647985726595\r\nStep 180, loss: 0.011997772380709648\r\nStep 181, loss: 0.0127970976755023\r\nStep 182, loss: 0.012849961407482624\r\nStep 183, loss: 0.013519403524696827\r\nStep 184, loss: 0.013941296376287937\r\nStep 185, loss: 0.011854317039251328\r\nStep 186, loss: 0.012987681664526463\r\nStep 187, loss: 0.013691768981516361\r\nStep 188, loss: 0.01244765892624855\r\nStep 108, loss: 0.02188311144709587\r\nStep 109, loss: 0.022975796833634377\r\nStep 110, loss: 0.01976914331316948\r\nStep 111, loss: 0.02313579060137272\r\nStep 112, loss: 0.020566441118717194\r\nStep 113, loss: 0.019332319498062134\r\nStep 114, loss: 0.019270069897174835\r\nStep 115, loss: 0.021202340722084045\r\nStep 116, loss: 0.020022688433527946\r\nStep 117, loss: 0.01845189556479454\r\nStep 118, loss: 0.021814992651343346\r\nStep 119, loss: 0.01873568259179592\r\nStep 120, loss: 0.019324379041790962\r\nStep 121, loss: 0.01696816086769104\r\nStep 122, loss: 0.01798570528626442\r\nStep 123, loss: 0.01626499742269516\r\nStep 124, loss: 0.017125030979514122\r\nStep 125, loss: 0.020170114934444427\r\nStep 126, loss: 0.017771216109395027\r\nStep 127, loss: 0.018780428916215897\r\nStep 128, loss: 0.017509611323475838\r\nStep 129, loss: 0.016392791643738747\r\nStep 130, loss: 0.0161692313849926\r\nStep 131, loss: 0.016657739877700806\r\nStep 132, loss: 0.015444198623299599\r\nStep 133, loss: 0.016056101769208908\r\nStep 134, loss: 0.01770874857902527\r\nStep 216, loss: 0.010104210115969181\r\nStep 217, loss: 0.012307766824960709\r\nStep 218, loss: 0.012908908538520336\r\nStep 162, loss: 0.01341031864285469\r\nStep 163, loss: 0.015223181806504726\r\nStep 164, loss: 0.013461416587233543\r\nStep 165, loss: 0.016614627093076706\r\nStep 166, loss: 0.014446724206209183\r\nStep 167, loss: 0.01382629293948412\r\nStep 168, loss: 0.013771926052868366\r\nStep 169, loss: 0.015074227936565876\r\nStep 170, loss: 0.013416316360235214\r\nStep 171, loss: 0.014910782687366009\r\nStep 172, loss: 0.014735950157046318\r\nStep 173, loss: 0.01343217957764864\r\nStep 174, loss: 0.014639505185186863\r\nStep 175, loss: 0.015760699287056923\r\nStep 176, loss: 0.01325331348925829\r\nStep 177, loss: 0.012775450944900513\r\nStep 178, loss: 0.012347170151770115\r\nStep 179, loss: 0.01201647985726595\r\nStep 180, loss: 0.011997772380709648\r\nStep 181, loss: 0.0127970976755023\r\nStep 182, loss: 0.012849961407482624\r\nStep 183, loss: 0.013519403524696827\r\nStep 184, loss: 0.013941296376287937\r\nStep 185, loss: 0.011854317039251328\r\nStep 186, loss: 0.012987681664526463\r\nStep 187, loss: 0.013691768981516361\r\nStep 188, loss: 0.01244765892624855\r\nStep 189, loss: 0.01361786387860775\r\nStep 190, loss: 0.013386002741754055\r\nStep 191, loss: 0.013909702189266682\r\nStep 192, loss: 0.014463458210229874\r\nStep 193, loss: 0.013295559212565422\r\nStep 194, loss: 0.01352232601493597\r\nStep 195, loss: 0.01174916047602892\r\nStep 196, loss: 0.012223197147250175\r\nStep 197, loss: 0.011510714888572693\r\nStep 198, loss: 0.012651047669351101\r\nStep 199, loss: 
0.010755637660622597\r\nRunning on 16 devices.\r\nCounting all components: ['action_in', 'encoder', 'vq', 'action_up', 'patch_up', 'decoder']\r\nParameter counts:\r\n",,terminal_output
+289,1528349,"TERMINAL",0,0,"{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\r\nStarting training from step 0...\r\nStep 0, loss: 0.3362254798412323\r\nStep 1, loss: 0.2750907838344574\r\nStep 2, loss: 0.23492641746997833\r\nStep 3, loss: 0.20701628923416138\r\nStep 4, loss: 0.18350884318351746\r\nStep 5, loss: 0.1659007966518402\r\nStep 6, loss: 0.150494784116745\r\nStep 7, loss: 0.14000122249126434\r\nStep 8, loss: 0.13274547457695007\r\nStep 9, loss: 0.12251953780651093\r\nStep 10, loss: 0.1170879453420639\r\nStep 11, loss: 0.10762540251016617\r\nStep 12, loss: 0.10258731245994568\r\nStep 13, loss: 0.10352618992328644\r\nStep 14, loss: 0.10688018053770065\r\nStep 15, loss: 0.10075119882822037\r\nStep 16, loss: 0.09745762497186661\r\nStep 17, loss: 0.09457919001579285\r\nStep 18, loss: 0.08164741843938828\r\nStep 19, loss: 0.0888347178697586\r\nStep 20, loss: 0.07998507469892502\r\nStep 21, loss: 0.07322828471660614\r\nStep 22, loss: 0.07593098282814026\r\nStep 23, loss: 0.07218942791223526\r\nStep 24, loss: 0.06460891664028168\r\nStep 25, loss: 0.07442361116409302\r\nStep 26, loss: 0.0644427239894867\r\nStep 27, loss: 0.06295198947191238\r\nStep 28, loss: 0.06914092600345612\r\nStep 29, loss: 0.06511834263801575\r\nStep 30, loss: 0.061374422162771225\r\nStep 31, loss: 0.059483930468559265\r\nStep 32, loss: 0.06902221590280533\r\nStep 33, loss: 0.060859691351652145\r\nStep 34, loss: 0.05683210864663124\r\nStep 35, loss: 0.05632608011364937\r\nStep 36, loss: 0.058733828365802765\r\nStep 37, loss: 0.05144477263092995\r\nStep 38, loss: 0.05117009952664375\r\nStep 
39, loss: 0.05097543075680733\r\nStep 40, loss: 0.051942065358161926\r\nStep 41, loss: 0.046499162912368774\r\nStep 42, loss: 0.05159098282456398\r\nStep 43, loss: 0.04936894029378891\r\nStep 44, loss: 0.05515732616186142\r\nStep 45, loss: 0.04567846655845642\r\nStep 46, loss: 0.04675234481692314\r\nStep 47, loss: 0.04656634107232094\r\nStep 48, loss: 0.04983530566096306\r\nStep 49, loss: 0.049152761697769165\r\nStep 50, loss: 0.04401770606637001\r\nStep 51, loss: 0.042400676757097244\r\nStep 52, loss: 0.0430758111178875\r\nStep 53, loss: 0.043048195540905\r\nStep 54, loss: 0.04316592216491699\r\nStep 55, loss: 0.04429558664560318\r\nStep 56, loss: 0.042991869151592255\r\nStep 57, loss: 0.04130374267697334\r\nStep 58, loss: 0.03836307302117348\r\nStep 59, loss: 0.03951887786388397\r\nStep 60, loss: 0.03625255823135376\r\nStep 61, loss: 0.03791269659996033\r\nStep 62, loss: 0.03727186843752861\r\nStep 63, loss: 0.03937394171953201\r\nStep 64, loss: 0.036140769720077515\r\nStep 65, loss: 0.03697889298200607\r\nStep 66, loss: 0.03517203778028488\r\nStep 67, loss: 0.03560420870780945\r\nStep 68, loss: 0.037143804132938385\r\nStep 69, loss: 0.032461587339639664\r\nStep 70, loss: 0.03468279168009758\r\nStep 71, loss: 0.03513737767934799\r\nStep 72, loss: 0.03565040975809097\r\nStep 73, loss: 0.0330132320523262\r\nStep 74, loss: 0.034020647406578064\r\nStep 75, loss: 0.031051885336637497\r\nStep 76, loss: 0.029657039791345596\r\nStep 77, loss: 0.029128598049283028\r\nStep 78, loss: 0.02778560295701027\r\nStep 79, loss: 0.03003678098320961\r\nStep 80, loss: 0.030365584418177605\r\nStep 81, loss: 0.026201536878943443\r\nStep 82, loss: 0.028577476739883423\r\nStep 83, loss: 0.02874593995511532\r\nStep 84, loss: 0.026943983510136604\r\nStep 85, loss: 0.0281868614256382\r\nStep 86, loss: 0.027384648099541664\r\nStep 87, loss: 0.02767602913081646\r\nStep 88, loss: 0.028130633756518364\r\nStep 89, loss: 0.02517537586390972\r\nStep 90, loss: 0.0255118440836668\r\nStep 91, loss: 0.028349988162517548\r\nStep 92, loss: 0.025830097496509552\r\nStep 93, loss: 0.02468285895884037\r\nStep 94, loss: 0.02751479484140873\r\nStep 95, loss: 0.023452816531062126\r\nStep 96, loss: 0.02339227870106697\r\nStep 97, loss: 0.023860404267907143\r\nStep 98, loss: 0.026367874816060066\r\nStep 99, loss: 0.02228216454386711\r\nStep 100, loss: 0.023430615663528442\r\nStep 101, loss: 0.021609408780932426\r\nStep 102, loss: 0.022437402978539467\r\nStep 103, loss: 0.021542737260460854\r\nStep 104, loss: 0.018982194364070892\r\nStep 105, loss: 0.021596087142825127\r\nStep 106, loss: 0.02188090980052948\r\nStep 107, loss: 0.020319683477282524\r\nStep 108, loss: 0.02188311144709587\r\nStep 109, loss: 0.022975796833634377\r\nStep 110, loss: 0.01976914331316948\r\nStep 111, loss: 0.02313579060137272\r\nStep 112, loss: 0.020566441118717194\r\nStep 113, loss: 0.019332319498062134\r\nStep 114, loss: 0.019270069897174835\r\nStep 115, loss: 0.021202340722084045\r\nStep 116, loss: 0.020022688433527946\r\nStep 117, loss: 0.01845189556479454\r\nStep 118, loss: 0.021814992651343346\r\nStep 119, loss: 0.01873568259179592\r\nStep 120, loss: 0.019324379041790962\r\nStep 121, loss: 0.01696816086769104\r\nStep 122, loss: 0.01798570528626442\r\nStep 123, loss: 0.01626499742269516\r\nStep 124, loss: 0.017125030979514122\r\nStep 125, loss: 0.020170114934444427\r\nStep 126, loss: 0.017771216109395027\r\nStep 127, loss: 0.018780428916215897\r\nStep 128, loss: 0.017509611323475838\r\nStep 129, loss: 0.016392791643738747\r\nStep 130, loss: 
0.0161692313849926\r\nStep 131, loss: 0.016657739877700806\r\nStep 132, loss: 0.015444198623299599\r\nStep 133, loss: 0.016056101769208908\r\nStep 134, loss: 0.01770874857902527\r\nStep 135, loss: 0.01826588436961174\r\nStep 136, loss: 0.017154138535261154\r\nStep 137, loss: 0.016903536394238472\r\nStep 138, loss: 0.017067160457372665\r\nStep 139, loss: 0.014987957663834095\r\nStep 140, loss: 0.014494623988866806\r\nStep 141, loss: 0.015896758064627647\r\nStep 142, loss: 0.015160825103521347\r\nStep 143, loss: 0.015809522941708565\r\nStep 144, loss: 0.015752172097563744\r\nStep 145, loss: 0.01583671011030674\r\nStep 146, loss: 0.013611181639134884\r\nStep 147, loss: 0.017301112413406372\r\nStep 148, loss: 0.013612732291221619\r\nStep 149, loss: 0.014683591201901436\r\nStep 150, loss: 0.01602398231625557\r\nStep 151, loss: 0.016187656670808792\r\nStep 152, loss: 0.014394099824130535\r\nStep 153, loss: 0.0142692094668746\r\nStep 154, loss: 0.014798534102737904\r\nStep 155, loss: 0.013772721402347088\r\nStep 156, loss: 0.012673899531364441\r\nStep 157, loss: 0.013326121494174004\r\nStep 158, loss: 0.014826718717813492\r\nStep 159, loss: 0.013340544886887074\r\nStep 160, loss: 0.013956693932414055\r\nStep 161, loss: 0.013649641536176205\r\nStep 162, loss: 0.01341031864285469\r\nStep 163, loss: 0.015223181806504726\r\nStep 164, loss: 0.013461416587233543\r\nStep 165, loss: 0.016614627093076706\r\nStep 166, loss: 0.014446724206209183\r\nStep 167, loss: 0.01382629293948412\r\nStep 168, loss: 0.013771926052868366\r\nStep 169, loss: 0.015074227936565876\r\nStep 170, loss: 0.013416316360235214\r\nStep 171, loss: 0.014910782687366009\r\nStep 172, loss: 0.014735950157046318\r\nStep 173, loss: 0.01343217957764864\r\nStep 174, loss: 0.014639505185186863\r\nStep 175, loss: 0.015760699287056923\r\nStep 176, loss: 0.01325331348925829\r\nStep 177, loss: 0.012775450944900513\r\nStep 178, loss: 0.012347170151770115\r\nStep 179, loss: 0.01201647985726595\r\nStep 180, loss: 0.011997772380709648\r\nStep 181, loss: 0.0127970976755023\r\nStep 182, loss: 0.012849961407482624\r\nStep 183, loss: 0.013519403524696827\r\nStep 184, loss: 0.013941296376287937\r\nStep 185, loss: 0.011854317039251328\r\nStep 186, loss: 0.012987681664526463\r\nStep 187, loss: 0.013691768981516361\r\nStep 188, loss: 0.01244765892624855\r\nStep 189, loss: 0.01361786387860775\r\nStep 190, loss: 0.013386002741754055\r\nStep 191, loss: 0.013909702189266682\r\nStep 192, loss: 0.014463458210229874\r\nStep 193, loss: 0.013295559212565422\r\nStep 194, loss: 0.01352232601493597\r\nStep 195, loss: 0.01174916047602892\r\nStep 196, loss: 0.012223197147250175\r\nStep 197, loss: 0.011510714888572693\r\nStep 198, loss: 0.012651047669351101\r\nStep 199, loss: 0.010755637660622597\r\nStep 200, loss: 0.013457592576742172\r\nStep 201, loss: 0.0138861034065485\r\nStep 202, loss: 0.012667758390307426\r\nStep 203, loss: 0.012279652059078217\r\nStep 204, loss: 0.012127796187996864\r\nStep 205, loss: 0.01303846761584282\r\nStep 206, loss: 0.012841872870922089\r\nStep 207, loss: 0.01342686451971531\r\nStep 208, loss: 0.011667580343782902\r\nStep 209, loss: 0.01000010035932064\r\nStep 210, loss: 0.011521648615598679\r\nStep 211, loss: 0.01216675341129303\r\nStep 212, loss: 0.01002007070928812\r\nStep 213, loss: 0.01219736784696579\r\nStep 214, loss: 0.011972173117101192\r\nStep 215, loss: 0.010780243203043938\r\nStep 216, loss: 0.010104210115969181\r\nStep 217, loss: 0.012307766824960709\r\nStep 218, loss: 0.012908908538520336\r\n",,terminal_output 
+290,1646097,"scripts_horeka/modelsize_scaling/lam/tester.sh",0,0,"\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,tab +291,1646098,"scripts_horeka/modelsize_scaling/lam/tester.sh",623,0,"",shellscript,selection_mouse +292,1653130,"scripts_horeka/modelsize_scaling/lam/tester.sh",623,1,"",shellscript,content +293,1669171,"TERMINAL",0,0,"Step 219, loss: 0.013099383562803268\r\nStep 220, loss: 0.013694820925593376\r\nStep 221, loss: 0.012139582075178623\r\nStep 222, loss: 0.011012162081897259\r\nStep 223, loss: 0.011057185009121895\r\nStep 224, loss: 0.01283134426921606\r\nStep 225, loss: 0.010869650170207024\r\nStep 226, loss: 0.010859719477593899\r\nStep 227, loss: 0.011350473389029503\r\nStep 228, loss: 0.010802650824189186\r\nStep 229, loss: 0.009961412288248539\r\nStep 230, loss: 0.01025364175438881\r\nStep 231, loss: 0.011073515750467777\r\nStep 232, loss: 0.010382494889199734\r\nStep 233, loss: 0.010060961358249187\r\nStep 234, loss: 0.01103963889181614\r\nStep 235, loss: 0.010757341049611568\r\nStep 236, loss: 0.010398885235190392\r\nStep 237, loss: 0.011812644079327583\r\nStep 238, loss: 0.0094912676140666\r\nStep 239, loss: 0.01158614456653595\r\nStep 240, loss: 0.010897740721702576\r\nStep 241, loss: 0.009723613038659096\r\nStep 242, loss: 0.012121678330004215\r\nStep 243, loss: 0.010595949366688728\r\nStep 244, loss: 0.010378634557127953\r\nStep 245, loss: 0.011665726080536842\r\nStep 219, loss: 0.013099383562803268\r\nStep 220, loss: 0.013694820925593376\r\nStep 221, loss: 0.012139582075178623\r\nStep 222, loss: 0.011012162081897259\r\nStep 223, loss: 0.011057185009121895\r\nStep 224, loss: 0.01283134426921606\r\nStep 225, loss: 0.010869650170207024\r\nStep 226, loss: 0.010859719477593899\r\nStep 227, loss: 0.011350473389029503\r\nStep 228, loss: 0.010802650824189186\r\nStep 229, loss: 0.009961412288248539\r\nStep 230, loss: 0.01025364175438881\r\nStep 231, loss: 0.011073515750467777\r\nStep 232, loss: 0.010382494889199734\r\nStep 233, loss: 0.010060961358249187\r\nStep 234, loss: 0.01103963889181614\r\nStep 235, loss: 0.010757341049611568\r\nStep 236, loss: 0.010398885235190392\r\nStep 237, loss: 0.011812644079327583\r\nStep 238, loss: 0.0094912676140666\r\nStep 239, loss: 0.01158614456653595\r\nStep 240, loss: 0.010897740721702576\r\nStep 241, loss: 0.009723613038659096\r\nStep 242, loss: 0.012121678330004215\r\nStep 243, loss: 0.010595949366688728\r\nStep 244, loss: 0.010378634557127953\r\nStep 245, loss: 0.011665726080536842\r\nStep 246, loss: 0.010449363850057125\r\nStep 247, loss: 0.01035214588046074\r\nStep 248, loss: 0.011270000599324703\r\nStep 249, loss: 0.011872820556163788\r\nStep 250, loss: 0.011594422161579132\r\nStep 251, loss: 0.009980921633541584\r\nStep 252, loss: 
0.010286862961947918\r\nStep 253, loss: 0.011944676749408245\r\nStep 254, loss: 0.012024126015603542\r\nStep 255, loss: 0.010886747390031815\r\nStep 256, loss: 0.010658128187060356\r\nStep 257, loss: 0.011032473295927048\r\nStep 258, loss: 0.011335105635225773\r\nStep 259, loss: 0.013315752148628235\r\nStep 260, loss: 0.012090524658560753\r\nStep 261, loss: 0.009936386719346046\r\nStep 262, loss: 0.009728874079883099\r\nStep 263, loss: 0.009869663044810295\r\nStep 264, loss: 0.011215614154934883\r\nStep 265, loss: 0.01125930156558752\r\nStep 266, loss: 0.010517990216612816\r\nStep 267, loss: 0.010507647879421711\r\nStep 268, loss: 0.010362515226006508\r\nStep 269, loss: 0.011415633372962475\r\nStep 270, loss: 0.01145961694419384\r\nStep 271, loss: 0.009482959285378456\r\nStep 272, loss: 0.011310311034321785\r\nStep 246, loss: 0.010449363850057125\r\nStep 247, loss: 0.01035214588046074\r\nStep 248, loss: 0.011270000599324703\r\nStep 249, loss: 0.011872820556163788\r\nStep 250, loss: 0.011594422161579132\r\nStep 251, loss: 0.009980921633541584\r\nStep 252, loss: 0.010286862961947918\r\nStep 253, loss: 0.011944676749408245\r\nStep 254, loss: 0.012024126015603542\r\nStep 255, loss: 0.010886747390031815\r\nStep 256, loss: 0.010658128187060356\r\nStep 257, loss: 0.011032473295927048\r\nStep 258, loss: 0.011335105635225773\r\nStep 259, loss: 0.013315752148628235\r\nStep 260, loss: 0.012090524658560753\r\nStep 261, loss: 0.009936386719346046\r\nStep 262, loss: 0.009728874079883099\r\nStep 263, loss: 0.009869663044810295\r\nStep 264, loss: 0.011215614154934883\r\nStep 265, loss: 0.01125930156558752\r\nStep 266, loss: 0.010517990216612816\r\nStep 267, loss: 0.010507647879421711\r\nStep 268, loss: 0.010362515226006508\r\nStep 269, loss: 0.011415633372962475\r\nStep 270, loss: 0.01145961694419384\r\nStep 271, loss: 0.009482959285378456\r\nStep 272, loss: 0.011310311034321785\r\nStep 273, loss: 0.010140996426343918\r\nStep 274, loss: 0.010601982474327087\r\nStep 275, loss: 0.01028397772461176\r\nStep 276, loss: 0.011026402935385704\r\nStep 277, loss: 0.010329381562769413\r\nStep 278, loss: 0.010154777206480503\r\nStep 279, loss: 0.01032185461372137\r\nStep 280, loss: 0.010506854392588139\r\nStep 281, loss: 0.011142993345856667\r\nStep 282, loss: 0.010036150924861431\r\nStep 283, loss: 0.011414938606321812\r\nStep 284, loss: 0.009582496248185635\r\nStep 285, loss: 0.009787384420633316\r\nStep 286, loss: 0.010223863646388054\r\nStep 287, loss: 0.009179569780826569\r\nStep 288, loss: 0.011081495322287083\r\nStep 289, loss: 0.010283957235515118\r\nStep 290, loss: 0.010582290589809418\r\nStep 291, loss: 0.00986514426767826\r\nStep 292, loss: 0.00965440645813942\r\nStep 293, loss: 0.009914973750710487\r\nStep 294, loss: 0.009669107384979725\r\nStep 295, loss: 0.00881169643253088\r\nStep 296, loss: 0.009560219943523407\r\nStep 297, loss: 0.009458527900278568\r\nStep 298, loss: 0.010391158051788807\r\nStep 299, loss: 0.009811971336603165\r\nStep 273, loss: 0.010140996426343918\r\nStep 274, loss: 0.010601982474327087\r\nStep 275, loss: 0.01028397772461176\r\nStep 276, loss: 0.011026402935385704\r\nStep 277, loss: 0.010329381562769413\r\nStep 278, loss: 0.010154777206480503\r\nStep 279, loss: 0.01032185461372137\r\nStep 280, loss: 0.010506854392588139\r\nStep 281, loss: 0.011142993345856667\r\nStep 282, loss: 0.010036150924861431\r\nStep 283, loss: 0.011414938606321812\r\nStep 284, loss: 0.009582496248185635\r\nStep 285, loss: 0.009787384420633316\r\nStep 286, loss: 0.010223863646388054\r\nStep 287, loss: 
0.009179569780826569\r\nStep 288, loss: 0.011081495322287083\r\nStep 289, loss: 0.010283957235515118\r\nStep 290, loss: 0.010582290589809418\r\nStep 291, loss: 0.00986514426767826\r\nStep 292, loss: 0.00965440645813942\r\nStep 293, loss: 0.009914973750710487\r\nStep 294, loss: 0.009669107384979725\r\nStep 295, loss: 0.00881169643253088\r\nStep 296, loss: 0.009560219943523407\r\nStep 297, loss: 0.009458527900278568\r\nStep 298, loss: 0.010391158051788807\r\nStep 299, loss: 0.009811971336603165\r\nStep 300, loss: 0.009161300025880337\r\nStep 301, loss: 0.009543187916278839\r\nStep 302, loss: 0.010171896778047085\r\nStep 303, loss: 0.009172284975647926\r\nStep 304, loss: 0.009565647691488266\r\nStep 305, loss: 0.010039139539003372\r\nStep 306, loss: 0.00956760160624981\r\nStep 307, loss: 0.009556096978485584\r\nStep 308, loss: 0.009693847969174385\r\nStep 309, loss: 0.010572932660579681\r\nStep 310, loss: 0.011336741037666798\r\nStep 311, loss: 0.009630970656871796\r\nStep 312, loss: 0.010636239312589169\r\nStep 313, loss: 0.009809005074203014\r\nStep 314, loss: 0.00931477639824152\r\nStep 315, loss: 0.010083867236971855\r\nStep 316, loss: 0.009611323475837708\r\nStep 317, loss: 0.01032000221312046\r\nStep 318, loss: 0.008976491168141365\r\nStep 319, loss: 0.0091495830565691\r\nStep 320, loss: 0.010515597648918629\r\nStep 321, loss: 0.010185513645410538\r\nStep 322, loss: 0.01073758490383625\r\nStep 323, loss: 0.0096001997590065\r\nStep 324, loss: 0.010766669176518917\r\nStep 325, loss: 0.00949220173060894\r\nStep 326, loss: 0.010624677874147892\r\nStep 300, loss: 0.009161300025880337\r\nStep 301, loss: 0.009543187916278839\r\nStep 302, loss: 0.010171896778047085\r\nStep 303, loss: 0.009172284975647926\r\nStep 304, loss: 0.009565647691488266\r\nStep 305, loss: 0.010039139539003372\r\nStep 306, loss: 0.00956760160624981\r\nStep 307, loss: 0.009556096978485584\r\nStep 308, loss: 0.009693847969174385\r\nStep 309, loss: 0.010572932660579681\r\nStep 310, loss: 0.011336741037666798\r\nStep 311, loss: 0.009630970656871796\r\nStep 312, loss: 0.010636239312589169\r\nStep 313, loss: 0.009809005074203014\r\nStep 314, loss: 0.00931477639824152\r\nStep 315, loss: 0.010083867236971855\r\nStep 316, loss: 0.009611323475837708\r\nStep 317, loss: 0.01032000221312046\r\nStep 318, loss: 0.008976491168141365\r\nStep 319, loss: 0.0091495830565691\r\nStep 320, loss: 0.010515597648918629\r\nStep 321, loss: 0.010185513645410538\r\nStep 322, loss: 0.01073758490383625\r\nStep 323, loss: 0.0096001997590065\r\nStep 324, loss: 0.010766669176518917\r\nStep 325, loss: 0.00949220173060894\r\nStep 326, loss: 0.010624677874147892\r\nStep 327, loss: 0.010104024782776833\r\nStep 328, loss: 0.0085305105894804\r\nStep 329, loss: 0.009465201757848263\r\nStep 327, loss: 0.010104024782776833\r\nStep 328, loss: 0.0085305105894804\r\nStep 329, loss: 0.009465201757848263\r\nStep 330, loss: 0.009789333678781986\r\nStep 331, loss: 0.01092305313795805\r\nStep 332, loss: 0.008985054679214954\r\nStep 333, loss: 0.00994466058909893\r\nStep 334, loss: 0.010118870064616203\r\nStep 335, loss: 0.008400583639740944\r\nStep 336, loss: 0.008790651336312294\r\nStep 337, loss: 0.01010106224566698\r\nStep 338, loss: 0.009276456199586391\r\nStep 339, loss: 0.008561133407056332\r\nStep 340, loss: 0.009276671335101128\r\nStep 341, loss: 0.008887761272490025\r\nStep 342, loss: 0.010138514451682568\r\nStep 343, loss: 0.009025386534631252\r\nStep 344, loss: 0.010667262598872185\r\nStep 345, loss: 0.008848258294165134\r\nStep 346, loss: 
0.010287134908139706\r\nStep 347, loss: 0.008325062692165375\r\nStep 348, loss: 0.010148957371711731\r\nStep 349, loss: 0.009190731681883335\r\nStep 350, loss: 0.009581753984093666\r\nStep 351, loss: 0.009136899374425411\r\nStep 352, loss: 0.009848029352724552\r\nStep 353, loss: 0.009596729651093483\r\nStep 354, loss: 0.008651762269437313\r\nStep 355, loss: 0.008681347593665123\r\nStep 356, loss: 0.008824453689157963\r\nStep 330, loss: 0.009789333678781986\r\nStep 331, loss: 0.01092305313795805\r\nStep 332, loss: 0.008985054679214954\r\nStep 333, loss: 0.00994466058909893\r\nStep 334, loss: 0.010118870064616203\r\nStep 335, loss: 0.008400583639740944\r\nStep 336, loss: 0.008790651336312294\r\nStep 337, loss: 0.01010106224566698\r\nStep 338, loss: 0.009276456199586391\r\nStep 339, loss: 0.008561133407056332\r\nStep 340, loss: 0.009276671335101128\r\nStep 341, loss: 0.008887761272490025\r\nStep 342, loss: 0.010138514451682568\r\nStep 343, loss: 0.009025386534631252\r\nStep 344, loss: 0.010667262598872185\r\nStep 345, loss: 0.008848258294165134\r\nStep 346, loss: 0.010287134908139706\r\nStep 347, loss: 0.008325062692165375\r\nStep 348, loss: 0.010148957371711731\r\nStep 349, loss: 0.009190731681883335\r\nStep 350, loss: 0.009581753984093666\r\nStep 351, loss: 0.009136899374425411\r\nStep 352, loss: 0.009848029352724552\r\nStep 353, loss: 0.009596729651093483\r\nStep 354, loss: 0.008651762269437313\r\nStep 355, loss: 0.008681347593665123\r\nStep 356, loss: 0.008824453689157963\r\nStep 357, loss: 0.00955806765705347\r\nStep 358, loss: 0.009801270440220833\r\nStep 359, loss: 0.008466079831123352\r\nStep 360, loss: 0.00912779662758112\r\nStep 361, loss: 0.008608909323811531\r\nStep 362, loss: 0.008244039490818977\r\nStep 363, loss: 0.009335396811366081\r\nStep 364, loss: 0.008582552894949913\r\nStep 365, loss: 0.009297855198383331\r\nStep 366, loss: 0.008141983300447464\r\nStep 367, loss: 0.009393983520567417\r\nStep 368, loss: 0.00935653317719698\r\nStep 369, loss: 0.00956851989030838\r\nStep 370, loss: 0.009400052018463612\r\nStep 371, loss: 0.01006643008440733\r\nStep 372, loss: 0.009365479461848736\r\nStep 373, loss: 0.00824917946010828\r\nStep 374, loss: 0.00813942402601242\r\nStep 375, loss: 0.00910226907581091\r\nStep 376, loss: 0.010079216212034225\r\nStep 377, loss: 0.009087095968425274\r\nStep 378, loss: 0.008659454993903637\r\nStep 379, loss: 0.00930467527359724\r\nStep 380, loss: 0.008777443319559097\r\nStep 381, loss: 0.007916522212326527\r\nStep 382, loss: 0.008325379341840744\r\nStep 383, loss: 0.00848743598908186\r\nStep 357, loss: 0.00955806765705347\r\nStep 358, loss: 0.009801270440220833\r\nStep 359, loss: 0.008466079831123352\r\nStep 360, loss: 0.00912779662758112\r\nStep 361, loss: 0.008608909323811531\r\nStep 362, loss: 0.008244039490818977\r\nStep 363, loss: 0.009335396811366081\r\nStep 364, loss: 0.008582552894949913\r\nStep 365, loss: 0.009297855198383331\r\nStep 366, loss: 0.008141983300447464\r\nStep 367, loss: 0.009393983520567417\r\nStep 368, loss: 0.00935653317719698\r\nStep 369, loss: 0.00956851989030838\r\nStep 370, loss: 0.009400052018463612\r\nStep 371, loss: 0.01006643008440733\r\nStep 372, loss: 0.009365479461848736\r\nStep 373, loss: 0.00824917946010828\r\nStep 374, loss: 0.00813942402601242\r\nStep 375, loss: 0.00910226907581091\r\nStep 376, loss: 0.010079216212034225\r\nStep 377, loss: 0.009087095968425274\r\nStep 378, loss: 0.008659454993903637\r\nStep 379, loss: 0.00930467527359724\r\nStep 380, loss: 0.008777443319559097\r\nStep 381, loss: 
0.007916522212326527\r\nStep 382, loss: 0.008325379341840744\r\nStep 383, loss: 0.00848743598908186\r\nStep 384, loss: 0.007398432586342096\r\nStep 385, loss: 0.009856878779828548\r\nStep 386, loss: 0.008934546262025833\r\nStep 387, loss: 0.00811261311173439\r\nStep 388, loss: 0.008522428572177887\r\nStep 389, loss: 0.009407051838934422\r\nStep 390, loss: 0.009476755745708942\r\nStep 391, loss: 0.00886524561792612\r\nStep 392, loss: 0.008509679697453976\r\nStep 393, loss: 0.008736547082662582\r\nStep 394, loss: 0.0095830662176013\r\nStep 395, loss: 0.009046163409948349\r\nStep 396, loss: 0.009033717215061188\r\nStep 397, loss: 0.009225020185112953\r\nStep 398, loss: 0.00959339365363121\r\nStep 399, loss: 0.010412130504846573\r\nStep 400, loss: 0.00931647326797247\r\nStep 401, loss: 0.010083380155265331\r\nStep 402, loss: 0.009015293791890144\r\nStep 403, loss: 0.009169119410216808\r\nStep 404, loss: 0.009633009321987629\r\nStep 405, loss: 0.009266078472137451\r\nStep 406, loss: 0.009133762679994106\r\nStep 407, loss: 0.009069717489182949\r\nStep 408, loss: 0.00867233332246542\r\nStep 409, loss: 0.008150232955813408\r\nStep 410, loss: 0.0077474238350987434\r\nStep 384, loss: 0.007398432586342096\r\nStep 385, loss: 0.009856878779828548\r\nStep 386, loss: 0.008934546262025833\r\nStep 387, loss: 0.00811261311173439\r\nStep 388, loss: 0.008522428572177887\r\nStep 389, loss: 0.009407051838934422\r\nStep 390, loss: 0.009476755745708942\r\nStep 391, loss: 0.00886524561792612\r\nStep 392, loss: 0.008509679697453976\r\nStep 393, loss: 0.008736547082662582\r\nStep 394, loss: 0.0095830662176013\r\nStep 395, loss: 0.009046163409948349\r\nStep 396, loss: 0.009033717215061188\r\nStep 397, loss: 0.009225020185112953\r\nStep 398, loss: 0.00959339365363121\r\nStep 399, loss: 0.010412130504846573\r\nStep 400, loss: 0.00931647326797247\r\nStep 401, loss: 0.010083380155265331\r\nStep 402, loss: 0.009015293791890144\r\nStep 403, loss: 0.009169119410216808\r\nStep 404, loss: 0.009633009321987629\r\nStep 405, loss: 0.009266078472137451\r\nStep 406, loss: 0.009133762679994106\r\nStep 407, loss: 0.009069717489182949\r\nStep 408, loss: 0.00867233332246542\r\nStep 409, loss: 0.008150232955813408\r\nStep 410, loss: 0.0077474238350987434\r\nStep 411, loss: 0.00808892771601677\r\nStep 412, loss: 0.007288776803761721\r\nStep 413, loss: 0.008106247521936893\r\nStep 414, loss: 0.00810135155916214\r\nStep 415, loss: 0.008702809922397137\r\nStep 416, loss: 0.008316677995026112\r\nStep 417, loss: 0.009631174616515636\r\nStep 418, loss: 0.009098071604967117\r\nStep 419, loss: 0.010791489854454994\r\nStep 420, loss: 0.007883910089731216\r\nStep 421, loss: 0.008732305839657784\r\nStep 422, loss: 0.008829205296933651\r\nStep 423, loss: 0.008164764381945133\r\nStep 424, loss: 0.009509255178272724\r\nStep 425, loss: 0.009375796653330326\r\nStep 426, loss: 0.008981086313724518\r\nStep 427, loss: 0.010169018059968948\r\nStep 428, loss: 0.009473764337599277\r\nStep 429, loss: 0.009139928966760635\r\nStep 430, loss: 0.008253500796854496\r\nStep 431, loss: 0.009879836812615395\r\nStep 432, loss: 0.00861106626689434\r\nStep 433, loss: 0.007545475848019123\r\nStep 434, loss: 0.008925536647439003\r\nStep 435, loss: 0.00869233813136816\r\nStep 436, loss: 0.00839403085410595\r\nStep 437, loss: 0.009328337386250496\r\nStep 411, loss: 0.00808892771601677\r\nStep 412, loss: 0.007288776803761721\r\nStep 413, loss: 0.008106247521936893\r\nStep 414, loss: 0.00810135155916214\r\nStep 415, loss: 0.008702809922397137\r\nStep 416, loss: 
0.008316677995026112\r\nStep 417, loss: 0.009631174616515636\r\nStep 418, loss: 0.009098071604967117\r\nStep 419, loss: 0.010791489854454994\r\nStep 420, loss: 0.007883910089731216\r\nStep 421, loss: 0.008732305839657784\r\nStep 422, loss: 0.008829205296933651\r\nStep 423, loss: 0.008164764381945133\r\nStep 424, loss: 0.009509255178272724\r\nStep 425, loss: 0.009375796653330326\r\nStep 426, loss: 0.008981086313724518\r\nStep 427, loss: 0.010169018059968948\r\nStep 428, loss: 0.009473764337599277\r\nStep 429, loss: 0.009139928966760635\r\nStep 430, loss: 0.008253500796854496\r\nStep 431, loss: 0.009879836812615395\r\nStep 432, loss: 0.00861106626689434\r\nStep 433, loss: 0.007545475848019123\r\nStep 434, loss: 0.008925536647439003\r\nStep 435, loss: 0.00869233813136816\r\nStep 436, loss: 0.00839403085410595\r\nStep 437, loss: 0.009328337386250496\r\nStep 438, loss: 0.00917942076921463\r\nStep 439, loss: 0.009509981609880924\r\nStep 440, loss: 0.009915051981806755\r\nStep 438, loss: 0.00917942076921463\r\nStep 439, loss: 0.009509981609880924\r\nStep 440, loss: 0.009915051981806755\r\nStep 219, loss: 0.013099383562803268\r\nStep 220, loss: 0.013694820925593376\r\nStep 221, loss: 0.012139582075178623\r\nStep 222, loss: 0.011012162081897259\r\nStep 223, loss: 0.011057185009121895\r\nStep 224, loss: 0.01283134426921606\r\nStep 225, loss: 0.010869650170207024\r\nStep 226, loss: 0.010859719477593899\r\nStep 227, loss: 0.011350473389029503\r\nStep 228, loss: 0.010802650824189186\r\nStep 229, loss: 0.009961412288248539\r\nStep 230, loss: 0.01025364175438881\r\nStep 231, loss: 0.011073515750467777\r\nStep 232, loss: 0.010382494889199734\r\nStep 233, loss: 0.010060961358249187\r\nStep 234, loss: 0.01103963889181614\r\nStep 235, loss: 0.010757341049611568\r\nStep 236, loss: 0.010398885235190392\r\nStep 237, loss: 0.011812644079327583\r\nStep 238, loss: 0.0094912676140666\r\nStep 239, loss: 0.01158614456653595\r\nStep 240, loss: 0.010897740721702576\r\nStep 241, loss: 0.009723613038659096\r\nStep 242, loss: 0.012121678330004215\r\nStep 243, loss: 0.010595949366688728\r\nStep 244, loss: 0.010378634557127953\r\nStep 245, loss: 0.011665726080536842\r\nStep 219, loss: 0.013099383562803268\r\nStep 220, loss: 0.013694820925593376\r\nStep 221, loss: 0.012139582075178623\r\nStep 222, loss: 0.011012162081897259\r\nStep 223, loss: 0.011057185009121895\r\nStep 224, loss: 0.01283134426921606\r\nStep 225, loss: 0.010869650170207024\r\nStep 226, loss: 0.010859719477593899\r\nStep 227, loss: 0.011350473389029503\r\nStep 228, loss: 0.010802650824189186\r\nStep 229, loss: 0.009961412288248539\r\nStep 230, loss: 0.01025364175438881\r\nStep 231, loss: 0.011073515750467777\r\nStep 232, loss: 0.010382494889199734\r\nStep 233, loss: 0.010060961358249187\r\nStep 234, loss: 0.01103963889181614\r\nStep 235, loss: 0.010757341049611568\r\nStep 236, loss: 0.010398885235190392\r\nStep 237, loss: 0.011812644079327583\r\nStep 238, loss: 0.0094912676140666\r\nStep 239, loss: 0.01158614456653595\r\nStep 240, loss: 0.010897740721702576\r\nStep 241, loss: 0.009723613038659096\r\nStep 242, loss: 0.012121678330004215\r\nStep 243, loss: 0.010595949366688728\r\nStep 244, loss: 0.010378634557127953\r\nStep 245, loss: 0.011665726080536842\r\nStep 246, loss: 0.010449363850057125\r\nStep 247, loss: 0.01035214588046074\r\nStep 248, loss: 0.011270000599324703\r\nStep 249, loss: 0.011872820556163788\r\nStep 250, loss: 0.011594422161579132\r\nStep 251, loss: 0.009980921633541584\r\nStep 252, loss: 0.010286862961947918\r\nStep 253, loss: 
0.011944676749408245\r\nStep 254, loss: 0.012024126015603542\r\nStep 255, loss: 0.010886747390031815\r\nStep 256, loss: 0.010658128187060356\r\nStep 257, loss: 0.011032473295927048\r\nStep 258, loss: 0.011335105635225773\r\nStep 259, loss: 0.013315752148628235\r\nStep 260, loss: 0.012090524658560753\r\nStep 261, loss: 0.009936386719346046\r\nStep 262, loss: 0.009728874079883099\r\nStep 263, loss: 0.009869663044810295\r\nStep 264, loss: 0.011215614154934883\r\nStep 265, loss: 0.01125930156558752\r\nStep 266, loss: 0.010517990216612816\r\nStep 267, loss: 0.010507647879421711\r\nStep 268, loss: 0.010362515226006508\r\nStep 269, loss: 0.011415633372962475\r\nStep 270, loss: 0.01145961694419384\r\nStep 271, loss: 0.009482959285378456\r\nStep 272, loss: 0.011310311034321785\r\nStep 246, loss: 0.010449363850057125\r\nStep 247, loss: 0.01035214588046074\r\nStep 248, loss: 0.011270000599324703\r\nStep 249, loss: 0.011872820556163788\r\nStep 250, loss: 0.011594422161579132\r\nStep 251, loss: 0.009980921633541584\r\nStep 252, loss: 0.010286862961947918\r\nStep 253, loss: 0.011944676749408245\r\nStep 254, loss: 0.012024126015603542\r\nStep 255, loss: 0.010886747390031815\r\nStep 256, loss: 0.010658128187060356\r\nStep 257, loss: 0.011032473295927048\r\nStep 258, loss: 0.011335105635225773\r\nStep 259, loss: 0.013315752148628235\r\nStep 260, loss: 0.012090524658560753\r\nStep 261, loss: 0.009936386719346046\r\nStep 262, loss: 0.009728874079883099\r\nStep 263, loss: 0.009869663044810295\r\nStep 264, loss: 0.011215614154934883\r\nStep 265, loss: 0.01125930156558752\r\nStep 266, loss: 0.010517990216612816\r\nStep 267, loss: 0.010507647879421711\r\nStep 268, loss: 0.010362515226006508\r\nStep 269, loss: 0.011415633372962475\r\nStep 270, loss: 0.01145961694419384\r\nStep 271, loss: 0.009482959285378456\r\nStep 272, loss: 0.011310311034321785\r\nStep 273, loss: 0.010140996426343918\r\nStep 274, loss: 0.010601982474327087\r\nStep 275, loss: 0.01028397772461176\r\nStep 276, loss: 0.011026402935385704\r\nStep 277, loss: 0.010329381562769413\r\nStep 278, loss: 0.010154777206480503\r\nStep 279, loss: 0.01032185461372137\r\nStep 280, loss: 0.010506854392588139\r\nStep 281, loss: 0.011142993345856667\r\nStep 282, loss: 0.010036150924861431\r\nStep 283, loss: 0.011414938606321812\r\nStep 284, loss: 0.009582496248185635\r\nStep 285, loss: 0.009787384420633316\r\nStep 286, loss: 0.010223863646388054\r\nStep 287, loss: 0.009179569780826569\r\nStep 288, loss: 0.011081495322287083\r\nStep 289, loss: 0.010283957235515118\r\nStep 290, loss: 0.010582290589809418\r\nStep 291, loss: 0.00986514426767826\r\nStep 292, loss: 0.00965440645813942\r\nStep 293, loss: 0.009914973750710487\r\nStep 294, loss: 0.009669107384979725\r\nStep 295, loss: 0.00881169643253088\r\nStep 296, loss: 0.009560219943523407\r\nStep 297, loss: 0.009458527900278568\r\nStep 298, loss: 0.010391158051788807\r\nStep 299, loss: 0.009811971336603165\r\nStep 273, loss: 0.010140996426343918\r\nStep 274, loss: 0.010601982474327087\r\nStep 275, loss: 0.01028397772461176\r\nStep 276, loss: 0.011026402935385704\r\nStep 277, loss: 0.010329381562769413\r\nStep 278, loss: 0.010154777206480503\r\nStep 279, loss: 0.01032185461372137\r\nStep 280, loss: 0.010506854392588139\r\nStep 281, loss: 0.011142993345856667\r\nStep 282, loss: 0.010036150924861431\r\nStep 283, loss: 0.011414938606321812\r\nStep 284, loss: 0.009582496248185635\r\nStep 285, loss: 0.009787384420633316\r\nStep 286, loss: 0.010223863646388054\r\nStep 287, loss: 0.009179569780826569\r\nStep 288, loss: 
0.011081495322287083\r\nStep 289, loss: 0.010283957235515118\r\nStep 290, loss: 0.010582290589809418\r\nStep 291, loss: 0.00986514426767826\r\nStep 292, loss: 0.00965440645813942\r\nStep 293, loss: 0.009914973750710487\r\nStep 294, loss: 0.009669107384979725\r\nStep 295, loss: 0.00881169643253088\r\nStep 296, loss: 0.009560219943523407\r\nStep 297, loss: 0.009458527900278568\r\nStep 298, loss: 0.010391158051788807\r\nStep 299, loss: 0.009811971336603165\r\nStep 219, loss: 0.013099383562803268\r\nStep 220, loss: 0.013694820925593376\r\nStep 221, loss: 0.012139582075178623\r\nStep 222, loss: 0.011012162081897259\r\nStep 223, loss: 0.011057185009121895\r\nStep 224, loss: 0.01283134426921606\r\nStep 225, loss: 0.010869650170207024\r\nStep 226, loss: 0.010859719477593899\r\nStep 227, loss: 0.011350473389029503\r\nStep 228, loss: 0.010802650824189186\r\nStep 229, loss: 0.009961412288248539\r\nStep 230, loss: 0.01025364175438881\r\nStep 231, loss: 0.011073515750467777\r\nStep 232, loss: 0.010382494889199734\r\nStep 233, loss: 0.010060961358249187\r\nStep 234, loss: 0.01103963889181614\r\nStep 235, loss: 0.010757341049611568\r\nStep 236, loss: 0.010398885235190392\r\nStep 237, loss: 0.011812644079327583\r\nStep 238, loss: 0.0094912676140666\r\nStep 239, loss: 0.01158614456653595\r\nStep 240, loss: 0.010897740721702576\r\nStep 241, loss: 0.009723613038659096\r\nStep 242, loss: 0.012121678330004215\r\nStep 243, loss: 0.010595949366688728\r\nStep 244, loss: 0.010378634557127953\r\nStep 245, loss: 0.011665726080536842\r\nStep 300, loss: 0.009161300025880337\r\nStep 301, loss: 0.009543187916278839\r\nStep 302, loss: 0.010171896778047085\r\nStep 303, loss: 0.009172284975647926\r\nStep 304, loss: 0.009565647691488266\r\nStep 305, loss: 0.010039139539003372\r\nStep 306, loss: 0.00956760160624981\r\nStep 307, loss: 0.009556096978485584\r\nStep 308, loss: 0.009693847969174385\r\nStep 309, loss: 0.010572932660579681\r\nStep 310, loss: 0.011336741037666798\r\nStep 311, loss: 0.009630970656871796\r\nStep 312, loss: 0.010636239312589169\r\nStep 313, loss: 0.009809005074203014\r\nStep 314, loss: 0.00931477639824152\r\nStep 315, loss: 0.010083867236971855\r\nStep 316, loss: 0.009611323475837708\r\nStep 317, loss: 0.01032000221312046\r\nStep 318, loss: 0.008976491168141365\r\nStep 319, loss: 0.0091495830565691\r\nStep 320, loss: 0.010515597648918629\r\nStep 321, loss: 0.010185513645410538\r\nStep 322, loss: 0.01073758490383625\r\nStep 323, loss: 0.0096001997590065\r\nStep 324, loss: 0.010766669176518917\r\nStep 325, loss: 0.00949220173060894\r\nStep 326, loss: 0.010624677874147892\r\nStep 300, loss: 0.009161300025880337\r\nStep 301, loss: 0.009543187916278839\r\nStep 302, loss: 0.010171896778047085\r\nStep 303, loss: 0.009172284975647926\r\nStep 304, loss: 0.009565647691488266\r\nStep 305, loss: 0.010039139539003372\r\nStep 306, loss: 0.00956760160624981\r\nStep 307, loss: 0.009556096978485584\r\nStep 308, loss: 0.009693847969174385\r\nStep 309, loss: 0.010572932660579681\r\nStep 310, loss: 0.011336741037666798\r\nStep 311, loss: 0.009630970656871796\r\nStep 312, loss: 0.010636239312589169\r\nStep 313, loss: 0.009809005074203014\r\nStep 314, loss: 0.00931477639824152\r\nStep 315, loss: 0.010083867236971855\r\nStep 316, loss: 0.009611323475837708\r\nStep 317, loss: 0.01032000221312046\r\nStep 318, loss: 0.008976491168141365\r\nStep 319, loss: 0.0091495830565691\r\nStep 320, loss: 0.010515597648918629\r\nStep 321, loss: 0.010185513645410538\r\nStep 322, loss: 0.01073758490383625\r\nStep 323, loss: 
0.0096001997590065\r\nStep 324, loss: 0.010766669176518917\r\nStep 325, loss: 0.00949220173060894\r\nStep 326, loss: 0.010624677874147892\r\nStep 246, loss: 0.010449363850057125\r\nStep 247, loss: 0.01035214588046074\r\nStep 248, loss: 0.011270000599324703\r\nStep 249, loss: 0.011872820556163788\r\nStep 250, loss: 0.011594422161579132\r\nStep 251, loss: 0.009980921633541584\r\nStep 252, loss: 0.010286862961947918\r\nStep 253, loss: 0.011944676749408245\r\nStep 254, loss: 0.012024126015603542\r\nStep 255, loss: 0.010886747390031815\r\nStep 256, loss: 0.010658128187060356\r\nStep 257, loss: 0.011032473295927048\r\nStep 258, loss: 0.011335105635225773\r\nStep 259, loss: 0.013315752148628235\r\nStep 260, loss: 0.012090524658560753\r\nStep 261, loss: 0.009936386719346046\r\nStep 262, loss: 0.009728874079883099\r\nStep 263, loss: 0.009869663044810295\r\nStep 264, loss: 0.011215614154934883\r\nStep 265, loss: 0.01125930156558752\r\nStep 266, loss: 0.010517990216612816\r\nStep 267, loss: 0.010507647879421711\r\nStep 268, loss: 0.010362515226006508\r\nStep 269, loss: 0.011415633372962475\r\nStep 270, loss: 0.01145961694419384\r\nStep 271, loss: 0.009482959285378456\r\nStep 272, loss: 0.011310311034321785\r\nStep 219, loss: 0.013099383562803268\r\nStep 220, loss: 0.013694820925593376\r\nStep 221, loss: 0.012139582075178623\r\nStep 222, loss: 0.011012162081897259\r\nStep 223, loss: 0.011057185009121895\r\nStep 224, loss: 0.01283134426921606\r\nStep 225, loss: 0.010869650170207024\r\nStep 226, loss: 0.010859719477593899\r\nStep 227, loss: 0.011350473389029503\r\nStep 228, loss: 0.010802650824189186\r\nStep 229, loss: 0.009961412288248539\r\nStep 230, loss: 0.01025364175438881\r\nStep 231, loss: 0.011073515750467777\r\nStep 232, loss: 0.010382494889199734\r\nStep 233, loss: 0.010060961358249187\r\nStep 234, loss: 0.01103963889181614\r\nStep 235, loss: 0.010757341049611568\r\nStep 236, loss: 0.010398885235190392\r\nStep 237, loss: 0.011812644079327583\r\nStep 238, loss: 0.0094912676140666\r\nStep 239, loss: 0.01158614456653595\r\nStep 240, loss: 0.010897740721702576\r\nStep 241, loss: 0.009723613038659096\r\nStep 242, loss: 0.012121678330004215\r\nStep 243, loss: 0.010595949366688728\r\nStep 244, loss: 0.010378634557127953\r\nStep 245, loss: 0.011665726080536842\r\nStep 327, loss: 0.010104024782776833\r\nStep 328, loss: 0.0085305105894804\r\nStep 329, loss: 0.009465201757848263\r\nStep 327, loss: 0.010104024782776833\r\nStep 328, loss: 0.0085305105894804\r\nStep 329, loss: 0.009465201757848263\r\nStep 273, loss: 0.010140996426343918\r\nStep 274, loss: 0.010601982474327087\r\nStep 275, loss: 0.01028397772461176\r\nStep 276, loss: 0.011026402935385704\r\nStep 277, loss: 0.010329381562769413\r\nStep 278, loss: 0.010154777206480503\r\nStep 279, loss: 0.01032185461372137\r\nStep 280, loss: 0.010506854392588139\r\nStep 281, loss: 0.011142993345856667\r\nStep 282, loss: 0.010036150924861431\r\nStep 283, loss: 0.011414938606321812\r\nStep 284, loss: 0.009582496248185635\r\nStep 285, loss: 0.009787384420633316\r\nStep 286, loss: 0.010223863646388054\r\nStep 287, loss: 0.009179569780826569\r\nStep 288, loss: 0.011081495322287083\r\nStep 289, loss: 0.010283957235515118\r\nStep 290, loss: 0.010582290589809418\r\nStep 291, loss: 0.00986514426767826\r\nStep 292, loss: 0.00965440645813942\r\nStep 293, loss: 0.009914973750710487\r\nStep 294, loss: 0.009669107384979725\r\nStep 295, loss: 0.00881169643253088\r\nStep 296, loss: 0.009560219943523407\r\nStep 297, loss: 0.009458527900278568\r\nStep 298, loss: 
0.010391158051788807\r\nStep 299, loss: 0.009811971336603165\r\nStep 246, loss: 0.010449363850057125\r\nStep 247, loss: 0.01035214588046074\r\nStep 248, loss: 0.011270000599324703\r\nStep 249, loss: 0.011872820556163788\r\nStep 250, loss: 0.011594422161579132\r\nStep 251, loss: 0.009980921633541584\r\nStep 252, loss: 0.010286862961947918\r\nStep 253, loss: 0.011944676749408245\r\nStep 254, loss: 0.012024126015603542\r\nStep 255, loss: 0.010886747390031815\r\nStep 256, loss: 0.010658128187060356\r\nStep 257, loss: 0.011032473295927048\r\nStep 258, loss: 0.011335105635225773\r\nStep 259, loss: 0.013315752148628235\r\nStep 260, loss: 0.012090524658560753\r\nStep 261, loss: 0.009936386719346046\r\nStep 262, loss: 0.009728874079883099\r\nStep 263, loss: 0.009869663044810295\r\nStep 264, loss: 0.011215614154934883\r\nStep 265, loss: 0.01125930156558752\r\nStep 266, loss: 0.010517990216612816\r\nStep 267, loss: 0.010507647879421711\r\nStep 268, loss: 0.010362515226006508\r\nStep 269, loss: 0.011415633372962475\r\nStep 270, loss: 0.01145961694419384\r\nStep 271, loss: 0.009482959285378456\r\nStep 272, loss: 0.011310311034321785\r\nStep 330, loss: 0.009789333678781986\r\nStep 331, loss: 0.01092305313795805\r\nStep 332, loss: 0.008985054679214954\r\nStep 333, loss: 0.00994466058909893\r\nStep 334, loss: 0.010118870064616203\r\nStep 335, loss: 0.008400583639740944\r\nStep 336, loss: 0.008790651336312294\r\nStep 337, loss: 0.01010106224566698\r\nStep 338, loss: 0.009276456199586391\r\nStep 339, loss: 0.008561133407056332\r\nStep 340, loss: 0.009276671335101128\r\nStep 341, loss: 0.008887761272490025\r\nStep 342, loss: 0.010138514451682568\r\nStep 343, loss: 0.009025386534631252\r\nStep 344, loss: 0.010667262598872185\r\nStep 345, loss: 0.008848258294165134\r\nStep 346, loss: 0.010287134908139706\r\nStep 347, loss: 0.008325062692165375\r\nStep 348, loss: 0.010148957371711731\r\nStep 349, loss: 0.009190731681883335\r\nStep 350, loss: 0.009581753984093666\r\nStep 351, loss: 0.009136899374425411\r\nStep 352, loss: 0.009848029352724552\r\nStep 353, loss: 0.009596729651093483\r\nStep 354, loss: 0.008651762269437313\r\nStep 355, loss: 0.008681347593665123\r\nStep 356, loss: 0.008824453689157963\r\nStep 219, loss: 0.013099383562803268\r\nStep 220, loss: 0.013694820925593376\r\nStep 221, loss: 0.012139582075178623\r\nStep 222, loss: 0.011012162081897259\r\nStep 223, loss: 0.011057185009121895\r\nStep 224, loss: 0.01283134426921606\r\nStep 225, loss: 0.010869650170207024\r\nStep 226, loss: 0.010859719477593899\r\nStep 227, loss: 0.011350473389029503\r\nStep 228, loss: 0.010802650824189186\r\nStep 229, loss: 0.009961412288248539\r\nStep 230, loss: 0.01025364175438881\r\nStep 231, loss: 0.011073515750467777\r\nStep 232, loss: 0.010382494889199734\r\nStep 233, loss: 0.010060961358249187\r\nStep 234, loss: 0.01103963889181614\r\nStep 235, loss: 0.010757341049611568\r\nStep 236, loss: 0.010398885235190392\r\nStep 237, loss: 0.011812644079327583\r\nStep 238, loss: 0.0094912676140666\r\nStep 239, loss: 0.01158614456653595\r\nStep 240, loss: 0.010897740721702576\r\nStep 241, loss: 0.009723613038659096\r\nStep 242, loss: 0.012121678330004215\r\nStep 243, loss: 0.010595949366688728\r\nStep 244, loss: 0.010378634557127953\r\nStep 245, loss: 0.011665726080536842\r\nStep 300, loss: 0.009161300025880337\r\nStep 301, loss: 0.009543187916278839\r\nStep 302, loss: 0.010171896778047085\r\nStep 303, loss: 0.009172284975647926\r\nStep 304, loss: 0.009565647691488266\r\nStep 305, loss: 0.010039139539003372\r\nStep 306, loss: 
0.00956760160624981\r\nStep 307, loss: 0.009556096978485584\r\nStep 308, loss: 0.009693847969174385\r\nStep 309, loss: 0.010572932660579681\r\nStep 310, loss: 0.011336741037666798\r\nStep 311, loss: 0.009630970656871796\r\nStep 312, loss: 0.010636239312589169\r\nStep 313, loss: 0.009809005074203014\r\nStep 314, loss: 0.00931477639824152\r\nStep 315, loss: 0.010083867236971855\r\nStep 316, loss: 0.009611323475837708\r\nStep 317, loss: 0.01032000221312046\r\nStep 318, loss: 0.008976491168141365\r\nStep 319, loss: 0.0091495830565691\r\nStep 320, loss: 0.010515597648918629\r\nStep 321, loss: 0.010185513645410538\r\nStep 322, loss: 0.01073758490383625\r\nStep 323, loss: 0.0096001997590065\r\nStep 324, loss: 0.010766669176518917\r\nStep 325, loss: 0.00949220173060894\r\nStep 326, loss: 0.010624677874147892\r\nStep 273, loss: 0.010140996426343918\r\nStep 274, loss: 0.010601982474327087\r\nStep 275, loss: 0.01028397772461176\r\nStep 276, loss: 0.011026402935385704\r\nStep 277, loss: 0.010329381562769413\r\nStep 278, loss: 0.010154777206480503\r\nStep 279, loss: 0.01032185461372137\r\nStep 280, loss: 0.010506854392588139\r\nStep 281, loss: 0.011142993345856667\r\nStep 282, loss: 0.010036150924861431\r\nStep 283, loss: 0.011414938606321812\r\nStep 284, loss: 0.009582496248185635\r\nStep 285, loss: 0.009787384420633316\r\nStep 286, loss: 0.010223863646388054\r\nStep 287, loss: 0.009179569780826569\r\nStep 288, loss: 0.011081495322287083\r\nStep 289, loss: 0.010283957235515118\r\nStep 290, loss: 0.010582290589809418\r\nStep 291, loss: 0.00986514426767826\r\nStep 292, loss: 0.00965440645813942\r\nStep 293, loss: 0.009914973750710487\r\nStep 294, loss: 0.009669107384979725\r\nStep 295, loss: 0.00881169643253088\r\nStep 296, loss: 0.009560219943523407\r\nStep 297, loss: 0.009458527900278568\r\nStep 298, loss: 0.010391158051788807\r\nStep 299, loss: 0.009811971336603165\r\nStep 357, loss: 0.00955806765705347\r\nStep 358, loss: 0.009801270440220833\r\nStep 359, loss: 0.008466079831123352\r\nStep 360, loss: 0.00912779662758112\r\nStep 361, loss: 0.008608909323811531\r\nStep 362, loss: 0.008244039490818977\r\nStep 363, loss: 0.009335396811366081\r\nStep 364, loss: 0.008582552894949913\r\nStep 365, loss: 0.009297855198383331\r\nStep 366, loss: 0.008141983300447464\r\nStep 367, loss: 0.009393983520567417\r\nStep 368, loss: 0.00935653317719698\r\nStep 369, loss: 0.00956851989030838\r\nStep 370, loss: 0.009400052018463612\r\nStep 371, loss: 0.01006643008440733\r\nStep 372, loss: 0.009365479461848736\r\nStep 373, loss: 0.00824917946010828\r\nStep 374, loss: 0.00813942402601242\r\nStep 375, loss: 0.00910226907581091\r\nStep 376, loss: 0.010079216212034225\r\nStep 377, loss: 0.009087095968425274\r\nStep 378, loss: 0.008659454993903637\r\nStep 379, loss: 0.00930467527359724\r\nStep 380, loss: 0.008777443319559097\r\nStep 381, loss: 0.007916522212326527\r\nStep 382, loss: 0.008325379341840744\r\nStep 383, loss: 0.00848743598908186\r\nStep 246, loss: 0.010449363850057125\r\nStep 247, loss: 0.01035214588046074\r\nStep 248, loss: 0.011270000599324703\r\nStep 249, loss: 0.011872820556163788\r\nStep 250, loss: 0.011594422161579132\r\nStep 251, loss: 0.009980921633541584\r\nStep 252, loss: 0.010286862961947918\r\nStep 253, loss: 0.011944676749408245\r\nStep 254, loss: 0.012024126015603542\r\nStep 255, loss: 0.010886747390031815\r\nStep 256, loss: 0.010658128187060356\r\nStep 257, loss: 0.011032473295927048\r\nStep 258, loss: 0.011335105635225773\r\nStep 259, loss: 0.013315752148628235\r\nStep 260, loss: 
0.012090524658560753\r\nStep 261, loss: 0.009936386719346046\r\nStep 262, loss: 0.009728874079883099\r\nStep 263, loss: 0.009869663044810295\r\nStep 264, loss: 0.011215614154934883\r\nStep 265, loss: 0.01125930156558752\r\nStep 266, loss: 0.010517990216612816\r\nStep 267, loss: 0.010507647879421711\r\nStep 268, loss: 0.010362515226006508\r\nStep 269, loss: 0.011415633372962475\r\nStep 270, loss: 0.01145961694419384\r\nStep 271, loss: 0.009482959285378456\r\nStep 272, loss: 0.011310311034321785\r\nStep 327, loss: 0.010104024782776833\r\nStep 328, loss: 0.0085305105894804\r\nStep 329, loss: 0.009465201757848263\r\nStep 300, loss: 0.009161300025880337\r\nStep 301, loss: 0.009543187916278839\r\nStep 302, loss: 0.010171896778047085\r\nStep 303, loss: 0.009172284975647926\r\nStep 304, loss: 0.009565647691488266\r\nStep 305, loss: 0.010039139539003372\r\nStep 306, loss: 0.00956760160624981\r\nStep 307, loss: 0.009556096978485584\r\nStep 308, loss: 0.009693847969174385\r\nStep 309, loss: 0.010572932660579681\r\nStep 310, loss: 0.011336741037666798\r\nStep 311, loss: 0.009630970656871796\r\nStep 312, loss: 0.010636239312589169\r\nStep 313, loss: 0.009809005074203014\r\nStep 314, loss: 0.00931477639824152\r\nStep 315, loss: 0.010083867236971855\r\nStep 316, loss: 0.009611323475837708\r\nStep 317, loss: 0.01032000221312046\r\nStep 318, loss: 0.008976491168141365\r\nStep 319, loss: 0.0091495830565691\r\nStep 320, loss: 0.010515597648918629\r\nStep 321, loss: 0.010185513645410538\r\nStep 322, loss: 0.01073758490383625\r\nStep 323, loss: 0.0096001997590065\r\nStep 324, loss: 0.010766669176518917\r\nStep 325, loss: 0.00949220173060894\r\nStep 326, loss: 0.010624677874147892\r\nStep 384, loss: 0.007398432586342096\r\nStep 385, loss: 0.009856878779828548\r\nStep 386, loss: 0.008934546262025833\r\nStep 387, loss: 0.00811261311173439\r\nStep 388, loss: 0.008522428572177887\r\nStep 389, loss: 0.009407051838934422\r\nStep 390, loss: 0.009476755745708942\r\nStep 391, loss: 0.00886524561792612\r\nStep 392, loss: 0.008509679697453976\r\nStep 393, loss: 0.008736547082662582\r\nStep 394, loss: 0.0095830662176013\r\nStep 395, loss: 0.009046163409948349\r\nStep 396, loss: 0.009033717215061188\r\nStep 397, loss: 0.009225020185112953\r\nStep 398, loss: 0.00959339365363121\r\nStep 399, loss: 0.010412130504846573\r\nStep 400, loss: 0.00931647326797247\r\nStep 401, loss: 0.010083380155265331\r\nStep 402, loss: 0.009015293791890144\r\nStep 403, loss: 0.009169119410216808\r\nStep 404, loss: 0.009633009321987629\r\nStep 405, loss: 0.009266078472137451\r\nStep 406, loss: 0.009133762679994106\r\nStep 407, loss: 0.009069717489182949\r\nStep 408, loss: 0.00867233332246542\r\nStep 409, loss: 0.008150232955813408\r\nStep 410, loss: 0.0077474238350987434\r\nStep 273, loss: 0.010140996426343918\r\nStep 274, loss: 0.010601982474327087\r\nStep 275, loss: 0.01028397772461176\r\nStep 276, loss: 0.011026402935385704\r\nStep 277, loss: 0.010329381562769413\r\nStep 278, loss: 0.010154777206480503\r\nStep 279, loss: 0.01032185461372137\r\nStep 280, loss: 0.010506854392588139\r\nStep 281, loss: 0.011142993345856667\r\nStep 282, loss: 0.010036150924861431\r\nStep 283, loss: 0.011414938606321812\r\nStep 284, loss: 0.009582496248185635\r\nStep 285, loss: 0.009787384420633316\r\nStep 286, loss: 0.010223863646388054\r\nStep 287, loss: 0.009179569780826569\r\nStep 288, loss: 0.011081495322287083\r\nStep 289, loss: 0.010283957235515118\r\nStep 290, loss: 0.010582290589809418\r\nStep 291, loss: 0.00986514426767826\r\nStep 292, loss: 
0.00965440645813942\r\nStep 293, loss: 0.009914973750710487\r\nStep 294, loss: 0.009669107384979725\r\nStep 295, loss: 0.00881169643253088\r\nStep 296, loss: 0.009560219943523407\r\nStep 297, loss: 0.009458527900278568\r\nStep 298, loss: 0.010391158051788807\r\nStep 299, loss: 0.009811971336603165\r\nStep 219, loss: 0.013099383562803268\r\nStep 220, loss: 0.013694820925593376\r\nStep 221, loss: 0.012139582075178623\r\nStep 222, loss: 0.011012162081897259\r\nStep 223, loss: 0.011057185009121895\r\nStep 224, loss: 0.01283134426921606\r\nStep 225, loss: 0.010869650170207024\r\nStep 226, loss: 0.010859719477593899\r\nStep 227, loss: 0.011350473389029503\r\nStep 228, loss: 0.010802650824189186\r\nStep 229, loss: 0.009961412288248539\r\nStep 230, loss: 0.01025364175438881\r\nStep 231, loss: 0.011073515750467777\r\nStep 232, loss: 0.010382494889199734\r\nStep 233, loss: 0.010060961358249187\r\nStep 234, loss: 0.01103963889181614\r\nStep 235, loss: 0.010757341049611568\r\nStep 236, loss: 0.010398885235190392\r\nStep 237, loss: 0.011812644079327583\r\nStep 238, loss: 0.0094912676140666\r\nStep 239, loss: 0.01158614456653595\r\nStep 240, loss: 0.010897740721702576\r\nStep 241, loss: 0.009723613038659096\r\nStep 242, loss: 0.012121678330004215\r\nStep 243, loss: 0.010595949366688728\r\nStep 244, loss: 0.010378634557127953\r\nStep 245, loss: 0.011665726080536842\r\nStep 327, loss: 0.010104024782776833\r\nStep 328, loss: 0.0085305105894804\r\nStep 329, loss: 0.009465201757848263\r\nStep 411, loss: 0.00808892771601677\r\nStep 412, loss: 0.007288776803761721\r\nStep 413, loss: 0.008106247521936893\r\nStep 414, loss: 0.00810135155916214\r\nStep 415, loss: 0.008702809922397137\r\nStep 416, loss: 0.008316677995026112\r\nStep 417, loss: 0.009631174616515636\r\nStep 418, loss: 0.009098071604967117\r\nStep 419, loss: 0.010791489854454994\r\nStep 420, loss: 0.007883910089731216\r\nStep 421, loss: 0.008732305839657784\r\nStep 422, loss: 0.008829205296933651\r\nStep 423, loss: 0.008164764381945133\r\nStep 424, loss: 0.009509255178272724\r\nStep 425, loss: 0.009375796653330326\r\nStep 426, loss: 0.008981086313724518\r\nStep 427, loss: 0.010169018059968948\r\nStep 428, loss: 0.009473764337599277\r\nStep 429, loss: 0.009139928966760635\r\nStep 430, loss: 0.008253500796854496\r\nStep 431, loss: 0.009879836812615395\r\nStep 432, loss: 0.00861106626689434\r\nStep 433, loss: 0.007545475848019123\r\nStep 434, loss: 0.008925536647439003\r\nStep 435, loss: 0.00869233813136816\r\nStep 436, loss: 0.00839403085410595\r\nStep 437, loss: 0.009328337386250496\r\nStep 300, loss: 0.009161300025880337\r\nStep 301, loss: 0.009543187916278839\r\nStep 302, loss: 0.010171896778047085\r\nStep 303, loss: 0.009172284975647926\r\nStep 304, loss: 0.009565647691488266\r\nStep 305, loss: 0.010039139539003372\r\nStep 306, loss: 0.00956760160624981\r\nStep 307, loss: 0.009556096978485584\r\nStep 308, loss: 0.009693847969174385\r\nStep 309, loss: 0.010572932660579681\r\nStep 310, loss: 0.011336741037666798\r\nStep 311, loss: 0.009630970656871796\r\nStep 312, loss: 0.010636239312589169\r\nStep 313, loss: 0.009809005074203014\r\nStep 314, loss: 0.00931477639824152\r\nStep 315, loss: 0.010083867236971855\r\nStep 316, loss: 0.009611323475837708\r\nStep 317, loss: 0.01032000221312046\r\nStep 318, loss: 0.008976491168141365\r\nStep 319, loss: 0.0091495830565691\r\nStep 320, loss: 0.010515597648918629\r\nStep 321, loss: 0.010185513645410538\r\nStep 322, loss: 0.01073758490383625\r\nStep 323, loss: 0.0096001997590065\r\nStep 324, loss: 
0.010766669176518917\r\nStep 325, loss: 0.00949220173060894\r\nStep 326, loss: 0.010624677874147892\r\nStep 246, loss: 0.010449363850057125\r\nStep 247, loss: 0.01035214588046074\r\nStep 248, loss: 0.011270000599324703\r\nStep 249, loss: 0.011872820556163788\r\nStep 250, loss: 0.011594422161579132\r\nStep 251, loss: 0.009980921633541584\r\nStep 252, loss: 0.010286862961947918\r\nStep 253, loss: 0.011944676749408245\r\nStep 254, loss: 0.012024126015603542\r\nStep 255, loss: 0.010886747390031815\r\nStep 256, loss: 0.010658128187060356\r\nStep 257, loss: 0.011032473295927048\r\nStep 258, loss: 0.011335105635225773\r\nStep 259, loss: 0.013315752148628235\r\nStep 260, loss: 0.012090524658560753\r\nStep 261, loss: 0.009936386719346046\r\nStep 262, loss: 0.009728874079883099\r\nStep 263, loss: 0.009869663044810295\r\nStep 264, loss: 0.011215614154934883\r\nStep 265, loss: 0.01125930156558752\r\nStep 266, loss: 0.010517990216612816\r\nStep 267, loss: 0.010507647879421711\r\nStep 268, loss: 0.010362515226006508\r\nStep 269, loss: 0.011415633372962475\r\nStep 270, loss: 0.01145961694419384\r\nStep 271, loss: 0.009482959285378456\r\nStep 272, loss: 0.011310311034321785\r\nStep 330, loss: 0.009789333678781986\r\nStep 331, loss: 0.01092305313795805\r\nStep 332, loss: 0.008985054679214954\r\nStep 333, loss: 0.00994466058909893\r\nStep 334, loss: 0.010118870064616203\r\nStep 335, loss: 0.008400583639740944\r\nStep 336, loss: 0.008790651336312294\r\nStep 337, loss: 0.01010106224566698\r\nStep 338, loss: 0.009276456199586391\r\nStep 339, loss: 0.008561133407056332\r\nStep 340, loss: 0.009276671335101128\r\nStep 341, loss: 0.008887761272490025\r\nStep 342, loss: 0.010138514451682568\r\nStep 343, loss: 0.009025386534631252\r\nStep 344, loss: 0.010667262598872185\r\nStep 345, loss: 0.008848258294165134\r\nStep 346, loss: 0.010287134908139706\r\nStep 347, loss: 0.008325062692165375\r\nStep 348, loss: 0.010148957371711731\r\nStep 349, loss: 0.009190731681883335\r\nStep 350, loss: 0.009581753984093666\r\nStep 351, loss: 0.009136899374425411\r\nStep 352, loss: 0.009848029352724552\r\nStep 353, loss: 0.009596729651093483\r\nStep 354, loss: 0.008651762269437313\r\nStep 355, loss: 0.008681347593665123\r\nStep 356, loss: 0.008824453689157963\r\nStep 438, loss: 0.00917942076921463\r\nStep 439, loss: 0.009509981609880924\r\nStep 440, loss: 0.009915051981806755\r\nStep 327, loss: 0.010104024782776833\r\nStep 328, loss: 0.0085305105894804\r\nStep 329, loss: 0.009465201757848263\r\nStep 273, loss: 0.010140996426343918\r\nStep 274, loss: 0.010601982474327087\r\nStep 275, loss: 0.01028397772461176\r\nStep 276, loss: 0.011026402935385704\r\nStep 277, loss: 0.010329381562769413\r\nStep 278, loss: 0.010154777206480503\r\nStep 279, loss: 0.01032185461372137\r\nStep 280, loss: 0.010506854392588139\r\nStep 281, loss: 0.011142993345856667\r\nStep 282, loss: 0.010036150924861431\r\nStep 283, loss: 0.011414938606321812\r\nStep 284, loss: 0.009582496248185635\r\nStep 285, loss: 0.009787384420633316\r\nStep 286, loss: 0.010223863646388054\r\nStep 287, loss: 0.009179569780826569\r\nStep 288, loss: 0.011081495322287083\r\nStep 289, loss: 0.010283957235515118\r\nStep 290, loss: 0.010582290589809418\r\nStep 291, loss: 0.00986514426767826\r\nStep 292, loss: 0.00965440645813942\r\nStep 293, loss: 0.009914973750710487\r\nStep 294, loss: 0.009669107384979725\r\nStep 295, loss: 0.00881169643253088\r\nStep 296, loss: 0.009560219943523407\r\nStep 297, loss: 0.009458527900278568\r\nStep 298, loss: 0.010391158051788807\r\nStep 299, loss: 
0.009811971336603165\r\nStep 357, loss: 0.00955806765705347\r\nStep 358, loss: 0.009801270440220833\r\nStep 359, loss: 0.008466079831123352\r\nStep 360, loss: 0.00912779662758112\r\nStep 361, loss: 0.008608909323811531\r\nStep 362, loss: 0.008244039490818977\r\nStep 363, loss: 0.009335396811366081\r\nStep 364, loss: 0.008582552894949913\r\nStep 365, loss: 0.009297855198383331\r\nStep 366, loss: 0.008141983300447464\r\nStep 367, loss: 0.009393983520567417\r\nStep 368, loss: 0.00935653317719698\r\nStep 369, loss: 0.00956851989030838\r\nStep 370, loss: 0.009400052018463612\r\nStep 371, loss: 0.01006643008440733\r\nStep 372, loss: 0.009365479461848736\r\nStep 373, loss: 0.00824917946010828\r\nStep 374, loss: 0.00813942402601242\r\nStep 375, loss: 0.00910226907581091\r\nStep 376, loss: 0.010079216212034225\r\nStep 377, loss: 0.009087095968425274\r\nStep 378, loss: 0.008659454993903637\r\nStep 379, loss: 0.00930467527359724\r\nStep 380, loss: 0.008777443319559097\r\nStep 381, loss: 0.007916522212326527\r\nStep 382, loss: 0.008325379341840744\r\nStep 383, loss: 0.00848743598908186\r\nStep 219, loss: 0.013099383562803268\r\nStep 220, loss: 0.013694820925593376\r\nStep 221, loss: 0.012139582075178623\r\nStep 222, loss: 0.011012162081897259\r\nStep 223, loss: 0.011057185009121895\r\nStep 224, loss: 0.01283134426921606\r\nStep 225, loss: 0.010869650170207024\r\nStep 226, loss: 0.010859719477593899\r\nStep 227, loss: 0.011350473389029503\r\nStep 228, loss: 0.010802650824189186\r\nStep 229, loss: 0.009961412288248539\r\nStep 230, loss: 0.01025364175438881\r\nStep 231, loss: 0.011073515750467777\r\nStep 232, loss: 0.010382494889199734\r\nStep 233, loss: 0.010060961358249187\r\nStep 234, loss: 0.01103963889181614\r\nStep 235, loss: 0.010757341049611568\r\nStep 236, loss: 0.010398885235190392\r\nStep 237, loss: 0.011812644079327583\r\nStep 238, loss: 0.0094912676140666\r\nStep 239, loss: 0.01158614456653595\r\nStep 240, loss: 0.010897740721702576\r\nStep 241, loss: 0.009723613038659096\r\nStep 242, loss: 0.012121678330004215\r\nStep 243, loss: 0.010595949366688728\r\nStep 244, loss: 0.010378634557127953\r\nStep 245, loss: 0.011665726080536842\r\nStep 330, loss: 0.009789333678781986\r\nStep 331, loss: 0.01092305313795805\r\nStep 332, loss: 0.008985054679214954\r\nStep 333, loss: 0.00994466058909893\r\nStep 334, loss: 0.010118870064616203\r\nStep 335, loss: 0.008400583639740944\r\nStep 336, loss: 0.008790651336312294\r\nStep 337, loss: 0.01010106224566698\r\nStep 338, loss: 0.009276456199586391\r\nStep 339, loss: 0.008561133407056332\r\nStep 340, loss: 0.009276671335101128\r\nStep 341, loss: 0.008887761272490025\r\nStep 342, loss: 0.010138514451682568\r\nStep 343, loss: 0.009025386534631252\r\nStep 344, loss: 0.010667262598872185\r\nStep 345, loss: 0.008848258294165134\r\nStep 346, loss: 0.010287134908139706\r\nStep 347, loss: 0.008325062692165375\r\nStep 348, loss: 0.010148957371711731\r\nStep 349, loss: 0.009190731681883335\r\nStep 350, loss: 0.009581753984093666\r\nStep 351, loss: 0.009136899374425411\r\nStep 352, loss: 0.009848029352724552\r\nStep 353, loss: 0.009596729651093483\r\nStep 354, loss: 0.008651762269437313\r\nStep 355, loss: 0.008681347593665123\r\nStep 356, loss: 0.008824453689157963\r\nStep 300, loss: 0.009161300025880337\r\nStep 301, loss: 0.009543187916278839\r\nStep 302, loss: 0.010171896778047085\r\nStep 303, loss: 0.009172284975647926\r\nStep 304, loss: 0.009565647691488266\r\nStep 305, loss: 0.010039139539003372\r\nStep 306, loss: 0.00956760160624981\r\nStep 307, loss: 
0.009556096978485584\r\nStep 308, loss: 0.009693847969174385\r\nStep 309, loss: 0.010572932660579681\r\nStep 310, loss: 0.011336741037666798\r\nStep 311, loss: 0.009630970656871796\r\nStep 312, loss: 0.010636239312589169\r\nStep 313, loss: 0.009809005074203014\r\nStep 314, loss: 0.00931477639824152\r\nStep 315, loss: 0.010083867236971855\r\nStep 316, loss: 0.009611323475837708\r\nStep 317, loss: 0.01032000221312046\r\nStep 318, loss: 0.008976491168141365\r\nStep 319, loss: 0.0091495830565691\r\nStep 320, loss: 0.010515597648918629\r\nStep 321, loss: 0.010185513645410538\r\nStep 322, loss: 0.01073758490383625\r\nStep 323, loss: 0.0096001997590065\r\nStep 324, loss: 0.010766669176518917\r\nStep 325, loss: 0.00949220173060894\r\nStep 326, loss: 0.010624677874147892\r\nStep 384, loss: 0.007398432586342096\r\nStep 385, loss: 0.009856878779828548\r\nStep 386, loss: 0.008934546262025833\r\nStep 387, loss: 0.00811261311173439\r\nStep 388, loss: 0.008522428572177887\r\nStep 389, loss: 0.009407051838934422\r\nStep 390, loss: 0.009476755745708942\r\nStep 391, loss: 0.00886524561792612\r\nStep 392, loss: 0.008509679697453976\r\nStep 393, loss: 0.008736547082662582\r\nStep 394, loss: 0.0095830662176013\r\nStep 395, loss: 0.009046163409948349\r\nStep 396, loss: 0.009033717215061188\r\nStep 397, loss: 0.009225020185112953\r\nStep 398, loss: 0.00959339365363121\r\nStep 399, loss: 0.010412130504846573\r\nStep 400, loss: 0.00931647326797247\r\nStep 401, loss: 0.010083380155265331\r\nStep 402, loss: 0.009015293791890144\r\nStep 403, loss: 0.009169119410216808\r\nStep 404, loss: 0.009633009321987629\r\nStep 405, loss: 0.009266078472137451\r\nStep 406, loss: 0.009133762679994106\r\nStep 407, loss: 0.009069717489182949\r\nStep 408, loss: 0.00867233332246542\r\nStep 409, loss: 0.008150232955813408\r\nStep 410, loss: 0.0077474238350987434\r\nStep 246, loss: 0.010449363850057125\r\nStep 247, loss: 0.01035214588046074\r\nStep 248, loss: 0.011270000599324703\r\nStep 249, loss: 0.011872820556163788\r\nStep 250, loss: 0.011594422161579132\r\nStep 251, loss: 0.009980921633541584\r\nStep 252, loss: 0.010286862961947918\r\nStep 253, loss: 0.011944676749408245\r\nStep 254, loss: 0.012024126015603542\r\nStep 255, loss: 0.010886747390031815\r\nStep 256, loss: 0.010658128187060356\r\nStep 257, loss: 0.011032473295927048\r\nStep 258, loss: 0.011335105635225773\r\nStep 259, loss: 0.013315752148628235\r\nStep 260, loss: 0.012090524658560753\r\nStep 261, loss: 0.009936386719346046\r\nStep 262, loss: 0.009728874079883099\r\nStep 263, loss: 0.009869663044810295\r\nStep 264, loss: 0.011215614154934883\r\nStep 265, loss: 0.01125930156558752\r\nStep 266, loss: 0.010517990216612816\r\nStep 267, loss: 0.010507647879421711\r\nStep 268, loss: 0.010362515226006508\r\nStep 269, loss: 0.011415633372962475\r\nStep 270, loss: 0.01145961694419384\r\nStep 271, loss: 0.009482959285378456\r\nStep 272, loss: 0.011310311034321785\r\nStep 357, loss: 0.00955806765705347\r\nStep 358, loss: 0.009801270440220833\r\nStep 359, loss: 0.008466079831123352\r\nStep 360, loss: 0.00912779662758112\r\nStep 361, loss: 0.008608909323811531\r\nStep 362, loss: 0.008244039490818977\r\nStep 363, loss: 0.009335396811366081\r\nStep 364, loss: 0.008582552894949913\r\nStep 365, loss: 0.009297855198383331\r\nStep 366, loss: 0.008141983300447464\r\nStep 367, loss: 0.009393983520567417\r\nStep 368, loss: 0.00935653317719698\r\nStep 369, loss: 0.00956851989030838\r\nStep 370, loss: 0.009400052018463612\r\nStep 371, loss: 0.01006643008440733\r\nStep 372, loss: 
0.009365479461848736\r\nStep 373, loss: 0.00824917946010828\r\nStep 374, loss: 0.00813942402601242\r\nStep 375, loss: 0.00910226907581091\r\nStep 376, loss: 0.010079216212034225\r\nStep 377, loss: 0.009087095968425274\r\nStep 378, loss: 0.008659454993903637\r\nStep 379, loss: 0.00930467527359724\r\nStep 380, loss: 0.008777443319559097\r\nStep 381, loss: 0.007916522212326527\r\nStep 382, loss: 0.008325379341840744\r\nStep 383, loss: 0.00848743598908186\r\nStep 327, loss: 0.010104024782776833\r\nStep 328, loss: 0.0085305105894804\r\nStep 329, loss: 0.009465201757848263\r\nStep 273, loss: 0.010140996426343918\r\nStep 274, loss: 0.010601982474327087\r\nStep 275, loss: 0.01028397772461176\r\nStep 276, loss: 0.011026402935385704\r\nStep 277, loss: 0.010329381562769413\r\nStep 278, loss: 0.010154777206480503\r\nStep 279, loss: 0.01032185461372137\r\nStep 280, loss: 0.010506854392588139\r\nStep 281, loss: 0.011142993345856667\r\nStep 282, loss: 0.010036150924861431\r\nStep 283, loss: 0.011414938606321812\r\nStep 284, loss: 0.009582496248185635\r\nStep 285, loss: 0.009787384420633316\r\nStep 286, loss: 0.010223863646388054\r\nStep 287, loss: 0.009179569780826569\r\nStep 288, loss: 0.011081495322287083\r\nStep 289, loss: 0.010283957235515118\r\nStep 290, loss: 0.010582290589809418\r\nStep 291, loss: 0.00986514426767826\r\nStep 292, loss: 0.00965440645813942\r\nStep 293, loss: 0.009914973750710487\r\nStep 294, loss: 0.009669107384979725\r\nStep 295, loss: 0.00881169643253088\r\nStep 296, loss: 0.009560219943523407\r\nStep 297, loss: 0.009458527900278568\r\nStep 298, loss: 0.010391158051788807\r\nStep 299, loss: 0.009811971336603165\r\nStep 384, loss: 0.007398432586342096\r\nStep 385, loss: 0.009856878779828548\r\nStep 386, loss: 0.008934546262025833\r\nStep 387, loss: 0.00811261311173439\r\nStep 388, loss: 0.008522428572177887\r\nStep 389, loss: 0.009407051838934422\r\nStep 390, loss: 0.009476755745708942\r\nStep 391, loss: 0.00886524561792612\r\nStep 392, loss: 0.008509679697453976\r\nStep 393, loss: 0.008736547082662582\r\nStep 394, loss: 0.0095830662176013\r\nStep 395, loss: 0.009046163409948349\r\nStep 396, loss: 0.009033717215061188\r\nStep 397, loss: 0.009225020185112953\r\nStep 398, loss: 0.00959339365363121\r\nStep 399, loss: 0.010412130504846573\r\nStep 400, loss: 0.00931647326797247\r\nStep 401, loss: 0.010083380155265331\r\nStep 402, loss: 0.009015293791890144\r\nStep 403, loss: 0.009169119410216808\r\nStep 404, loss: 0.009633009321987629\r\nStep 405, loss: 0.009266078472137451\r\nStep 406, loss: 0.009133762679994106\r\nStep 407, loss: 0.009069717489182949\r\nStep 408, loss: 0.00867233332246542\r\nStep 409, loss: 0.008150232955813408\r\nStep 410, loss: 0.0077474238350987434\r\nStep 330, loss: 0.009789333678781986\r\nStep 331, loss: 0.01092305313795805\r\nStep 332, loss: 0.008985054679214954\r\nStep 333, loss: 0.00994466058909893\r\nStep 334, loss: 0.010118870064616203\r\nStep 335, loss: 0.008400583639740944\r\nStep 336, loss: 0.008790651336312294\r\nStep 337, loss: 0.01010106224566698\r\nStep 338, loss: 0.009276456199586391\r\nStep 339, loss: 0.008561133407056332\r\nStep 340, loss: 0.009276671335101128\r\nStep 341, loss: 0.008887761272490025\r\nStep 342, loss: 0.010138514451682568\r\nStep 343, loss: 0.009025386534631252\r\nStep 344, loss: 0.010667262598872185\r\nStep 345, loss: 0.008848258294165134\r\nStep 346, loss: 0.010287134908139706\r\nStep 347, loss: 0.008325062692165375\r\nStep 348, loss: 0.010148957371711731\r\nStep 349, loss: 0.009190731681883335\r\nStep 350, loss: 
0.009581753984093666\r\nStep 351, loss: 0.009136899374425411\r\nStep 352, loss: 0.009848029352724552\r\nStep 353, loss: 0.009596729651093483\r\nStep 354, loss: 0.008651762269437313\r\nStep 355, loss: 0.008681347593665123\r\nStep 356, loss: 0.008824453689157963\r\nStep 411, loss: 0.00808892771601677\r\nStep 412, loss: 0.007288776803761721\r\nStep 413, loss: 0.008106247521936893\r\nStep 414, loss: 0.00810135155916214\r\nStep 415, loss: 0.008702809922397137\r\nStep 416, loss: 0.008316677995026112\r\nStep 417, loss: 0.009631174616515636\r\nStep 418, loss: 0.009098071604967117\r\nStep 419, loss: 0.010791489854454994\r\nStep 420, loss: 0.007883910089731216\r\nStep 421, loss: 0.008732305839657784\r\nStep 422, loss: 0.008829205296933651\r\nStep 423, loss: 0.008164764381945133\r\nStep 424, loss: 0.009509255178272724\r\nStep 425, loss: 0.009375796653330326\r\nStep 426, loss: 0.008981086313724518\r\nStep 427, loss: 0.010169018059968948\r\nStep 428, loss: 0.009473764337599277\r\nStep 429, loss: 0.009139928966760635\r\nStep 430, loss: 0.008253500796854496\r\nStep 431, loss: 0.009879836812615395\r\nStep 432, loss: 0.00861106626689434\r\nStep 433, loss: 0.007545475848019123\r\nStep 434, loss: 0.008925536647439003\r\nStep 435, loss: 0.00869233813136816\r\nStep 436, loss: 0.00839403085410595\r\nStep 437, loss: 0.009328337386250496\r\nStep 300, loss: 0.009161300025880337\r\nStep 301, loss: 0.009543187916278839\r\nStep 302, loss: 0.010171896778047085\r\nStep 303, loss: 0.009172284975647926\r\nStep 304, loss: 0.009565647691488266\r\nStep 305, loss: 0.010039139539003372\r\nStep 306, loss: 0.00956760160624981\r\nStep 307, loss: 0.009556096978485584\r\nStep 308, loss: 0.009693847969174385\r\nStep 309, loss: 0.010572932660579681\r\nStep 310, loss: 0.011336741037666798\r\nStep 311, loss: 0.009630970656871796\r\nStep 312, loss: 0.010636239312589169\r\nStep 313, loss: 0.009809005074203014\r\nStep 314, loss: 0.00931477639824152\r\nStep 315, loss: 0.010083867236971855\r\nStep 316, loss: 0.009611323475837708\r\nStep 317, loss: 0.01032000221312046\r\nStep 318, loss: 0.008976491168141365\r\nStep 319, loss: 0.0091495830565691\r\nStep 320, loss: 0.010515597648918629\r\nStep 321, loss: 0.010185513645410538\r\nStep 322, loss: 0.01073758490383625\r\nStep 323, loss: 0.0096001997590065\r\nStep 324, loss: 0.010766669176518917\r\nStep 325, loss: 0.00949220173060894\r\nStep 326, loss: 0.010624677874147892\r\nStep 411, loss: 0.00808892771601677\r\nStep 412, loss: 0.007288776803761721\r\nStep 413, loss: 0.008106247521936893\r\nStep 414, loss: 0.00810135155916214\r\nStep 415, loss: 0.008702809922397137\r\nStep 416, loss: 0.008316677995026112\r\nStep 417, loss: 0.009631174616515636\r\nStep 418, loss: 0.009098071604967117\r\nStep 419, loss: 0.010791489854454994\r\nStep 420, loss: 0.007883910089731216\r\nStep 421, loss: 0.008732305839657784\r\nStep 422, loss: 0.008829205296933651\r\nStep 423, loss: 0.008164764381945133\r\nStep 424, loss: 0.009509255178272724\r\nStep 425, loss: 0.009375796653330326\r\nStep 426, loss: 0.008981086313724518\r\nStep 427, loss: 0.010169018059968948\r\nStep 428, loss: 0.009473764337599277\r\nStep 429, loss: 0.009139928966760635\r\nStep 430, loss: 0.008253500796854496\r\nStep 431, loss: 0.009879836812615395\r\nStep 432, loss: 0.00861106626689434\r\nStep 433, loss: 0.007545475848019123\r\nStep 434, loss: 0.008925536647439003\r\nStep 435, loss: 0.00869233813136816\r\nStep 436, loss: 0.00839403085410595\r\nStep 437, loss: 0.009328337386250496\r\nStep 357, loss: 0.00955806765705347\r\nStep 358, loss: 
0.009801270440220833\r\nStep 359, loss: 0.008466079831123352\r\nStep 360, loss: 0.00912779662758112\r\nStep 361, loss: 0.008608909323811531\r\nStep 362, loss: 0.008244039490818977\r\nStep 363, loss: 0.009335396811366081\r\nStep 364, loss: 0.008582552894949913\r\nStep 365, loss: 0.009297855198383331\r\nStep 366, loss: 0.008141983300447464\r\nStep 367, loss: 0.009393983520567417\r\nStep 368, loss: 0.00935653317719698\r\nStep 369, loss: 0.00956851989030838\r\nStep 370, loss: 0.009400052018463612\r\nStep 371, loss: 0.01006643008440733\r\nStep 372, loss: 0.009365479461848736\r\nStep 373, loss: 0.00824917946010828\r\nStep 374, loss: 0.00813942402601242\r\nStep 375, loss: 0.00910226907581091\r\nStep 376, loss: 0.010079216212034225\r\nStep 377, loss: 0.009087095968425274\r\nStep 378, loss: 0.008659454993903637\r\nStep 379, loss: 0.00930467527359724\r\nStep 380, loss: 0.008777443319559097\r\nStep 381, loss: 0.007916522212326527\r\nStep 382, loss: 0.008325379341840744\r\nStep 383, loss: 0.00848743598908186\r\nStep 438, loss: 0.00917942076921463\r\nStep 439, loss: 0.009509981609880924\r\nStep 440, loss: 0.009915051981806755\r\nStep 327, loss: 0.010104024782776833\r\nStep 328, loss: 0.0085305105894804\r\nStep 329, loss: 0.009465201757848263\r\nStep 438, loss: 0.00917942076921463\r\nStep 439, loss: 0.009509981609880924\r\nStep 440, loss: 0.009915051981806755\r\nStep 384, loss: 0.007398432586342096\r\nStep 385, loss: 0.009856878779828548\r\nStep 386, loss: 0.008934546262025833\r\nStep 387, loss: 0.00811261311173439\r\nStep 388, loss: 0.008522428572177887\r\nStep 389, loss: 0.009407051838934422\r\nStep 390, loss: 0.009476755745708942\r\nStep 391, loss: 0.00886524561792612\r\nStep 392, loss: 0.008509679697453976\r\nStep 393, loss: 0.008736547082662582\r\nStep 394, loss: 0.0095830662176013\r\nStep 395, loss: 0.009046163409948349\r\nStep 396, loss: 0.009033717215061188\r\nStep 397, loss: 0.009225020185112953\r\nStep 398, loss: 0.00959339365363121\r\nStep 399, loss: 0.010412130504846573\r\nStep 400, loss: 0.00931647326797247\r\nStep 401, loss: 0.010083380155265331\r\nStep 402, loss: 0.009015293791890144\r\nStep 403, loss: 0.009169119410216808\r\nStep 404, loss: 0.009633009321987629\r\nStep 405, loss: 0.009266078472137451\r\nStep 406, loss: 0.009133762679994106\r\nStep 407, loss: 0.009069717489182949\r\nStep 408, loss: 0.00867233332246542\r\nStep 409, loss: 0.008150232955813408\r\nStep 410, loss: 0.0077474238350987434\r\nStep 219, loss: 0.013099383562803268\r\nStep 220, loss: 0.013694820925593376\r\nStep 221, loss: 0.012139582075178623\r\nStep 222, loss: 0.011012162081897259\r\nStep 223, loss: 0.011057185009121895\r\nStep 224, loss: 0.01283134426921606\r\nStep 225, loss: 0.010869650170207024\r\nStep 226, loss: 0.010859719477593899\r\nStep 227, loss: 0.011350473389029503\r\nStep 228, loss: 0.010802650824189186\r\nStep 229, loss: 0.009961412288248539\r\nStep 230, loss: 0.01025364175438881\r\nStep 231, loss: 0.011073515750467777\r\nStep 232, loss: 0.010382494889199734\r\nStep 233, loss: 0.010060961358249187\r\nStep 234, loss: 0.01103963889181614\r\nStep 235, loss: 0.010757341049611568\r\nStep 236, loss: 0.010398885235190392\r\nStep 237, loss: 0.011812644079327583\r\nStep 238, loss: 0.0094912676140666\r\nStep 239, loss: 0.01158614456653595\r\nStep 240, loss: 0.010897740721702576\r\nStep 241, loss: 0.009723613038659096\r\nStep 242, loss: 0.012121678330004215\r\nStep 243, loss: 0.010595949366688728\r\nStep 244, loss: 0.010378634557127953\r\nStep 245, loss: 0.011665726080536842\r\nStep 330, loss: 
0.009789333678781986\r\nStep 331, loss: 0.01092305313795805\r\nStep 332, loss: 0.008985054679214954\r\nStep 333, loss: 0.00994466058909893\r\nStep 334, loss: 0.010118870064616203\r\nStep 335, loss: 0.008400583639740944\r\nStep 336, loss: 0.008790651336312294\r\nStep 337, loss: 0.01010106224566698\r\nStep 338, loss: 0.009276456199586391\r\nStep 339, loss: 0.008561133407056332\r\nStep 340, loss: 0.009276671335101128\r\nStep 341, loss: 0.008887761272490025\r\nStep 342, loss: 0.010138514451682568\r\nStep 343, loss: 0.009025386534631252\r\nStep 344, loss: 0.010667262598872185\r\nStep 345, loss: 0.008848258294165134\r\nStep 346, loss: 0.010287134908139706\r\nStep 347, loss: 0.008325062692165375\r\nStep 348, loss: 0.010148957371711731\r\nStep 349, loss: 0.009190731681883335\r\nStep 350, loss: 0.009581753984093666\r\nStep 351, loss: 0.009136899374425411\r\nStep 352, loss: 0.009848029352724552\r\nStep 353, loss: 0.009596729651093483\r\nStep 354, loss: 0.008651762269437313\r\nStep 355, loss: 0.008681347593665123\r\nStep 356, loss: 0.008824453689157963\r\nStep 219, loss: 0.013099383562803268\r\nStep 220, loss: 0.013694820925593376\r\nStep 221, loss: 0.012139582075178623\r\nStep 222, loss: 0.011012162081897259\r\nStep 223, loss: 0.011057185009121895\r\nStep 224, loss: 0.01283134426921606\r\nStep 225, loss: 0.010869650170207024\r\nStep 226, loss: 0.010859719477593899\r\nStep 227, loss: 0.011350473389029503\r\nStep 228, loss: 0.010802650824189186\r\nStep 229, loss: 0.009961412288248539\r\nStep 230, loss: 0.01025364175438881\r\nStep 231, loss: 0.011073515750467777\r\nStep 232, loss: 0.010382494889199734\r\nStep 233, loss: 0.010060961358249187\r\nStep 234, loss: 0.01103963889181614\r\nStep 235, loss: 0.010757341049611568\r\nStep 236, loss: 0.010398885235190392\r\nStep 237, loss: 0.011812644079327583\r\nStep 238, loss: 0.0094912676140666\r\nStep 239, loss: 0.01158614456653595\r\nStep 240, loss: 0.010897740721702576\r\nStep 241, loss: 0.009723613038659096\r\nStep 242, loss: 0.012121678330004215\r\nStep 243, loss: 0.010595949366688728\r\nStep 244, loss: 0.010378634557127953\r\nStep 245, loss: 0.011665726080536842\r\nStep 411, loss: 0.00808892771601677\r\nStep 412, loss: 0.007288776803761721\r\nStep 413, loss: 0.008106247521936893\r\nStep 414, loss: 0.00810135155916214\r\nStep 415, loss: 0.008702809922397137\r\nStep 416, loss: 0.008316677995026112\r\nStep 417, loss: 0.009631174616515636\r\nStep 418, loss: 0.009098071604967117\r\nStep 419, loss: 0.010791489854454994\r\nStep 420, loss: 0.007883910089731216\r\nStep 421, loss: 0.008732305839657784\r\nStep 422, loss: 0.008829205296933651\r\nStep 423, loss: 0.008164764381945133\r\nStep 424, loss: 0.009509255178272724\r\nStep 425, loss: 0.009375796653330326\r\nStep 426, loss: 0.008981086313724518\r\nStep 427, loss: 0.010169018059968948\r\nStep 428, loss: 0.009473764337599277\r\nStep 429, loss: 0.009139928966760635\r\nStep 430, loss: 0.008253500796854496\r\nStep 431, loss: 0.009879836812615395\r\nStep 432, loss: 0.00861106626689434\r\nStep 433, loss: 0.007545475848019123\r\nStep 434, loss: 0.008925536647439003\r\nStep 435, loss: 0.00869233813136816\r\nStep 436, loss: 0.00839403085410595\r\nStep 437, loss: 0.009328337386250496\r\nStep 246, loss: 0.010449363850057125\r\nStep 247, loss: 0.01035214588046074\r\nStep 248, loss: 0.011270000599324703\r\nStep 249, loss: 0.011872820556163788\r\nStep 250, loss: 0.011594422161579132\r\nStep 251, loss: 0.009980921633541584\r\nStep 252, loss: 0.010286862961947918\r\nStep 253, loss: 0.011944676749408245\r\nStep 254, loss: 
0.012024126015603542\r\nStep 255, loss: 0.010886747390031815\r\nStep 256, loss: 0.010658128187060356\r\nStep 257, loss: 0.011032473295927048\r\nStep 258, loss: 0.011335105635225773\r\nStep 259, loss: 0.013315752148628235\r\nStep 260, loss: 0.012090524658560753\r\nStep 261, loss: 0.009936386719346046\r\nStep 262, loss: 0.009728874079883099\r\nStep 263, loss: 0.009869663044810295\r\nStep 264, loss: 0.011215614154934883\r\nStep 265, loss: 0.01125930156558752\r\nStep 266, loss: 0.010517990216612816\r\nStep 267, loss: 0.010507647879421711\r\nStep 268, loss: 0.010362515226006508\r\nStep 269, loss: 0.011415633372962475\r\nStep 270, loss: 0.01145961694419384\r\nStep 271, loss: 0.009482959285378456\r\nStep 272, loss: 0.011310311034321785\r\nStep 357, loss: 0.00955806765705347\r\nStep 358, loss: 0.009801270440220833\r\nStep 359, loss: 0.008466079831123352\r\nStep 360, loss: 0.00912779662758112\r\nStep 361, loss: 0.008608909323811531\r\nStep 362, loss: 0.008244039490818977\r\nStep 363, loss: 0.009335396811366081\r\nStep 364, loss: 0.008582552894949913\r\nStep 365, loss: 0.009297855198383331\r\nStep 366, loss: 0.008141983300447464\r\nStep 367, loss: 0.009393983520567417\r\nStep 368, loss: 0.00935653317719698\r\nStep 369, loss: 0.00956851989030838\r\nStep 370, loss: 0.009400052018463612\r\nStep 371, loss: 0.01006643008440733\r\nStep 372, loss: 0.009365479461848736\r\nStep 373, loss: 0.00824917946010828\r\nStep 374, loss: 0.00813942402601242\r\nStep 375, loss: 0.00910226907581091\r\nStep 376, loss: 0.010079216212034225\r\nStep 377, loss: 0.009087095968425274\r\nStep 378, loss: 0.008659454993903637\r\nStep 379, loss: 0.00930467527359724\r\nStep 380, loss: 0.008777443319559097\r\nStep 381, loss: 0.007916522212326527\r\nStep 382, loss: 0.008325379341840744\r\nStep 383, loss: 0.00848743598908186\r\nStep 246, loss: 0.010449363850057125\r\nStep 247, loss: 0.01035214588046074\r\nStep 248, loss: 0.011270000599324703\r\nStep 249, loss: 0.011872820556163788\r\nStep 250, loss: 0.011594422161579132\r\nStep 251, loss: 0.009980921633541584\r\nStep 252, loss: 0.010286862961947918\r\nStep 253, loss: 0.011944676749408245\r\nStep 254, loss: 0.012024126015603542\r\nStep 255, loss: 0.010886747390031815\r\nStep 256, loss: 0.010658128187060356\r\nStep 257, loss: 0.011032473295927048\r\nStep 258, loss: 0.011335105635225773\r\nStep 259, loss: 0.013315752148628235\r\nStep 260, loss: 0.012090524658560753\r\nStep 261, loss: 0.009936386719346046\r\nStep 262, loss: 0.009728874079883099\r\nStep 263, loss: 0.009869663044810295\r\nStep 264, loss: 0.011215614154934883\r\nStep 265, loss: 0.01125930156558752\r\nStep 266, loss: 0.010517990216612816\r\nStep 267, loss: 0.010507647879421711\r\nStep 268, loss: 0.010362515226006508\r\nStep 269, loss: 0.011415633372962475\r\nStep 270, loss: 0.01145961694419384\r\nStep 271, loss: 0.009482959285378456\r\nStep 272, loss: 0.011310311034321785\r\nStep 438, loss: 0.00917942076921463\r\nStep 439, loss: 0.009509981609880924\r\nStep 440, loss: 0.009915051981806755\r\nStep 273, loss: 0.010140996426343918\r\nStep 274, loss: 0.010601982474327087\r\nStep 275, loss: 0.01028397772461176\r\nStep 276, loss: 0.011026402935385704\r\nStep 277, loss: 0.010329381562769413\r\nStep 278, loss: 0.010154777206480503\r\nStep 279, loss: 0.01032185461372137\r\nStep 280, loss: 0.010506854392588139\r\nStep 281, loss: 0.011142993345856667\r\nStep 282, loss: 0.010036150924861431\r\nStep 283, loss: 0.011414938606321812\r\nStep 284, loss: 0.009582496248185635\r\nStep 285, loss: 0.009787384420633316\r\nStep 286, loss: 
0.010223863646388054\r\nStep 287, loss: 0.009179569780826569\r\nStep 288, loss: 0.011081495322287083\r\nStep 289, loss: 0.010283957235515118\r\nStep 290, loss: 0.010582290589809418\r\nStep 291, loss: 0.00986514426767826\r\nStep 292, loss: 0.00965440645813942\r\nStep 293, loss: 0.009914973750710487\r\nStep 294, loss: 0.009669107384979725\r\nStep 295, loss: 0.00881169643253088\r\nStep 296, loss: 0.009560219943523407\r\nStep 297, loss: 0.009458527900278568\r\nStep 298, loss: 0.010391158051788807\r\nStep 299, loss: 0.009811971336603165\r\nStep 384, loss: 0.007398432586342096\r\nStep 385, loss: 0.009856878779828548\r\nStep 386, loss: 0.008934546262025833\r\nStep 387, loss: 0.00811261311173439\r\nStep 388, loss: 0.008522428572177887\r\nStep 389, loss: 0.009407051838934422\r\nStep 390, loss: 0.009476755745708942\r\nStep 391, loss: 0.00886524561792612\r\nStep 392, loss: 0.008509679697453976\r\nStep 393, loss: 0.008736547082662582\r\nStep 394, loss: 0.0095830662176013\r\nStep 395, loss: 0.009046163409948349\r\nStep 396, loss: 0.009033717215061188\r\nStep 397, loss: 0.009225020185112953\r\nStep 398, loss: 0.00959339365363121\r\nStep 399, loss: 0.010412130504846573\r\nStep 400, loss: 0.00931647326797247\r\nStep 401, loss: 0.010083380155265331\r\nStep 402, loss: 0.009015293791890144\r\nStep 403, loss: 0.009169119410216808\r\nStep 404, loss: 0.009633009321987629\r\nStep 405, loss: 0.009266078472137451\r\nStep 406, loss: 0.009133762679994106\r\nStep 407, loss: 0.009069717489182949\r\nStep 408, loss: 0.00867233332246542\r\nStep 409, loss: 0.008150232955813408\r\nStep 410, loss: 0.0077474238350987434\r\nStep 273, loss: 0.010140996426343918\r\nStep 274, loss: 0.010601982474327087\r\nStep 275, loss: 0.01028397772461176\r\nStep 276, loss: 0.011026402935385704\r\nStep 277, loss: 0.010329381562769413\r\nStep 278, loss: 0.010154777206480503\r\nStep 279, loss: 0.01032185461372137\r\nStep 280, loss: 0.010506854392588139\r\nStep 281, loss: 0.011142993345856667\r\nStep 282, loss: 0.010036150924861431\r\nStep 283, loss: 0.011414938606321812\r\nStep 284, loss: 0.009582496248185635\r\nStep 285, loss: 0.009787384420633316\r\nStep 286, loss: 0.010223863646388054\r\nStep 287, loss: 0.009179569780826569\r\nStep 288, loss: 0.011081495322287083\r\nStep 289, loss: 0.010283957235515118\r\nStep 290, loss: 0.010582290589809418\r\nStep 291, loss: 0.00986514426767826\r\nStep 292, loss: 0.00965440645813942\r\nStep 293, loss: 0.009914973750710487\r\nStep 294, loss: 0.009669107384979725\r\nStep 295, loss: 0.00881169643253088\r\nStep 296, loss: 0.009560219943523407\r\nStep 297, loss: 0.009458527900278568\r\nStep 298, loss: 0.010391158051788807\r\nStep 299, loss: 0.009811971336603165\r\nStep 330, loss: 0.009789333678781986\r\nStep 331, loss: 0.01092305313795805\r\nStep 332, loss: 0.008985054679214954\r\nStep 333, loss: 0.00994466058909893\r\nStep 334, loss: 0.010118870064616203\r\nStep 335, loss: 0.008400583639740944\r\nStep 336, loss: 0.008790651336312294\r\nStep 337, loss: 0.01010106224566698\r\nStep 338, loss: 0.009276456199586391\r\nStep 339, loss: 0.008561133407056332\r\nStep 340, loss: 0.009276671335101128\r\nStep 341, loss: 0.008887761272490025\r\nStep 342, loss: 0.010138514451682568\r\nStep 343, loss: 0.009025386534631252\r\nStep 344, loss: 0.010667262598872185\r\nStep 345, loss: 0.008848258294165134\r\nStep 346, loss: 0.010287134908139706\r\nStep 347, loss: 0.008325062692165375\r\nStep 348, loss: 0.010148957371711731\r\nStep 349, loss: 0.009190731681883335\r\nStep 350, loss: 0.009581753984093666\r\nStep 351, loss: 
0.009136899374425411\r\nStep 352, loss: 0.009848029352724552\r\nStep 353, loss: 0.009596729651093483\r\nStep 354, loss: 0.008651762269437313\r\nStep 355, loss: 0.008681347593665123\r\nStep 356, loss: 0.008824453689157963\r\nStep 300, loss: 0.009161300025880337\r\nStep 301, loss: 0.009543187916278839\r\nStep 302, loss: 0.010171896778047085\r\nStep 303, loss: 0.009172284975647926\r\nStep 304, loss: 0.009565647691488266\r\nStep 305, loss: 0.010039139539003372\r\nStep 306, loss: 0.00956760160624981\r\nStep 307, loss: 0.009556096978485584\r\nStep 308, loss: 0.009693847969174385\r\nStep 309, loss: 0.010572932660579681\r\nStep 310, loss: 0.011336741037666798\r\nStep 311, loss: 0.009630970656871796\r\nStep 312, loss: 0.010636239312589169\r\nStep 313, loss: 0.009809005074203014\r\nStep 314, loss: 0.00931477639824152\r\nStep 315, loss: 0.010083867236971855\r\nStep 316, loss: 0.009611323475837708\r\nStep 317, loss: 0.01032000221312046\r\nStep 318, loss: 0.008976491168141365\r\nStep 319, loss: 0.0091495830565691\r\nStep 320, loss: 0.010515597648918629\r\nStep 321, loss: 0.010185513645410538\r\nStep 322, loss: 0.01073758490383625\r\nStep 323, loss: 0.0096001997590065\r\nStep 324, loss: 0.010766669176518917\r\nStep 325, loss: 0.00949220173060894\r\nStep 326, loss: 0.010624677874147892\r\nStep 411, loss: 0.00808892771601677\r\nStep 412, loss: 0.007288776803761721\r\nStep 413, loss: 0.008106247521936893\r\nStep 414, loss: 0.00810135155916214\r\nStep 415, loss: 0.008702809922397137\r\nStep 416, loss: 0.008316677995026112\r\nStep 417, loss: 0.009631174616515636\r\nStep 418, loss: 0.009098071604967117\r\nStep 419, loss: 0.010791489854454994\r\nStep 420, loss: 0.007883910089731216\r\nStep 421, loss: 0.008732305839657784\r\nStep 422, loss: 0.008829205296933651\r\nStep 423, loss: 0.008164764381945133\r\nStep 424, loss: 0.009509255178272724\r\nStep 425, loss: 0.009375796653330326\r\nStep 426, loss: 0.008981086313724518\r\nStep 427, loss: 0.010169018059968948\r\nStep 428, loss: 0.009473764337599277\r\nStep 429, loss: 0.009139928966760635\r\nStep 430, loss: 0.008253500796854496\r\nStep 431, loss: 0.009879836812615395\r\nStep 432, loss: 0.00861106626689434\r\nStep 433, loss: 0.007545475848019123\r\nStep 434, loss: 0.008925536647439003\r\nStep 435, loss: 0.00869233813136816\r\nStep 436, loss: 0.00839403085410595\r\nStep 437, loss: 0.009328337386250496\r\nStep 300, loss: 0.009161300025880337\r\nStep 301, loss: 0.009543187916278839\r\nStep 302, loss: 0.010171896778047085\r\nStep 303, loss: 0.009172284975647926\r\nStep 304, loss: 0.009565647691488266\r\nStep 305, loss: 0.010039139539003372\r\nStep 306, loss: 0.00956760160624981\r\nStep 307, loss: 0.009556096978485584\r\nStep 308, loss: 0.009693847969174385\r\nStep 309, loss: 0.010572932660579681\r\nStep 310, loss: 0.011336741037666798\r\nStep 311, loss: 0.009630970656871796\r\nStep 312, loss: 0.010636239312589169\r\nStep 313, loss: 0.009809005074203014\r\nStep 314, loss: 0.00931477639824152\r\nStep 315, loss: 0.010083867236971855\r\nStep 316, loss: 0.009611323475837708\r\nStep 317, loss: 0.01032000221312046\r\nStep 318, loss: 0.008976491168141365\r\nStep 319, loss: 0.0091495830565691\r\nStep 320, loss: 0.010515597648918629\r\nStep 321, loss: 0.010185513645410538\r\nStep 322, loss: 0.01073758490383625\r\nStep 323, loss: 0.0096001997590065\r\nStep 324, loss: 0.010766669176518917\r\nStep 325, loss: 0.00949220173060894\r\nStep 326, loss: 0.010624677874147892\r\nStep 357, loss: 0.00955806765705347\r\nStep 358, loss: 0.009801270440220833\r\nStep 359, loss: 
0.008466079831123352\r\nStep 360, loss: 0.00912779662758112\r\nStep 361, loss: 0.008608909323811531\r\nStep 362, loss: 0.008244039490818977\r\nStep 363, loss: 0.009335396811366081\r\nStep 364, loss: 0.008582552894949913\r\nStep 365, loss: 0.009297855198383331\r\nStep 366, loss: 0.008141983300447464\r\nStep 367, loss: 0.009393983520567417\r\nStep 368, loss: 0.00935653317719698\r\nStep 369, loss: 0.00956851989030838\r\nStep 370, loss: 0.009400052018463612\r\nStep 371, loss: 0.01006643008440733\r\nStep 372, loss: 0.009365479461848736\r\nStep 373, loss: 0.00824917946010828\r\nStep 374, loss: 0.00813942402601242\r\nStep 375, loss: 0.00910226907581091\r\nStep 376, loss: 0.010079216212034225\r\nStep 377, loss: 0.009087095968425274\r\nStep 378, loss: 0.008659454993903637\r\nStep 379, loss: 0.00930467527359724\r\nStep 380, loss: 0.008777443319559097\r\nStep 381, loss: 0.007916522212326527\r\nStep 382, loss: 0.008325379341840744\r\nStep 383, loss: 0.00848743598908186\r\nStep 327, loss: 0.010104024782776833\r\nStep 328, loss: 0.0085305105894804\r\nStep 329, loss: 0.009465201757848263\r\nStep 438, loss: 0.00917942076921463\r\nStep 439, loss: 0.009509981609880924\r\nStep 440, loss: 0.009915051981806755\r\nStep 327, loss: 0.010104024782776833\r\nStep 328, loss: 0.0085305105894804\r\nStep 329, loss: 0.009465201757848263\r\nStep 384, loss: 0.007398432586342096\r\nStep 385, loss: 0.009856878779828548\r\nStep 386, loss: 0.008934546262025833\r\nStep 387, loss: 0.00811261311173439\r\nStep 388, loss: 0.008522428572177887\r\nStep 389, loss: 0.009407051838934422\r\nStep 390, loss: 0.009476755745708942\r\nStep 391, loss: 0.00886524561792612\r\nStep 392, loss: 0.008509679697453976\r\nStep 393, loss: 0.008736547082662582\r\nStep 394, loss: 0.0095830662176013\r\nStep 395, loss: 0.009046163409948349\r\nStep 396, loss: 0.009033717215061188\r\nStep 397, loss: 0.009225020185112953\r\nStep 398, loss: 0.00959339365363121\r\nStep 399, loss: 0.010412130504846573\r\nStep 400, loss: 0.00931647326797247\r\nStep 401, loss: 0.010083380155265331\r\nStep 402, loss: 0.009015293791890144\r\nStep 403, loss: 0.009169119410216808\r\nStep 404, loss: 0.009633009321987629\r\nStep 405, loss: 0.009266078472137451\r\nStep 406, loss: 0.009133762679994106\r\nStep 407, loss: 0.009069717489182949\r\nStep 408, loss: 0.00867233332246542\r\nStep 409, loss: 0.008150232955813408\r\nStep 410, loss: 0.0077474238350987434\r\nStep 330, loss: 0.009789333678781986\r\nStep 331, loss: 0.01092305313795805\r\nStep 332, loss: 0.008985054679214954\r\nStep 333, loss: 0.00994466058909893\r\nStep 334, loss: 0.010118870064616203\r\nStep 335, loss: 0.008400583639740944\r\nStep 336, loss: 0.008790651336312294\r\nStep 337, loss: 0.01010106224566698\r\nStep 338, loss: 0.009276456199586391\r\nStep 339, loss: 0.008561133407056332\r\nStep 340, loss: 0.009276671335101128\r\nStep 341, loss: 0.008887761272490025\r\nStep 342, loss: 0.010138514451682568\r\nStep 343, loss: 0.009025386534631252\r\nStep 344, loss: 0.010667262598872185\r\nStep 345, loss: 0.008848258294165134\r\nStep 346, loss: 0.010287134908139706\r\nStep 347, loss: 0.008325062692165375\r\nStep 348, loss: 0.010148957371711731\r\nStep 349, loss: 0.009190731681883335\r\nStep 350, loss: 0.009581753984093666\r\nStep 351, loss: 0.009136899374425411\r\nStep 352, loss: 0.009848029352724552\r\nStep 353, loss: 0.009596729651093483\r\nStep 354, loss: 0.008651762269437313\r\nStep 355, loss: 0.008681347593665123\r\nStep 356, loss: 0.008824453689157963\r\nStep 219, loss: 0.013099383562803268\r\nStep 220, loss: 
0.013694820925593376\r\nStep 221, loss: 0.012139582075178623\r\nStep 222, loss: 0.011012162081897259\r\nStep 223, loss: 0.011057185009121895\r\nStep 224, loss: 0.01283134426921606\r\nStep 225, loss: 0.010869650170207024\r\nStep 226, loss: 0.010859719477593899\r\nStep 227, loss: 0.011350473389029503\r\nStep 228, loss: 0.010802650824189186\r\nStep 229, loss: 0.009961412288248539\r\nStep 230, loss: 0.01025364175438881\r\nStep 231, loss: 0.011073515750467777\r\nStep 232, loss: 0.010382494889199734\r\nStep 233, loss: 0.010060961358249187\r\nStep 234, loss: 0.01103963889181614\r\nStep 235, loss: 0.010757341049611568\r\nStep 236, loss: 0.010398885235190392\r\nStep 237, loss: 0.011812644079327583\r\nStep 238, loss: 0.0094912676140666\r\nStep 239, loss: 0.01158614456653595\r\nStep 240, loss: 0.010897740721702576\r\nStep 241, loss: 0.009723613038659096\r\nStep 242, loss: 0.012121678330004215\r\nStep 243, loss: 0.010595949366688728\r\nStep 244, loss: 0.010378634557127953\r\nStep 245, loss: 0.011665726080536842\r\nStep 330, loss: 0.009789333678781986\r\nStep 331, loss: 0.01092305313795805\r\nStep 332, loss: 0.008985054679214954\r\nStep 333, loss: 0.00994466058909893\r\nStep 334, loss: 0.010118870064616203\r\nStep 335, loss: 0.008400583639740944\r\nStep 336, loss: 0.008790651336312294\r\nStep 337, loss: 0.01010106224566698\r\nStep 338, loss: 0.009276456199586391\r\nStep 339, loss: 0.008561133407056332\r\nStep 340, loss: 0.009276671335101128\r\nStep 341, loss: 0.008887761272490025\r\nStep 342, loss: 0.010138514451682568\r\nStep 343, loss: 0.009025386534631252\r\nStep 344, loss: 0.010667262598872185\r\nStep 345, loss: 0.008848258294165134\r\nStep 346, loss: 0.010287134908139706\r\nStep 347, loss: 0.008325062692165375\r\nStep 348, loss: 0.010148957371711731\r\nStep 349, loss: 0.009190731681883335\r\nStep 350, loss: 0.009581753984093666\r\nStep 351, loss: 0.009136899374425411\r\nStep 352, loss: 0.009848029352724552\r\nStep 353, loss: 0.009596729651093483\r\nStep 354, loss: 0.008651762269437313\r\nStep 355, loss: 0.008681347593665123\r\nStep 356, loss: 0.008824453689157963\r\nStep 411, loss: 0.00808892771601677\r\nStep 412, loss: 0.007288776803761721\r\nStep 413, loss: 0.008106247521936893\r\nStep 414, loss: 0.00810135155916214\r\nStep 415, loss: 0.008702809922397137\r\nStep 416, loss: 0.008316677995026112\r\nStep 417, loss: 0.009631174616515636\r\nStep 418, loss: 0.009098071604967117\r\nStep 419, loss: 0.010791489854454994\r\nStep 420, loss: 0.007883910089731216\r\nStep 421, loss: 0.008732305839657784\r\nStep 422, loss: 0.008829205296933651\r\nStep 423, loss: 0.008164764381945133\r\nStep 424, loss: 0.009509255178272724\r\nStep 425, loss: 0.009375796653330326\r\nStep 426, loss: 0.008981086313724518\r\nStep 427, loss: 0.010169018059968948\r\nStep 428, loss: 0.009473764337599277\r\nStep 429, loss: 0.009139928966760635\r\nStep 430, loss: 0.008253500796854496\r\nStep 431, loss: 0.009879836812615395\r\nStep 432, loss: 0.00861106626689434\r\nStep 433, loss: 0.007545475848019123\r\nStep 434, loss: 0.008925536647439003\r\nStep 435, loss: 0.00869233813136816\r\nStep 436, loss: 0.00839403085410595\r\nStep 437, loss: 0.009328337386250496\r\nStep 357, loss: 0.00955806765705347\r\nStep 358, loss: 0.009801270440220833\r\nStep 359, loss: 0.008466079831123352\r\nStep 360, loss: 0.00912779662758112\r\nStep 361, loss: 0.008608909323811531\r\nStep 362, loss: 0.008244039490818977\r\nStep 363, loss: 0.009335396811366081\r\nStep 364, loss: 0.008582552894949913\r\nStep 365, loss: 0.009297855198383331\r\nStep 366, loss: 
0.008141983300447464\r\n[... truncated: remainder of this training-log burst re-prints Steps 219-440 verbatim, out of order, as several parallel workers write to the same terminal; loss trends from ~0.0137 at Step 220 down to ~0.0073-0.0099 by Step 440 ...]",,terminal_output
+294,1669228,"TERMINAL",0,0,"\r\n[... truncated: Steps 273-440 re-printed again in shuffled, fully duplicate blocks; no step values beyond those already shown above ...]",,terminal_output
+295,1787974,"TERMINAL",0,0,"bash",,terminal_focus
+296,1789037,"TERMINAL",0,0,"queue",,terminal_command
+297,1789096,"TERMINAL",0,0,"Every 1.0s: squeue --me    hkn1993.localdomain: Fri Jul 4 11:04:24 2025\r\nJOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON)\r\n3316020 accelerat train_to tum_cte0 R 6:54:47 12 hkn[0407,0724-0726,0734-0735,0804-0805,0807,0809-0810,0815]\r\n3316026 accelerat train_to tum_cte0 R 6:54:47 4 hkn[0510-0511,0519,0707]\r\n3316022 accelerat train_to tum_cte0 R 6:58:42 2 hkn[0507,0520]\r\n3316019 accelerat train_to tum_cte0 R 8:40:49 8 hkn[0429,0436,0603-0604,0608,0612,0614,0820]\r\n3316016 accelerat train_to tum_cte0 R 10:16:57 1 hkn0634\r\n3316924 accelerat interact tum_cte0 R 28:42 1 hkn0733\r\n3316923 accelerat interact tum_cte0 R 29:09 4 hkn[0625-0628]",,terminal_output
[records 298-324 dropped: watch(1) screen-redraw fragments containing only stray digit runs and cursor-position residue]
+325,1818154,"TERMINAL",0,0,"Step 441, loss: 0.009172636084258556\r\nStep 442, loss: 0.00982529204338789\r\nStep 443, loss: 0.009700413793325424\r\nStep 444, loss: 0.008612173609435558\r\nStep 445, loss: 0.010386315174400806\r\nStep 446, loss: 
0.008841309696435928\r\nStep 447, loss: 0.008696725592017174\r\n[... truncated: Steps 448-662 logged with the same out-of-order interleaving, with Steps 441-560 re-printed several times by redraws; loss fluctuates between ~0.0069 and ~0.0109, drifting downward ...]\r\nStep 556, loss: 0.009298763237893581\r\nStep 557, loss: 0.00818414706736803\r\nStep 558, loss: 
0.007203501649200916\r\nStep 559, loss: 0.009764304384589195\r\nStep 560, loss: 0.010583905503153801\r\nStep 561, loss: 0.008711939677596092\r\nStep 562, loss: 0.008203057572245598\r\nStep 563, loss: 0.008497781120240688\r\nStep 564, loss: 0.007532292511314154\r\nStep 565, loss: 0.00986651424318552\r\nStep 566, loss: 0.009398253634572029\r\nStep 567, loss: 0.009236501529812813\r\nStep 568, loss: 0.008806644938886166\r\nStep 569, loss: 0.008545918390154839\r\nStep 570, loss: 0.009641233831644058\r\nStep 571, loss: 0.008593868464231491\r\nStep 572, loss: 0.009537957608699799\r\nStep 573, loss: 0.0091137969866395\r\nStep 574, loss: 0.008439164608716965\r\nStep 575, loss: 0.008239593356847763\r\nStep 576, loss: 0.008624826557934284\r\nStep 577, loss: 0.008145265281200409\r\nStep 578, loss: 0.008806181140244007\r\nStep 552, loss: 0.009549298323690891\r\nStep 553, loss: 0.007757730316370726\r\nStep 554, loss: 0.009732040576636791\r\nStep 555, loss: 0.008432733826339245\r\nStep 556, loss: 0.009298763237893581\r\nStep 557, loss: 0.00818414706736803\r\nStep 558, loss: 0.007203501649200916\r\nStep 559, loss: 0.009764304384589195\r\nStep 560, loss: 0.010583905503153801\r\nStep 561, loss: 0.008711939677596092\r\nStep 562, loss: 0.008203057572245598\r\nStep 563, loss: 0.008497781120240688\r\nStep 564, loss: 0.007532292511314154\r\nStep 565, loss: 0.00986651424318552\r\nStep 566, loss: 0.009398253634572029\r\nStep 567, loss: 0.009236501529812813\r\nStep 568, loss: 0.008806644938886166\r\nStep 569, loss: 0.008545918390154839\r\nStep 570, loss: 0.009641233831644058\r\nStep 571, loss: 0.008593868464231491\r\nStep 572, loss: 0.009537957608699799\r\nStep 573, loss: 0.0091137969866395\r\nStep 574, loss: 0.008439164608716965\r\nStep 575, loss: 0.008239593356847763\r\nStep 576, loss: 0.008624826557934284\r\nStep 577, loss: 0.008145265281200409\r\nStep 578, loss: 0.008806181140244007\r\nStep 441, loss: 0.009172636084258556\r\nStep 442, loss: 0.00982529204338789\r\nStep 443, loss: 0.009700413793325424\r\nStep 444, loss: 0.008612173609435558\r\nStep 445, loss: 0.010386315174400806\r\nStep 446, loss: 0.008841309696435928\r\nStep 447, loss: 0.008696725592017174\r\nStep 448, loss: 0.0100677739828825\r\nStep 449, loss: 0.00916972104460001\r\nStep 450, loss: 0.009417261928319931\r\nStep 451, loss: 0.008428973145782948\r\nStep 452, loss: 0.009684531018137932\r\nStep 453, loss: 0.008075914345681667\r\nStep 454, loss: 0.00811962690204382\r\nStep 455, loss: 0.009778064675629139\r\nStep 456, loss: 0.00913725420832634\r\nStep 457, loss: 0.009202827699482441\r\nStep 458, loss: 0.009212199598550797\r\nStep 459, loss: 0.00841327104717493\r\nStep 460, loss: 0.008480049669742584\r\nStep 461, loss: 0.008315286599099636\r\nStep 462, loss: 0.007818982936441898\r\nStep 463, loss: 0.008244290947914124\r\nStep 464, loss: 0.008826389908790588\r\nStep 465, loss: 0.008300169371068478\r\nStep 466, loss: 0.009412254206836224\r\nStep 467, loss: 0.009721849113702774\r\nStep 579, loss: 0.008017127402126789\r\nStep 580, loss: 0.007308092433959246\r\nStep 581, loss: 0.007994630374014378\r\nStep 582, loss: 0.008302122354507446\r\nStep 583, loss: 0.009227989241480827\r\nStep 584, loss: 0.007321110460907221\r\nStep 585, loss: 0.009219660423696041\r\nStep 586, loss: 0.008875611238181591\r\nStep 587, loss: 0.008734606206417084\r\nStep 588, loss: 0.008769622072577477\r\nStep 589, loss: 0.008562730625271797\r\nStep 590, loss: 0.008588621392846107\r\nStep 591, loss: 0.0082046864554286\r\nStep 592, loss: 0.008270608261227608\r\nStep 593, loss: 
0.008636316284537315\r\nStep 594, loss: 0.008951708674430847\r\nStep 595, loss: 0.010645163245499134\r\nStep 596, loss: 0.0076056960970163345\r\nStep 597, loss: 0.009029458276927471\r\nStep 598, loss: 0.008114810101687908\r\nStep 599, loss: 0.009122074581682682\r\nStep 600, loss: 0.008821049705147743\r\nStep 601, loss: 0.008914874866604805\r\nStep 602, loss: 0.008345932699739933\r\nStep 603, loss: 0.008173663169145584\r\nStep 604, loss: 0.009208902716636658\r\nStep 605, loss: 0.007708316668868065\r\nStep 579, loss: 0.008017127402126789\r\nStep 580, loss: 0.007308092433959246\r\nStep 581, loss: 0.007994630374014378\r\nStep 582, loss: 0.008302122354507446\r\nStep 583, loss: 0.009227989241480827\r\nStep 584, loss: 0.007321110460907221\r\nStep 585, loss: 0.009219660423696041\r\nStep 586, loss: 0.008875611238181591\r\nStep 587, loss: 0.008734606206417084\r\nStep 588, loss: 0.008769622072577477\r\nStep 589, loss: 0.008562730625271797\r\nStep 590, loss: 0.008588621392846107\r\nStep 591, loss: 0.0082046864554286\r\nStep 592, loss: 0.008270608261227608\r\nStep 593, loss: 0.008636316284537315\r\nStep 594, loss: 0.008951708674430847\r\nStep 595, loss: 0.010645163245499134\r\nStep 596, loss: 0.0076056960970163345\r\nStep 597, loss: 0.009029458276927471\r\nStep 598, loss: 0.008114810101687908\r\nStep 599, loss: 0.009122074581682682\r\nStep 600, loss: 0.008821049705147743\r\nStep 601, loss: 0.008914874866604805\r\nStep 602, loss: 0.008345932699739933\r\nStep 603, loss: 0.008173663169145584\r\nStep 604, loss: 0.009208902716636658\r\nStep 605, loss: 0.007708316668868065\r\nStep 468, loss: 0.00943279080092907\r\nStep 469, loss: 0.00857819989323616\r\nStep 470, loss: 0.008158877491950989\r\nStep 471, loss: 0.009350799024105072\r\nStep 472, loss: 0.010540132410824299\r\nStep 473, loss: 0.009407121688127518\r\nStep 474, loss: 0.009018382988870144\r\nStep 475, loss: 0.006886541377753019\r\nStep 476, loss: 0.00878828950226307\r\nStep 477, loss: 0.007888736203312874\r\nStep 478, loss: 0.01001682784408331\r\nStep 479, loss: 0.010163736529648304\r\nStep 480, loss: 0.008517569862306118\r\nStep 481, loss: 0.01042562909424305\r\nStep 482, loss: 0.007701660506427288\r\nStep 483, loss: 0.007607926148921251\r\nStep 484, loss: 0.008656023070216179\r\nStep 485, loss: 0.009207268245518208\r\nStep 486, loss: 0.009093547239899635\r\nStep 487, loss: 0.009981537237763405\r\nStep 488, loss: 0.009314225055277348\r\nStep 489, loss: 0.008576551452279091\r\nStep 490, loss: 0.007834444753825665\r\nStep 491, loss: 0.008205845952033997\r\nStep 492, loss: 0.00798095390200615\r\nStep 493, loss: 0.008706610649824142\r\nStep 494, loss: 0.008503030054271221\r\nStep 606, loss: 0.007426695432513952\r\nStep 607, loss: 0.008950533345341682\r\nStep 608, loss: 0.007777011021971703\r\nStep 609, loss: 0.009693533182144165\r\nStep 610, loss: 0.007790651172399521\r\nStep 611, loss: 0.00917765311896801\r\nStep 612, loss: 0.007631454151123762\r\nStep 613, loss: 0.010259517468512058\r\nStep 614, loss: 0.009628580883145332\r\nStep 615, loss: 0.007362436968833208\r\nStep 616, loss: 0.008848816156387329\r\nStep 617, loss: 0.009226517751812935\r\nStep 618, loss: 0.007675519213080406\r\nStep 619, loss: 0.00823437049984932\r\nStep 620, loss: 0.0077772000804543495\r\nStep 621, loss: 0.007999159395694733\r\nStep 622, loss: 0.008146382868289948\r\nStep 623, loss: 0.007927773520350456\r\nStep 624, loss: 0.008136093616485596\r\nStep 625, loss: 0.008799970149993896\r\nStep 626, loss: 0.009730185382068157\r\nStep 627, loss: 0.00980282574892044\r\nStep 628, loss: 
0.009932177141308784\r\nStep 629, loss: 0.00985488761216402\r\nStep 630, loss: 0.008750185370445251\r\nStep 631, loss: 0.009241788648068905\r\nStep 632, loss: 0.00865153968334198\r\nStep 606, loss: 0.007426695432513952\r\nStep 607, loss: 0.008950533345341682\r\nStep 608, loss: 0.007777011021971703\r\nStep 609, loss: 0.009693533182144165\r\nStep 610, loss: 0.007790651172399521\r\nStep 611, loss: 0.00917765311896801\r\nStep 612, loss: 0.007631454151123762\r\nStep 613, loss: 0.010259517468512058\r\nStep 614, loss: 0.009628580883145332\r\nStep 615, loss: 0.007362436968833208\r\nStep 616, loss: 0.008848816156387329\r\nStep 617, loss: 0.009226517751812935\r\nStep 618, loss: 0.007675519213080406\r\nStep 619, loss: 0.00823437049984932\r\nStep 620, loss: 0.0077772000804543495\r\nStep 621, loss: 0.007999159395694733\r\nStep 622, loss: 0.008146382868289948\r\nStep 623, loss: 0.007927773520350456\r\nStep 624, loss: 0.008136093616485596\r\nStep 625, loss: 0.008799970149993896\r\nStep 626, loss: 0.009730185382068157\r\nStep 627, loss: 0.00980282574892044\r\nStep 628, loss: 0.009932177141308784\r\nStep 629, loss: 0.00985488761216402\r\nStep 630, loss: 0.008750185370445251\r\nStep 631, loss: 0.009241788648068905\r\nStep 632, loss: 0.00865153968334198\r\nStep 495, loss: 0.008082824759185314\r\nStep 496, loss: 0.0100203612819314\r\nStep 497, loss: 0.009352821856737137\r\nStep 498, loss: 0.008165973238646984\r\nStep 499, loss: 0.008751009590923786\r\nStep 500, loss: 0.008776586502790451\r\nStep 501, loss: 0.0088904844596982\r\nStep 502, loss: 0.009440240450203419\r\nStep 503, loss: 0.0086445277556777\r\nStep 504, loss: 0.008648236282169819\r\nStep 505, loss: 0.008715372532606125\r\nStep 506, loss: 0.008195176720619202\r\nStep 507, loss: 0.008302773348987103\r\nStep 508, loss: 0.008936022408306599\r\nStep 509, loss: 0.007341370452195406\r\nStep 510, loss: 0.008851333521306515\r\nStep 511, loss: 0.008958064951002598\r\nStep 512, loss: 0.009790496900677681\r\nStep 513, loss: 0.009989015758037567\r\nStep 514, loss: 0.010105382651090622\r\nStep 515, loss: 0.008583087474107742\r\nStep 516, loss: 0.007951793260872364\r\nStep 517, loss: 0.007381951902061701\r\nStep 518, loss: 0.008452118374407291\r\nStep 519, loss: 0.008968343958258629\r\nStep 520, loss: 0.008171094581484795\r\nStep 521, loss: 0.008153341710567474\r\nStep 633, loss: 0.007657185662537813\r\nStep 634, loss: 0.008444579318165779\r\nStep 635, loss: 0.007861824706196785\r\nStep 636, loss: 0.008599400520324707\r\nStep 637, loss: 0.00875160563737154\r\nStep 638, loss: 0.007627840619534254\r\nStep 639, loss: 0.00831996463239193\r\nStep 640, loss: 0.007287387736141682\r\nStep 641, loss: 0.007996085099875927\r\nStep 642, loss: 0.0073984782211482525\r\nStep 643, loss: 0.0090358667075634\r\nStep 644, loss: 0.008371159434318542\r\nStep 645, loss: 0.010133248753845692\r\nStep 646, loss: 0.007735549937933683\r\nStep 647, loss: 0.009218603372573853\r\nStep 648, loss: 0.008114420808851719\r\nStep 649, loss: 0.008637652732431889\r\nStep 650, loss: 0.008584381081163883\r\nStep 651, loss: 0.009334669448435307\r\nStep 652, loss: 0.008351793512701988\r\nStep 653, loss: 0.00839329231530428\r\nStep 654, loss: 0.008216412737965584\r\nStep 655, loss: 0.008404298685491085\r\nStep 656, loss: 0.0077551137655973434\r\nStep 657, loss: 0.008800540119409561\r\nStep 658, loss: 0.008638748899102211\r\nStep 659, loss: 0.008356684818863869\r\nStep 633, loss: 0.007657185662537813\r\nStep 634, loss: 0.008444579318165779\r\nStep 635, loss: 0.007861824706196785\r\nStep 636, loss: 
0.008599400520324707\r\nStep 637, loss: 0.00875160563737154\r\nStep 638, loss: 0.007627840619534254\r\nStep 639, loss: 0.00831996463239193\r\nStep 640, loss: 0.007287387736141682\r\nStep 641, loss: 0.007996085099875927\r\nStep 642, loss: 0.0073984782211482525\r\nStep 643, loss: 0.0090358667075634\r\nStep 644, loss: 0.008371159434318542\r\nStep 645, loss: 0.010133248753845692\r\nStep 646, loss: 0.007735549937933683\r\nStep 647, loss: 0.009218603372573853\r\nStep 648, loss: 0.008114420808851719\r\nStep 649, loss: 0.008637652732431889\r\nStep 650, loss: 0.008584381081163883\r\nStep 651, loss: 0.009334669448435307\r\nStep 652, loss: 0.008351793512701988\r\nStep 653, loss: 0.00839329231530428\r\nStep 654, loss: 0.008216412737965584\r\nStep 655, loss: 0.008404298685491085\r\nStep 656, loss: 0.0077551137655973434\r\nStep 657, loss: 0.008800540119409561\r\nStep 658, loss: 0.008638748899102211\r\nStep 659, loss: 0.008356684818863869\r\nStep 522, loss: 0.008069823496043682\r\nStep 523, loss: 0.009000985883176327\r\nStep 524, loss: 0.009068850427865982\r\nStep 525, loss: 0.008548617362976074\r\nStep 526, loss: 0.007930642925202847\r\nStep 527, loss: 0.009337826631963253\r\nStep 528, loss: 0.008745614439249039\r\nStep 529, loss: 0.00784107856452465\r\nStep 530, loss: 0.0104753989726305\r\nStep 531, loss: 0.007889948785305023\r\nStep 532, loss: 0.008794852532446384\r\nStep 533, loss: 0.007991227321326733\r\nStep 534, loss: 0.009364668279886246\r\nStep 535, loss: 0.008899899199604988\r\nStep 536, loss: 0.008616209030151367\r\nStep 537, loss: 0.00890787597745657\r\nStep 538, loss: 0.009146109223365784\r\nStep 539, loss: 0.010876119136810303\r\nStep 540, loss: 0.007866852916777134\r\nStep 541, loss: 0.01012083888053894\r\nStep 542, loss: 0.009157226420938969\r\nStep 543, loss: 0.009107368066906929\r\nStep 544, loss: 0.008830016478896141\r\nStep 545, loss: 0.009399570524692535\r\nStep 546, loss: 0.008762801066040993\r\nStep 547, loss: 0.008676286786794662\r\nStep 548, loss: 0.00974164716899395\r\nStep 660, loss: 0.008637391030788422\r\nStep 661, loss: 0.007406390272080898\r\nStep 662, loss: 0.008827605284750462\r\nStep 660, loss: 0.008637391030788422\r\nStep 661, loss: 0.007406390272080898\r\nStep 662, loss: 0.008827605284750462\r\nStep 549, loss: 0.0074196322821080685\r\nStep 550, loss: 0.009397583082318306\r\nStep 551, loss: 0.00852873269468546\r\nStep 441, loss: 0.009172636084258556\r\nStep 442, loss: 0.00982529204338789\r\nStep 443, loss: 0.009700413793325424\r\nStep 444, loss: 0.008612173609435558\r\nStep 445, loss: 0.010386315174400806\r\nStep 446, loss: 0.008841309696435928\r\nStep 447, loss: 0.008696725592017174\r\nStep 448, loss: 0.0100677739828825\r\nStep 449, loss: 0.00916972104460001\r\nStep 450, loss: 0.009417261928319931\r\nStep 451, loss: 0.008428973145782948\r\nStep 452, loss: 0.009684531018137932\r\nStep 453, loss: 0.008075914345681667\r\nStep 454, loss: 0.00811962690204382\r\nStep 455, loss: 0.009778064675629139\r\nStep 456, loss: 0.00913725420832634\r\nStep 457, loss: 0.009202827699482441\r\nStep 458, loss: 0.009212199598550797\r\nStep 459, loss: 0.00841327104717493\r\nStep 460, loss: 0.008480049669742584\r\nStep 461, loss: 0.008315286599099636\r\nStep 462, loss: 0.007818982936441898\r\nStep 463, loss: 0.008244290947914124\r\nStep 464, loss: 0.008826389908790588\r\nStep 465, loss: 0.008300169371068478\r\nStep 466, loss: 0.009412254206836224\r\nStep 467, loss: 0.009721849113702774\r\nStep 441, loss: 0.009172636084258556\r\nStep 442, loss: 0.00982529204338789\r\nStep 443, loss: 
0.009700413793325424\r\nStep 444, loss: 0.008612173609435558\r\nStep 445, loss: 0.010386315174400806\r\nStep 446, loss: 0.008841309696435928\r\nStep 447, loss: 0.008696725592017174\r\nStep 448, loss: 0.0100677739828825\r\nStep 449, loss: 0.00916972104460001\r\nStep 450, loss: 0.009417261928319931\r\nStep 451, loss: 0.008428973145782948\r\nStep 452, loss: 0.009684531018137932\r\nStep 453, loss: 0.008075914345681667\r\nStep 454, loss: 0.00811962690204382\r\nStep 455, loss: 0.009778064675629139\r\nStep 456, loss: 0.00913725420832634\r\nStep 457, loss: 0.009202827699482441\r\nStep 458, loss: 0.009212199598550797\r\nStep 459, loss: 0.00841327104717493\r\nStep 460, loss: 0.008480049669742584\r\nStep 461, loss: 0.008315286599099636\r\nStep 462, loss: 0.007818982936441898\r\nStep 463, loss: 0.008244290947914124\r\nStep 464, loss: 0.008826389908790588\r\nStep 465, loss: 0.008300169371068478\r\nStep 466, loss: 0.009412254206836224\r\nStep 467, loss: 0.009721849113702774\r\nStep 441, loss: 0.009172636084258556\r\nStep 442, loss: 0.00982529204338789\r\nStep 443, loss: 0.009700413793325424\r\nStep 444, loss: 0.008612173609435558\r\nStep 445, loss: 0.010386315174400806\r\nStep 446, loss: 0.008841309696435928\r\nStep 447, loss: 0.008696725592017174\r\nStep 448, loss: 0.0100677739828825\r\nStep 449, loss: 0.00916972104460001\r\nStep 450, loss: 0.009417261928319931\r\nStep 451, loss: 0.008428973145782948\r\nStep 452, loss: 0.009684531018137932\r\nStep 453, loss: 0.008075914345681667\r\nStep 454, loss: 0.00811962690204382\r\nStep 455, loss: 0.009778064675629139\r\nStep 456, loss: 0.00913725420832634\r\nStep 457, loss: 0.009202827699482441\r\nStep 458, loss: 0.009212199598550797\r\nStep 459, loss: 0.00841327104717493\r\nStep 460, loss: 0.008480049669742584\r\nStep 461, loss: 0.008315286599099636\r\nStep 462, loss: 0.007818982936441898\r\nStep 463, loss: 0.008244290947914124\r\nStep 464, loss: 0.008826389908790588\r\nStep 465, loss: 0.008300169371068478\r\nStep 466, loss: 0.009412254206836224\r\nStep 467, loss: 0.009721849113702774\r\nStep 468, loss: 0.00943279080092907\r\nStep 469, loss: 0.00857819989323616\r\nStep 470, loss: 0.008158877491950989\r\nStep 471, loss: 0.009350799024105072\r\nStep 472, loss: 0.010540132410824299\r\nStep 473, loss: 0.009407121688127518\r\nStep 474, loss: 0.009018382988870144\r\nStep 475, loss: 0.006886541377753019\r\nStep 476, loss: 0.00878828950226307\r\nStep 477, loss: 0.007888736203312874\r\nStep 478, loss: 0.01001682784408331\r\nStep 479, loss: 0.010163736529648304\r\nStep 480, loss: 0.008517569862306118\r\nStep 481, loss: 0.01042562909424305\r\nStep 482, loss: 0.007701660506427288\r\nStep 483, loss: 0.007607926148921251\r\nStep 484, loss: 0.008656023070216179\r\nStep 485, loss: 0.009207268245518208\r\nStep 486, loss: 0.009093547239899635\r\nStep 487, loss: 0.009981537237763405\r\nStep 488, loss: 0.009314225055277348\r\nStep 489, loss: 0.008576551452279091\r\nStep 490, loss: 0.007834444753825665\r\nStep 491, loss: 0.008205845952033997\r\nStep 492, loss: 0.00798095390200615\r\nStep 493, loss: 0.008706610649824142\r\nStep 494, loss: 0.008503030054271221\r\nStep 468, loss: 0.00943279080092907\r\nStep 469, loss: 0.00857819989323616\r\nStep 470, loss: 0.008158877491950989\r\nStep 471, loss: 0.009350799024105072\r\nStep 472, loss: 0.010540132410824299\r\nStep 473, loss: 0.009407121688127518\r\nStep 474, loss: 0.009018382988870144\r\nStep 475, loss: 0.006886541377753019\r\nStep 476, loss: 0.00878828950226307\r\nStep 477, loss: 0.007888736203312874\r\nStep 478, loss: 
0.01001682784408331\r\nStep 479, loss: 0.010163736529648304\r\nStep 480, loss: 0.008517569862306118\r\nStep 481, loss: 0.01042562909424305\r\nStep 482, loss: 0.007701660506427288\r\nStep 483, loss: 0.007607926148921251\r\nStep 484, loss: 0.008656023070216179\r\nStep 485, loss: 0.009207268245518208\r\nStep 486, loss: 0.009093547239899635\r\nStep 487, loss: 0.009981537237763405\r\nStep 488, loss: 0.009314225055277348\r\nStep 489, loss: 0.008576551452279091\r\nStep 490, loss: 0.007834444753825665\r\nStep 491, loss: 0.008205845952033997\r\nStep 492, loss: 0.00798095390200615\r\nStep 493, loss: 0.008706610649824142\r\nStep 494, loss: 0.008503030054271221\r\nStep 468, loss: 0.00943279080092907\r\nStep 469, loss: 0.00857819989323616\r\nStep 470, loss: 0.008158877491950989\r\nStep 471, loss: 0.009350799024105072\r\nStep 472, loss: 0.010540132410824299\r\nStep 473, loss: 0.009407121688127518\r\nStep 474, loss: 0.009018382988870144\r\nStep 475, loss: 0.006886541377753019\r\nStep 476, loss: 0.00878828950226307\r\nStep 477, loss: 0.007888736203312874\r\nStep 478, loss: 0.01001682784408331\r\nStep 479, loss: 0.010163736529648304\r\nStep 480, loss: 0.008517569862306118\r\nStep 481, loss: 0.01042562909424305\r\nStep 482, loss: 0.007701660506427288\r\nStep 483, loss: 0.007607926148921251\r\nStep 484, loss: 0.008656023070216179\r\nStep 485, loss: 0.009207268245518208\r\nStep 486, loss: 0.009093547239899635\r\nStep 487, loss: 0.009981537237763405\r\nStep 488, loss: 0.009314225055277348\r\nStep 489, loss: 0.008576551452279091\r\nStep 490, loss: 0.007834444753825665\r\nStep 491, loss: 0.008205845952033997\r\nStep 492, loss: 0.00798095390200615\r\nStep 493, loss: 0.008706610649824142\r\nStep 494, loss: 0.008503030054271221\r\nStep 441, loss: 0.009172636084258556\r\nStep 442, loss: 0.00982529204338789\r\nStep 443, loss: 0.009700413793325424\r\nStep 444, loss: 0.008612173609435558\r\nStep 445, loss: 0.010386315174400806\r\nStep 446, loss: 0.008841309696435928\r\nStep 447, loss: 0.008696725592017174\r\nStep 448, loss: 0.0100677739828825\r\nStep 449, loss: 0.00916972104460001\r\nStep 450, loss: 0.009417261928319931\r\nStep 451, loss: 0.008428973145782948\r\nStep 452, loss: 0.009684531018137932\r\nStep 453, loss: 0.008075914345681667\r\nStep 454, loss: 0.00811962690204382\r\nStep 455, loss: 0.009778064675629139\r\nStep 456, loss: 0.00913725420832634\r\nStep 457, loss: 0.009202827699482441\r\nStep 458, loss: 0.009212199598550797\r\nStep 459, loss: 0.00841327104717493\r\nStep 460, loss: 0.008480049669742584\r\nStep 461, loss: 0.008315286599099636\r\nStep 462, loss: 0.007818982936441898\r\nStep 463, loss: 0.008244290947914124\r\nStep 464, loss: 0.008826389908790588\r\nStep 465, loss: 0.008300169371068478\r\nStep 466, loss: 0.009412254206836224\r\nStep 467, loss: 0.009721849113702774\r\nStep 495, loss: 0.008082824759185314\r\nStep 496, loss: 0.0100203612819314\r\nStep 497, loss: 0.009352821856737137\r\nStep 498, loss: 0.008165973238646984\r\nStep 499, loss: 0.008751009590923786\r\nStep 500, loss: 0.008776586502790451\r\nStep 501, loss: 0.0088904844596982\r\nStep 502, loss: 0.009440240450203419\r\nStep 503, loss: 0.0086445277556777\r\nStep 504, loss: 0.008648236282169819\r\nStep 505, loss: 0.008715372532606125\r\nStep 506, loss: 0.008195176720619202\r\nStep 507, loss: 0.008302773348987103\r\nStep 508, loss: 0.008936022408306599\r\nStep 509, loss: 0.007341370452195406\r\nStep 510, loss: 0.008851333521306515\r\nStep 511, loss: 0.008958064951002598\r\nStep 512, loss: 0.009790496900677681\r\nStep 513, loss: 
0.009989015758037567\r\nStep 514, loss: 0.010105382651090622\r\nStep 515, loss: 0.008583087474107742\r\nStep 516, loss: 0.007951793260872364\r\nStep 517, loss: 0.007381951902061701\r\nStep 518, loss: 0.008452118374407291\r\nStep 519, loss: 0.008968343958258629\r\nStep 520, loss: 0.008171094581484795\r\nStep 521, loss: 0.008153341710567474\r\nStep 495, loss: 0.008082824759185314\r\nStep 496, loss: 0.0100203612819314\r\nStep 497, loss: 0.009352821856737137\r\nStep 498, loss: 0.008165973238646984\r\nStep 499, loss: 0.008751009590923786\r\nStep 500, loss: 0.008776586502790451\r\nStep 501, loss: 0.0088904844596982\r\nStep 502, loss: 0.009440240450203419\r\nStep 503, loss: 0.0086445277556777\r\nStep 504, loss: 0.008648236282169819\r\nStep 505, loss: 0.008715372532606125\r\nStep 506, loss: 0.008195176720619202\r\nStep 507, loss: 0.008302773348987103\r\nStep 508, loss: 0.008936022408306599\r\nStep 509, loss: 0.007341370452195406\r\nStep 510, loss: 0.008851333521306515\r\nStep 511, loss: 0.008958064951002598\r\nStep 512, loss: 0.009790496900677681\r\nStep 513, loss: 0.009989015758037567\r\nStep 514, loss: 0.010105382651090622\r\nStep 515, loss: 0.008583087474107742\r\nStep 516, loss: 0.007951793260872364\r\nStep 517, loss: 0.007381951902061701\r\nStep 518, loss: 0.008452118374407291\r\nStep 519, loss: 0.008968343958258629\r\nStep 520, loss: 0.008171094581484795\r\nStep 521, loss: 0.008153341710567474\r\nStep 495, loss: 0.008082824759185314\r\nStep 496, loss: 0.0100203612819314\r\nStep 497, loss: 0.009352821856737137\r\nStep 498, loss: 0.008165973238646984\r\nStep 499, loss: 0.008751009590923786\r\nStep 500, loss: 0.008776586502790451\r\nStep 501, loss: 0.0088904844596982\r\nStep 502, loss: 0.009440240450203419\r\nStep 503, loss: 0.0086445277556777\r\nStep 504, loss: 0.008648236282169819\r\nStep 505, loss: 0.008715372532606125\r\nStep 506, loss: 0.008195176720619202\r\nStep 507, loss: 0.008302773348987103\r\nStep 508, loss: 0.008936022408306599\r\nStep 509, loss: 0.007341370452195406\r\nStep 510, loss: 0.008851333521306515\r\nStep 511, loss: 0.008958064951002598\r\nStep 512, loss: 0.009790496900677681\r\nStep 513, loss: 0.009989015758037567\r\nStep 514, loss: 0.010105382651090622\r\nStep 515, loss: 0.008583087474107742\r\nStep 516, loss: 0.007951793260872364\r\nStep 517, loss: 0.007381951902061701\r\nStep 518, loss: 0.008452118374407291\r\nStep 519, loss: 0.008968343958258629\r\nStep 520, loss: 0.008171094581484795\r\nStep 521, loss: 0.008153341710567474\r\nStep 468, loss: 0.00943279080092907\r\nStep 469, loss: 0.00857819989323616\r\nStep 470, loss: 0.008158877491950989\r\nStep 471, loss: 0.009350799024105072\r\nStep 472, loss: 0.010540132410824299\r\nStep 473, loss: 0.009407121688127518\r\nStep 474, loss: 0.009018382988870144\r\nStep 475, loss: 0.006886541377753019\r\nStep 476, loss: 0.00878828950226307\r\nStep 477, loss: 0.007888736203312874\r\nStep 478, loss: 0.01001682784408331\r\nStep 479, loss: 0.010163736529648304\r\nStep 480, loss: 0.008517569862306118\r\nStep 481, loss: 0.01042562909424305\r\nStep 482, loss: 0.007701660506427288\r\nStep 483, loss: 0.007607926148921251\r\nStep 484, loss: 0.008656023070216179\r\nStep 485, loss: 0.009207268245518208\r\nStep 486, loss: 0.009093547239899635\r\nStep 487, loss: 0.009981537237763405\r\nStep 488, loss: 0.009314225055277348\r\nStep 489, loss: 0.008576551452279091\r\nStep 490, loss: 0.007834444753825665\r\nStep 491, loss: 0.008205845952033997\r\nStep 492, loss: 0.00798095390200615\r\nStep 493, loss: 0.008706610649824142\r\nStep 494, loss: 
0.008503030054271221\r\nStep 522, loss: 0.008069823496043682\r\nStep 523, loss: 0.009000985883176327\r\nStep 524, loss: 0.009068850427865982\r\nStep 525, loss: 0.008548617362976074\r\nStep 526, loss: 0.007930642925202847\r\nStep 527, loss: 0.009337826631963253\r\nStep 528, loss: 0.008745614439249039\r\nStep 529, loss: 0.00784107856452465\r\nStep 530, loss: 0.0104753989726305\r\nStep 531, loss: 0.007889948785305023\r\nStep 532, loss: 0.008794852532446384\r\nStep 533, loss: 0.007991227321326733\r\nStep 534, loss: 0.009364668279886246\r\nStep 535, loss: 0.008899899199604988\r\nStep 536, loss: 0.008616209030151367\r\nStep 537, loss: 0.00890787597745657\r\nStep 538, loss: 0.009146109223365784\r\nStep 539, loss: 0.010876119136810303\r\nStep 540, loss: 0.007866852916777134\r\nStep 541, loss: 0.01012083888053894\r\nStep 542, loss: 0.009157226420938969\r\nStep 543, loss: 0.009107368066906929\r\nStep 544, loss: 0.008830016478896141\r\nStep 545, loss: 0.009399570524692535\r\nStep 546, loss: 0.008762801066040993\r\nStep 547, loss: 0.008676286786794662\r\nStep 548, loss: 0.00974164716899395\r\nStep 522, loss: 0.008069823496043682\r\nStep 523, loss: 0.009000985883176327\r\nStep 524, loss: 0.009068850427865982\r\nStep 525, loss: 0.008548617362976074\r\nStep 526, loss: 0.007930642925202847\r\nStep 527, loss: 0.009337826631963253\r\nStep 528, loss: 0.008745614439249039\r\nStep 529, loss: 0.00784107856452465\r\nStep 530, loss: 0.0104753989726305\r\nStep 531, loss: 0.007889948785305023\r\nStep 532, loss: 0.008794852532446384\r\nStep 533, loss: 0.007991227321326733\r\nStep 534, loss: 0.009364668279886246\r\nStep 535, loss: 0.008899899199604988\r\nStep 536, loss: 0.008616209030151367\r\nStep 537, loss: 0.00890787597745657\r\nStep 538, loss: 0.009146109223365784\r\nStep 539, loss: 0.010876119136810303\r\nStep 540, loss: 0.007866852916777134\r\nStep 541, loss: 0.01012083888053894\r\nStep 542, loss: 0.009157226420938969\r\nStep 543, loss: 0.009107368066906929\r\nStep 544, loss: 0.008830016478896141\r\nStep 545, loss: 0.009399570524692535\r\nStep 546, loss: 0.008762801066040993\r\nStep 547, loss: 0.008676286786794662\r\nStep 548, loss: 0.00974164716899395\r\nStep 522, loss: 0.008069823496043682\r\nStep 523, loss: 0.009000985883176327\r\nStep 524, loss: 0.009068850427865982\r\nStep 525, loss: 0.008548617362976074\r\nStep 526, loss: 0.007930642925202847\r\nStep 527, loss: 0.009337826631963253\r\nStep 528, loss: 0.008745614439249039\r\nStep 529, loss: 0.00784107856452465\r\nStep 530, loss: 0.0104753989726305\r\nStep 531, loss: 0.007889948785305023\r\nStep 532, loss: 0.008794852532446384\r\nStep 533, loss: 0.007991227321326733\r\nStep 534, loss: 0.009364668279886246\r\nStep 535, loss: 0.008899899199604988\r\nStep 536, loss: 0.008616209030151367\r\nStep 537, loss: 0.00890787597745657\r\nStep 538, loss: 0.009146109223365784\r\nStep 539, loss: 0.010876119136810303\r\nStep 540, loss: 0.007866852916777134\r\nStep 541, loss: 0.01012083888053894\r\nStep 542, loss: 0.009157226420938969\r\nStep 543, loss: 0.009107368066906929\r\nStep 544, loss: 0.008830016478896141\r\nStep 545, loss: 0.009399570524692535\r\nStep 546, loss: 0.008762801066040993\r\nStep 547, loss: 0.008676286786794662\r\nStep 548, loss: 0.00974164716899395\r\nStep 495, loss: 0.008082824759185314\r\nStep 496, loss: 0.0100203612819314\r\nStep 497, loss: 0.009352821856737137\r\nStep 498, loss: 0.008165973238646984\r\nStep 499, loss: 0.008751009590923786\r\nStep 500, loss: 0.008776586502790451\r\nStep 501, loss: 0.0088904844596982\r\nStep 502, loss: 
0.009440240450203419\r\nStep 503, loss: 0.0086445277556777\r\nStep 504, loss: 0.008648236282169819\r\nStep 505, loss: 0.008715372532606125\r\nStep 506, loss: 0.008195176720619202\r\nStep 507, loss: 0.008302773348987103\r\nStep 508, loss: 0.008936022408306599\r\nStep 509, loss: 0.007341370452195406\r\nStep 510, loss: 0.008851333521306515\r\nStep 511, loss: 0.008958064951002598\r\nStep 512, loss: 0.009790496900677681\r\nStep 513, loss: 0.009989015758037567\r\nStep 514, loss: 0.010105382651090622\r\nStep 515, loss: 0.008583087474107742\r\nStep 516, loss: 0.007951793260872364\r\nStep 517, loss: 0.007381951902061701\r\nStep 518, loss: 0.008452118374407291\r\nStep 519, loss: 0.008968343958258629\r\nStep 520, loss: 0.008171094581484795\r\nStep 521, loss: 0.008153341710567474\r\nStep 549, loss: 0.0074196322821080685\r\nStep 550, loss: 0.009397583082318306\r\nStep 551, loss: 0.00852873269468546\r\nStep 549, loss: 0.0074196322821080685\r\nStep 550, loss: 0.009397583082318306\r\nStep 551, loss: 0.00852873269468546\r\nStep 549, loss: 0.0074196322821080685\r\nStep 550, loss: 0.009397583082318306\r\nStep 551, loss: 0.00852873269468546\r\nStep 522, loss: 0.008069823496043682\r\nStep 523, loss: 0.009000985883176327\r\nStep 524, loss: 0.009068850427865982\r\nStep 525, loss: 0.008548617362976074\r\nStep 526, loss: 0.007930642925202847\r\nStep 527, loss: 0.009337826631963253\r\nStep 528, loss: 0.008745614439249039\r\nStep 529, loss: 0.00784107856452465\r\nStep 530, loss: 0.0104753989726305\r\nStep 531, loss: 0.007889948785305023\r\nStep 532, loss: 0.008794852532446384\r\nStep 533, loss: 0.007991227321326733\r\nStep 534, loss: 0.009364668279886246\r\nStep 535, loss: 0.008899899199604988\r\nStep 536, loss: 0.008616209030151367\r\nStep 537, loss: 0.00890787597745657\r\nStep 538, loss: 0.009146109223365784\r\nStep 539, loss: 0.010876119136810303\r\nStep 540, loss: 0.007866852916777134\r\nStep 541, loss: 0.01012083888053894\r\nStep 542, loss: 0.009157226420938969\r\nStep 543, loss: 0.009107368066906929\r\nStep 544, loss: 0.008830016478896141\r\nStep 545, loss: 0.009399570524692535\r\nStep 546, loss: 0.008762801066040993\r\nStep 547, loss: 0.008676286786794662\r\nStep 548, loss: 0.00974164716899395\r\nStep 552, loss: 0.009549298323690891\r\nStep 553, loss: 0.007757730316370726\r\nStep 554, loss: 0.009732040576636791\r\nStep 555, loss: 0.008432733826339245\r\nStep 556, loss: 0.009298763237893581\r\nStep 557, loss: 0.00818414706736803\r\nStep 558, loss: 0.007203501649200916\r\nStep 559, loss: 0.009764304384589195\r\nStep 560, loss: 0.010583905503153801\r\nStep 561, loss: 0.008711939677596092\r\nStep 562, loss: 0.008203057572245598\r\nStep 563, loss: 0.008497781120240688\r\nStep 564, loss: 0.007532292511314154\r\nStep 565, loss: 0.00986651424318552\r\nStep 566, loss: 0.009398253634572029\r\nStep 567, loss: 0.009236501529812813\r\nStep 568, loss: 0.008806644938886166\r\nStep 569, loss: 0.008545918390154839\r\nStep 570, loss: 0.009641233831644058\r\nStep 571, loss: 0.008593868464231491\r\nStep 572, loss: 0.009537957608699799\r\nStep 573, loss: 0.0091137969866395\r\nStep 574, loss: 0.008439164608716965\r\nStep 575, loss: 0.008239593356847763\r\nStep 576, loss: 0.008624826557934284\r\nStep 577, loss: 0.008145265281200409\r\nStep 578, loss: 0.008806181140244007\r\nStep 552, loss: 0.009549298323690891\r\nStep 553, loss: 0.007757730316370726\r\nStep 554, loss: 0.009732040576636791\r\nStep 555, loss: 0.008432733826339245\r\nStep 556, loss: 0.009298763237893581\r\nStep 557, loss: 0.00818414706736803\r\nStep 558, loss: 
0.007203501649200916\r\nStep 559, loss: 0.009764304384589195\r\nStep 560, loss: 0.010583905503153801\r\nStep 561, loss: 0.008711939677596092\r\nStep 562, loss: 0.008203057572245598\r\nStep 563, loss: 0.008497781120240688\r\nStep 564, loss: 0.007532292511314154\r\nStep 565, loss: 0.00986651424318552\r\nStep 566, loss: 0.009398253634572029\r\nStep 567, loss: 0.009236501529812813\r\nStep 568, loss: 0.008806644938886166\r\nStep 569, loss: 0.008545918390154839\r\nStep 570, loss: 0.009641233831644058\r\nStep 571, loss: 0.008593868464231491\r\nStep 572, loss: 0.009537957608699799\r\nStep 573, loss: 0.0091137969866395\r\nStep 574, loss: 0.008439164608716965\r\nStep 575, loss: 0.008239593356847763\r\nStep 576, loss: 0.008624826557934284\r\nStep 577, loss: 0.008145265281200409\r\nStep 578, loss: 0.008806181140244007\r\nStep 552, loss: 0.009549298323690891\r\nStep 553, loss: 0.007757730316370726\r\nStep 554, loss: 0.009732040576636791\r\nStep 555, loss: 0.008432733826339245\r\nStep 556, loss: 0.009298763237893581\r\nStep 557, loss: 0.00818414706736803\r\nStep 558, loss: 0.007203501649200916\r\nStep 559, loss: 0.009764304384589195\r\nStep 560, loss: 0.010583905503153801\r\nStep 561, loss: 0.008711939677596092\r\nStep 562, loss: 0.008203057572245598\r\nStep 563, loss: 0.008497781120240688\r\nStep 564, loss: 0.007532292511314154\r\nStep 565, loss: 0.00986651424318552\r\nStep 566, loss: 0.009398253634572029\r\nStep 567, loss: 0.009236501529812813\r\nStep 568, loss: 0.008806644938886166\r\nStep 569, loss: 0.008545918390154839\r\nStep 570, loss: 0.009641233831644058\r\nStep 571, loss: 0.008593868464231491\r\nStep 572, loss: 0.009537957608699799\r\nStep 573, loss: 0.0091137969866395\r\nStep 574, loss: 0.008439164608716965\r\nStep 575, loss: 0.008239593356847763\r\nStep 576, loss: 0.008624826557934284\r\nStep 577, loss: 0.008145265281200409\r\nStep 578, loss: 0.008806181140244007\r\nStep 549, loss: 0.0074196322821080685\r\nStep 550, loss: 0.009397583082318306\r\nStep 551, loss: 0.00852873269468546\r\nStep 579, loss: 0.008017127402126789\r\nStep 580, loss: 0.007308092433959246\r\nStep 581, loss: 0.007994630374014378\r\nStep 582, loss: 0.008302122354507446\r\nStep 583, loss: 0.009227989241480827\r\nStep 584, loss: 0.007321110460907221\r\nStep 585, loss: 0.009219660423696041\r\nStep 586, loss: 0.008875611238181591\r\nStep 587, loss: 0.008734606206417084\r\nStep 588, loss: 0.008769622072577477\r\nStep 589, loss: 0.008562730625271797\r\nStep 590, loss: 0.008588621392846107\r\nStep 591, loss: 0.0082046864554286\r\nStep 592, loss: 0.008270608261227608\r\nStep 593, loss: 0.008636316284537315\r\nStep 594, loss: 0.008951708674430847\r\nStep 595, loss: 0.010645163245499134\r\nStep 596, loss: 0.0076056960970163345\r\nStep 597, loss: 0.009029458276927471\r\nStep 598, loss: 0.008114810101687908\r\nStep 599, loss: 0.009122074581682682\r\nStep 600, loss: 0.008821049705147743\r\nStep 601, loss: 0.008914874866604805\r\nStep 602, loss: 0.008345932699739933\r\nStep 603, loss: 0.008173663169145584\r\nStep 604, loss: 0.009208902716636658\r\nStep 605, loss: 0.007708316668868065\r\nStep 579, loss: 0.008017127402126789\r\nStep 580, loss: 0.007308092433959246\r\nStep 581, loss: 0.007994630374014378\r\nStep 582, loss: 0.008302122354507446\r\nStep 583, loss: 0.009227989241480827\r\nStep 584, loss: 0.007321110460907221\r\nStep 585, loss: 0.009219660423696041\r\nStep 586, loss: 0.008875611238181591\r\nStep 587, loss: 0.008734606206417084\r\nStep 588, loss: 0.008769622072577477\r\nStep 589, loss: 0.008562730625271797\r\nStep 590, loss: 
0.008588621392846107\r\nStep 591, loss: 0.0082046864554286\r\nStep 592, loss: 0.008270608261227608\r\nStep 593, loss: 0.008636316284537315\r\nStep 594, loss: 0.008951708674430847\r\nStep 595, loss: 0.010645163245499134\r\nStep 596, loss: 0.0076056960970163345\r\nStep 597, loss: 0.009029458276927471\r\nStep 598, loss: 0.008114810101687908\r\nStep 599, loss: 0.009122074581682682\r\nStep 600, loss: 0.008821049705147743\r\nStep 601, loss: 0.008914874866604805\r\nStep 602, loss: 0.008345932699739933\r\nStep 603, loss: 0.008173663169145584\r\nStep 604, loss: 0.009208902716636658\r\nStep 605, loss: 0.007708316668868065\r\nStep 579, loss: 0.008017127402126789\r\nStep 580, loss: 0.007308092433959246\r\nStep 581, loss: 0.007994630374014378\r\nStep 582, loss: 0.008302122354507446\r\nStep 583, loss: 0.009227989241480827\r\nStep 584, loss: 0.007321110460907221\r\nStep 585, loss: 0.009219660423696041\r\nStep 586, loss: 0.008875611238181591\r\nStep 587, loss: 0.008734606206417084\r\nStep 588, loss: 0.008769622072577477\r\nStep 589, loss: 0.008562730625271797\r\nStep 590, loss: 0.008588621392846107\r\nStep 591, loss: 0.0082046864554286\r\nStep 592, loss: 0.008270608261227608\r\nStep 593, loss: 0.008636316284537315\r\nStep 594, loss: 0.008951708674430847\r\nStep 595, loss: 0.010645163245499134\r\nStep 596, loss: 0.0076056960970163345\r\nStep 597, loss: 0.009029458276927471\r\nStep 598, loss: 0.008114810101687908\r\nStep 599, loss: 0.009122074581682682\r\nStep 600, loss: 0.008821049705147743\r\nStep 601, loss: 0.008914874866604805\r\nStep 602, loss: 0.008345932699739933\r\nStep 603, loss: 0.008173663169145584\r\nStep 604, loss: 0.009208902716636658\r\nStep 605, loss: 0.007708316668868065\r\nStep 441, loss: 0.009172636084258556\r\nStep 442, loss: 0.00982529204338789\r\nStep 443, loss: 0.009700413793325424\r\nStep 444, loss: 0.008612173609435558\r\nStep 445, loss: 0.010386315174400806\r\nStep 446, loss: 0.008841309696435928\r\nStep 447, loss: 0.008696725592017174\r\nStep 448, loss: 0.0100677739828825\r\nStep 449, loss: 0.00916972104460001\r\nStep 450, loss: 0.009417261928319931\r\nStep 451, loss: 0.008428973145782948\r\nStep 452, loss: 0.009684531018137932\r\nStep 453, loss: 0.008075914345681667\r\nStep 454, loss: 0.00811962690204382\r\nStep 455, loss: 0.009778064675629139\r\nStep 456, loss: 0.00913725420832634\r\nStep 457, loss: 0.009202827699482441\r\nStep 458, loss: 0.009212199598550797\r\nStep 459, loss: 0.00841327104717493\r\nStep 460, loss: 0.008480049669742584\r\nStep 461, loss: 0.008315286599099636\r\nStep 462, loss: 0.007818982936441898\r\nStep 463, loss: 0.008244290947914124\r\nStep 464, loss: 0.008826389908790588\r\nStep 465, loss: 0.008300169371068478\r\nStep 466, loss: 0.009412254206836224\r\nStep 467, loss: 0.009721849113702774\r\nStep 606, loss: 0.007426695432513952\r\nStep 607, loss: 0.008950533345341682\r\nStep 608, loss: 0.007777011021971703\r\nStep 609, loss: 0.009693533182144165\r\nStep 610, loss: 0.007790651172399521\r\nStep 611, loss: 0.00917765311896801\r\nStep 612, loss: 0.007631454151123762\r\nStep 613, loss: 0.010259517468512058\r\nStep 614, loss: 0.009628580883145332\r\nStep 615, loss: 0.007362436968833208\r\nStep 616, loss: 0.008848816156387329\r\nStep 617, loss: 0.009226517751812935\r\nStep 618, loss: 0.007675519213080406\r\nStep 619, loss: 0.00823437049984932\r\nStep 620, loss: 0.0077772000804543495\r\nStep 621, loss: 0.007999159395694733\r\nStep 622, loss: 0.008146382868289948\r\nStep 623, loss: 0.007927773520350456\r\nStep 624, loss: 0.008136093616485596\r\nStep 625, loss: 
0.008799970149993896\r\nStep 626, loss: 0.009730185382068157\r\nStep 627, loss: 0.00980282574892044\r\nStep 628, loss: 0.009932177141308784\r\nStep 629, loss: 0.00985488761216402\r\nStep 630, loss: 0.008750185370445251\r\nStep 631, loss: 0.009241788648068905\r\nStep 632, loss: 0.00865153968334198\r\nStep 606, loss: 0.007426695432513952\r\nStep 607, loss: 0.008950533345341682\r\nStep 608, loss: 0.007777011021971703\r\nStep 609, loss: 0.009693533182144165\r\nStep 610, loss: 0.007790651172399521\r\nStep 611, loss: 0.00917765311896801\r\nStep 612, loss: 0.007631454151123762\r\nStep 613, loss: 0.010259517468512058\r\nStep 614, loss: 0.009628580883145332\r\nStep 615, loss: 0.007362436968833208\r\nStep 616, loss: 0.008848816156387329\r\nStep 617, loss: 0.009226517751812935\r\nStep 618, loss: 0.007675519213080406\r\nStep 619, loss: 0.00823437049984932\r\nStep 620, loss: 0.0077772000804543495\r\nStep 621, loss: 0.007999159395694733\r\nStep 622, loss: 0.008146382868289948\r\nStep 623, loss: 0.007927773520350456\r\nStep 624, loss: 0.008136093616485596\r\nStep 625, loss: 0.008799970149993896\r\nStep 626, loss: 0.009730185382068157\r\nStep 627, loss: 0.00980282574892044\r\nStep 628, loss: 0.009932177141308784\r\nStep 629, loss: 0.00985488761216402\r\nStep 630, loss: 0.008750185370445251\r\nStep 631, loss: 0.009241788648068905\r\nStep 632, loss: 0.00865153968334198\r\nStep 606, loss: 0.007426695432513952\r\nStep 607, loss: 0.008950533345341682\r\nStep 608, loss: 0.007777011021971703\r\nStep 609, loss: 0.009693533182144165\r\nStep 610, loss: 0.007790651172399521\r\nStep 611, loss: 0.00917765311896801\r\nStep 612, loss: 0.007631454151123762\r\nStep 613, loss: 0.010259517468512058\r\nStep 614, loss: 0.009628580883145332\r\nStep 615, loss: 0.007362436968833208\r\nStep 616, loss: 0.008848816156387329\r\nStep 617, loss: 0.009226517751812935\r\nStep 618, loss: 0.007675519213080406\r\nStep 619, loss: 0.00823437049984932\r\nStep 620, loss: 0.0077772000804543495\r\nStep 621, loss: 0.007999159395694733\r\nStep 622, loss: 0.008146382868289948\r\nStep 623, loss: 0.007927773520350456\r\nStep 624, loss: 0.008136093616485596\r\nStep 625, loss: 0.008799970149993896\r\nStep 626, loss: 0.009730185382068157\r\nStep 627, loss: 0.00980282574892044\r\nStep 628, loss: 0.009932177141308784\r\nStep 629, loss: 0.00985488761216402\r\nStep 630, loss: 0.008750185370445251\r\nStep 631, loss: 0.009241788648068905\r\nStep 632, loss: 0.00865153968334198\r\nStep 468, loss: 0.00943279080092907\r\nStep 469, loss: 0.00857819989323616\r\nStep 470, loss: 0.008158877491950989\r\nStep 471, loss: 0.009350799024105072\r\nStep 472, loss: 0.010540132410824299\r\nStep 473, loss: 0.009407121688127518\r\nStep 474, loss: 0.009018382988870144\r\nStep 475, loss: 0.006886541377753019\r\nStep 476, loss: 0.00878828950226307\r\nStep 477, loss: 0.007888736203312874\r\nStep 478, loss: 0.01001682784408331\r\nStep 479, loss: 0.010163736529648304\r\nStep 480, loss: 0.008517569862306118\r\nStep 481, loss: 0.01042562909424305\r\nStep 482, loss: 0.007701660506427288\r\nStep 483, loss: 0.007607926148921251\r\nStep 484, loss: 0.008656023070216179\r\nStep 485, loss: 0.009207268245518208\r\nStep 486, loss: 0.009093547239899635\r\nStep 487, loss: 0.009981537237763405\r\nStep 488, loss: 0.009314225055277348\r\nStep 489, loss: 0.008576551452279091\r\nStep 490, loss: 0.007834444753825665\r\nStep 491, loss: 0.008205845952033997\r\nStep 492, loss: 0.00798095390200615\r\nStep 493, loss: 0.008706610649824142\r\nStep 494, loss: 0.008503030054271221\r\nStep 633, loss: 
0.007657185662537813\r\nStep 634, loss: 0.008444579318165779\r\nStep 635, loss: 0.007861824706196785\r\nStep 636, loss: 0.008599400520324707\r\nStep 637, loss: 0.00875160563737154\r\nStep 638, loss: 0.007627840619534254\r\nStep 639, loss: 0.00831996463239193\r\nStep 640, loss: 0.007287387736141682\r\nStep 641, loss: 0.007996085099875927\r\nStep 642, loss: 0.0073984782211482525\r\nStep 643, loss: 0.0090358667075634\r\nStep 644, loss: 0.008371159434318542\r\nStep 645, loss: 0.010133248753845692\r\nStep 646, loss: 0.007735549937933683\r\nStep 647, loss: 0.009218603372573853\r\nStep 648, loss: 0.008114420808851719\r\nStep 649, loss: 0.008637652732431889\r\nStep 650, loss: 0.008584381081163883\r\nStep 651, loss: 0.009334669448435307\r\nStep 652, loss: 0.008351793512701988\r\nStep 653, loss: 0.00839329231530428\r\nStep 654, loss: 0.008216412737965584\r\nStep 655, loss: 0.008404298685491085\r\nStep 656, loss: 0.0077551137655973434\r\nStep 657, loss: 0.008800540119409561\r\nStep 658, loss: 0.008638748899102211\r\nStep 659, loss: 0.008356684818863869\r\nStep 633, loss: 0.007657185662537813\r\nStep 634, loss: 0.008444579318165779\r\nStep 635, loss: 0.007861824706196785\r\nStep 636, loss: 0.008599400520324707\r\nStep 637, loss: 0.00875160563737154\r\nStep 638, loss: 0.007627840619534254\r\nStep 639, loss: 0.00831996463239193\r\nStep 640, loss: 0.007287387736141682\r\nStep 641, loss: 0.007996085099875927\r\nStep 642, loss: 0.0073984782211482525\r\nStep 643, loss: 0.0090358667075634\r\nStep 644, loss: 0.008371159434318542\r\nStep 645, loss: 0.010133248753845692\r\nStep 646, loss: 0.007735549937933683\r\nStep 647, loss: 0.009218603372573853\r\nStep 648, loss: 0.008114420808851719\r\nStep 649, loss: 0.008637652732431889\r\nStep 650, loss: 0.008584381081163883\r\nStep 651, loss: 0.009334669448435307\r\nStep 652, loss: 0.008351793512701988\r\nStep 653, loss: 0.00839329231530428\r\nStep 654, loss: 0.008216412737965584\r\nStep 655, loss: 0.008404298685491085\r\nStep 656, loss: 0.0077551137655973434\r\nStep 657, loss: 0.008800540119409561\r\nStep 658, loss: 0.008638748899102211\r\nStep 659, loss: 0.008356684818863869\r\nStep 633, loss: 0.007657185662537813\r\nStep 634, loss: 0.008444579318165779\r\nStep 635, loss: 0.007861824706196785\r\nStep 636, loss: 0.008599400520324707\r\nStep 637, loss: 0.00875160563737154\r\nStep 638, loss: 0.007627840619534254\r\nStep 639, loss: 0.00831996463239193\r\nStep 640, loss: 0.007287387736141682\r\nStep 641, loss: 0.007996085099875927\r\nStep 642, loss: 0.0073984782211482525\r\nStep 643, loss: 0.0090358667075634\r\nStep 644, loss: 0.008371159434318542\r\nStep 645, loss: 0.010133248753845692\r\nStep 646, loss: 0.007735549937933683\r\nStep 647, loss: 0.009218603372573853\r\nStep 648, loss: 0.008114420808851719\r\nStep 649, loss: 0.008637652732431889\r\nStep 650, loss: 0.008584381081163883\r\nStep 651, loss: 0.009334669448435307\r\nStep 652, loss: 0.008351793512701988\r\nStep 653, loss: 0.00839329231530428\r\nStep 654, loss: 0.008216412737965584\r\nStep 655, loss: 0.008404298685491085\r\nStep 656, loss: 0.0077551137655973434\r\nStep 657, loss: 0.008800540119409561\r\nStep 658, loss: 0.008638748899102211\r\nStep 659, loss: 0.008356684818863869\r\nStep 495, loss: 0.008082824759185314\r\nStep 496, loss: 0.0100203612819314\r\nStep 497, loss: 0.009352821856737137\r\nStep 498, loss: 0.008165973238646984\r\nStep 499, loss: 0.008751009590923786\r\nStep 500, loss: 0.008776586502790451\r\nStep 501, loss: 0.0088904844596982\r\nStep 502, loss: 0.009440240450203419\r\nStep 503, loss: 
0.0086445277556777\r\n[terminal scrollback: interleaved, repeated training-loss lines for Steps 441-662, loss ~0.007-0.011]\r\n",,terminal_output
+326,1818225,"TERMINAL",0,0,"[terminal scrollback: interleaved, repeated training-loss lines for Steps 441-662, loss ~0.007-0.011]\r\nStep 586, loss: 
0.008875611238181591\r\nStep 587, loss: 0.008734606206417084\r\nStep 588, loss: 0.008769622072577477\r\nStep 589, loss: 0.008562730625271797\r\nStep 590, loss: 0.008588621392846107\r\nStep 591, loss: 0.0082046864554286\r\nStep 592, loss: 0.008270608261227608\r\nStep 593, loss: 0.008636316284537315\r\nStep 594, loss: 0.008951708674430847\r\nStep 595, loss: 0.010645163245499134\r\nStep 596, loss: 0.0076056960970163345\r\nStep 597, loss: 0.009029458276927471\r\nStep 598, loss: 0.008114810101687908\r\nStep 599, loss: 0.009122074581682682\r\nStep 600, loss: 0.008821049705147743\r\nStep 601, loss: 0.008914874866604805\r\nStep 602, loss: 0.008345932699739933\r\nStep 603, loss: 0.008173663169145584\r\nStep 604, loss: 0.009208902716636658\r\nStep 605, loss: 0.007708316668868065\r\nStep 606, loss: 0.007426695432513952\r\nStep 607, loss: 0.008950533345341682\r\nStep 608, loss: 0.007777011021971703\r\nStep 609, loss: 0.009693533182144165\r\nStep 610, loss: 0.007790651172399521\r\nStep 611, loss: 0.00917765311896801\r\nStep 612, loss: 0.007631454151123762\r\nStep 613, loss: 0.010259517468512058\r\nStep 614, loss: 0.009628580883145332\r\nStep 615, loss: 0.007362436968833208\r\nStep 616, loss: 0.008848816156387329\r\nStep 617, loss: 0.009226517751812935\r\nStep 618, loss: 0.007675519213080406\r\nStep 619, loss: 0.00823437049984932\r\nStep 620, loss: 0.0077772000804543495\r\nStep 621, loss: 0.007999159395694733\r\nStep 622, loss: 0.008146382868289948\r\nStep 623, loss: 0.007927773520350456\r\nStep 624, loss: 0.008136093616485596\r\nStep 625, loss: 0.008799970149993896\r\nStep 626, loss: 0.009730185382068157\r\nStep 627, loss: 0.00980282574892044\r\nStep 628, loss: 0.009932177141308784\r\nStep 629, loss: 0.00985488761216402\r\nStep 630, loss: 0.008750185370445251\r\nStep 631, loss: 0.009241788648068905\r\nStep 632, loss: 0.00865153968334198\r\nStep 633, loss: 0.007657185662537813\r\nStep 634, loss: 0.008444579318165779\r\nStep 635, loss: 0.007861824706196785\r\nStep 636, loss: 0.008599400520324707\r\nStep 637, loss: 0.00875160563737154\r\nStep 638, loss: 0.007627840619534254\r\nStep 639, loss: 0.00831996463239193\r\nStep 640, loss: 0.007287387736141682\r\nStep 641, loss: 0.007996085099875927\r\nStep 642, loss: 0.0073984782211482525\r\nStep 643, loss: 0.0090358667075634\r\nStep 644, loss: 0.008371159434318542\r\nStep 645, loss: 0.010133248753845692\r\nStep 646, loss: 0.007735549937933683\r\nStep 647, loss: 0.009218603372573853\r\nStep 648, loss: 0.008114420808851719\r\nStep 649, loss: 0.008637652732431889\r\nStep 650, loss: 0.008584381081163883\r\nStep 651, loss: 0.009334669448435307\r\nStep 652, loss: 0.008351793512701988\r\nStep 653, loss: 0.00839329231530428\r\nStep 654, loss: 0.008216412737965584\r\nStep 655, loss: 0.008404298685491085\r\nStep 656, loss: 0.0077551137655973434\r\nStep 657, loss: 0.008800540119409561\r\nStep 658, loss: 0.008638748899102211\r\nStep 659, loss: 0.008356684818863869\r\nStep 660, loss: 0.008637391030788422\r\nStep 661, loss: 0.007406390272080898\r\nStep 662, loss: 0.008827605284750462\r\n",,terminal_output +327,1818529,"TERMINAL",0,0,"36618618",,terminal_output +328,1819579,"TERMINAL",0,0,"47729729",,terminal_output +329,1820624,"TERMINAL",0,0,"5883208340",,terminal_output +330,1821680,"TERMINAL",0,0,"69941941",,terminal_output +331,1822734,"TERMINAL",0,0,"72121633163",,terminal_output +332,1823781,"TERMINAL",0,0,"92274274",,terminal_output +333,1824887,"TERMINAL",0,0,"5:003385385",,terminal_output +334,1825879,"TERMINAL",0,0,"14496496",,terminal_output 
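[editor's aside] The repeated "Step N, loss: ..." lines above are most plausibly multi-task output: the job is launched with srun across 2 nodes x 4 tasks per node (see the #SBATCH headers recorded further below), and each task appears to write its own copy of the log line into the shared output file. A minimal sketch of one way to keep only task 0's stdout; SLURM_PROCID is a real variable set by srun for each task, but this wrapper script itself is an illustration and not part of the recorded session:

#!/usr/bin/env bash
# quiet_rank.sh (hypothetical): run "$@" but discard stdout/stderr on every
# SLURM task except task 0, so each training step is printed exactly once.
if [ "${SLURM_PROCID:-0}" -ne 0 ]; then
    exec "$@" > /dev/null 2>&1
fi
exec "$@"

Usage would be `srun quiet_rank.sh python train_lam.py ...` in place of the bare `srun python train_lam.py ...` seen in the scripts below.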
+335,1827007,"TERMINAL",0,0,"[watch-style refresh residue collapsed: rows +335..+365, +367, +371, +374..+383, +385..+386, +388 carried only cursor-addressed digit fragments]",,terminal_output +366,1858904,"TERMINAL",0,0,"srun",,terminal_focus +368,1859587,"scripts_horeka/modelsize_scaling/lam/tester.sh",0,0,"",shellscript,tab +369,1859588,"scripts_horeka/modelsize_scaling/lam/tester.sh",814,0,"",shellscript,selection_mouse +370,1859643,"scripts_horeka/modelsize_scaling/lam/tester.sh",813,0,"",shellscript,selection_command +372,1860386,"scripts_horeka/modelsize_scaling/lam/tester.sh",792,0,"",shellscript,selection_mouse +373,1860400,"scripts_horeka/modelsize_scaling/lam/tester.sh",791,0,"",shellscript,selection_command +384,1871927,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",0,0,"",plaintext,tab +387,1873562,"scripts_horeka/modelsize_scaling/lam/tester.sh",0,0,"",shellscript,tab +389,1874227,"scripts_horeka/modelsize_scaling/lam/tester.sh",846,0,"",shellscript,selection_mouse +390,1874384,"scripts_horeka/modelsize_scaling/lam/tester.sh",845,1,"\n",shellscript,selection_mouse
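[editor's aside] The selection rows that follow sweep through scripts_horeka/modelsize_scaling/lam/tester.sh; the full file text is recorded in rows +422..+423 below, with backslash-newline continuations escaped as "\\n". For readability, here is that script rendered back as plain bash (a best-effort reconstruction from the recorded text, not a new file in the session):

# Log the sbatch script
cat $0

module unload mpi/openmpi/5.0
module unload devel/cuda/12.4
source .venv/bin/activate

tf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord
ws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'

job_name="debug"
slurm_job_id="debug-mihir"

CHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id
mkdir -p $CHECKPOINT_DIR

env | grep SLURM

XLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \
    --ckpt_dir $CHECKPOINT_DIR \
    --batch_size=96 \
    --min_lr=1e-4 \
    --max_lr=1e-4 \
    --log_image_interval=50 \
    --log_checkpoint_interval=5 \
    --log \
    --name=lam-model-size-scaling-39M-$slurm_job_id \
    --tags lam model-size-scaling 39M \
    --entity instant-uv \
    --project jafar \
    --data_dir $tf_records_dir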
+391,1874493,"scripts_horeka/modelsize_scaling/lam/tester.sh",814,32,"\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +392,1874494,"scripts_horeka/modelsize_scaling/lam/tester.sh",788,58,"uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +393,1874494,"scripts_horeka/modelsize_scaling/lam/tester.sh",744,102,"del-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +394,1874495,"scripts_horeka/modelsize_scaling/lam/tester.sh",688,158,"model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +395,1874495,"scripts_horeka/modelsize_scaling/lam/tester.sh",686,160,"m-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +396,1874496,"scripts_horeka/modelsize_scaling/lam/tester.sh",672,174,"\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +397,1874524,"scripts_horeka/modelsize_scaling/lam/tester.sh",636,210,"_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +398,1874549,"scripts_horeka/modelsize_scaling/lam/tester.sh",604,242,"og_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +399,1874570,"scripts_horeka/modelsize_scaling/lam/tester.sh",583,263,"max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +400,1874629,"scripts_horeka/modelsize_scaling/lam/tester.sh",562,284,"-min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +401,1874641,"scripts_horeka/modelsize_scaling/lam/tester.sh",539,307,"--batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +402,1874706,"scripts_horeka/modelsize_scaling/lam/tester.sh",505,341," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +403,1874738,"scripts_horeka/modelsize_scaling/lam/tester.sh",441,405,"_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n 
--ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +404,1874762,"scripts_horeka/modelsize_scaling/lam/tester.sh",440,406,"A_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +405,1874779,"scripts_horeka/modelsize_scaling/lam/tester.sh",439,407,"LA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +406,1874837,"scripts_horeka/modelsize_scaling/lam/tester.sh",438,408,"XLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +407,1874893,"scripts_horeka/modelsize_scaling/lam/tester.sh",437,409,"\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +408,1874974,"TERMINAL",0,0,"503385385",,terminal_output +409,1875285,"scripts_horeka/modelsize_scaling/lam/tester.sh",438,408,"XLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +410,1875286,"scripts_horeka/modelsize_scaling/lam/tester.sh",535,311," --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +411,1875286,"scripts_horeka/modelsize_scaling/lam/tester.sh",558,288," --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir 
$tf_records_dir\n",shellscript,selection_mouse +412,1875370,"scripts_horeka/modelsize_scaling/lam/tester.sh",600,246," --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +413,1875371,"scripts_horeka/modelsize_scaling/lam/tester.sh",669,177,"g \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +414,1875371,"scripts_horeka/modelsize_scaling/lam/tester.sh",683,163,"=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +415,1875371,"scripts_horeka/modelsize_scaling/lam/tester.sh",738,108,"lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +416,1875394,"scripts_horeka/modelsize_scaling/lam/tester.sh",740,106,"m model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +417,1875418,"scripts_horeka/modelsize_scaling/lam/tester.sh",808,38,"afar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +418,1875431,"scripts_horeka/modelsize_scaling/lam/tester.sh",832,14,"f_records_dir\n",shellscript,selection_mouse +419,1875453,"scripts_horeka/modelsize_scaling/lam/tester.sh",846,0,"",shellscript,selection_mouse +420,1875570,"scripts_horeka/modelsize_scaling/lam/tester.sh",810,36,"ar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +421,1875642,"scripts_horeka/modelsize_scaling/lam/tester.sh",583,263,"max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +422,1875676,"scripts_horeka/modelsize_scaling/lam/tester.sh",93,753,"source .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +423,1875677,"scripts_horeka/modelsize_scaling/lam/tester.sh",0,846,"\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep 
SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +424,1875969,"scripts_horeka/modelsize_scaling/lam/tester.sh",25,821,"cat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +425,1876002,"scripts_horeka/modelsize_scaling/lam/tester.sh",33,813,"module unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +426,1876003,"scripts_horeka/modelsize_scaling/lam/tester.sh",93,753,"source .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +427,1876037,"scripts_horeka/modelsize_scaling/lam/tester.sh",120,726,"tf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python 
train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +428,1876037,"scripts_horeka/modelsize_scaling/lam/tester.sh",223,623,"ws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +429,1876045,"TERMINAL",0,0,"14496496",,terminal_output +430,1876087,"scripts_horeka/modelsize_scaling/lam/tester.sh",289,557,"\njob_name=""debug""\nslurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +431,1876088,"scripts_horeka/modelsize_scaling/lam/tester.sh",290,556,"job_name=""debug""\nslurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +432,1876150,"scripts_horeka/modelsize_scaling/lam/tester.sh",307,539,"slurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +433,1876169,"scripts_horeka/modelsize_scaling/lam/tester.sh",334,512,"\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n 
--entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +434,1876279,"scripts_horeka/modelsize_scaling/lam/tester.sh",307,539,"slurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +435,1876336,"scripts_horeka/modelsize_scaling/lam/tester.sh",334,512,"\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +436,1876396,"scripts_horeka/modelsize_scaling/lam/tester.sh",335,511,"CHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +437,1876455,"scripts_horeka/modelsize_scaling/lam/tester.sh",337,509,"ECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +438,1876455,"scripts_horeka/modelsize_scaling/lam/tester.sh",397,449,"ir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +439,1876455,"scripts_horeka/modelsize_scaling/lam/tester.sh",398,448,"r -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir 
$tf_records_dir\n",shellscript,selection_mouse +440,1876455,"scripts_horeka/modelsize_scaling/lam/tester.sh",419,427,"\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +441,1876562,"scripts_horeka/modelsize_scaling/lam/tester.sh",426,420,"grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=50 \\n --log_checkpoint_interval=5 \\n --log \\n --name=lam-model-size-scaling-39M-$slurm_job_id \\n --tags lam model-size-scaling 39M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +442,1877253,"TERMINAL",0,0,"2551075107",,terminal_output +443,1878239,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,tab +444,1878653,"TERMINAL",0,0,"36618618",,terminal_output +445,1879005,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",1270,0,"",shellscript,selection_mouse +446,1879011,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",1269,0,"",shellscript,selection_command +447,1879164,"TERMINAL",0,0,"47729729",,terminal_output +448,1879646,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",1302,0,"",shellscript,selection_mouse +449,1879902,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",1248,54,"\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +450,1879903,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",908,394,"\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags 
tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +451,1879903,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",149,1153,"#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +452,1879904,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",0,1302,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +453,1879906,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",1248,54,"\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_command +454,1879956,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",0,1302,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH 
--job-name=train_tokenizer_model_size_scaling_37M\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +455,1880220,"TERMINAL",0,0,"5883208340",,terminal_output +456,1881338,"TERMINAL",0,0,"69941941",,terminal_output +457,1882361,"TERMINAL",0,0,"72020523052",,terminal_output +458,1882746,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",0,0,"",plaintext,tab +459,1883386,"TERMINAL",0,0,"81163163",,terminal_output +460,1884286,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",plaintext,content +461,1884441,"TERMINAL",0,0,"92274274",,terminal_output +462,1885146,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",1248,54,"\n --project jafar \\n --data_dir $tf_records_dir\n",plaintext,selection_mouse +463,1885229,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",891,411,"\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",plaintext,selection_mouse +464,1885230,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",434,868,"e sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule 
unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",plaintext,selection_mouse +465,1885231,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",40,1262,"SBATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",plaintext,selection_mouse +466,1885231,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",0,1302,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project 
jafar \\n --data_dir $tf_records_dir\n",plaintext,selection_mouse +467,1885547,"TERMINAL",0,0,"[watch-style refresh residue collapsed: rows +467, +469..+480, +485, +491..+508, +511..+512]",,terminal_output +468,1886101,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",0,0,"",plaintext,selection_command +481,1898601,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",367,0,"#SBATCH --job-name=train_lam_model_size_scaling_38M\n",plaintext,content +482,1898603,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",419,58,"",plaintext,content +483,1899123,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",762,0,"CHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\n",plaintext,content +484,1899209,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",821,98,"",plaintext,content +486,1899409,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",865,0,"XLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n",plaintext,content +487,1899410,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",929,33,"",plaintext,content +488,1899715,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",1103,0," --name=lam-model-size-scaling-38M-$slurm_job_id \\n",plaintext,content +489,1899822,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",1157,0," --tags lam model-size-scaling 38M \\n",plaintext,content +490,1899823,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",1197,106,"",plaintext,content +509,1918134,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",1195,0,"",plaintext,selection_mouse +510,1918731,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",960,0,"",plaintext,selection_mouse
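[editor's aside] Rows +481..+490 above adapt the tokenizer sbatch template (pasted into train_lam_38M.sbatch in row +460) into the LAM job: the job-name, CHECKPOINT_DIR, srun command, and name/tags lines are each inserted and the old text deleted. Assuming those byte-offset edits applied cleanly, and noting that no recorded edit touched the 500-step log intervals, the resulting file would read roughly as follows (an inferred reconstruction, not text copied from the recording):

#!/usr/bin/env bash

#SBATCH --nodes=2
#SBATCH --ntasks-per-node=4
#SBATCH --time=15:00:00
#SBATCH --partition=accelerated
#SBATCH --cpus-per-task=5
#SBATCH --gres=gpu:4
#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log
#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log
#SBATCH --job-name=train_lam_model_size_scaling_38M

# Log the sbatch script
cat $0

module unload mpi/openmpi/5.0
module unload devel/cuda/12.4
source .venv/bin/activate

tf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord
ws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'

job_name=$SLURM_JOB_NAME
slurm_job_id=$SLURM_JOB_ID

CHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id
mkdir -p $CHECKPOINT_DIR

env | grep SLURM

XLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \
    --ckpt_dir $CHECKPOINT_DIR \
    --batch_size=96 \
    --min_lr=1e-4 \
    --max_lr=1e-4 \
    --log_image_interval=500 \
    --log_checkpoint_interval=500 \
    --log \
    --name=lam-model-size-scaling-38M-$slurm_job_id \
    --tags lam model-size-scaling 38M \
    --entity instant-uv \
    --project jafar \
    --data_dir $tf_records_dir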
+513,1921158,"TERMINAL",0,0,"[watch-style refresh residue collapsed: rows +513..+534, +536..+537, +540..+543]",,terminal_output +535,1944280,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",0,0,"",shellscript,tab +538,1945578,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",1017,0,"",shellscript,selection_mouse +539,1945596,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",1016,0,"",shellscript,selection_command +544,1949727,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",865,0,"",shellscript,selection_mouse +545,1949743,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",864,0,"",shellscript,selection_command +546,1949981,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",864,1,"d",shellscript,selection_mouse +547,1949981,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",862,2,"_i",shellscript,selection_mouse +548,1949982,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",847,17,"name/$slurm_job_i",shellscript,selection_mouse +549,1949982,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",826,38,"cts/checkpoints/$job_name/$slurm_job_i",shellscript,selection_mouse +550,1949982,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",812,52,"_cte0515/Projects/checkpoints/$job_name/$slurm_job_i",shellscript,selection_mouse +551,1949983,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",767,97,"\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_i",shellscript,selection_mouse +552,1950009,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",865,0,"",shellscript,selection_command +553,1950010,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",767,98,"\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id",shellscript,selection_mouse
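[editor's aside] The selections in rows +544..+553 above and +554 just below highlight the tokenizer sbatch's CHECKPOINT_DIR, which still points at the user's home project directory, whereas tester.sh and the edited LAM sbatch checkpoint into the shared workspace. The two conventions side by side, with paths copied verbatim from the recorded files:

# train_tokenizer_37M.sbatch (home project directory):
CHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id

# tester.sh and the edited train_lam_38M.sbatch (shared workspace):
ws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'
CHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id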
+554,1950403,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",768,97,"CHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id",shellscript,selection_mouse
+555,1950475,"TERMINAL",0,0,"5883308350",,terminal_output
+556,1951488,"TERMINAL",0,0,"69941941",,terminal_output
+557,1952609,"TERMINAL",0,0,"73030524052",,terminal_output
+558,1953585,"TERMINAL",0,0,"81163163",,terminal_output
+559,1953989,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",650,0,"",shellscript,selection_mouse
+560,1954150,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",648,6,"ws_dir",shellscript,selection_mouse
+561,1954658,"TERMINAL",0,0,"92274274",,terminal_output
+562,1955233,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",660,0,"",shellscript,selection_mouse
+563,1955676,"TERMINAL",0,0,"103385385",,terminal_output
+564,1956407,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",648,66,"",shellscript,content
+565,1956728,"TERMINAL",0,0,"1553075307",,terminal_output
+566,1957775,"TERMINAL",0,0,"36618618",,terminal_output
+567,1958836,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",0,0,"",plaintext,tab
+568,1958968,"TERMINAL",0,0,"47729729",,terminal_output
+569,1959890,"TERMINAL",0,0,"588340832:00",,terminal_output
+570,1960286,"TERMINAL",0,0,"Step 663, loss: 0.008188503794372082\r\nStep 664, loss: 0.009675285778939724\r\nStep 665, loss: 0.007473045960068703\r\n[steps 666-883 omitted: the training run emitted one 'Step N, loss: <value>' line per step, with losses in the ~0.0061-0.0102 range; terminal re-rendering duplicated the same step blocks (663-689, 690-716, 717-743, ...) verbatim many times in the raw capture]\r\nStep 884, loss: 0.008637496270239353",,terminal_output
0.009675285778939724\r\nStep 665, loss: 0.007473045960068703\r\nStep 666, loss: 0.008836650289595127\r\nStep 667, loss: 0.0070752548053860664\r\nStep 668, loss: 0.007328846491873264\r\nStep 669, loss: 0.008448733016848564\r\nStep 670, loss: 0.008510428480803967\r\nStep 671, loss: 0.009033353067934513\r\nStep 672, loss: 0.009275879710912704\r\nStep 673, loss: 0.009029111824929714\r\nStep 674, loss: 0.007315825670957565\r\nStep 675, loss: 0.010116366669535637\r\nStep 676, loss: 0.009740644134581089\r\nStep 677, loss: 0.009368915110826492\r\nStep 678, loss: 0.008023670874536037\r\nStep 679, loss: 0.008052327670156956\r\nStep 680, loss: 0.008584907278418541\r\nStep 681, loss: 0.009915526024997234\r\nStep 682, loss: 0.0098458556458354\r\nStep 683, loss: 0.007350854575634003\r\nStep 684, loss: 0.009085259400308132\r\nStep 685, loss: 0.008175426162779331\r\nStep 686, loss: 0.008942642249166965\r\nStep 687, loss: 0.008495903573930264\r\nStep 688, loss: 0.009691085666418076\r\nStep 689, loss: 0.00756419450044632\r\nStep 882, loss: 0.008539333008229733\r\nStep 883, loss: 0.007746302522718906\r\nStep 884, loss: 0.008637496270239353\r\nStep 774, loss: 0.008258556947112083\r\nStep 775, loss: 0.007877164520323277\r\nStep 776, loss: 0.009185128845274448\r\nStep 777, loss: 0.007426017429679632\r\nStep 778, loss: 0.008627464063465595\r\nStep 779, loss: 0.009369480423629284\r\nStep 780, loss: 0.007784644607454538\r\nStep 781, loss: 0.008786655031144619\r\nStep 782, loss: 0.008275906555354595\r\nStep 783, loss: 0.0072689359076321125\r\nStep 784, loss: 0.007263512816280127\r\nStep 785, loss: 0.007613175082951784\r\nStep 786, loss: 0.008340373635292053\r\nStep 787, loss: 0.00699112331494689\r\nStep 788, loss: 0.008804126642644405\r\nStep 789, loss: 0.009185505099594593\r\nStep 790, loss: 0.009037316776812077\r\nStep 791, loss: 0.008365091867744923\r\nStep 792, loss: 0.009296916425228119\r\nStep 793, loss: 0.009148641489446163\r\nStep 794, loss: 0.007881499826908112\r\nStep 795, loss: 0.008150001987814903\r\nStep 796, loss: 0.0077763888984918594\r\nStep 797, loss: 0.007661592680960894\r\nStep 798, loss: 0.009461347945034504\r\nStep 799, loss: 0.008547618985176086\r\nStep 800, loss: 0.007417353335767984\r\nStep 744, loss: 0.007259129546582699\r\nStep 745, loss: 0.008020087145268917\r\nStep 746, loss: 0.008935447782278061\r\nStep 747, loss: 0.008065402507781982\r\nStep 748, loss: 0.007918530143797398\r\nStep 749, loss: 0.0076026576571166515\r\nStep 750, loss: 0.008453233167529106\r\nStep 751, loss: 0.008083358407020569\r\nStep 752, loss: 0.00885181687772274\r\nStep 753, loss: 0.008323529735207558\r\nStep 754, loss: 0.010221350006759167\r\nStep 755, loss: 0.008537876419723034\r\nStep 756, loss: 0.0075461408123373985\r\nStep 757, loss: 0.00968962348997593\r\nStep 758, loss: 0.008657812140882015\r\nStep 759, loss: 0.00908595696091652\r\nStep 760, loss: 0.007928945124149323\r\nStep 761, loss: 0.008232498541474342\r\nStep 762, loss: 0.0081816790625453\r\nStep 763, loss: 0.00983133539557457\r\nStep 764, loss: 0.007727086078375578\r\nStep 765, loss: 0.007970851846039295\r\nStep 766, loss: 0.008272339589893818\r\nStep 767, loss: 0.008935518562793732\r\nStep 768, loss: 0.007538327947258949\r\nStep 769, loss: 0.009562896564602852\r\nStep 770, loss: 0.006767282262444496\r\nStep 690, loss: 0.008400192484259605\r\nStep 691, loss: 0.007621959783136845\r\nStep 692, loss: 0.008800345472991467\r\nStep 693, loss: 0.007274152245372534\r\nStep 694, loss: 0.008735042996704578\r\nStep 695, loss: 0.009365207515656948\r\nStep 696, loss: 
0.007852311246097088\r\nStep 697, loss: 0.00840317364782095\r\nStep 698, loss: 0.008068027906119823\r\nStep 699, loss: 0.009760628454387188\r\nStep 700, loss: 0.008621647953987122\r\nStep 701, loss: 0.008357041515409946\r\nStep 702, loss: 0.007615256123244762\r\nStep 703, loss: 0.007735436782240868\r\nStep 704, loss: 0.008228760212659836\r\nStep 705, loss: 0.009480788372457027\r\nStep 706, loss: 0.009712466970086098\r\nStep 707, loss: 0.006602727808058262\r\nStep 708, loss: 0.009339170530438423\r\nStep 709, loss: 0.00838259607553482\r\nStep 710, loss: 0.007861709222197533\r\nStep 711, loss: 0.008499818854033947\r\nStep 712, loss: 0.0088029894977808\r\nStep 713, loss: 0.007529713213443756\r\nStep 714, loss: 0.008756793104112148\r\nStep 715, loss: 0.007938941940665245\r\nStep 716, loss: 0.008601955138146877\r\nStep 663, loss: 0.008188503794372082\r\nStep 664, loss: 0.009675285778939724\r\nStep 665, loss: 0.007473045960068703\r\nStep 666, loss: 0.008836650289595127\r\nStep 667, loss: 0.0070752548053860664\r\nStep 668, loss: 0.007328846491873264\r\nStep 669, loss: 0.008448733016848564\r\nStep 670, loss: 0.008510428480803967\r\nStep 671, loss: 0.009033353067934513\r\nStep 672, loss: 0.009275879710912704\r\nStep 673, loss: 0.009029111824929714\r\nStep 674, loss: 0.007315825670957565\r\nStep 675, loss: 0.010116366669535637\r\nStep 676, loss: 0.009740644134581089\r\nStep 677, loss: 0.009368915110826492\r\nStep 678, loss: 0.008023670874536037\r\nStep 679, loss: 0.008052327670156956\r\nStep 680, loss: 0.008584907278418541\r\nStep 681, loss: 0.009915526024997234\r\nStep 682, loss: 0.0098458556458354\r\nStep 683, loss: 0.007350854575634003\r\nStep 684, loss: 0.009085259400308132\r\nStep 685, loss: 0.008175426162779331\r\nStep 686, loss: 0.008942642249166965\r\nStep 687, loss: 0.008495903573930264\r\nStep 688, loss: 0.009691085666418076\r\nStep 689, loss: 0.00756419450044632\r\nStep 801, loss: 0.009174657985568047\r\nStep 802, loss: 0.008975048549473286\r\nStep 803, loss: 0.0065934909507632256\r\nStep 804, loss: 0.009461808949708939\r\nStep 805, loss: 0.008408617228269577\r\nStep 806, loss: 0.00989458430558443\r\nStep 807, loss: 0.009049440734088421\r\nStep 808, loss: 0.00848913099616766\r\nStep 809, loss: 0.008840562775731087\r\nStep 810, loss: 0.009033815935254097\r\nStep 811, loss: 0.009323631413280964\r\nStep 812, loss: 0.008919804356992245\r\nStep 813, loss: 0.00836434680968523\r\nStep 814, loss: 0.00745892571285367\r\nStep 815, loss: 0.006103780120611191\r\nStep 816, loss: 0.008175531402230263\r\nStep 817, loss: 0.007778630126267672\r\nStep 818, loss: 0.006912181619554758\r\nStep 819, loss: 0.008521071635186672\r\nStep 820, loss: 0.00898344162851572\r\nStep 821, loss: 0.008880916982889175\r\nStep 822, loss: 0.009399177506566048\r\nStep 823, loss: 0.009236321784555912\r\nStep 824, loss: 0.008534150198101997\r\nStep 825, loss: 0.007945604622364044\r\nStep 826, loss: 0.008585712872445583\r\nStep 827, loss: 0.009360426105558872\r\nStep 771, loss: 0.008597593754529953\r\nStep 772, loss: 0.008593601174652576\r\nStep 773, loss: 0.008874927647411823\r\nStep 717, loss: 0.008059379644691944\r\nStep 718, loss: 0.009258144535124302\r\nStep 719, loss: 0.008124497719109058\r\nStep 720, loss: 0.00760618643835187\r\nStep 721, loss: 0.008031727746129036\r\nStep 722, loss: 0.007869260385632515\r\nStep 723, loss: 0.00932231917977333\r\nStep 724, loss: 0.008117434568703175\r\nStep 725, loss: 0.00909934937953949\r\nStep 726, loss: 0.009500808082520962\r\nStep 727, loss: 0.009050896391272545\r\nStep 728, loss: 
0.008389940485358238\r\nStep 729, loss: 0.008761260658502579\r\nStep 730, loss: 0.009779571555554867\r\nStep 731, loss: 0.008892599493265152\r\nStep 732, loss: 0.007869680412113667\r\nStep 733, loss: 0.006751553621143103\r\nStep 734, loss: 0.0077990517020225525\r\nStep 735, loss: 0.007715032435953617\r\nStep 736, loss: 0.008044131100177765\r\nStep 737, loss: 0.007676326669752598\r\nStep 738, loss: 0.007771224714815617\r\nStep 739, loss: 0.008221903815865517\r\nStep 740, loss: 0.00855640135705471\r\nStep 741, loss: 0.007962626405060291\r\nStep 742, loss: 0.009416566230356693\r\nStep 743, loss: 0.008063580840826035\r\nStep 690, loss: 0.008400192484259605\r\nStep 691, loss: 0.007621959783136845\r\nStep 692, loss: 0.008800345472991467\r\nStep 693, loss: 0.007274152245372534\r\nStep 694, loss: 0.008735042996704578\r\nStep 695, loss: 0.009365207515656948\r\nStep 696, loss: 0.007852311246097088\r\nStep 697, loss: 0.00840317364782095\r\nStep 698, loss: 0.008068027906119823\r\nStep 699, loss: 0.009760628454387188\r\nStep 700, loss: 0.008621647953987122\r\nStep 701, loss: 0.008357041515409946\r\nStep 702, loss: 0.007615256123244762\r\nStep 703, loss: 0.007735436782240868\r\nStep 704, loss: 0.008228760212659836\r\nStep 705, loss: 0.009480788372457027\r\nStep 706, loss: 0.009712466970086098\r\nStep 707, loss: 0.006602727808058262\r\nStep 708, loss: 0.009339170530438423\r\nStep 709, loss: 0.00838259607553482\r\nStep 710, loss: 0.007861709222197533\r\nStep 711, loss: 0.008499818854033947\r\nStep 712, loss: 0.0088029894977808\r\nStep 713, loss: 0.007529713213443756\r\nStep 714, loss: 0.008756793104112148\r\nStep 715, loss: 0.007938941940665245\r\nStep 716, loss: 0.008601955138146877\r\nStep 828, loss: 0.008506404235959053\r\nStep 829, loss: 0.007260220590978861\r\nStep 830, loss: 0.008295166306197643\r\nStep 831, loss: 0.007627023849636316\r\nStep 832, loss: 0.008010821416974068\r\nStep 833, loss: 0.009520187973976135\r\nStep 834, loss: 0.008509846404194832\r\nStep 835, loss: 0.008356443606317043\r\nStep 836, loss: 0.007537637837231159\r\nStep 837, loss: 0.007802378851920366\r\nStep 838, loss: 0.007528160233050585\r\nStep 839, loss: 0.00884687714278698\r\nStep 840, loss: 0.00838764663785696\r\nStep 841, loss: 0.007858619093894958\r\nStep 842, loss: 0.007163564674556255\r\nStep 843, loss: 0.008134142495691776\r\nStep 844, loss: 0.008857795968651772\r\nStep 845, loss: 0.007322083692997694\r\nStep 846, loss: 0.007358285132795572\r\nStep 847, loss: 0.007790219504386187\r\nStep 848, loss: 0.009197061881422997\r\nStep 849, loss: 0.00793133582919836\r\nStep 850, loss: 0.0078589404001832\r\nStep 851, loss: 0.007828818634152412\r\nStep 852, loss: 0.008382393978536129\r\nStep 853, loss: 0.00741743016988039\r\nStep 854, loss: 0.009868623688817024\r\nStep 774, loss: 0.008258556947112083\r\nStep 775, loss: 0.007877164520323277\r\nStep 776, loss: 0.009185128845274448\r\nStep 777, loss: 0.007426017429679632\r\nStep 778, loss: 0.008627464063465595\r\nStep 779, loss: 0.009369480423629284\r\nStep 780, loss: 0.007784644607454538\r\nStep 781, loss: 0.008786655031144619\r\nStep 782, loss: 0.008275906555354595\r\nStep 783, loss: 0.0072689359076321125\r\nStep 784, loss: 0.007263512816280127\r\nStep 785, loss: 0.007613175082951784\r\nStep 786, loss: 0.008340373635292053\r\nStep 787, loss: 0.00699112331494689\r\nStep 788, loss: 0.008804126642644405\r\nStep 789, loss: 0.009185505099594593\r\nStep 790, loss: 0.009037316776812077\r\nStep 791, loss: 0.008365091867744923\r\nStep 792, loss: 0.009296916425228119\r\nStep 793, loss: 
0.009148641489446163\r\nStep 794, loss: 0.007881499826908112\r\nStep 795, loss: 0.008150001987814903\r\nStep 796, loss: 0.0077763888984918594\r\nStep 797, loss: 0.007661592680960894\r\nStep 798, loss: 0.009461347945034504\r\nStep 799, loss: 0.008547618985176086\r\nStep 800, loss: 0.007417353335767984\r\nStep 744, loss: 0.007259129546582699\r\nStep 745, loss: 0.008020087145268917\r\nStep 746, loss: 0.008935447782278061\r\nStep 747, loss: 0.008065402507781982\r\nStep 748, loss: 0.007918530143797398\r\nStep 749, loss: 0.0076026576571166515\r\nStep 750, loss: 0.008453233167529106\r\nStep 751, loss: 0.008083358407020569\r\nStep 752, loss: 0.00885181687772274\r\nStep 753, loss: 0.008323529735207558\r\nStep 754, loss: 0.010221350006759167\r\nStep 755, loss: 0.008537876419723034\r\nStep 756, loss: 0.0075461408123373985\r\nStep 757, loss: 0.00968962348997593\r\nStep 758, loss: 0.008657812140882015\r\nStep 759, loss: 0.00908595696091652\r\nStep 760, loss: 0.007928945124149323\r\nStep 761, loss: 0.008232498541474342\r\nStep 762, loss: 0.0081816790625453\r\nStep 763, loss: 0.00983133539557457\r\nStep 764, loss: 0.007727086078375578\r\nStep 765, loss: 0.007970851846039295\r\nStep 766, loss: 0.008272339589893818\r\nStep 767, loss: 0.008935518562793732\r\nStep 768, loss: 0.007538327947258949\r\nStep 769, loss: 0.009562896564602852\r\nStep 770, loss: 0.006767282262444496\r\nStep 717, loss: 0.008059379644691944\r\nStep 718, loss: 0.009258144535124302\r\nStep 719, loss: 0.008124497719109058\r\nStep 720, loss: 0.00760618643835187\r\nStep 721, loss: 0.008031727746129036\r\nStep 722, loss: 0.007869260385632515\r\nStep 723, loss: 0.00932231917977333\r\nStep 724, loss: 0.008117434568703175\r\nStep 725, loss: 0.00909934937953949\r\nStep 726, loss: 0.009500808082520962\r\nStep 727, loss: 0.009050896391272545\r\nStep 728, loss: 0.008389940485358238\r\nStep 729, loss: 0.008761260658502579\r\nStep 730, loss: 0.009779571555554867\r\nStep 731, loss: 0.008892599493265152\r\nStep 732, loss: 0.007869680412113667\r\nStep 733, loss: 0.006751553621143103\r\nStep 734, loss: 0.0077990517020225525\r\nStep 735, loss: 0.007715032435953617\r\nStep 736, loss: 0.008044131100177765\r\nStep 737, loss: 0.007676326669752598\r\nStep 738, loss: 0.007771224714815617\r\nStep 739, loss: 0.008221903815865517\r\nStep 740, loss: 0.00855640135705471\r\nStep 741, loss: 0.007962626405060291\r\nStep 742, loss: 0.009416566230356693\r\nStep 743, loss: 0.008063580840826035\r\nStep 855, loss: 0.00751782301813364\r\nStep 856, loss: 0.0062983338721096516\r\nStep 857, loss: 0.008031136356294155\r\nStep 858, loss: 0.008807212114334106\r\nStep 859, loss: 0.010153653100132942\r\nStep 860, loss: 0.008077512495219707\r\nStep 861, loss: 0.007962261326611042\r\nStep 862, loss: 0.008267686702311039\r\nStep 863, loss: 0.008352581411600113\r\nStep 864, loss: 0.006564215756952763\r\nStep 865, loss: 0.008987675420939922\r\nStep 866, loss: 0.009892838075757027\r\nStep 867, loss: 0.009020679630339146\r\nStep 868, loss: 0.008473207242786884\r\nStep 869, loss: 0.00642415601760149\r\nStep 870, loss: 0.009132720530033112\r\nStep 871, loss: 0.009013339877128601\r\nStep 872, loss: 0.009076643735170364\r\nStep 873, loss: 0.009696201421320438\r\nStep 874, loss: 0.009372074156999588\r\nStep 875, loss: 0.008545946329832077\r\nStep 876, loss: 0.00843528937548399\r\nStep 877, loss: 0.007416864857077599\r\nStep 878, loss: 0.008183412253856659\r\nStep 879, loss: 0.009929087944328785\r\nStep 880, loss: 0.009046860970556736\r\nStep 881, loss: 0.00933048315346241\r\nStep 801, loss: 
0.009174657985568047\r\nStep 802, loss: 0.008975048549473286\r\nStep 803, loss: 0.0065934909507632256\r\nStep 804, loss: 0.009461808949708939\r\nStep 805, loss: 0.008408617228269577\r\nStep 806, loss: 0.00989458430558443\r\nStep 807, loss: 0.009049440734088421\r\nStep 808, loss: 0.00848913099616766\r\nStep 809, loss: 0.008840562775731087\r\nStep 810, loss: 0.009033815935254097\r\nStep 811, loss: 0.009323631413280964\r\nStep 812, loss: 0.008919804356992245\r\nStep 813, loss: 0.00836434680968523\r\nStep 814, loss: 0.00745892571285367\r\nStep 815, loss: 0.006103780120611191\r\nStep 816, loss: 0.008175531402230263\r\nStep 817, loss: 0.007778630126267672\r\nStep 818, loss: 0.006912181619554758\r\nStep 819, loss: 0.008521071635186672\r\nStep 820, loss: 0.00898344162851572\r\nStep 821, loss: 0.008880916982889175\r\nStep 822, loss: 0.009399177506566048\r\nStep 823, loss: 0.009236321784555912\r\nStep 824, loss: 0.008534150198101997\r\nStep 825, loss: 0.007945604622364044\r\nStep 826, loss: 0.008585712872445583\r\nStep 827, loss: 0.009360426105558872\r\nStep 771, loss: 0.008597593754529953\r\nStep 772, loss: 0.008593601174652576\r\nStep 773, loss: 0.008874927647411823\r\nStep 744, loss: 0.007259129546582699\r\nStep 745, loss: 0.008020087145268917\r\nStep 746, loss: 0.008935447782278061\r\nStep 747, loss: 0.008065402507781982\r\nStep 748, loss: 0.007918530143797398\r\nStep 749, loss: 0.0076026576571166515\r\nStep 750, loss: 0.008453233167529106\r\nStep 751, loss: 0.008083358407020569\r\nStep 752, loss: 0.00885181687772274\r\nStep 753, loss: 0.008323529735207558\r\nStep 754, loss: 0.010221350006759167\r\nStep 755, loss: 0.008537876419723034\r\nStep 756, loss: 0.0075461408123373985\r\nStep 757, loss: 0.00968962348997593\r\nStep 758, loss: 0.008657812140882015\r\nStep 759, loss: 0.00908595696091652\r\nStep 760, loss: 0.007928945124149323\r\nStep 761, loss: 0.008232498541474342\r\nStep 762, loss: 0.0081816790625453\r\nStep 763, loss: 0.00983133539557457\r\nStep 764, loss: 0.007727086078375578\r\nStep 765, loss: 0.007970851846039295\r\nStep 766, loss: 0.008272339589893818\r\nStep 767, loss: 0.008935518562793732\r\nStep 768, loss: 0.007538327947258949\r\nStep 769, loss: 0.009562896564602852\r\nStep 770, loss: 0.006767282262444496\r\nStep 882, loss: 0.008539333008229733\r\nStep 883, loss: 0.007746302522718906\r\nStep 884, loss: 0.008637496270239353\r\nStep 828, loss: 0.008506404235959053\r\nStep 829, loss: 0.007260220590978861\r\nStep 830, loss: 0.008295166306197643\r\nStep 831, loss: 0.007627023849636316\r\nStep 832, loss: 0.008010821416974068\r\nStep 833, loss: 0.009520187973976135\r\nStep 834, loss: 0.008509846404194832\r\nStep 835, loss: 0.008356443606317043\r\nStep 836, loss: 0.007537637837231159\r\nStep 837, loss: 0.007802378851920366\r\nStep 838, loss: 0.007528160233050585\r\nStep 839, loss: 0.00884687714278698\r\nStep 840, loss: 0.00838764663785696\r\nStep 841, loss: 0.007858619093894958\r\nStep 842, loss: 0.007163564674556255\r\nStep 843, loss: 0.008134142495691776\r\nStep 844, loss: 0.008857795968651772\r\nStep 845, loss: 0.007322083692997694\r\nStep 846, loss: 0.007358285132795572\r\nStep 847, loss: 0.007790219504386187\r\nStep 848, loss: 0.009197061881422997\r\nStep 849, loss: 0.00793133582919836\r\nStep 850, loss: 0.0078589404001832\r\nStep 851, loss: 0.007828818634152412\r\nStep 852, loss: 0.008382393978536129\r\nStep 853, loss: 0.00741743016988039\r\nStep 854, loss: 0.009868623688817024\r\nStep 663, loss: 0.008188503794372082\r\nStep 664, loss: 0.009675285778939724\r\nStep 665, loss: 
0.007473045960068703\r\nStep 666, loss: 0.008836650289595127\r\nStep 667, loss: 0.0070752548053860664\r\nStep 668, loss: 0.007328846491873264\r\nStep 669, loss: 0.008448733016848564\r\nStep 670, loss: 0.008510428480803967\r\nStep 671, loss: 0.009033353067934513\r\nStep 672, loss: 0.009275879710912704\r\nStep 673, loss: 0.009029111824929714\r\nStep 674, loss: 0.007315825670957565\r\nStep 675, loss: 0.010116366669535637\r\nStep 676, loss: 0.009740644134581089\r\nStep 677, loss: 0.009368915110826492\r\nStep 678, loss: 0.008023670874536037\r\nStep 679, loss: 0.008052327670156956\r\nStep 680, loss: 0.008584907278418541\r\nStep 681, loss: 0.009915526024997234\r\nStep 682, loss: 0.0098458556458354\r\nStep 683, loss: 0.007350854575634003\r\nStep 684, loss: 0.009085259400308132\r\nStep 685, loss: 0.008175426162779331\r\nStep 686, loss: 0.008942642249166965\r\nStep 687, loss: 0.008495903573930264\r\nStep 688, loss: 0.009691085666418076\r\nStep 689, loss: 0.00756419450044632\r\nStep 771, loss: 0.008597593754529953\r\nStep 772, loss: 0.008593601174652576\r\nStep 773, loss: 0.008874927647411823\r\nStep 663, loss: 0.008188503794372082\r\nStep 664, loss: 0.009675285778939724\r\nStep 665, loss: 0.007473045960068703\r\nStep 666, loss: 0.008836650289595127\r\nStep 667, loss: 0.0070752548053860664\r\nStep 668, loss: 0.007328846491873264\r\nStep 669, loss: 0.008448733016848564\r\nStep 670, loss: 0.008510428480803967\r\nStep 671, loss: 0.009033353067934513\r\nStep 672, loss: 0.009275879710912704\r\nStep 673, loss: 0.009029111824929714\r\nStep 674, loss: 0.007315825670957565\r\nStep 675, loss: 0.010116366669535637\r\nStep 676, loss: 0.009740644134581089\r\nStep 677, loss: 0.009368915110826492\r\nStep 678, loss: 0.008023670874536037\r\nStep 679, loss: 0.008052327670156956\r\nStep 680, loss: 0.008584907278418541\r\nStep 681, loss: 0.009915526024997234\r\nStep 682, loss: 0.0098458556458354\r\nStep 683, loss: 0.007350854575634003\r\nStep 684, loss: 0.009085259400308132\r\nStep 685, loss: 0.008175426162779331\r\nStep 686, loss: 0.008942642249166965\r\nStep 687, loss: 0.008495903573930264\r\nStep 688, loss: 0.009691085666418076\r\nStep 689, loss: 0.00756419450044632\r\nStep 855, loss: 0.00751782301813364\r\nStep 856, loss: 0.0062983338721096516\r\nStep 857, loss: 0.008031136356294155\r\nStep 858, loss: 0.008807212114334106\r\nStep 859, loss: 0.010153653100132942\r\nStep 860, loss: 0.008077512495219707\r\nStep 861, loss: 0.007962261326611042\r\nStep 862, loss: 0.008267686702311039\r\nStep 863, loss: 0.008352581411600113\r\nStep 864, loss: 0.006564215756952763\r\nStep 865, loss: 0.008987675420939922\r\nStep 866, loss: 0.009892838075757027\r\nStep 867, loss: 0.009020679630339146\r\nStep 868, loss: 0.008473207242786884\r\nStep 869, loss: 0.00642415601760149\r\nStep 870, loss: 0.009132720530033112\r\nStep 871, loss: 0.009013339877128601\r\nStep 872, loss: 0.009076643735170364\r\nStep 873, loss: 0.009696201421320438\r\nStep 874, loss: 0.009372074156999588\r\nStep 875, loss: 0.008545946329832077\r\nStep 876, loss: 0.00843528937548399\r\nStep 877, loss: 0.007416864857077599\r\nStep 878, loss: 0.008183412253856659\r\nStep 879, loss: 0.009929087944328785\r\nStep 880, loss: 0.009046860970556736\r\nStep 881, loss: 0.00933048315346241\r\nStep 690, loss: 0.008400192484259605\r\nStep 691, loss: 0.007621959783136845\r\nStep 692, loss: 0.008800345472991467\r\nStep 693, loss: 0.007274152245372534\r\nStep 694, loss: 0.008735042996704578\r\nStep 695, loss: 0.009365207515656948\r\nStep 696, loss: 0.007852311246097088\r\nStep 697, loss: 
0.00840317364782095\r\nStep 698, loss: 0.008068027906119823\r\nStep 699, loss: 0.009760628454387188\r\nStep 700, loss: 0.008621647953987122\r\nStep 701, loss: 0.008357041515409946\r\nStep 702, loss: 0.007615256123244762\r\nStep 703, loss: 0.007735436782240868\r\nStep 704, loss: 0.008228760212659836\r\nStep 705, loss: 0.009480788372457027\r\nStep 706, loss: 0.009712466970086098\r\nStep 707, loss: 0.006602727808058262\r\nStep 708, loss: 0.009339170530438423\r\nStep 709, loss: 0.00838259607553482\r\nStep 710, loss: 0.007861709222197533\r\nStep 711, loss: 0.008499818854033947\r\nStep 712, loss: 0.0088029894977808\r\nStep 713, loss: 0.007529713213443756\r\nStep 714, loss: 0.008756793104112148\r\nStep 715, loss: 0.007938941940665245\r\nStep 716, loss: 0.008601955138146877\r\nStep 774, loss: 0.008258556947112083\r\nStep 775, loss: 0.007877164520323277\r\nStep 776, loss: 0.009185128845274448\r\nStep 777, loss: 0.007426017429679632\r\nStep 778, loss: 0.008627464063465595\r\nStep 779, loss: 0.009369480423629284\r\nStep 780, loss: 0.007784644607454538\r\nStep 781, loss: 0.008786655031144619\r\nStep 782, loss: 0.008275906555354595\r\nStep 783, loss: 0.0072689359076321125\r\nStep 784, loss: 0.007263512816280127\r\nStep 785, loss: 0.007613175082951784\r\nStep 786, loss: 0.008340373635292053\r\nStep 787, loss: 0.00699112331494689\r\nStep 788, loss: 0.008804126642644405\r\nStep 789, loss: 0.009185505099594593\r\nStep 790, loss: 0.009037316776812077\r\nStep 791, loss: 0.008365091867744923\r\nStep 792, loss: 0.009296916425228119\r\nStep 793, loss: 0.009148641489446163\r\nStep 794, loss: 0.007881499826908112\r\nStep 795, loss: 0.008150001987814903\r\nStep 796, loss: 0.0077763888984918594\r\nStep 797, loss: 0.007661592680960894\r\nStep 798, loss: 0.009461347945034504\r\nStep 799, loss: 0.008547618985176086\r\nStep 800, loss: 0.007417353335767984\r\nStep 690, loss: 0.008400192484259605\r\nStep 691, loss: 0.007621959783136845\r\nStep 692, loss: 0.008800345472991467\r\nStep 693, loss: 0.007274152245372534\r\nStep 694, loss: 0.008735042996704578\r\nStep 695, loss: 0.009365207515656948\r\nStep 696, loss: 0.007852311246097088\r\nStep 697, loss: 0.00840317364782095\r\nStep 698, loss: 0.008068027906119823\r\nStep 699, loss: 0.009760628454387188\r\nStep 700, loss: 0.008621647953987122\r\nStep 701, loss: 0.008357041515409946\r\nStep 702, loss: 0.007615256123244762\r\nStep 703, loss: 0.007735436782240868\r\nStep 704, loss: 0.008228760212659836\r\nStep 705, loss: 0.009480788372457027\r\nStep 706, loss: 0.009712466970086098\r\nStep 707, loss: 0.006602727808058262\r\nStep 708, loss: 0.009339170530438423\r\nStep 709, loss: 0.00838259607553482\r\nStep 710, loss: 0.007861709222197533\r\nStep 711, loss: 0.008499818854033947\r\nStep 712, loss: 0.0088029894977808\r\nStep 713, loss: 0.007529713213443756\r\nStep 714, loss: 0.008756793104112148\r\nStep 715, loss: 0.007938941940665245\r\nStep 716, loss: 0.008601955138146877\r\nStep 882, loss: 0.008539333008229733\r\nStep 883, loss: 0.007746302522718906\r\nStep 884, loss: 0.008637496270239353\r\nStep 717, loss: 0.008059379644691944\r\nStep 718, loss: 0.009258144535124302\r\nStep 719, loss: 0.008124497719109058\r\nStep 720, loss: 0.00760618643835187\r\nStep 721, loss: 0.008031727746129036\r\nStep 722, loss: 0.007869260385632515\r\nStep 723, loss: 0.00932231917977333\r\nStep 724, loss: 0.008117434568703175\r\nStep 725, loss: 0.00909934937953949\r\nStep 726, loss: 0.009500808082520962\r\nStep 727, loss: 0.009050896391272545\r\nStep 728, loss: 0.008389940485358238\r\nStep 729, loss: 
0.008761260658502579\r\nStep 730, loss: 0.009779571555554867\r\nStep 731, loss: 0.008892599493265152\r\nStep 732, loss: 0.007869680412113667\r\nStep 733, loss: 0.006751553621143103\r\nStep 734, loss: 0.0077990517020225525\r\nStep 735, loss: 0.007715032435953617\r\nStep 736, loss: 0.008044131100177765\r\nStep 737, loss: 0.007676326669752598\r\nStep 738, loss: 0.007771224714815617\r\nStep 739, loss: 0.008221903815865517\r\nStep 740, loss: 0.00855640135705471\r\nStep 741, loss: 0.007962626405060291\r\nStep 742, loss: 0.009416566230356693\r\nStep 743, loss: 0.008063580840826035\r\nStep 801, loss: 0.009174657985568047\r\nStep 802, loss: 0.008975048549473286\r\nStep 803, loss: 0.0065934909507632256\r\nStep 804, loss: 0.009461808949708939\r\nStep 805, loss: 0.008408617228269577\r\nStep 806, loss: 0.00989458430558443\r\nStep 807, loss: 0.009049440734088421\r\nStep 808, loss: 0.00848913099616766\r\nStep 809, loss: 0.008840562775731087\r\nStep 810, loss: 0.009033815935254097\r\nStep 811, loss: 0.009323631413280964\r\nStep 812, loss: 0.008919804356992245\r\nStep 813, loss: 0.00836434680968523\r\nStep 814, loss: 0.00745892571285367\r\nStep 815, loss: 0.006103780120611191\r\nStep 816, loss: 0.008175531402230263\r\nStep 817, loss: 0.007778630126267672\r\nStep 818, loss: 0.006912181619554758\r\nStep 819, loss: 0.008521071635186672\r\nStep 820, loss: 0.00898344162851572\r\nStep 821, loss: 0.008880916982889175\r\nStep 822, loss: 0.009399177506566048\r\nStep 823, loss: 0.009236321784555912\r\nStep 824, loss: 0.008534150198101997\r\nStep 825, loss: 0.007945604622364044\r\nStep 826, loss: 0.008585712872445583\r\nStep 827, loss: 0.009360426105558872\r\nStep 717, loss: 0.008059379644691944\r\nStep 718, loss: 0.009258144535124302\r\nStep 719, loss: 0.008124497719109058\r\nStep 720, loss: 0.00760618643835187\r\nStep 721, loss: 0.008031727746129036\r\nStep 722, loss: 0.007869260385632515\r\nStep 723, loss: 0.00932231917977333\r\nStep 724, loss: 0.008117434568703175\r\nStep 725, loss: 0.00909934937953949\r\nStep 726, loss: 0.009500808082520962\r\nStep 727, loss: 0.009050896391272545\r\nStep 728, loss: 0.008389940485358238\r\nStep 729, loss: 0.008761260658502579\r\nStep 730, loss: 0.009779571555554867\r\nStep 731, loss: 0.008892599493265152\r\nStep 732, loss: 0.007869680412113667\r\nStep 733, loss: 0.006751553621143103\r\nStep 734, loss: 0.0077990517020225525\r\nStep 735, loss: 0.007715032435953617\r\nStep 736, loss: 0.008044131100177765\r\nStep 737, loss: 0.007676326669752598\r\nStep 738, loss: 0.007771224714815617\r\nStep 739, loss: 0.008221903815865517\r\nStep 740, loss: 0.00855640135705471\r\nStep 741, loss: 0.007962626405060291\r\nStep 742, loss: 0.009416566230356693\r\nStep 743, loss: 0.008063580840826035\r\nStep 774, loss: 0.008258556947112083\r\nStep 775, loss: 0.007877164520323277\r\nStep 776, loss: 0.009185128845274448\r\nStep 777, loss: 0.007426017429679632\r\nStep 778, loss: 0.008627464063465595\r\nStep 779, loss: 0.009369480423629284\r\nStep 780, loss: 0.007784644607454538\r\nStep 781, loss: 0.008786655031144619\r\nStep 782, loss: 0.008275906555354595\r\nStep 783, loss: 0.0072689359076321125\r\nStep 784, loss: 0.007263512816280127\r\nStep 785, loss: 0.007613175082951784\r\nStep 786, loss: 0.008340373635292053\r\nStep 787, loss: 0.00699112331494689\r\nStep 788, loss: 0.008804126642644405\r\nStep 789, loss: 0.009185505099594593\r\nStep 790, loss: 0.009037316776812077\r\nStep 791, loss: 0.008365091867744923\r\nStep 792, loss: 0.009296916425228119\r\nStep 793, loss: 0.009148641489446163\r\nStep 794, loss: 
0.007881499826908112\r\nStep 795, loss: 0.008150001987814903\r\nStep 796, loss: 0.0077763888984918594\r\nStep 797, loss: 0.007661592680960894\r\nStep 798, loss: 0.009461347945034504\r\nStep 799, loss: 0.008547618985176086\r\nStep 800, loss: 0.007417353335767984\r\nStep 744, loss: 0.007259129546582699\r\nStep 745, loss: 0.008020087145268917\r\nStep 746, loss: 0.008935447782278061\r\nStep 747, loss: 0.008065402507781982\r\nStep 748, loss: 0.007918530143797398\r\nStep 749, loss: 0.0076026576571166515\r\nStep 750, loss: 0.008453233167529106\r\nStep 751, loss: 0.008083358407020569\r\nStep 752, loss: 0.00885181687772274\r\nStep 753, loss: 0.008323529735207558\r\nStep 754, loss: 0.010221350006759167\r\nStep 755, loss: 0.008537876419723034\r\nStep 756, loss: 0.0075461408123373985\r\nStep 757, loss: 0.00968962348997593\r\nStep 758, loss: 0.008657812140882015\r\nStep 759, loss: 0.00908595696091652\r\nStep 760, loss: 0.007928945124149323\r\nStep 761, loss: 0.008232498541474342\r\nStep 762, loss: 0.0081816790625453\r\nStep 763, loss: 0.00983133539557457\r\nStep 764, loss: 0.007727086078375578\r\nStep 765, loss: 0.007970851846039295\r\nStep 766, loss: 0.008272339589893818\r\nStep 767, loss: 0.008935518562793732\r\nStep 768, loss: 0.007538327947258949\r\nStep 769, loss: 0.009562896564602852\r\nStep 770, loss: 0.006767282262444496\r\nStep 828, loss: 0.008506404235959053\r\nStep 829, loss: 0.007260220590978861\r\nStep 830, loss: 0.008295166306197643\r\nStep 831, loss: 0.007627023849636316\r\nStep 832, loss: 0.008010821416974068\r\nStep 833, loss: 0.009520187973976135\r\nStep 834, loss: 0.008509846404194832\r\nStep 835, loss: 0.008356443606317043\r\nStep 836, loss: 0.007537637837231159\r\nStep 837, loss: 0.007802378851920366\r\nStep 838, loss: 0.007528160233050585\r\nStep 839, loss: 0.00884687714278698\r\nStep 840, loss: 0.00838764663785696\r\nStep 841, loss: 0.007858619093894958\r\nStep 842, loss: 0.007163564674556255\r\nStep 843, loss: 0.008134142495691776\r\nStep 844, loss: 0.008857795968651772\r\nStep 845, loss: 0.007322083692997694\r\nStep 846, loss: 0.007358285132795572\r\nStep 847, loss: 0.007790219504386187\r\nStep 848, loss: 0.009197061881422997\r\nStep 849, loss: 0.00793133582919836\r\nStep 850, loss: 0.0078589404001832\r\nStep 851, loss: 0.007828818634152412\r\nStep 852, loss: 0.008382393978536129\r\nStep 853, loss: 0.00741743016988039\r\nStep 854, loss: 0.009868623688817024\r\nStep 744, loss: 0.007259129546582699\r\nStep 745, loss: 0.008020087145268917\r\nStep 746, loss: 0.008935447782278061\r\nStep 747, loss: 0.008065402507781982\r\nStep 748, loss: 0.007918530143797398\r\nStep 749, loss: 0.0076026576571166515\r\nStep 750, loss: 0.008453233167529106\r\n",,terminal_output +571,1960355,"TERMINAL",0,0,"Step 751, loss: 0.008083358407020569\r\nStep 752, loss: 0.00885181687772274\r\nStep 753, loss: 0.008323529735207558\r\nStep 754, loss: 0.010221350006759167\r\nStep 755, loss: 0.008537876419723034\r\nStep 756, loss: 0.0075461408123373985\r\nStep 757, loss: 0.00968962348997593\r\nStep 758, loss: 0.008657812140882015\r\nStep 759, loss: 0.00908595696091652\r\nStep 760, loss: 0.007928945124149323\r\nStep 761, loss: 0.008232498541474342\r\nStep 762, loss: 0.0081816790625453\r\nStep 763, loss: 0.00983133539557457\r\nStep 764, loss: 0.007727086078375578\r\nStep 765, loss: 0.007970851846039295\r\nStep 766, loss: 0.008272339589893818\r\nStep 767, loss: 0.008935518562793732\r\nStep 768, loss: 0.007538327947258949\r\nStep 769, loss: 0.009562896564602852\r\nStep 770, loss: 0.006767282262444496\r\nStep 801, loss: 
0.009174657985568047\r\nStep 802, loss: 0.008975048549473286\r\nStep 803, loss: 0.0065934909507632256\r\nStep 804, loss: 0.009461808949708939\r\nStep 805, loss: 0.008408617228269577\r\nStep 806, loss: 0.00989458430558443\r\nStep 807, loss: 0.009049440734088421\r\nStep 808, loss: 0.00848913099616766\r\nStep 809, loss: 0.008840562775731087\r\nStep 810, loss: 0.009033815935254097\r\nStep 811, loss: 0.009323631413280964\r\nStep 812, loss: 0.008919804356992245\r\nStep 813, loss: 0.00836434680968523\r\nStep 814, loss: 0.00745892571285367\r\nStep 815, loss: 0.006103780120611191\r\nStep 816, loss: 0.008175531402230263\r\nStep 817, loss: 0.007778630126267672\r\nStep 818, loss: 0.006912181619554758\r\nStep 819, loss: 0.008521071635186672\r\nStep 820, loss: 0.00898344162851572\r\nStep 821, loss: 0.008880916982889175\r\nStep 822, loss: 0.009399177506566048\r\nStep 823, loss: 0.009236321784555912\r\nStep 824, loss: 0.008534150198101997\r\nStep 825, loss: 0.007945604622364044\r\nStep 826, loss: 0.008585712872445583\r\nStep 827, loss: 0.009360426105558872\r\nStep 771, loss: 0.008597593754529953\r\nStep 772, loss: 0.008593601174652576\r\nStep 773, loss: 0.008874927647411823\r\nStep 855, loss: 0.00751782301813364\r\nStep 856, loss: 0.0062983338721096516\r\nStep 857, loss: 0.008031136356294155\r\nStep 858, loss: 0.008807212114334106\r\nStep 859, loss: 0.010153653100132942\r\nStep 860, loss: 0.008077512495219707\r\nStep 861, loss: 0.007962261326611042\r\nStep 862, loss: 0.008267686702311039\r\nStep 863, loss: 0.008352581411600113\r\nStep 864, loss: 0.006564215756952763\r\nStep 865, loss: 0.008987675420939922\r\nStep 866, loss: 0.009892838075757027\r\nStep 867, loss: 0.009020679630339146\r\nStep 868, loss: 0.008473207242786884\r\nStep 869, loss: 0.00642415601760149\r\nStep 870, loss: 0.009132720530033112\r\nStep 871, loss: 0.009013339877128601\r\nStep 872, loss: 0.009076643735170364\r\nStep 873, loss: 0.009696201421320438\r\nStep 874, loss: 0.009372074156999588\r\nStep 875, loss: 0.008545946329832077\r\nStep 876, loss: 0.00843528937548399\r\nStep 877, loss: 0.007416864857077599\r\nStep 878, loss: 0.008183412253856659\r\nStep 879, loss: 0.009929087944328785\r\nStep 880, loss: 0.009046860970556736\r\nStep 881, loss: 0.00933048315346241\r\nStep 771, loss: 0.008597593754529953\r\nStep 772, loss: 0.008593601174652576\r\nStep 773, loss: 0.008874927647411823\r\nStep 828, loss: 0.008506404235959053\r\nStep 829, loss: 0.007260220590978861\r\nStep 830, loss: 0.008295166306197643\r\nStep 831, loss: 0.007627023849636316\r\nStep 832, loss: 0.008010821416974068\r\nStep 833, loss: 0.009520187973976135\r\nStep 834, loss: 0.008509846404194832\r\nStep 835, loss: 0.008356443606317043\r\nStep 836, loss: 0.007537637837231159\r\nStep 837, loss: 0.007802378851920366\r\nStep 838, loss: 0.007528160233050585\r\nStep 839, loss: 0.00884687714278698\r\nStep 840, loss: 0.00838764663785696\r\nStep 841, loss: 0.007858619093894958\r\nStep 842, loss: 0.007163564674556255\r\nStep 843, loss: 0.008134142495691776\r\nStep 844, loss: 0.008857795968651772\r\nStep 845, loss: 0.007322083692997694\r\nStep 846, loss: 0.007358285132795572\r\nStep 847, loss: 0.007790219504386187\r\nStep 848, loss: 0.009197061881422997\r\nStep 849, loss: 0.00793133582919836\r\nStep 850, loss: 0.0078589404001832\r\nStep 851, loss: 0.007828818634152412\r\nStep 852, loss: 0.008382393978536129\r\nStep 853, loss: 0.00741743016988039\r\nStep 854, loss: 0.009868623688817024\r\nStep 774, loss: 0.008258556947112083\r\nStep 775, loss: 0.007877164520323277\r\nStep 776, loss: 
0.009185128845274448\r\nStep 777, loss: 0.007426017429679632\r\nStep 778, loss: 0.008627464063465595\r\nStep 779, loss: 0.009369480423629284\r\nStep 780, loss: 0.007784644607454538\r\nStep 781, loss: 0.008786655031144619\r\nStep 782, loss: 0.008275906555354595\r\nStep 783, loss: 0.0072689359076321125\r\nStep 784, loss: 0.007263512816280127\r\nStep 785, loss: 0.007613175082951784\r\nStep 786, loss: 0.008340373635292053\r\nStep 787, loss: 0.00699112331494689\r\nStep 788, loss: 0.008804126642644405\r\nStep 789, loss: 0.009185505099594593\r\nStep 790, loss: 0.009037316776812077\r\nStep 791, loss: 0.008365091867744923\r\nStep 792, loss: 0.009296916425228119\r\nStep 793, loss: 0.009148641489446163\r\nStep 794, loss: 0.007881499826908112\r\nStep 795, loss: 0.008150001987814903\r\nStep 796, loss: 0.0077763888984918594\r\nStep 797, loss: 0.007661592680960894\r\nStep 798, loss: 0.009461347945034504\r\nStep 799, loss: 0.008547618985176086\r\nStep 800, loss: 0.007417353335767984\r\nStep 882, loss: 0.008539333008229733\r\nStep 883, loss: 0.007746302522718906\r\nStep 884, loss: 0.008637496270239353\r\nStep 774, loss: 0.008258556947112083\r\nStep 775, loss: 0.007877164520323277\r\nStep 776, loss: 0.009185128845274448\r\nStep 777, loss: 0.007426017429679632\r\nStep 778, loss: 0.008627464063465595\r\nStep 779, loss: 0.009369480423629284\r\nStep 780, loss: 0.007784644607454538\r\nStep 781, loss: 0.008786655031144619\r\nStep 782, loss: 0.008275906555354595\r\nStep 783, loss: 0.0072689359076321125\r\nStep 784, loss: 0.007263512816280127\r\nStep 785, loss: 0.007613175082951784\r\nStep 786, loss: 0.008340373635292053\r\nStep 787, loss: 0.00699112331494689\r\nStep 788, loss: 0.008804126642644405\r\nStep 789, loss: 0.009185505099594593\r\nStep 790, loss: 0.009037316776812077\r\nStep 791, loss: 0.008365091867744923\r\nStep 792, loss: 0.009296916425228119\r\nStep 793, loss: 0.009148641489446163\r\nStep 794, loss: 0.007881499826908112\r\nStep 795, loss: 0.008150001987814903\r\nStep 796, loss: 0.0077763888984918594\r\nStep 797, loss: 0.007661592680960894\r\nStep 798, loss: 0.009461347945034504\r\nStep 799, loss: 0.008547618985176086\r\nStep 800, loss: 0.007417353335767984\r\nStep 855, loss: 0.00751782301813364\r\nStep 856, loss: 0.0062983338721096516\r\nStep 857, loss: 0.008031136356294155\r\nStep 858, loss: 0.008807212114334106\r\nStep 859, loss: 0.010153653100132942\r\nStep 860, loss: 0.008077512495219707\r\nStep 861, loss: 0.007962261326611042\r\nStep 862, loss: 0.008267686702311039\r\nStep 863, loss: 0.008352581411600113\r\nStep 864, loss: 0.006564215756952763\r\nStep 865, loss: 0.008987675420939922\r\nStep 866, loss: 0.009892838075757027\r\nStep 867, loss: 0.009020679630339146\r\nStep 868, loss: 0.008473207242786884\r\nStep 869, loss: 0.00642415601760149\r\nStep 870, loss: 0.009132720530033112\r\nStep 871, loss: 0.009013339877128601\r\nStep 872, loss: 0.009076643735170364\r\nStep 873, loss: 0.009696201421320438\r\nStep 874, loss: 0.009372074156999588\r\nStep 875, loss: 0.008545946329832077\r\nStep 876, loss: 0.00843528937548399\r\nStep 877, loss: 0.007416864857077599\r\nStep 878, loss: 0.008183412253856659\r\nStep 879, loss: 0.009929087944328785\r\nStep 880, loss: 0.009046860970556736\r\nStep 881, loss: 0.00933048315346241\r\nStep 801, loss: 0.009174657985568047\r\nStep 802, loss: 0.008975048549473286\r\nStep 803, loss: 0.0065934909507632256\r\nStep 804, loss: 0.009461808949708939\r\nStep 805, loss: 0.008408617228269577\r\nStep 806, loss: 0.00989458430558443\r\nStep 807, loss: 0.009049440734088421\r\nStep 808, 
loss: 0.00848913099616766\r\nStep 809, loss: 0.008840562775731087\r\nStep 810, loss: 0.009033815935254097\r\nStep 811, loss: 0.009323631413280964\r\nStep 812, loss: 0.008919804356992245\r\nStep 813, loss: 0.00836434680968523\r\nStep 814, loss: 0.00745892571285367\r\nStep 815, loss: 0.006103780120611191\r\nStep 816, loss: 0.008175531402230263\r\nStep 817, loss: 0.007778630126267672\r\nStep 818, loss: 0.006912181619554758\r\nStep 819, loss: 0.008521071635186672\r\nStep 820, loss: 0.00898344162851572\r\nStep 821, loss: 0.008880916982889175\r\nStep 822, loss: 0.009399177506566048\r\nStep 823, loss: 0.009236321784555912\r\nStep 824, loss: 0.008534150198101997\r\nStep 825, loss: 0.007945604622364044\r\nStep 826, loss: 0.008585712872445583\r\nStep 827, loss: 0.009360426105558872\r\nStep 774, loss: 0.008258556947112083\r\nStep 775, loss: 0.007877164520323277\r\nStep 776, loss: 0.009185128845274448\r\nStep 777, loss: 0.007426017429679632\r\nStep 778, loss: 0.008627464063465595\r\nStep 779, loss: 0.009369480423629284\r\nStep 780, loss: 0.007784644607454538\r\nStep 781, loss: 0.008786655031144619\r\nStep 782, loss: 0.008275906555354595\r\nStep 783, loss: 0.0072689359076321125\r\nStep 784, loss: 0.007263512816280127\r\nStep 785, loss: 0.007613175082951784\r\nStep 786, loss: 0.008340373635292053\r\nStep 787, loss: 0.00699112331494689\r\nStep 788, loss: 0.008804126642644405\r\nStep 789, loss: 0.009185505099594593\r\nStep 790, loss: 0.009037316776812077\r\nStep 791, loss: 0.008365091867744923\r\nStep 792, loss: 0.009296916425228119\r\nStep 793, loss: 0.009148641489446163\r\nStep 794, loss: 0.007881499826908112\r\nStep 795, loss: 0.008150001987814903\r\nStep 796, loss: 0.0077763888984918594\r\nStep 797, loss: 0.007661592680960894\r\nStep 798, loss: 0.009461347945034504\r\nStep 799, loss: 0.008547618985176086\r\nStep 800, loss: 0.007417353335767984\r\nStep 801, loss: 0.009174657985568047\r\nStep 802, loss: 0.008975048549473286\r\nStep 803, loss: 0.0065934909507632256\r\nStep 804, loss: 0.009461808949708939\r\nStep 805, loss: 0.008408617228269577\r\nStep 806, loss: 0.00989458430558443\r\nStep 807, loss: 0.009049440734088421\r\nStep 808, loss: 0.00848913099616766\r\nStep 809, loss: 0.008840562775731087\r\nStep 810, loss: 0.009033815935254097\r\nStep 811, loss: 0.009323631413280964\r\nStep 812, loss: 0.008919804356992245\r\nStep 813, loss: 0.00836434680968523\r\nStep 814, loss: 0.00745892571285367\r\nStep 815, loss: 0.006103780120611191\r\nStep 816, loss: 0.008175531402230263\r\nStep 817, loss: 0.007778630126267672\r\nStep 818, loss: 0.006912181619554758\r\nStep 819, loss: 0.008521071635186672\r\nStep 820, loss: 0.00898344162851572\r\nStep 821, loss: 0.008880916982889175\r\nStep 822, loss: 0.009399177506566048\r\nStep 823, loss: 0.009236321784555912\r\nStep 824, loss: 0.008534150198101997\r\nStep 825, loss: 0.007945604622364044\r\nStep 826, loss: 0.008585712872445583\r\nStep 827, loss: 0.009360426105558872\r\nStep 882, loss: 0.008539333008229733\r\nStep 883, loss: 0.007746302522718906\r\nStep 884, loss: 0.008637496270239353\r\nStep 828, loss: 0.008506404235959053\r\nStep 829, loss: 0.007260220590978861\r\nStep 830, loss: 0.008295166306197643\r\nStep 831, loss: 0.007627023849636316\r\nStep 832, loss: 0.008010821416974068\r\nStep 833, loss: 0.009520187973976135\r\nStep 834, loss: 0.008509846404194832\r\nStep 835, loss: 0.008356443606317043\r\nStep 836, loss: 0.007537637837231159\r\nStep 837, loss: 0.007802378851920366\r\nStep 838, loss: 0.007528160233050585\r\nStep 839, loss: 0.00884687714278698\r\nStep 840, 
loss: 0.00838764663785696\r\nStep 841, loss: 0.007858619093894958\r\nStep 842, loss: 0.007163564674556255\r\nStep 843, loss: 0.008134142495691776\r\nStep 844, loss: 0.008857795968651772\r\nStep 845, loss: 0.007322083692997694\r\nStep 846, loss: 0.007358285132795572\r\nStep 847, loss: 0.007790219504386187\r\nStep 848, loss: 0.009197061881422997\r\nStep 849, loss: 0.00793133582919836\r\nStep 850, loss: 0.0078589404001832\r\nStep 851, loss: 0.007828818634152412\r\nStep 852, loss: 0.008382393978536129\r\nStep 853, loss: 0.00741743016988039\r\nStep 854, loss: 0.009868623688817024\r\nStep 801, loss: 0.009174657985568047\r\nStep 802, loss: 0.008975048549473286\r\nStep 803, loss: 0.0065934909507632256\r\nStep 804, loss: 0.009461808949708939\r\nStep 805, loss: 0.008408617228269577\r\nStep 806, loss: 0.00989458430558443\r\nStep 807, loss: 0.009049440734088421\r\nStep 808, loss: 0.00848913099616766\r\nStep 809, loss: 0.008840562775731087\r\nStep 810, loss: 0.009033815935254097\r\nStep 811, loss: 0.009323631413280964\r\nStep 812, loss: 0.008919804356992245\r\nStep 813, loss: 0.00836434680968523\r\nStep 814, loss: 0.00745892571285367\r\nStep 815, loss: 0.006103780120611191\r\nStep 816, loss: 0.008175531402230263\r\nStep 817, loss: 0.007778630126267672\r\nStep 818, loss: 0.006912181619554758\r\nStep 819, loss: 0.008521071635186672\r\nStep 820, loss: 0.00898344162851572\r\nStep 821, loss: 0.008880916982889175\r\nStep 822, loss: 0.009399177506566048\r\nStep 823, loss: 0.009236321784555912\r\nStep 824, loss: 0.008534150198101997\r\nStep 825, loss: 0.007945604622364044\r\nStep 826, loss: 0.008585712872445583\r\nStep 827, loss: 0.009360426105558872\r\nStep 828, loss: 0.008506404235959053\r\nStep 829, loss: 0.007260220590978861\r\nStep 830, loss: 0.008295166306197643\r\nStep 831, loss: 0.007627023849636316\r\nStep 832, loss: 0.008010821416974068\r\nStep 833, loss: 0.009520187973976135\r\nStep 834, loss: 0.008509846404194832\r\nStep 835, loss: 0.008356443606317043\r\nStep 836, loss: 0.007537637837231159\r\nStep 837, loss: 0.007802378851920366\r\nStep 838, loss: 0.007528160233050585\r\nStep 839, loss: 0.00884687714278698\r\nStep 840, loss: 0.00838764663785696\r\nStep 841, loss: 0.007858619093894958\r\nStep 842, loss: 0.007163564674556255\r\nStep 843, loss: 0.008134142495691776\r\nStep 844, loss: 0.008857795968651772\r\nStep 845, loss: 0.007322083692997694\r\nStep 846, loss: 0.007358285132795572\r\nStep 847, loss: 0.007790219504386187\r\nStep 848, loss: 0.009197061881422997\r\nStep 849, loss: 0.00793133582919836\r\nStep 850, loss: 0.0078589404001832\r\nStep 851, loss: 0.007828818634152412\r\nStep 852, loss: 0.008382393978536129\r\nStep 853, loss: 0.00741743016988039\r\nStep 854, loss: 0.009868623688817024\r\nStep 855, loss: 0.00751782301813364\r\nStep 856, loss: 0.0062983338721096516\r\nStep 857, loss: 0.008031136356294155\r\nStep 858, loss: 0.008807212114334106\r\nStep 859, loss: 0.010153653100132942\r\nStep 860, loss: 0.008077512495219707\r\nStep 861, loss: 0.007962261326611042\r\nStep 862, loss: 0.008267686702311039\r\nStep 863, loss: 0.008352581411600113\r\nStep 864, loss: 0.006564215756952763\r\nStep 865, loss: 0.008987675420939922\r\nStep 866, loss: 0.009892838075757027\r\nStep 867, loss: 0.009020679630339146\r\nStep 868, loss: 0.008473207242786884\r\nStep 869, loss: 0.00642415601760149\r\nStep 870, loss: 0.009132720530033112\r\nStep 871, loss: 0.009013339877128601\r\nStep 872, loss: 0.009076643735170364\r\nStep 873, loss: 0.009696201421320438\r\nStep 874, loss: 0.009372074156999588\r\nStep 875, loss: 
0.008545946329832077\r\n[duplicated terminal scrollback elided: per-step training losses for Steps 663-884, re-rendered several times out of order; all values in the 0.006-0.011 range]\r\nStep 884, loss: 0.008637496270239353\r\n",,terminal_output
+572,1961028,"TERMINAL",0,0,"69941941",,terminal_output
+573,1961951,"TERMINAL",0,0,"74040525052",,terminal_output
+574,1962989,"TERMINAL",0,0,"81163163",,terminal_output
+575,1963930,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",708,0,"",plaintext,selection_mouse
+576,1964028,"TERMINAL",0,0,"92274274",,terminal_output
+577,1964664,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",654,0,"",plaintext,selection_mouse
+578,1965105,"TERMINAL",0,0,"203385385",,terminal_output
+579,1965492,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",653,0,"",plaintext,selection_command
+580,1966128,"TERMINAL",0,0,"14496496",,terminal_output
+581,1966432,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",642,66,"",plaintext,content
+582,1966781,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",643,0,"",plaintext,selection_command
+583,1966932,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",668,0,"",plaintext,selection_command
+584,1967073,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",695,0,"",plaintext,selection_command
+585,1967162,"TERMINAL",0,0,"2554075407",,terminal_output
+586,1967215,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",696,0,"",plaintext,selection_command
+587,1967481,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",754,0,"\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'",plaintext,content
+588,1967492,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",755,0,"",plaintext,selection_command
+589,1968184,"TERMINAL",0,0,"36618618",,terminal_output
+590,1969234,"TERMINAL",0,0,"47729729",,terminal_output
+591,1969359,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",755,66,"",plaintext,content
+592,1969361,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",696,0,"",plaintext,selection_command
+593,1969560,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",754,0,"\n",plaintext,content
+594,1969911,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",755,0,"CHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id",plaintext,content
+595,1970292,"TERMINAL",0,0,"5883508310",,terminal_output
+596,1970499,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",851,0,"",plaintext,selection_command
+597,1970648,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",753,0,"",plaintext,selection_command +598,1971007,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",696,59,"",plaintext,content +599,1971334,"TERMINAL",0,0,"69941941",,terminal_output +600,1972152,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",794,0,"",plaintext,selection_command +601,1972378,"TERMINAL",0,0,"750505220:0052",,terminal_output +602,1973198,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",600,0,"",plaintext,selection_mouse +603,1973355,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",595,14,"jafa_ws_shared",plaintext,selection_mouse +604,1973432,"TERMINAL",0,0,"81163163",,terminal_output +605,1974025,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",642,0,"",plaintext,selection_mouse +606,1974533,"TERMINAL",0,0,"92274274",,terminal_output +607,1974650,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",695,0,"",plaintext,selection_mouse +608,1975300,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",667,0,"",plaintext,selection_mouse +609,1975321,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",666,0,"",plaintext,selection_command +610,1975519,"TERMINAL",0,0,"303385385",,terminal_output +611,1976553,"TERMINAL",0,0,"14496496",,terminal_output +612,1976903,"TERMINAL",0,0,"watch",,terminal_focus +613,1977628,"TERMINAL",0,0,"2555075507",,terminal_output +614,1978723,"TERMINAL",0,0,"36618618",,terminal_output +615,1979694,"TERMINAL",0,0,"47729729",,terminal_output +616,1980771,"TERMINAL",0,0,"59944:019421",,terminal_output +617,1981897,"TERMINAL",0,0,"78:008:00521052",,terminal_output +618,1982123,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +619,2010611,"TERMINAL",0,0,"sbatch scripts_horeka/^C",,terminal_command +620,2010624,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;7be71f88-07f8-498b-82c5-01f117a78f25]633;C]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D",,terminal_output +621,2014900,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command +622,2026042,"scripts_horeka/modelsize_scaling/lam/tester.sh",0,0,"",shellscript,tab +623,2027149,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",0,0,"",plaintext,tab +624,2047389,"TERMINAL",0,0,"sync-runner",,terminal_command +625,2047446,"TERMINAL",0,0,"]633;E;2025-07-04 11:08:42 sync-runner;7be71f88-07f8-498b-82c5-01f117a78f25]633;Csending incremental file list\r\n",,terminal_output +626,2047953,"TERMINAL",0,0,"./\r\ngeneration_1751563425.1792467.gif\r\ngeneration_1751563622.9616451.gif\r\ngeneration_1751564417.9236383.gif\r\ngeneration_1751564628.3937318.gif\r\ngenie.py\r\n",,terminal_output +627,2048430,"TERMINAL",0,0,"scripts_horeka/modelsize_scaling/\r\nscripts_horeka/modelsize_scaling/lam/\r\nscripts_horeka/modelsize_scaling/lam/tester.sh\r\nscripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch\r\nscripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch\r\nutils/\r\nutils/dataloader.py\r\n\r\nsent 1,519,256 bytes received 272 bytes 1,013,018.67 bytes/sec\r\ntotal size is 85,286,172 speedup is 56.13\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +628,2053727,"TERMINAL",0,0,"runner",,terminal_command +629,2063547,"TERMINAL",0,0,"sbatch scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",,terminal_command +630,2063566,"TERMINAL",0,0,"]633;E;2025-07-04 11:08:58 sbatch 
scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch;7be71f88-07f8-498b-82c5-01f117a78f25]633;CSubmitted batch job 3317098\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D;0",,terminal_output +631,2065935,"TERMINAL",0,0,"queue",,terminal_command +632,2066011,"TERMINAL",0,0,"]633;E;2025-07-04 11:09:01 queue;7be71f88-07f8-498b-82c5-01f117a78f25]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1993.localdomain: Fri Jul 4 11:09:01 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3317098 accelerat train_la tum_cte0 PD\t0:00\t 2 (None)3316020 accelerat train_to tum_cte0 R 6:59:24 12 hkn[0407,0724-0726,0734-0735,0804-0805,0807,0809-0810,0815]3316026 accelerat train_to tum_cte0 R 6:59:24\t 4 hkn[0510-0511,0519,0707]3316022 accelerat train_to tum_cte0 R 7:03:19\t 2 hkn[0507,0520]3316019 accelerat train_to tum_cte0 R 8:45:26\t 8 hkn[0429,0436,0603-0604,0608,0612,0614,0820]3316016 accelerat train_to tum_cte0 R 10:21:34\t 1 hkn06343316924 accelerat interact tum_cte0 R33:19\t 1 hkn07333316923 accelerat interact tum_cte0 R33:46\t 4 hkn[0625-0628]",,terminal_output +633,2067080,"TERMINAL",0,0,"2552075207",,terminal_output +634,2067702,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D;0",,terminal_output +635,2094280,"TERMINAL",0,0,"Step 885, loss: 0.007278712000697851\r\nStep 886, loss: 0.008584282360970974\r\nStep 887, loss: 0.008348902687430382\r\nStep 888, loss: 0.007911911234259605\r\nStep 889, loss: 0.006851324811577797\r\nStep 890, loss: 0.010272619314491749\r\nStep 891, loss: 0.007954963482916355\r\nStep 892, loss: 0.011299554258584976\r\nStep 893, loss: 0.009172891266644001\r\nStep 894, loss: 0.008669286966323853\r\nStep 895, loss: 0.008719743229448795\r\nStep 896, loss: 0.007419318426400423\r\nStep 897, loss: 0.008977117948234081\r\nStep 898, loss: 0.009361022152006626\r\nStep 899, loss: 0.00724545493721962\r\nStep 900, loss: 0.009258134290575981\r\nStep 901, loss: 0.008727189153432846\r\nStep 902, loss: 0.008203626610338688\r\nStep 903, loss: 0.007765788584947586\r\nStep 904, loss: 0.0072352043353021145\r\nStep 905, loss: 0.007720112334936857\r\nStep 906, loss: 0.0073650795966386795\r\nStep 907, loss: 0.008562346920371056\r\nStep 908, loss: 0.006846950855106115\r\nStep 909, loss: 0.008362415246665478\r\nStep 910, loss: 0.008234109729528427\r\nStep 911, loss: 0.008880231529474258\r\nStep 912, loss: 0.00942692719399929\r\nStep 913, loss: 0.007297558709979057\r\nStep 914, loss: 0.007884763181209564\r\nStep 915, loss: 0.006811782717704773\r\nStep 916, loss: 0.009606652893126011\r\nStep 917, loss: 0.009326214902102947\r\nStep 918, loss: 0.007597845047712326\r\nStep 919, loss: 0.009222333319485188\r\nStep 920, loss: 0.007597240619361401\r\nStep 921, loss: 0.007224710658192635\r\nStep 922, loss: 0.007864835672080517\r\nStep 923, loss: 0.006759055890142918\r\nStep 924, loss: 0.007793813943862915\r\nStep 925, loss: 0.007841421291232109\r\nStep 926, loss: 0.00783326756209135\r\nStep 927, loss: 0.008056414313614368\r\nStep 928, loss: 0.008069598115980625\r\nStep 929, loss: 0.008222508244216442\r\nStep 930, loss: 0.008884362876415253\r\nStep 931, loss: 0.009337793104350567\r\nStep 932, loss: 0.008910568431019783\r\nStep 933, loss: 0.008316724561154842\r\nStep 934, loss: 0.00842820294201374\r\nStep 935, loss: 0.008287030272185802\r\nStep 936, loss: 0.007249796763062477\r\nStep 937, loss: 0.0075146788731217384\r\nStep 938, loss: 0.006155883427709341\r\nStep 939, loss: 0.008273442275822163\r\nStep 940, loss: 0.009627815335988998\r\nStep 941, loss: 
0.008756155148148537\r\n[duplicated terminal scrollback elided: per-step training losses for Steps 942-1103, interleaved with re-rendered repeats of Steps 885-1048; values in the 0.006-0.012 range; log resumes mid-entry]\r\nStep 978, 
loss: 0.008258609101176262\r\nStep 979, loss: 0.008424363099038601\r\nStep 980, loss: 0.007538204547017813\r\nStep 981, loss: 0.008322594687342644\r\nStep 982, loss: 0.0069837411865592\r\nStep 983, loss: 0.008910678327083588\r\nStep 984, loss: 0.008503968827426434\r\nStep 985, loss: 0.009345966391265392\r\nStep 986, loss: 0.009745476767420769\r\nStep 987, loss: 0.008763987571001053\r\nStep 988, loss: 0.008663860149681568\r\nStep 989, loss: 0.007921400479972363\r\nStep 990, loss: 0.0094941146671772\r\nStep 991, loss: 0.0088986000046134\r\nStep 992, loss: 0.007342140190303326\r\nStep 912, loss: 0.00942692719399929\r\nStep 913, loss: 0.007297558709979057\r\nStep 914, loss: 0.007884763181209564\r\nStep 915, loss: 0.006811782717704773\r\nStep 916, loss: 0.009606652893126011\r\nStep 917, loss: 0.009326214902102947\r\nStep 918, loss: 0.007597845047712326\r\nStep 919, loss: 0.009222333319485188\r\nStep 920, loss: 0.007597240619361401\r\nStep 921, loss: 0.007224710658192635\r\nStep 922, loss: 0.007864835672080517\r\nStep 923, loss: 0.006759055890142918\r\nStep 924, loss: 0.007793813943862915\r\nStep 925, loss: 0.007841421291232109\r\nStep 926, loss: 0.00783326756209135\r\nStep 927, loss: 0.008056414313614368\r\nStep 928, loss: 0.008069598115980625\r\nStep 929, loss: 0.008222508244216442\r\nStep 930, loss: 0.008884362876415253\r\nStep 931, loss: 0.009337793104350567\r\nStep 932, loss: 0.008910568431019783\r\nStep 933, loss: 0.008316724561154842\r\nStep 934, loss: 0.00842820294201374\r\nStep 935, loss: 0.008287030272185802\r\nStep 936, loss: 0.007249796763062477\r\nStep 937, loss: 0.0075146788731217384\r\nStep 938, loss: 0.006155883427709341\r\nStep 993, loss: 0.008915127255022526\r\nStep 994, loss: 0.010046386159956455\r\nStep 995, loss: 0.008601103909313679\r\nStep 939, loss: 0.008273442275822163\r\nStep 940, loss: 0.009627815335988998\r\nStep 941, loss: 0.008756155148148537\r\nStep 942, loss: 0.007437628228217363\r\nStep 943, loss: 0.00844540260732174\r\nStep 944, loss: 0.006830746773630381\r\nStep 945, loss: 0.008179925382137299\r\nStep 946, loss: 0.008205489255487919\r\nStep 947, loss: 0.01024852879345417\r\nStep 948, loss: 0.007618652656674385\r\nStep 949, loss: 0.009826977737247944\r\nStep 950, loss: 0.008983025327324867\r\nStep 951, loss: 0.007230666931718588\r\nStep 952, loss: 0.006803151685744524\r\nStep 953, loss: 0.009585150517523289\r\nStep 954, loss: 0.0075047011487185955\r\nStep 955, loss: 0.007671905215829611\r\nStep 956, loss: 0.008019600063562393\r\nStep 957, loss: 0.008153526112437248\r\nStep 958, loss: 0.007115584332495928\r\nStep 959, loss: 0.007860583253204823\r\nStep 960, loss: 0.009642302989959717\r\nStep 961, loss: 0.008715268224477768\r\nStep 962, loss: 0.007827111519873142\r\nStep 963, loss: 0.007779854815453291\r\nStep 964, loss: 0.00732398871332407\r\nStep 965, loss: 0.00885375402867794\r\nStep 996, loss: 0.008273789659142494\r\nStep 997, loss: 0.00829209852963686\r\nStep 998, loss: 0.007412363775074482\r\nStep 999, loss: 0.0076874177902936935\r\nStep 1000, loss: 0.007640828378498554\r\nStep 1001, loss: 0.008421523496508598\r\nStep 1002, loss: 0.008363532833755016\r\nStep 1003, loss: 0.008224919438362122\r\nStep 1004, loss: 0.007614170666784048\r\nStep 1005, loss: 0.00808942224830389\r\nStep 1006, loss: 0.00905570574104786\r\nStep 1007, loss: 0.007311396766453981\r\nStep 1008, loss: 0.006594126112759113\r\nStep 1009, loss: 0.006014592479914427\r\nStep 1010, loss: 0.007816429249942303\r\nStep 1011, loss: 0.007855435833334923\r\nStep 1012, loss: 0.006863860879093409\r\nStep 
1013, loss: 0.00776817137375474\r\nStep 1014, loss: 0.007321653421968222\r\nStep 1015, loss: 0.007539015728980303\r\nStep 1016, loss: 0.007984439842402935\r\nStep 1017, loss: 0.008064580149948597\r\nStep 1018, loss: 0.007594664115458727\r\nStep 1019, loss: 0.007612136192619801\r\nStep 1020, loss: 0.008153271861374378\r\nStep 1021, loss: 0.009022620506584644\r\nStep 1022, loss: 0.007351504173129797\r\nStep 966, loss: 0.009267655201256275\r\nStep 967, loss: 0.008406993001699448\r\nStep 968, loss: 0.008440010249614716\r\nStep 969, loss: 0.007938782684504986\r\nStep 970, loss: 0.009420925751328468\r\nStep 971, loss: 0.00840043555945158\r\nStep 972, loss: 0.007098720874637365\r\nStep 973, loss: 0.00831522885710001\r\nStep 974, loss: 0.00845961831510067\r\nStep 975, loss: 0.007800284307450056\r\nStep 976, loss: 0.008622569963335991\r\nStep 977, loss: 0.007540627382695675\r\nStep 978, loss: 0.008258609101176262\r\nStep 979, loss: 0.008424363099038601\r\nStep 980, loss: 0.007538204547017813\r\nStep 981, loss: 0.008322594687342644\r\nStep 982, loss: 0.0069837411865592\r\nStep 983, loss: 0.008910678327083588\r\nStep 984, loss: 0.008503968827426434\r\nStep 985, loss: 0.009345966391265392\r\nStep 986, loss: 0.009745476767420769\r\nStep 987, loss: 0.008763987571001053\r\nStep 988, loss: 0.008663860149681568\r\nStep 989, loss: 0.007921400479972363\r\nStep 990, loss: 0.0094941146671772\r\nStep 991, loss: 0.0088986000046134\r\nStep 992, loss: 0.007342140190303326\r\nStep 1023, loss: 0.008014761842787266\r\nStep 1024, loss: 0.008970646187663078\r\nStep 1025, loss: 0.0091971755027771\r\nStep 1026, loss: 0.010525759309530258\r\nStep 1027, loss: 0.007817784324288368\r\nStep 1028, loss: 0.0067655304446816444\r\nStep 1029, loss: 0.008068695664405823\r\nStep 1030, loss: 0.008297930471599102\r\nStep 1031, loss: 0.00823703221976757\r\nStep 1032, loss: 0.0059892176650464535\r\nStep 1033, loss: 0.007247478235512972\r\nStep 1034, loss: 0.007533142808824778\r\nStep 1035, loss: 0.008760763332247734\r\nStep 1036, loss: 0.006972344126552343\r\nStep 1037, loss: 0.009282128885388374\r\nStep 1038, loss: 0.008325469680130482\r\nStep 1039, loss: 0.006402534432709217\r\nStep 1040, loss: 0.007785910740494728\r\nStep 1041, loss: 0.008724870160222054\r\nStep 1042, loss: 0.00852934829890728\r\nStep 1043, loss: 0.007693928200751543\r\nStep 1044, loss: 0.008846480399370193\r\nStep 1045, loss: 0.007957519963383675\r\nStep 1046, loss: 0.0077178203500807285\r\nStep 1047, loss: 0.008153329603374004\r\nStep 1048, loss: 0.009286276064813137\r\nStep 993, loss: 0.008915127255022526\r\nStep 994, loss: 0.010046386159956455\r\nStep 995, loss: 0.008601103909313679\r\nStep 1049, loss: 0.008510326966643333\r\nStep 1050, loss: 0.00852749589830637\r\nStep 1051, loss: 0.007896002382040024\r\nStep 1052, loss: 0.006862045265734196\r\nStep 1053, loss: 0.008342348039150238\r\nStep 1054, loss: 0.0076972683891654015\r\nStep 1055, loss: 0.006982931401580572\r\nStep 1056, loss: 0.0074845412746071815\r\nStep 1057, loss: 0.008726799860596657\r\nStep 1058, loss: 0.007304470054805279\r\nStep 1059, loss: 0.008364593610167503\r\nStep 1060, loss: 0.00762686412781477\r\nStep 1061, loss: 0.008587880060076714\r\nStep 1062, loss: 0.007529120892286301\r\nStep 1063, loss: 0.008381657302379608\r\nStep 1064, loss: 0.009239381179213524\r\nStep 1065, loss: 0.007429488468915224\r\nStep 1066, loss: 0.007957086898386478\r\nStep 1067, loss: 0.007299589458853006\r\nStep 1068, loss: 0.007665595971047878\r\nStep 1069, loss: 0.00937664695084095\r\nStep 1070, loss: 
0.008951704949140549\r\nStep 1071, loss: 0.008769129402935505\r\nStep 1072, loss: 0.007632402237504721\r\nStep 1073, loss: 0.008348691277205944\r\nStep 1074, loss: 0.006696468684822321\r\nStep 996, loss: 0.008273789659142494\r\nStep 997, loss: 0.00829209852963686\r\nStep 998, loss: 0.007412363775074482\r\nStep 999, loss: 0.0076874177902936935\r\nStep 1000, loss: 0.007640828378498554\r\nStep 1001, loss: 0.008421523496508598\r\nStep 1002, loss: 0.008363532833755016\r\nStep 1003, loss: 0.008224919438362122\r\nStep 1004, loss: 0.007614170666784048\r\nStep 1005, loss: 0.00808942224830389\r\nStep 1006, loss: 0.00905570574104786\r\nStep 1007, loss: 0.007311396766453981\r\nStep 1008, loss: 0.006594126112759113\r\nStep 1009, loss: 0.006014592479914427\r\nStep 1010, loss: 0.007816429249942303\r\nStep 1011, loss: 0.007855435833334923\r\nStep 1012, loss: 0.006863860879093409\r\nStep 1013, loss: 0.00776817137375474\r\nStep 1014, loss: 0.007321653421968222\r\nStep 1015, loss: 0.007539015728980303\r\nStep 1016, loss: 0.007984439842402935\r\nStep 1017, loss: 0.008064580149948597\r\nStep 1018, loss: 0.007594664115458727\r\nStep 1019, loss: 0.007612136192619801\r\nStep 1020, loss: 0.008153271861374378\r\nStep 1021, loss: 0.009022620506584644\r\nStep 1022, loss: 0.007351504173129797\r\nStep 1075, loss: 0.008740746416151524\r\nStep 1076, loss: 0.006975969765335321\r\nStep 1077, loss: 0.007106286007910967\r\nStep 1078, loss: 0.008355746977031231\r\nStep 1079, loss: 0.008168650791049004\r\nStep 1080, loss: 0.008891562931239605\r\nStep 1081, loss: 0.008963626809418201\r\nStep 1082, loss: 0.008009166456758976\r\nStep 1083, loss: 0.008219603449106216\r\nStep 1084, loss: 0.008079380728304386\r\nStep 1085, loss: 0.007420422043651342\r\nStep 1086, loss: 0.00743855070322752\r\nStep 1087, loss: 0.008207092992961407\r\nStep 1088, loss: 0.006728052627295256\r\nStep 1089, loss: 0.007048305589705706\r\nStep 1090, loss: 0.00929422490298748\r\nStep 1091, loss: 0.008660108782351017\r\nStep 1092, loss: 0.007319759577512741\r\nStep 1093, loss: 0.009513264521956444\r\nStep 1094, loss: 0.006930184550583363\r\nStep 1095, loss: 0.008227167651057243\r\nStep 1096, loss: 0.007472679950296879\r\nStep 1097, loss: 0.00721663748845458\r\nStep 1098, loss: 0.008924340829253197\r\nStep 1099, loss: 0.008344174362719059\r\nStep 1100, loss: 0.009163778275251389\r\nStep 1101, loss: 0.008628742769360542\r\nStep 1023, loss: 0.008014761842787266\r\nStep 1024, loss: 0.008970646187663078\r\nStep 1025, loss: 0.0091971755027771\r\nStep 1026, loss: 0.010525759309530258\r\nStep 1027, loss: 0.007817784324288368\r\nStep 1028, loss: 0.0067655304446816444\r\nStep 1029, loss: 0.008068695664405823\r\nStep 1030, loss: 0.008297930471599102\r\nStep 1031, loss: 0.00823703221976757\r\nStep 1032, loss: 0.0059892176650464535\r\nStep 1033, loss: 0.007247478235512972\r\nStep 1034, loss: 0.007533142808824778\r\nStep 1035, loss: 0.008760763332247734\r\nStep 1036, loss: 0.006972344126552343\r\nStep 1037, loss: 0.009282128885388374\r\nStep 1038, loss: 0.008325469680130482\r\nStep 1039, loss: 0.006402534432709217\r\nStep 1040, loss: 0.007785910740494728\r\nStep 1041, loss: 0.008724870160222054\r\nStep 1042, loss: 0.00852934829890728\r\nStep 1043, loss: 0.007693928200751543\r\nStep 1044, loss: 0.008846480399370193\r\nStep 1045, loss: 0.007957519963383675\r\nStep 1046, loss: 0.0077178203500807285\r\nStep 1047, loss: 0.008153329603374004\r\nStep 1048, loss: 0.009286276064813137\r\nStep 1102, loss: 0.008254705928266048\r\nStep 1103, loss: 0.008064809255301952\r\nStep 1049, 
loss: 0.008510326966643333\r\nStep 1050, loss: 0.00852749589830637\r\nStep 1051, loss: 0.007896002382040024\r\nStep 1052, loss: 0.006862045265734196\r\nStep 1053, loss: 0.008342348039150238\r\nStep 1054, loss: 0.0076972683891654015\r\nStep 1055, loss: 0.006982931401580572\r\nStep 1056, loss: 0.0074845412746071815\r\nStep 1057, loss: 0.008726799860596657\r\nStep 1058, loss: 0.007304470054805279\r\nStep 1059, loss: 0.008364593610167503\r\nStep 1060, loss: 0.00762686412781477\r\nStep 1061, loss: 0.008587880060076714\r\nStep 1062, loss: 0.007529120892286301\r\nStep 1063, loss: 0.008381657302379608\r\nStep 1064, loss: 0.009239381179213524\r\nStep 1065, loss: 0.007429488468915224\r\nStep 1066, loss: 0.007957086898386478\r\nStep 1067, loss: 0.007299589458853006\r\nStep 1068, loss: 0.007665595971047878\r\nStep 1069, loss: 0.00937664695084095\r\nStep 1070, loss: 0.008951704949140549\r\nStep 1071, loss: 0.008769129402935505\r\nStep 1072, loss: 0.007632402237504721\r\nStep 1073, loss: 0.008348691277205944\r\nStep 1074, loss: 0.006696468684822321\r\nStep 1075, loss: 0.008740746416151524\r\nStep 1076, loss: 0.006975969765335321\r\nStep 1077, loss: 0.007106286007910967\r\nStep 1078, loss: 0.008355746977031231\r\nStep 1079, loss: 0.008168650791049004\r\nStep 1080, loss: 0.008891562931239605\r\nStep 1081, loss: 0.008963626809418201\r\nStep 1082, loss: 0.008009166456758976\r\nStep 1083, loss: 0.008219603449106216\r\nStep 1084, loss: 0.008079380728304386\r\nStep 1085, loss: 0.007420422043651342\r\nStep 1086, loss: 0.00743855070322752\r\nStep 1087, loss: 0.008207092992961407\r\nStep 1088, loss: 0.006728052627295256\r\nStep 1089, loss: 0.007048305589705706\r\nStep 1090, loss: 0.00929422490298748\r\nStep 1091, loss: 0.008660108782351017\r\nStep 1092, loss: 0.007319759577512741\r\nStep 1093, loss: 0.009513264521956444\r\nStep 1094, loss: 0.006930184550583363\r\nStep 1095, loss: 0.008227167651057243\r\nStep 1096, loss: 0.007472679950296879\r\nStep 1097, loss: 0.00721663748845458\r\nStep 1098, loss: 0.008924340829253197\r\nStep 1099, loss: 0.008344174362719059\r\nStep 1100, loss: 0.009163778275251389\r\nStep 1101, loss: 0.008628742769360542\r\nStep 912, loss: 0.00942692719399929\r\nStep 913, loss: 0.007297558709979057\r\nStep 914, loss: 0.007884763181209564\r\nStep 915, loss: 0.006811782717704773\r\nStep 916, loss: 0.009606652893126011\r\nStep 917, loss: 0.009326214902102947\r\nStep 918, loss: 0.007597845047712326\r\nStep 919, loss: 0.009222333319485188\r\nStep 920, loss: 0.007597240619361401\r\nStep 921, loss: 0.007224710658192635\r\nStep 922, loss: 0.007864835672080517\r\nStep 923, loss: 0.006759055890142918\r\nStep 924, loss: 0.007793813943862915\r\nStep 925, loss: 0.007841421291232109\r\nStep 926, loss: 0.00783326756209135\r\nStep 927, loss: 0.008056414313614368\r\nStep 928, loss: 0.008069598115980625\r\nStep 929, loss: 0.008222508244216442\r\nStep 930, loss: 0.008884362876415253\r\nStep 931, loss: 0.009337793104350567\r\nStep 932, loss: 0.008910568431019783\r\nStep 933, loss: 0.008316724561154842\r\nStep 934, loss: 0.00842820294201374\r\nStep 935, loss: 0.008287030272185802\r\nStep 936, loss: 0.007249796763062477\r\nStep 937, loss: 0.0075146788731217384\r\nStep 938, loss: 0.006155883427709341\r\nStep 885, loss: 0.007278712000697851\r\nStep 886, loss: 0.008584282360970974\r\nStep 887, loss: 0.008348902687430382\r\nStep 888, loss: 0.007911911234259605\r\nStep 889, loss: 0.006851324811577797\r\nStep 890, loss: 0.010272619314491749\r\nStep 891, loss: 0.007954963482916355\r\nStep 892, loss: 
0.011299554258584976\r\nStep 893, loss: 0.009172891266644001\r\nStep 894, loss: 0.008669286966323853\r\nStep 895, loss: 0.008719743229448795\r\nStep 896, loss: 0.007419318426400423\r\nStep 897, loss: 0.008977117948234081\r\nStep 898, loss: 0.009361022152006626\r\nStep 899, loss: 0.00724545493721962\r\nStep 900, loss: 0.009258134290575981\r\nStep 901, loss: 0.008727189153432846\r\nStep 902, loss: 0.008203626610338688\r\nStep 903, loss: 0.007765788584947586\r\nStep 904, loss: 0.0072352043353021145\r\nStep 905, loss: 0.007720112334936857\r\nStep 906, loss: 0.0073650795966386795\r\nStep 907, loss: 0.008562346920371056\r\nStep 908, loss: 0.006846950855106115\r\nStep 909, loss: 0.008362415246665478\r\nStep 910, loss: 0.008234109729528427\r\nStep 911, loss: 0.008880231529474258\r\nStep 1102, loss: 0.008254705928266048\r\nStep 1103, loss: 0.008064809255301952\r\nStep 939, loss: 0.008273442275822163\r\nStep 940, loss: 0.009627815335988998\r\nStep 941, loss: 0.008756155148148537\r\nStep 942, loss: 0.007437628228217363\r\nStep 943, loss: 0.00844540260732174\r\nStep 944, loss: 0.006830746773630381\r\nStep 945, loss: 0.008179925382137299\r\nStep 946, loss: 0.008205489255487919\r\nStep 947, loss: 0.01024852879345417\r\nStep 948, loss: 0.007618652656674385\r\nStep 949, loss: 0.009826977737247944\r\nStep 950, loss: 0.008983025327324867\r\nStep 951, loss: 0.007230666931718588\r\nStep 952, loss: 0.006803151685744524\r\nStep 953, loss: 0.009585150517523289\r\nStep 954, loss: 0.0075047011487185955\r\nStep 955, loss: 0.007671905215829611\r\nStep 956, loss: 0.008019600063562393\r\nStep 957, loss: 0.008153526112437248\r\nStep 958, loss: 0.007115584332495928\r\nStep 959, loss: 0.007860583253204823\r\nStep 960, loss: 0.009642302989959717\r\nStep 961, loss: 0.008715268224477768\r\nStep 962, loss: 0.007827111519873142\r\nStep 963, loss: 0.007779854815453291\r\nStep 964, loss: 0.00732398871332407\r\nStep 965, loss: 0.00885375402867794\r\nStep 912, loss: 0.00942692719399929\r\nStep 913, loss: 0.007297558709979057\r\nStep 914, loss: 0.007884763181209564\r\nStep 915, loss: 0.006811782717704773\r\nStep 916, loss: 0.009606652893126011\r\nStep 917, loss: 0.009326214902102947\r\nStep 918, loss: 0.007597845047712326\r\nStep 919, loss: 0.009222333319485188\r\nStep 920, loss: 0.007597240619361401\r\nStep 921, loss: 0.007224710658192635\r\nStep 922, loss: 0.007864835672080517\r\nStep 923, loss: 0.006759055890142918\r\nStep 924, loss: 0.007793813943862915\r\nStep 925, loss: 0.007841421291232109\r\nStep 926, loss: 0.00783326756209135\r\nStep 927, loss: 0.008056414313614368\r\nStep 928, loss: 0.008069598115980625\r\nStep 929, loss: 0.008222508244216442\r\nStep 930, loss: 0.008884362876415253\r\nStep 931, loss: 0.009337793104350567\r\nStep 932, loss: 0.008910568431019783\r\nStep 933, loss: 0.008316724561154842\r\nStep 934, loss: 0.00842820294201374\r\nStep 935, loss: 0.008287030272185802\r\nStep 936, loss: 0.007249796763062477\r\nStep 937, loss: 0.0075146788731217384\r\nStep 938, loss: 0.006155883427709341\r\nStep 996, loss: 0.008273789659142494\r\nStep 997, loss: 0.00829209852963686\r\nStep 998, loss: 0.007412363775074482\r\nStep 999, loss: 0.0076874177902936935\r\nStep 1000, loss: 0.007640828378498554\r\nStep 1001, loss: 0.008421523496508598\r\nStep 1002, loss: 0.008363532833755016\r\nStep 1003, loss: 0.008224919438362122\r\nStep 1004, loss: 0.007614170666784048\r\nStep 1005, loss: 0.00808942224830389\r\nStep 1006, loss: 0.00905570574104786\r\nStep 1007, loss: 0.007311396766453981\r\nStep 1008, loss: 0.006594126112759113\r\nStep 
1009, loss: 0.006014592479914427\r\nStep 1010, loss: 0.007816429249942303\r\nStep 1011, loss: 0.007855435833334923\r\nStep 1012, loss: 0.006863860879093409\r\nStep 1013, loss: 0.00776817137375474\r\nStep 1014, loss: 0.007321653421968222\r\nStep 1015, loss: 0.007539015728980303\r\nStep 1016, loss: 0.007984439842402935\r\nStep 1017, loss: 0.008064580149948597\r\nStep 1018, loss: 0.007594664115458727\r\nStep 1019, loss: 0.007612136192619801\r\nStep 1020, loss: 0.008153271861374378\r\nStep 1021, loss: 0.009022620506584644\r\nStep 1022, loss: 0.007351504173129797\r\nStep 966, loss: 0.009267655201256275\r\nStep 967, loss: 0.008406993001699448\r\nStep 968, loss: 0.008440010249614716\r\nStep 969, loss: 0.007938782684504986\r\nStep 970, loss: 0.009420925751328468\r\nStep 971, loss: 0.00840043555945158\r\nStep 972, loss: 0.007098720874637365\r\nStep 973, loss: 0.00831522885710001\r\nStep 974, loss: 0.00845961831510067\r\nStep 975, loss: 0.007800284307450056\r\nStep 976, loss: 0.008622569963335991\r\nStep 977, loss: 0.007540627382695675\r\nStep 978, loss: 0.008258609101176262\r\nStep 979, loss: 0.008424363099038601\r\nStep 980, loss: 0.007538204547017813\r\nStep 981, loss: 0.008322594687342644\r\nStep 982, loss: 0.0069837411865592\r\nStep 983, loss: 0.008910678327083588\r\nStep 984, loss: 0.008503968827426434\r\nStep 985, loss: 0.009345966391265392\r\nStep 986, loss: 0.009745476767420769\r\nStep 987, loss: 0.008763987571001053\r\nStep 988, loss: 0.008663860149681568\r\nStep 989, loss: 0.007921400479972363\r\nStep 990, loss: 0.0094941146671772\r\nStep 991, loss: 0.0088986000046134\r\nStep 992, loss: 0.007342140190303326\r\nStep 885, loss: 0.007278712000697851\r\nStep 886, loss: 0.008584282360970974\r\nStep 887, loss: 0.008348902687430382\r\nStep 888, loss: 0.007911911234259605\r\nStep 889, loss: 0.006851324811577797\r\nStep 890, loss: 0.010272619314491749\r\nStep 891, loss: 0.007954963482916355\r\nStep 892, loss: 0.011299554258584976\r\nStep 893, loss: 0.009172891266644001\r\nStep 894, loss: 0.008669286966323853\r\nStep 895, loss: 0.008719743229448795\r\nStep 896, loss: 0.007419318426400423\r\nStep 897, loss: 0.008977117948234081\r\nStep 898, loss: 0.009361022152006626\r\nStep 899, loss: 0.00724545493721962\r\nStep 900, loss: 0.009258134290575981\r\nStep 901, loss: 0.008727189153432846\r\nStep 902, loss: 0.008203626610338688\r\nStep 903, loss: 0.007765788584947586\r\nStep 904, loss: 0.0072352043353021145\r\nStep 905, loss: 0.007720112334936857\r\nStep 906, loss: 0.0073650795966386795\r\nStep 907, loss: 0.008562346920371056\r\nStep 908, loss: 0.006846950855106115\r\nStep 909, loss: 0.008362415246665478\r\nStep 910, loss: 0.008234109729528427\r\nStep 911, loss: 0.008880231529474258\r\nStep 939, loss: 0.008273442275822163\r\nStep 940, loss: 0.009627815335988998\r\nStep 941, loss: 0.008756155148148537\r\nStep 942, loss: 0.007437628228217363\r\nStep 943, loss: 0.00844540260732174\r\nStep 944, loss: 0.006830746773630381\r\nStep 945, loss: 0.008179925382137299\r\nStep 946, loss: 0.008205489255487919\r\nStep 947, loss: 0.01024852879345417\r\nStep 948, loss: 0.007618652656674385\r\nStep 949, loss: 0.009826977737247944\r\nStep 950, loss: 0.008983025327324867\r\nStep 951, loss: 0.007230666931718588\r\nStep 952, loss: 0.006803151685744524\r\nStep 953, loss: 0.009585150517523289\r\nStep 954, loss: 0.0075047011487185955\r\nStep 955, loss: 0.007671905215829611\r\nStep 956, loss: 0.008019600063562393\r\nStep 957, loss: 0.008153526112437248\r\nStep 958, loss: 0.007115584332495928\r\nStep 959, loss: 
0.007860583253204823\r\nStep 960, loss: 0.009642302989959717\r\nStep 961, loss: 0.008715268224477768\r\nStep 962, loss: 0.007827111519873142\r\nStep 963, loss: 0.007779854815453291\r\nStep 964, loss: 0.00732398871332407\r\nStep 965, loss: 0.00885375402867794\r\nStep 1023, loss: 0.008014761842787266\r\nStep 1024, loss: 0.008970646187663078\r\nStep 1025, loss: 0.0091971755027771\r\nStep 1026, loss: 0.010525759309530258\r\nStep 1027, loss: 0.007817784324288368\r\nStep 1028, loss: 0.0067655304446816444\r\nStep 1029, loss: 0.008068695664405823\r\nStep 1030, loss: 0.008297930471599102\r\nStep 1031, loss: 0.00823703221976757\r\nStep 1032, loss: 0.0059892176650464535\r\nStep 1033, loss: 0.007247478235512972\r\nStep 1034, loss: 0.007533142808824778\r\nStep 1035, loss: 0.008760763332247734\r\nStep 1036, loss: 0.006972344126552343\r\nStep 1037, loss: 0.009282128885388374\r\nStep 1038, loss: 0.008325469680130482\r\nStep 1039, loss: 0.006402534432709217\r\nStep 1040, loss: 0.007785910740494728\r\nStep 1041, loss: 0.008724870160222054\r\nStep 1042, loss: 0.00852934829890728\r\nStep 1043, loss: 0.007693928200751543\r\nStep 1044, loss: 0.008846480399370193\r\nStep 1045, loss: 0.007957519963383675\r\nStep 1046, loss: 0.0077178203500807285\r\nStep 1047, loss: 0.008153329603374004\r\nStep 1048, loss: 0.009286276064813137\r\nStep 993, loss: 0.008915127255022526\r\nStep 994, loss: 0.010046386159956455\r\nStep 995, loss: 0.008601103909313679\r\nStep 912, loss: 0.00942692719399929\r\nStep 913, loss: 0.007297558709979057\r\nStep 914, loss: 0.007884763181209564\r\nStep 915, loss: 0.006811782717704773\r\nStep 916, loss: 0.009606652893126011\r\nStep 917, loss: 0.009326214902102947\r\nStep 918, loss: 0.007597845047712326\r\nStep 919, loss: 0.009222333319485188\r\nStep 920, loss: 0.007597240619361401\r\nStep 921, loss: 0.007224710658192635\r\nStep 922, loss: 0.007864835672080517\r\nStep 923, loss: 0.006759055890142918\r\nStep 924, loss: 0.007793813943862915\r\nStep 925, loss: 0.007841421291232109\r\nStep 926, loss: 0.00783326756209135\r\nStep 927, loss: 0.008056414313614368\r\nStep 928, loss: 0.008069598115980625\r\nStep 929, loss: 0.008222508244216442\r\nStep 930, loss: 0.008884362876415253\r\nStep 931, loss: 0.009337793104350567\r\nStep 932, loss: 0.008910568431019783\r\nStep 933, loss: 0.008316724561154842\r\nStep 934, loss: 0.00842820294201374\r\nStep 935, loss: 0.008287030272185802\r\nStep 936, loss: 0.007249796763062477\r\nStep 937, loss: 0.0075146788731217384\r\nStep 938, loss: 0.006155883427709341\r\nStep 966, loss: 0.009267655201256275\r\nStep 967, loss: 0.008406993001699448\r\nStep 968, loss: 0.008440010249614716\r\nStep 969, loss: 0.007938782684504986\r\nStep 970, loss: 0.009420925751328468\r\nStep 971, loss: 0.00840043555945158\r\nStep 972, loss: 0.007098720874637365\r\nStep 973, loss: 0.00831522885710001\r\nStep 974, loss: 0.00845961831510067\r\nStep 975, loss: 0.007800284307450056\r\nStep 976, loss: 0.008622569963335991\r\nStep 977, loss: 0.007540627382695675\r\nStep 978, loss: 0.008258609101176262\r\nStep 979, loss: 0.008424363099038601\r\nStep 980, loss: 0.007538204547017813\r\nStep 981, loss: 0.008322594687342644\r\nStep 982, loss: 0.0069837411865592\r\nStep 983, loss: 0.008910678327083588\r\nStep 984, loss: 0.008503968827426434\r\nStep 985, loss: 0.009345966391265392\r\nStep 986, loss: 0.009745476767420769\r\nStep 987, loss: 0.008763987571001053\r\nStep 988, loss: 0.008663860149681568\r\nStep 989, loss: 0.007921400479972363\r\nStep 990, loss: 0.0094941146671772\r\nStep 991, loss: 
0.0088986000046134\r\nStep 992, loss: 0.007342140190303326\r\nStep 1049, loss: 0.008510326966643333\r\nStep 1050, loss: 0.00852749589830637\r\nStep 1051, loss: 0.007896002382040024\r\nStep 1052, loss: 0.006862045265734196\r\nStep 1053, loss: 0.008342348039150238\r\nStep 1054, loss: 0.0076972683891654015\r\nStep 1055, loss: 0.006982931401580572\r\nStep 1056, loss: 0.0074845412746071815\r\nStep 1057, loss: 0.008726799860596657\r\nStep 1058, loss: 0.007304470054805279\r\nStep 1059, loss: 0.008364593610167503\r\nStep 1060, loss: 0.00762686412781477\r\nStep 1061, loss: 0.008587880060076714\r\nStep 1062, loss: 0.007529120892286301\r\nStep 1063, loss: 0.008381657302379608\r\nStep 1064, loss: 0.009239381179213524\r\nStep 1065, loss: 0.007429488468915224\r\nStep 1066, loss: 0.007957086898386478\r\nStep 1067, loss: 0.007299589458853006\r\nStep 1068, loss: 0.007665595971047878\r\nStep 1069, loss: 0.00937664695084095\r\nStep 1070, loss: 0.008951704949140549\r\nStep 1071, loss: 0.008769129402935505\r\nStep 1072, loss: 0.007632402237504721\r\nStep 1073, loss: 0.008348691277205944\r\nStep 1074, loss: 0.006696468684822321\r\nStep 885, loss: 0.007278712000697851\r\nStep 886, loss: 0.008584282360970974\r\nStep 887, loss: 0.008348902687430382\r\nStep 888, loss: 0.007911911234259605\r\nStep 889, loss: 0.006851324811577797\r\nStep 890, loss: 0.010272619314491749\r\nStep 891, loss: 0.007954963482916355\r\nStep 892, loss: 0.011299554258584976\r\nStep 893, loss: 0.009172891266644001\r\nStep 894, loss: 0.008669286966323853\r\nStep 895, loss: 0.008719743229448795\r\nStep 896, loss: 0.007419318426400423\r\nStep 897, loss: 0.008977117948234081\r\nStep 898, loss: 0.009361022152006626\r\nStep 899, loss: 0.00724545493721962\r\nStep 900, loss: 0.009258134290575981\r\nStep 901, loss: 0.008727189153432846\r\nStep 902, loss: 0.008203626610338688\r\nStep 903, loss: 0.007765788584947586\r\nStep 904, loss: 0.0072352043353021145\r\nStep 905, loss: 0.007720112334936857\r\nStep 906, loss: 0.0073650795966386795\r\nStep 907, loss: 0.008562346920371056\r\nStep 908, loss: 0.006846950855106115\r\nStep 909, loss: 0.008362415246665478\r\nStep 910, loss: 0.008234109729528427\r\nStep 911, loss: 0.008880231529474258\r\nStep 939, loss: 0.008273442275822163\r\nStep 940, loss: 0.009627815335988998\r\nStep 941, loss: 0.008756155148148537\r\nStep 942, loss: 0.007437628228217363\r\nStep 943, loss: 0.00844540260732174\r\nStep 944, loss: 0.006830746773630381\r\nStep 945, loss: 0.008179925382137299\r\nStep 946, loss: 0.008205489255487919\r\nStep 947, loss: 0.01024852879345417\r\nStep 948, loss: 0.007618652656674385\r\nStep 949, loss: 0.009826977737247944\r\nStep 950, loss: 0.008983025327324867\r\nStep 951, loss: 0.007230666931718588\r\nStep 952, loss: 0.006803151685744524\r\nStep 953, loss: 0.009585150517523289\r\nStep 954, loss: 0.0075047011487185955\r\nStep 955, loss: 0.007671905215829611\r\nStep 956, loss: 0.008019600063562393\r\nStep 957, loss: 0.008153526112437248\r\nStep 958, loss: 0.007115584332495928\r\nStep 959, loss: 0.007860583253204823\r\nStep 960, loss: 0.009642302989959717\r\nStep 961, loss: 0.008715268224477768\r\nStep 962, loss: 0.007827111519873142\r\nStep 963, loss: 0.007779854815453291\r\nStep 964, loss: 0.00732398871332407\r\nStep 965, loss: 0.00885375402867794\r\nStep 993, loss: 0.008915127255022526\r\nStep 994, loss: 0.010046386159956455\r\nStep 995, loss: 0.008601103909313679\r\nStep 1075, loss: 0.008740746416151524\r\nStep 1076, loss: 0.006975969765335321\r\nStep 1077, loss: 0.007106286007910967\r\nStep 1078, loss: 
0.008355746977031231\r\nStep 1079, loss: 0.008168650791049004\r\nStep 1080, loss: 0.008891562931239605\r\nStep 1081, loss: 0.008963626809418201\r\nStep 1082, loss: 0.008009166456758976\r\nStep 1083, loss: 0.008219603449106216\r\nStep 1084, loss: 0.008079380728304386\r\nStep 1085, loss: 0.007420422043651342\r\nStep 1086, loss: 0.00743855070322752\r\nStep 1087, loss: 0.008207092992961407\r\nStep 1088, loss: 0.006728052627295256\r\nStep 1089, loss: 0.007048305589705706\r\nStep 1090, loss: 0.00929422490298748\r\nStep 1091, loss: 0.008660108782351017\r\nStep 1092, loss: 0.007319759577512741\r\nStep 1093, loss: 0.009513264521956444\r\nStep 1094, loss: 0.006930184550583363\r\nStep 1095, loss: 0.008227167651057243\r\nStep 1096, loss: 0.007472679950296879\r\nStep 1097, loss: 0.00721663748845458\r\nStep 1098, loss: 0.008924340829253197\r\nStep 1099, loss: 0.008344174362719059\r\nStep 1100, loss: 0.009163778275251389\r\nStep 1101, loss: 0.008628742769360542\r\nStep 912, loss: 0.00942692719399929\r\nStep 913, loss: 0.007297558709979057\r\nStep 914, loss: 0.007884763181209564\r\nStep 915, loss: 0.006811782717704773\r\nStep 916, loss: 0.009606652893126011\r\nStep 917, loss: 0.009326214902102947\r\nStep 918, loss: 0.007597845047712326\r\nStep 919, loss: 0.009222333319485188\r\nStep 920, loss: 0.007597240619361401\r\nStep 921, loss: 0.007224710658192635\r\nStep 922, loss: 0.007864835672080517\r\nStep 923, loss: 0.006759055890142918\r\nStep 924, loss: 0.007793813943862915\r\nStep 925, loss: 0.007841421291232109\r\nStep 926, loss: 0.00783326756209135\r\nStep 927, loss: 0.008056414313614368\r\nStep 928, loss: 0.008069598115980625\r\nStep 929, loss: 0.008222508244216442\r\nStep 930, loss: 0.008884362876415253\r\nStep 931, loss: 0.009337793104350567\r\nStep 932, loss: 0.008910568431019783\r\nStep 933, loss: 0.008316724561154842\r\nStep 934, loss: 0.00842820294201374\r\nStep 935, loss: 0.008287030272185802\r\nStep 936, loss: 0.007249796763062477\r\nStep 937, loss: 0.0075146788731217384\r\nStep 938, loss: 0.006155883427709341\r\nStep 966, loss: 0.009267655201256275\r\nStep 967, loss: 0.008406993001699448\r\nStep 968, loss: 0.008440010249614716\r\nStep 969, loss: 0.007938782684504986\r\nStep 970, loss: 0.009420925751328468\r\nStep 971, loss: 0.00840043555945158\r\nStep 972, loss: 0.007098720874637365\r\nStep 973, loss: 0.00831522885710001\r\nStep 974, loss: 0.00845961831510067\r\nStep 975, loss: 0.007800284307450056\r\nStep 976, loss: 0.008622569963335991\r\nStep 977, loss: 0.007540627382695675\r\nStep 978, loss: 0.008258609101176262\r\nStep 979, loss: 0.008424363099038601\r\nStep 980, loss: 0.007538204547017813\r\nStep 981, loss: 0.008322594687342644\r\nStep 982, loss: 0.0069837411865592\r\nStep 983, loss: 0.008910678327083588\r\nStep 984, loss: 0.008503968827426434\r\nStep 985, loss: 0.009345966391265392\r\nStep 986, loss: 0.009745476767420769\r\nStep 987, loss: 0.008763987571001053\r\nStep 988, loss: 0.008663860149681568\r\nStep 989, loss: 0.007921400479972363\r\nStep 990, loss: 0.0094941146671772\r\nStep 991, loss: 0.0088986000046134\r\nStep 992, loss: 0.007342140190303326\r\nStep 885, loss: 0.007278712000697851\r\nStep 886, loss: 0.008584282360970974\r\nStep 887, loss: 0.008348902687430382\r\nStep 888, loss: 0.007911911234259605\r\nStep 889, loss: 0.006851324811577797\r\nStep 890, loss: 0.010272619314491749\r\nStep 891, loss: 0.007954963482916355\r\nStep 892, loss: 0.011299554258584976\r\nStep 893, loss: 0.009172891266644001\r\nStep 894, loss: 0.008669286966323853\r\nStep 895, loss: 
0.008719743229448795\r\nStep 896, loss: 0.007419318426400423\r\nStep 897, loss: 0.008977117948234081\r\nStep 898, loss: 0.009361022152006626\r\nStep 899, loss: 0.00724545493721962\r\nStep 900, loss: 0.009258134290575981\r\nStep 901, loss: 0.008727189153432846\r\nStep 902, loss: 0.008203626610338688\r\nStep 903, loss: 0.007765788584947586\r\nStep 904, loss: 0.0072352043353021145\r\nStep 905, loss: 0.007720112334936857\r\nStep 906, loss: 0.0073650795966386795\r\nStep 907, loss: 0.008562346920371056\r\nStep 908, loss: 0.006846950855106115\r\nStep 909, loss: 0.008362415246665478\r\nStep 910, loss: 0.008234109729528427\r\nStep 911, loss: 0.008880231529474258\r\nStep 1102, loss: 0.008254705928266048\r\nStep 1103, loss: 0.008064809255301952\r\nStep 939, loss: 0.008273442275822163\r\nStep 940, loss: 0.009627815335988998\r\nStep 941, loss: 0.008756155148148537\r\nStep 942, loss: 0.007437628228217363\r\nStep 943, loss: 0.00844540260732174\r\nStep 944, loss: 0.006830746773630381\r\nStep 945, loss: 0.008179925382137299\r\nStep 946, loss: 0.008205489255487919\r\nStep 947, loss: 0.01024852879345417\r\nStep 948, loss: 0.007618652656674385\r\nStep 949, loss: 0.009826977737247944\r\nStep 950, loss: 0.008983025327324867\r\nStep 951, loss: 0.007230666931718588\r\nStep 952, loss: 0.006803151685744524\r\nStep 953, loss: 0.009585150517523289\r\nStep 954, loss: 0.0075047011487185955\r\nStep 955, loss: 0.007671905215829611\r\nStep 956, loss: 0.008019600063562393\r\nStep 957, loss: 0.008153526112437248\r\nStep 958, loss: 0.007115584332495928\r\nStep 959, loss: 0.007860583253204823\r\nStep 960, loss: 0.009642302989959717\r\nStep 961, loss: 0.008715268224477768\r\nStep 962, loss: 0.007827111519873142\r\nStep 963, loss: 0.007779854815453291\r\nStep 964, loss: 0.00732398871332407\r\nStep 965, loss: 0.00885375402867794\r\nStep 993, loss: 0.008915127255022526\r\nStep 994, loss: 0.010046386159956455\r\nStep 995, loss: 0.008601103909313679\r\nStep 912, loss: 0.00942692719399929\r\nStep 913, loss: 0.007297558709979057\r\nStep 914, loss: 0.007884763181209564\r\nStep 915, loss: 0.006811782717704773\r\nStep 916, loss: 0.009606652893126011\r\nStep 917, loss: 0.009326214902102947\r\nStep 918, loss: 0.007597845047712326\r\nStep 919, loss: 0.009222333319485188\r\nStep 920, loss: 0.007597240619361401\r\nStep 921, loss: 0.007224710658192635\r\nStep 922, loss: 0.007864835672080517\r\nStep 923, loss: 0.006759055890142918\r\nStep 924, loss: 0.007793813943862915\r\nStep 925, loss: 0.007841421291232109\r\nStep 926, loss: 0.00783326756209135\r\nStep 927, loss: 0.008056414313614368\r\nStep 928, loss: 0.008069598115980625\r\nStep 929, loss: 0.008222508244216442\r\nStep 930, loss: 0.008884362876415253\r\nStep 931, loss: 0.009337793104350567\r\nStep 932, loss: 0.008910568431019783\r\nStep 933, loss: 0.008316724561154842\r\nStep 934, loss: 0.00842820294201374\r\nStep 935, loss: 0.008287030272185802\r\nStep 936, loss: 0.007249796763062477\r\nStep 937, loss: 0.0075146788731217384\r\nStep 938, loss: 0.006155883427709341\r\nStep 885, loss: 0.007278712000697851\r\nStep 886, loss: 0.008584282360970974\r\nStep 887, loss: 0.008348902687430382\r\nStep 888, loss: 0.007911911234259605\r\nStep 889, loss: 0.006851324811577797\r\nStep 890, loss: 0.010272619314491749\r\nStep 891, loss: 0.007954963482916355\r\nStep 892, loss: 0.011299554258584976\r\nStep 893, loss: 0.009172891266644001\r\nStep 894, loss: 0.008669286966323853\r\nStep 895, loss: 0.008719743229448795\r\nStep 896, loss: 0.007419318426400423\r\nStep 897, loss: 0.008977117948234081\r\nStep 898, 
loss: 0.009361022152006626\r\nStep 899, loss: 0.00724545493721962\r\nStep 900, loss: 0.009258134290575981\r\nStep 901, loss: 0.008727189153432846\r\nStep 902, loss: 0.008203626610338688\r\nStep 903, loss: 0.007765788584947586\r\nStep 904, loss: 0.0072352043353021145\r\nStep 905, loss: 0.007720112334936857\r\nStep 906, loss: 0.0073650795966386795\r\nStep 907, loss: 0.008562346920371056\r\nStep 908, loss: 0.006846950855106115\r\nStep 909, loss: 0.008362415246665478\r\nStep 910, loss: 0.008234109729528427\r\nStep 911, loss: 0.008880231529474258\r\nStep 966, loss: 0.009267655201256275\r\nStep 967, loss: 0.008406993001699448\r\nStep 968, loss: 0.008440010249614716\r\nStep 969, loss: 0.007938782684504986\r\nStep 970, loss: 0.009420925751328468\r\nStep 971, loss: 0.00840043555945158\r\nStep 972, loss: 0.007098720874637365\r\nStep 973, loss: 0.00831522885710001\r\nStep 974, loss: 0.00845961831510067\r\nStep 975, loss: 0.007800284307450056\r\nStep 976, loss: 0.008622569963335991\r\nStep 977, loss: 0.007540627382695675\r\nStep 978, loss: 0.008258609101176262\r\nStep 979, loss: 0.008424363099038601\r\nStep 980, loss: 0.007538204547017813\r\nStep 981, loss: 0.008322594687342644\r\nStep 982, loss: 0.0069837411865592\r\nStep 983, loss: 0.008910678327083588\r\nStep 984, loss: 0.008503968827426434\r\nStep 985, loss: 0.009345966391265392\r\nStep 986, loss: 0.009745476767420769\r\nStep 987, loss: 0.008763987571001053\r\nStep 988, loss: 0.008663860149681568\r\nStep 989, loss: 0.007921400479972363\r\nStep 990, loss: 0.0094941146671772\r\nStep 991, loss: 0.0088986000046134\r\nStep 992, loss: 0.007342140190303326\r\nStep 996, loss: 0.008273789659142494\r\nStep 997, loss: 0.00829209852963686\r\nStep 998, loss: 0.007412363775074482\r\nStep 999, loss: 0.0076874177902936935\r\nStep 1000, loss: 0.007640828378498554\r\nStep 1001, loss: 0.008421523496508598\r\nStep 1002, loss: 0.008363532833755016\r\nStep 1003, loss: 0.008224919438362122\r\nStep 1004, loss: 0.007614170666784048\r\nStep 1005, loss: 0.00808942224830389\r\nStep 1006, loss: 0.00905570574104786\r\nStep 1007, loss: 0.007311396766453981\r\nStep 1008, loss: 0.006594126112759113\r\nStep 1009, loss: 0.006014592479914427\r\nStep 1010, loss: 0.007816429249942303\r\nStep 1011, loss: 0.007855435833334923\r\nStep 1012, loss: 0.006863860879093409\r\nStep 1013, loss: 0.00776817137375474\r\nStep 1014, loss: 0.007321653421968222\r\nStep 1015, loss: 0.007539015728980303\r\nStep 1016, loss: 0.007984439842402935\r\nStep 1017, loss: 0.008064580149948597\r\nStep 1018, loss: 0.007594664115458727\r\nStep 1019, loss: 0.007612136192619801\r\nStep 1020, loss: 0.008153271861374378\r\nStep 1021, loss: 0.009022620506584644\r\nStep 1022, loss: 0.007351504173129797\r\nStep 939, loss: 0.008273442275822163\r\nStep 940, loss: 0.009627815335988998\r\nStep 941, loss: 0.008756155148148537\r\nStep 942, loss: 0.007437628228217363\r\nStep 943, loss: 0.00844540260732174\r\nStep 944, loss: 0.006830746773630381\r\nStep 945, loss: 0.008179925382137299\r\nStep 946, loss: 0.008205489255487919\r\nStep 947, loss: 0.01024852879345417\r\nStep 948, loss: 0.007618652656674385\r\nStep 949, loss: 0.009826977737247944\r\nStep 950, loss: 0.008983025327324867\r\nStep 951, loss: 0.007230666931718588\r\nStep 952, loss: 0.006803151685744524\r\nStep 953, loss: 0.009585150517523289\r\nStep 954, loss: 0.0075047011487185955\r\nStep 955, loss: 0.007671905215829611\r\nStep 956, loss: 0.008019600063562393\r\nStep 957, loss: 0.008153526112437248\r\nStep 958, loss: 0.007115584332495928\r\nStep 959, loss: 
0.007860583253204823\r\nStep 960, loss: 0.009642302989959717\r\nStep 961, loss: 0.008715268224477768\r\nStep 962, loss: 0.007827111519873142\r\nStep 963, loss: 0.007779854815453291\r\nStep 964, loss: 0.00732398871332407\r\nStep 965, loss: 0.00885375402867794\r\nStep 912, loss: 0.00942692719399929\r\nStep 913, loss: 0.007297558709979057\r\nStep 914, loss: 0.007884763181209564\r\nStep 915, loss: 0.006811782717704773\r\nStep 916, loss: 0.009606652893126011\r\nStep 917, loss: 0.009326214902102947\r\nStep 918, loss: 0.007597845047712326\r\nStep 919, loss: 0.009222333319485188\r\nStep 920, loss: 0.007597240619361401\r\nStep 921, loss: 0.007224710658192635\r\nStep 922, loss: 0.007864835672080517\r\nStep 923, loss: 0.006759055890142918\r\nStep 924, loss: 0.007793813943862915\r\nStep 925, loss: 0.007841421291232109\r\nStep 926, loss: 0.00783326756209135\r\nStep 927, loss: 0.008056414313614368\r\nStep 928, loss: 0.008069598115980625\r\nStep 929, loss: 0.008222508244216442\r\nStep 930, loss: 0.008884362876415253\r\nStep 931, loss: 0.009337793104350567\r\nStep 932, loss: 0.008910568431019783\r\nStep 933, loss: 0.008316724561154842\r\nStep 934, loss: 0.00842820294201374\r\nStep 935, loss: 0.008287030272185802\r\nStep 936, loss: 0.007249796763062477\r\nStep 937, loss: 0.0075146788731217384\r\nStep 938, loss: 0.006155883427709341\r\nStep 993, loss: 0.008915127255022526\r\nStep 994, loss: 0.010046386159956455\r\nStep 995, loss: 0.008601103909313679\r\nStep 1023, loss: 0.008014761842787266\r\nStep 1024, loss: 0.008970646187663078\r\nStep 1025, loss: 0.0091971755027771\r\nStep 1026, loss: 0.010525759309530258\r\nStep 1027, loss: 0.007817784324288368\r\nStep 1028, loss: 0.0067655304446816444\r\nStep 1029, loss: 0.008068695664405823\r\nStep 1030, loss: 0.008297930471599102\r\nStep 1031, loss: 0.00823703221976757\r\nStep 1032, loss: 0.0059892176650464535\r\nStep 1033, loss: 0.007247478235512972\r\nStep 1034, loss: 0.007533142808824778\r\nStep 1035, loss: 0.008760763332247734\r\nStep 1036, loss: 0.006972344126552343\r\nStep 1037, loss: 0.009282128885388374\r\nStep 1038, loss: 0.008325469680130482\r\nStep 1039, loss: 0.006402534432709217\r\nStep 1040, loss: 0.007785910740494728\r\nStep 1041, loss: 0.008724870160222054\r\nStep 1042, loss: 0.00852934829890728\r\nStep 1043, loss: 0.007693928200751543\r\nStep 1044, loss: 0.008846480399370193\r\nStep 1045, loss: 0.007957519963383675\r\nStep 1046, loss: 0.0077178203500807285\r\nStep 1047, loss: 0.008153329603374004\r\nStep 1048, loss: 0.009286276064813137\r\nStep 966, loss: 0.009267655201256275\r\nStep 967, loss: 0.008406993001699448\r\nStep 968, loss: 0.008440010249614716\r\nStep 969, loss: 0.007938782684504986\r\nStep 970, loss: 0.009420925751328468\r\nStep 971, loss: 0.00840043555945158\r\nStep 972, loss: 0.007098720874637365\r\nStep 973, loss: 0.00831522885710001\r\nStep 974, loss: 0.00845961831510067\r\nStep 975, loss: 0.007800284307450056\r\nStep 976, loss: 0.008622569963335991\r\nStep 977, loss: 0.007540627382695675\r\nStep 978, loss: 0.008258609101176262\r\nStep 979, loss: 0.008424363099038601\r\nStep 980, loss: 0.007538204547017813\r\nStep 981, loss: 0.008322594687342644\r\nStep 982, loss: 0.0069837411865592\r\nStep 983, loss: 0.008910678327083588\r\nStep 984, loss: 0.008503968827426434\r\nStep 985, loss: 0.009345966391265392\r\nStep 986, loss: 0.009745476767420769\r\nStep 987, loss: 0.008763987571001053\r\nStep 988, loss: 0.008663860149681568\r\nStep 989, loss: 0.007921400479972363\r\nStep 990, loss: 0.0094941146671772\r\nStep 991, loss: 
0.0088986000046134\r\nStep 992, loss: 0.007342140190303326\r\nStep 939, loss: 0.008273442275822163\r\nStep 940, loss: 0.009627815335988998\r\nStep 941, loss: 0.008756155148148537\r\nStep 942, loss: 0.007437628228217363\r\nStep 943, loss: 0.00844540260732174\r\nStep 944, loss: 0.006830746773630381\r\nStep 945, loss: 0.008179925382137299\r\nStep 946, loss: 0.008205489255487919\r\nStep 947, loss: 0.01024852879345417\r\nStep 948, loss: 0.007618652656674385\r\nStep 949, loss: 0.009826977737247944\r\nStep 950, loss: 0.008983025327324867\r\nStep 951, loss: 0.007230666931718588\r\nStep 952, loss: 0.006803151685744524\r\nStep 953, loss: 0.009585150517523289\r\nStep 954, loss: 0.0075047011487185955\r\nStep 955, loss: 0.007671905215829611\r\nStep 956, loss: 0.008019600063562393\r\nStep 957, loss: 0.008153526112437248\r\nStep 958, loss: 0.007115584332495928\r\nStep 959, loss: 0.007860583253204823\r\nStep 960, loss: 0.009642302989959717\r\nStep 961, loss: 0.008715268224477768\r\nStep 962, loss: 0.007827111519873142\r\nStep 963, loss: 0.007779854815453291\r\nStep 964, loss: 0.00732398871332407\r\nStep 965, loss: 0.00885375402867794\r\nStep 996, loss: 0.008273789659142494\r\nStep 997, loss: 0.00829209852963686\r\nStep 998, loss: 0.007412363775074482\r\nStep 999, loss: 0.0076874177902936935\r\nStep 1000, loss: 0.007640828378498554\r\nStep 1001, loss: 0.008421523496508598\r\nStep 1002, loss: 0.008363532833755016\r\nStep 1003, loss: 0.008224919438362122\r\nStep 1004, loss: 0.007614170666784048\r\nStep 1005, loss: 0.00808942224830389\r\nStep 1006, loss: 0.00905570574104786\r\nStep 1007, loss: 0.007311396766453981\r\nStep 1008, loss: 0.006594126112759113\r\nStep 1009, loss: 0.006014592479914427\r\nStep 1010, loss: 0.007816429249942303\r\nStep 1011, loss: 0.007855435833334923\r\nStep 1012, loss: 0.006863860879093409\r\nStep 1013, loss: 0.00776817137375474\r\nStep 1014, loss: 0.007321653421968222\r\nStep 1015, loss: 0.007539015728980303\r\nStep 1016, loss: 0.007984439842402935\r\nStep 1017, loss: 0.008064580149948597\r\nStep 1018, loss: 0.007594664115458727\r\nStep 1019, loss: 0.007612136192619801\r\nStep 1020, loss: 0.008153271861374378\r\nStep 1021, loss: 0.009022620506584644\r\nStep 1022, loss: 0.007351504173129797\r\nStep 1049, loss: 0.008510326966643333\r\nStep 1050, loss: 0.00852749589830637\r\nStep 1051, loss: 0.007896002382040024\r\nStep 1052, loss: 0.006862045265734196\r\nStep 1053, loss: 0.008342348039150238\r\nStep 1054, loss: 0.0076972683891654015\r\nStep 1055, loss: 0.006982931401580572\r\nStep 1056, loss: 0.0074845412746071815\r\nStep 1057, loss: 0.008726799860596657\r\nStep 1058, loss: 0.007304470054805279\r\nStep 1059, loss: 0.008364593610167503\r\nStep 1060, loss: 0.00762686412781477\r\nStep 1061, loss: 0.008587880060076714\r\nStep 1062, loss: 0.007529120892286301\r\nStep 1063, loss: 0.008381657302379608\r\nStep 1064, loss: 0.009239381179213524\r\nStep 1065, loss: 0.007429488468915224\r\nStep 1066, loss: 0.007957086898386478\r\nStep 1067, loss: 0.007299589458853006\r\nStep 1068, loss: 0.007665595971047878\r\nStep 1069, loss: 0.00937664695084095\r\nStep 1070, loss: 0.008951704949140549\r\nStep 1071, loss: 0.008769129402935505\r\nStep 1072, loss: 0.007632402237504721\r\nStep 1073, loss: 0.008348691277205944\r\nStep 1074, loss: 0.006696468684822321\r\nStep 993, loss: 0.008915127255022526\r\nStep 994, loss: 0.010046386159956455\r\nStep 995, loss: 0.008601103909313679\r\nStep 966, loss: 0.009267655201256275\r\nStep 967, loss: 0.008406993001699448\r\nStep 968, loss: 0.008440010249614716\r\nStep 969, 
loss: 0.007938782684504986\r\nStep 970, loss: 0.009420925751328468\r\nStep 971, loss: 0.00840043555945158\r\nStep 972, loss: 0.007098720874637365\r\nStep 973, loss: 0.00831522885710001\r\nStep 974, loss: 0.00845961831510067\r\nStep 975, loss: 0.007800284307450056\r\nStep 976, loss: 0.008622569963335991\r\nStep 977, loss: 0.007540627382695675\r\nStep 978, loss: 0.008258609101176262\r\nStep 979, loss: 0.008424363099038601\r\nStep 980, loss: 0.007538204547017813\r\nStep 981, loss: 0.008322594687342644\r\nStep 982, loss: 0.0069837411865592\r\nStep 983, loss: 0.008910678327083588\r\nStep 984, loss: 0.008503968827426434\r\nStep 985, loss: 0.009345966391265392\r\nStep 986, loss: 0.009745476767420769\r\nStep 987, loss: 0.008763987571001053\r\nStep 988, loss: 0.008663860149681568\r\nStep 989, loss: 0.007921400479972363\r\nStep 990, loss: 0.0094941146671772\r\nStep 991, loss: 0.0088986000046134\r\nStep 992, loss: 0.007342140190303326\r\nStep 1023, loss: 0.008014761842787266\r\nStep 1024, loss: 0.008970646187663078\r\nStep 1025, loss: 0.0091971755027771\r\nStep 1026, loss: 0.010525759309530258\r\nStep 1027, loss: 0.007817784324288368\r\nStep 1028, loss: 0.0067655304446816444\r\nStep 1029, loss: 0.008068695664405823\r\nStep 1030, loss: 0.008297930471599102\r\nStep 1031, loss: 0.00823703221976757\r\nStep 1032, loss: 0.0059892176650464535\r\nStep 1033, loss: 0.007247478235512972\r\nStep 1034, loss: 0.007533142808824778\r\nStep 1035, loss: 0.008760763332247734\r\nStep 1036, loss: 0.006972344126552343\r\nStep 1037, loss: 0.009282128885388374\r\nStep 1038, loss: 0.008325469680130482\r\nStep 1039, loss: 0.006402534432709217\r\nStep 1040, loss: 0.007785910740494728\r\nStep 1041, loss: 0.008724870160222054\r\nStep 1042, loss: 0.00852934829890728\r\nStep 1043, loss: 0.007693928200751543\r\nStep 1044, loss: 0.008846480399370193\r\nStep 1045, loss: 0.007957519963383675\r\nStep 1046, loss: 0.0077178203500807285\r\nStep 1047, loss: 0.008153329603374004\r\nStep 1048, loss: 0.009286276064813137\r\nStep 1075, loss: 0.008740746416151524\r\nStep 1076, loss: 0.006975969765335321\r\nStep 1077, loss: 0.007106286007910967\r\nStep 1078, loss: 0.008355746977031231\r\nStep 1079, loss: 0.008168650791049004\r\nStep 1080, loss: 0.008891562931239605\r\nStep 1081, loss: 0.008963626809418201\r\nStep 1082, loss: 0.008009166456758976\r\nStep 1083, loss: 0.008219603449106216\r\nStep 1084, loss: 0.008079380728304386\r\nStep 1085, loss: 0.007420422043651342\r\nStep 1086, loss: 0.00743855070322752\r\nStep 1087, loss: 0.008207092992961407\r\nStep 1088, loss: 0.006728052627295256\r\nStep 1089, loss: 0.007048305589705706\r\nStep 1090, loss: 0.00929422490298748\r\nStep 1091, loss: 0.008660108782351017\r\nStep 1092, loss: 0.007319759577512741\r\nStep 1093, loss: 0.009513264521956444\r\nStep 1094, loss: 0.006930184550583363\r\nStep 1095, loss: 0.008227167651057243\r\nStep 1096, loss: 0.007472679950296879\r\nStep 1097, loss: 0.00721663748845458\r\nStep 1098, loss: 0.008924340829253197\r\nStep 1099, loss: 0.008344174362719059\r\nStep 1100, loss: 0.009163778275251389\r\nStep 1101, loss: 0.008628742769360542\r\nStep 996, loss: 0.008273789659142494\r\nStep 997, loss: 0.00829209852963686\r\nStep 998, loss: 0.007412363775074482\r\nStep 999, loss: 0.0076874177902936935\r\nStep 1000, loss: 0.007640828378498554\r\nStep 1001, loss: 0.008421523496508598\r\nStep 1002, loss: 0.008363532833755016\r\nStep 1003, loss: 0.008224919438362122\r\nStep 1004, loss: 0.007614170666784048\r\nStep 1005, loss: 0.00808942224830389\r\nStep 1006, loss: 
0.00905570574104786\r\n[condensed: repeated, out-of-order training-loss lines (Steps 885–1103, loss ≈ 0.006–0.011), re-echoed several times by interleaved parallel workers]\r\n",,terminal_output
+636,2094384,"TERMINAL",0,0,"Step 901, loss: 0.008727189153432846\r\n[condensed: Steps 902–1078 with heavy duplication from interleaved worker output, loss ≈ 0.006–0.011]\r\nStep 1079, loss: 
0.008168650791049004\r\nStep 1080, loss: 0.008891562931239605\r\nStep 1081, loss: 0.008963626809418201\r\nStep 1082, loss: 0.008009166456758976\r\nStep 1083, loss: 0.008219603449106216\r\nStep 1084, loss: 0.008079380728304386\r\nStep 1085, loss: 0.007420422043651342\r\nStep 1086, loss: 0.00743855070322752\r\nStep 1087, loss: 0.008207092992961407\r\nStep 1088, loss: 0.006728052627295256\r\nStep 1089, loss: 0.007048305589705706\r\nStep 1090, loss: 0.00929422490298748\r\nStep 1091, loss: 0.008660108782351017\r\nStep 1092, loss: 0.007319759577512741\r\nStep 1093, loss: 0.009513264521956444\r\nStep 1094, loss: 0.006930184550583363\r\nStep 1095, loss: 0.008227167651057243\r\nStep 1096, loss: 0.007472679950296879\r\nStep 1097, loss: 0.00721663748845458\r\nStep 1098, loss: 0.008924340829253197\r\nStep 1099, loss: 0.008344174362719059\r\nStep 1100, loss: 0.009163778275251389\r\nStep 1101, loss: 0.008628742769360542\r\nStep 1102, loss: 0.008254705928266048\r\nStep 1103, loss: 0.008064809255301952\r\n",,terminal_output +637,2101929,"TERMINAL",0,0,"queue",,terminal_command +638,2101981,"TERMINAL",0,0,"]633;E;2025-07-04 11:09:37 queue;7be71f88-07f8-498b-82c5-01f117a78f25]633;C",,terminal_output +639,2102035,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1993.localdomain: Fri Jul 4 11:09:37 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3317098 accelerat train_la tum_cte0 PD\t0:00\t 2 (None)3316020 accelerat train_to tum_cte0 R 7:00:00 12 hkn[0407,0724-0726,0734-0735,0804-0805,0807,0809-0810,0815]3316026 accelerat train_to tum_cte0 R 7:00:00\t 4 hkn[0510-0511,0519,0707]3316022 accelerat train_to tum_cte0 R 7:03:55\t 2 hkn[0507,0520]3316019 accelerat train_to tum_cte0 R 8:46:02\t 8 hkn[0429,0436,0603-0604,0608,0612,0614,0820]3316016 accelerat train_to tum_cte0 R 10:22:10\t 1 hkn06343316924 accelerat interact tum_cte0 R33:55\t 1 hkn07333316923 accelerat interact tum_cte0 R34:22\t 4 hkn[0625-0628]",,terminal_output +640,2103139,"TERMINAL",0,0,"81163163",,terminal_output +641,2104020,"TERMINAL",0,0,"srun",,terminal_focus +642,2104134,"TERMINAL",0,0,"92274274",,terminal_output +643,2105188,"TERMINAL",0,0,"403385385",,terminal_output +644,2106191,"TERMINAL",0,0,"14496496",,terminal_output +645,2107241,"TERMINAL",0,0,"2554:00754:007",,terminal_output +646,2108285,"TERMINAL",0,0,"36618618",,terminal_output +647,2109387,"TERMINAL",0,0,"47729729",,terminal_output +648,2109699,"TERMINAL",0,0,"watch",,terminal_focus +649,2110370,"TERMINAL",0,0,"5883108330",,terminal_output +650,2111435,"TERMINAL",0,0,"69941941",,terminal_output +651,2112560,"TERMINAL",0,0,"76020to R 7:00:1\t 12 hkn[0407,0724-0726,0734-0735,0804-0805,0807,0809-0810,0815]610 4\t0510-0511,0519,0707]24:05\t 2\t0507,0520]198:46:12\t 8\t0429,0436,0603-0604,0608,0612,0614,0820]610:22:20\t 1 hkn06347098la 0:00\t 2 hkn[0629,0631]52",,terminal_output +652,2113482,"TERMINAL",0,0,"811631163",,terminal_output +653,2114686,"TERMINAL",0,0,"922742274",,terminal_output +654,2115938,"TERMINAL",0,0,"5033853385",,terminal_output +655,2116657,"TERMINAL",0,0,"144964496",,terminal_output +656,2118193,"TERMINAL",0,0,"25510755107",,terminal_output +657,2118710,"TERMINAL",0,0,"366186618",,terminal_output +658,2127740,"TERMINAL",0,0,"scancel 3316016",,terminal_command +659,2129355,"TERMINAL",0,0,"queue",,terminal_command +660,2129420,"TERMINAL",0,0,"]633;E;2025-07-04 11:10:04 queue;7be71f88-07f8-498b-82c5-01f117a78f25]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1993.localdomain: Fri Jul 4 11:10:04 2025JOBID PARTITION NAME USER ST\tTIME NODES 
NODELIST(REASON)3316016 accelerat train_to tum_cte0 CG 10:22:35\t 1 hkn06343316020 accelerat train_to tum_cte0 R 7:00:27 12 hkn[0407,0724-0726,0734-0735,0804-0805,0807,0809-0810,0815]3316026 accelerat train_to tum_cte0 R 7:00:27\t 4 hkn[0510-0511,0519,0707]3316022 accelerat train_to tum_cte0 R 7:04:22\t 2 hkn[0507,0520]3316019 accelerat train_to tum_cte0 R 8:46:29\t 8 hkn[0429,0436,0603-0604,0608,0612,0614,0820]3317098 accelerat train_la tum_cte0 R\t0:17\t 2 hkn[0629,0631]3316924 accelerat interact tum_cte0 R34:22\t 1 hkn07333316923 accelerat interact tum_cte0 R34:49\t 4 hkn[0625-0628]",,terminal_output +661,2130481,"TERMINAL",0,0,"5883308350",,terminal_output +662,2131505,"TERMINAL",0,0,"69941941",,terminal_output +663,2132488,"TERMINAL",0,0,"73030522052",,terminal_output +664,2133552,"TERMINAL",0,0,"81163163",,terminal_output +665,2134591,"TERMINAL",0,0,"92274274",,terminal_output +666,2135638,"TERMINAL",0,0,"103385385",,terminal_output +667,2136721,"TERMINAL",0,0,"14496496",,terminal_output +668,2137728,"TERMINAL",0,0,"2663186318",,terminal_output +669,2138813,"TERMINAL",0,0,"47729729",,terminal_output +670,2139820,"TERMINAL",0,0,"588340835:00",,terminal_output +671,2140857,"TERMINAL",0,0,"69941941",,terminal_output +672,2141907,"TERMINAL",0,0,"74040523052",,terminal_output +673,2142945,"TERMINAL",0,0,"81163163",,terminal_output +674,2144001,"TERMINAL",0,0,"92274274",,terminal_output +675,2145030,"TERMINAL",0,0,"203385385",,terminal_output +676,2146080,"TERMINAL",0,0,"14496496",,terminal_output +677,2147136,"TERMINAL",0,0,"2554075407",,terminal_output +678,2148205,"TERMINAL",0,0,"36618618",,terminal_output +679,2149216,"TERMINAL",0,0,"47729729",,terminal_output +680,2150261,"TERMINAL",0,0,"5883508310",,terminal_output +681,2151575,"TERMINAL",0,0,"69941941",,terminal_output +682,2152379,"TERMINAL",0,0,"75050524052",,terminal_output +683,2153418,"TERMINAL",0,0,"81163163",,terminal_output +684,2154491,"TERMINAL",0,0,"92274274",,terminal_output +685,2155550,"TERMINAL",0,0,"303385385",,terminal_output +686,2156595,"TERMINAL",0,0,"14496496",,terminal_output +687,2157837,"TERMINAL",0,0,"srun",,terminal_focus +688,2157891,"TERMINAL",0,0,"2555075507",,terminal_output +689,2157963,"TERMINAL",0,0,"\r[tum_cte0515@hkn0733 jafar]$ ",,terminal_output +690,2158827,"TERMINAL",0,0,"36618618",,terminal_output +691,2158844,"TERMINAL",0,0,"s",,terminal_output +692,2159081,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +693,2159691,"TERMINAL",0,0,"47729729",,terminal_output +694,2159716,"TERMINAL",0,0,"[?25la[?25h[?25ln[?25h",,terminal_output +695,2159917,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +696,2160009,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +697,2160112,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +698,2160211,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +699,2160688,"TERMINAL",0,0,"3316019",,terminal_output +700,2160714,"TERMINAL",0,0,"58837:008320",,terminal_output +701,2161858,"TERMINAL",0,0,"61:001:00525052",,terminal_output +702,2162787,"TERMINAL",0,0,"81163163",,terminal_output +703,2163807,"TERMINAL",0,0,"92274274",,terminal_output +704,2164844,"TERMINAL",0,0,"403385385",,terminal_output +705,2165911,"TERMINAL",0,0,"14496496",,terminal_output +706,2166946,"TERMINAL",0,0,"2555:00755:007",,terminal_output +707,2167984,"TERMINAL",0,0,"36618618",,terminal_output +708,2169023,"TERMINAL",0,0,"47729729",,terminal_output +709,2169997,"TERMINAL",0,0,"[?25l3316019\r\n[?2004l\r[?25h]0;tum_cte0515@hkn0733:~/Projects/jafar[?2004h[tum_cte0515@hkn0733 jafar]$ ",,terminal_output 
+710,2170101,"TERMINAL",0,0,"M53316019 accelerat train_to tum_cte0 CG 8:47:10\t 8 hkn[0429,0436,0603-0604,0608,0612,0614,0820]8838330",,terminal_output +711,2171134,"TERMINAL",0,0,"6994941",,terminal_output +712,2172151,"TERMINAL",0,0,"7101051:0052",,terminal_output +713,2173286,"TERMINAL",0,0,"8116163",,terminal_output +714,2174309,"TERMINAL",0,0,"\r920 R 7:01:12 12 hkn[0407,0724-0726,0734-0735,0804-0805,0807,0809-0810,0815]62 4\t0510-0511,0519,0707]25:07\t 2\t0507,0520]7098la 1:02629,06316924interact35:07\t 1 hkn0733334\t 4 hkn[0625-0628]",,terminal_output +715,2175305,"TERMINAL",0,0,"50338385",,terminal_output +716,2176331,"TERMINAL",0,0,"1449496",,terminal_output +717,2177099,"TERMINAL",0,0,"scancel 3316019",,terminal_output +718,2177393,"TERMINAL",0,0,"255105107",,terminal_output +719,2178322,"TERMINAL",0,0,"2",,terminal_output +720,2178384,"TERMINAL",0,0,"[?25l0[?25h",,terminal_output +721,2179124,"TERMINAL",0,0,"3772729",,terminal_output +722,2180145,"TERMINAL",0,0,"58838340",,terminal_output +723,2181270,"TERMINAL",0,0,"6994941",,terminal_output +724,2182244,"TERMINAL",0,0,"7202051052",,terminal_output +725,2184613,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0733:~/Projects/jafar[?2004h[tum_cte0515@hkn0733 jafar]$ ",,terminal_output +726,2184633,"TERMINAL",0,0,"81161639207:01:21 12\t0407,0724-0726,0734-0735,0804-0805,0807,0809-0810,0815]19CG 8:47:10 8\t0429,0436,0603-0604,0608,0612,0614,0820]27274",,terminal_output +727,2185376,"TERMINAL",0,0,"1:0038385",,terminal_output +728,2186398,"TERMINAL",0,0,"149496",,terminal_output +729,2187451,"TERMINAL",0,0,"25205207",,terminal_output +730,2188488,"TERMINAL",0,0,"361618",,terminal_output +731,2189586,"TERMINAL",0,0,"472729",,terminal_output +732,2190230,"TERMINAL",0,0,"scancel 3316020",,terminal_output +733,2190585,"TERMINAL",0,0,"5838350",,terminal_output +734,2191482,"TERMINAL",0,0,"6",,terminal_output +735,2191597,"TERMINAL",0,0,"694941",,terminal_output +736,2192714,"TERMINAL",0,0,"73052052",,terminal_output +737,2193765,"TERMINAL",0,0,"816163",,terminal_output +738,2194788,"TERMINAL",0,0,"938385",,terminal_output +739,2195716,"TERMINAL",0,0,"[?25l[?2004l\r[?25h]0;tum_cte0515@hkn0733:~/Projects/jafar[?2004h[tum_cte0515@hkn0733 jafar]$ ",,terminal_output +740,2195834,"TERMINAL",0,0,"11267:01:33\t 4\t0510-0511,0519,0707]19CG 8:47:10\t 8\t0429,0436,0603-0604,0608,0612,0614,0820]9496",,terminal_output +741,2196938,"TERMINAL",0,0,"2305307",,terminal_output +742,2197965,"TERMINAL",0,0,"31618",,terminal_output +743,2198919,"TERMINAL",0,0,"42729",,terminal_output +744,2199962,"TERMINAL",0,0,"53836:00",,terminal_output +745,2201015,"TERMINAL",0,0,"64941",,terminal_output +746,2202059,"TERMINAL",0,0,"753052",,terminal_output +747,2203186,"TERMINAL",0,0,"86163",,terminal_output +748,2204150,"TERMINAL",0,0,"97274",,terminal_output +749,2205217,"TERMINAL",0,0,"208385",,terminal_output +750,2206248,"TERMINAL",0,0,"19496",,terminal_output +751,2207284,"TERMINAL",0,0,"2405407",,terminal_output +752,2208320,"TERMINAL",0,0,"31618",,terminal_output +753,2209356,"TERMINAL",0,0,"42729",,terminal_output +754,2209675,"TERMINAL",0,0,"srun",,terminal_focus +755,2210427,"TERMINAL",0,0,"538310",,terminal_output +756,2211484,"TERMINAL",0,0,"64941",,terminal_output +757,2212505,"TERMINAL",0,0,"754052",,terminal_output +758,2213632,"TERMINAL",0,0,"86163",,terminal_output +759,2214648,"TERMINAL",0,0,"97274",,terminal_output +760,2215789,"TERMINAL",0,0,"302]8385",,terminal_output +761,2216815,"TERMINAL",0,0,"11 hkn04299496",,terminal_output 
+762,2217751,"TERMINAL",0,0,"\r222 R 7:05:51\t 2 hkn[0507,0520]7098la 1:46629,06316924interact35:51\t 1 hkn073336:18\t 4 hkn[0625-0628]",,terminal_output +763,2218807,"TERMINAL",0,0,"42729",,terminal_output +764,2219860,"TERMINAL",0,0,"538320",,terminal_output +765,2220992,"TERMINAL",0,0,"64941",,terminal_output +766,2221950,"TERMINAL",0,0,"755052",,terminal_output +767,2223219,"TERMINAL",0,0,"86163",,terminal_output +768,2224040,"TERMINAL",0,0,"97274",,terminal_output +769,2225098,"TERMINAL",0,0,"408385",,terminal_output +770,2226228,"TERMINAL",0,0,"19496",,terminal_output +771,2227187,"TERMINAL",0,0,"26:0056:007",,terminal_output +772,2228238,"TERMINAL",0,0,"31618",,terminal_output +773,2229298,"TERMINAL",0,0,"4 1 hkn04072729",,terminal_output +774,2230334,"TERMINAL",0,0,"\r538330",,terminal_output +775,2231370,"TERMINAL",0,0,"64941",,terminal_output +776,2232475,"TERMINAL",0,0,"752:0052",,terminal_output +777,2233494,"TERMINAL",0,0,"86163",,terminal_output +778,2234511,"TERMINAL",0,0,"97274",,terminal_output +779,2235559,"TERMINAL",0,0,"508385",,terminal_output +780,2236584,"TERMINAL",0,0,"19496",,terminal_output +781,2237695,"TERMINAL",0,0,"2105107",,terminal_output +782,2238719,"TERMINAL",0,0,"31618",,terminal_output +783,2239553,"TERMINAL",0,0,"watch",,terminal_focus +784,2239722,"TERMINAL",0,0,"42729",,terminal_output +785,2240696,"TERMINAL",0,0,"Step 1104, loss: 0.008430581539869308\r\nStep 1105, loss: 0.00841981265693903\r\nStep 1106, loss: 0.007940137758851051\r\nStep 1107, loss: 0.007895980961620808\r\nStep 1108, loss: 0.006901422515511513\r\nStep 1109, loss: 0.008039712905883789\r\nStep 1110, loss: 0.0071457126177847385\r\nStep 1111, loss: 0.0062765637412667274\r\nStep 1112, loss: 0.006268054246902466\r\nStep 1113, loss: 0.008898375555872917\r\nStep 1114, loss: 0.007913569919764996\r\nStep 1115, loss: 0.00898384302854538\r\nStep 1116, loss: 0.009606389328837395\r\nStep 1117, loss: 0.009147465229034424\r\nStep 1118, loss: 0.009823444299399853\r\nStep 1119, loss: 0.008573556318879128\r\nStep 1120, loss: 0.008565381169319153\r\nStep 1121, loss: 0.010354341939091682\r\nStep 1122, loss: 0.007907861843705177\r\nStep 1123, loss: 0.006447137333452702\r\nStep 1124, loss: 0.007331363391131163\r\nStep 1125, loss: 0.007913784123957157\r\nStep 1126, loss: 0.0072159841656684875\r\nStep 1127, loss: 0.007822099141776562\r\nStep 1128, loss: 0.007812371477484703\r\nStep 1129, loss: 0.007498166523873806\r\nStep 1130, loss: 0.009019688703119755\r\nStep 1131, loss: 0.00851128064095974\r\nStep 1132, loss: 0.009014005772769451\r\nStep 1133, loss: 0.00810469314455986\r\nStep 1134, loss: 0.008381768129765987\r\nStep 1135, loss: 0.009265664964914322\r\nStep 1136, loss: 0.007494001649320126\r\nStep 1137, loss: 0.008810305967926979\r\nStep 1138, loss: 0.009191278368234634\r\nStep 1139, loss: 0.008481101132929325\r\nStep 1140, loss: 0.007047205697745085\r\nStep 1141, loss: 0.007649500388652086\r\nStep 1142, loss: 0.007546321023255587\r\nStep 1143, loss: 0.0072618527337908745\r\nStep 1144, loss: 0.008346114307641983\r\nStep 1145, loss: 0.007447859738022089\r\nStep 1146, loss: 0.007449342869222164\r\nStep 1147, loss: 0.007476987317204475\r\nStep 1148, loss: 0.008716730400919914\r\nStep 1149, loss: 0.007617637515068054\r\nStep 1150, loss: 0.007324608042836189\r\nStep 1151, loss: 0.00871486496180296\r\nStep 1152, loss: 0.008261012844741344\r\nStep 1153, loss: 0.008820002898573875\r\nStep 1154, loss: 0.007386986631900072\r\nStep 1155, loss: 0.006287720985710621\r\nStep 1156, loss: 0.007239687722176313\r\nStep 
1157, loss: 0.007756705861538649\r\nStep 1158, loss: 0.009660148993134499\r\nStep 1159, loss: 0.00909856241196394\r\nStep 1160, loss: 0.008668644353747368\r\nStep 1161, loss: 0.009067384526133537\r\nStep 1162, loss: 0.008766302838921547\r\nStep 1163, loss: 0.00838024914264679\r\nStep 1164, loss: 0.007747810333967209\r\nStep 1165, loss: 0.008620062842965126\r\nStep 1166, loss: 0.007923033088445663\r\nStep 1167, loss: 0.00863476563245058\r\nStep 1168, loss: 0.007505552843213081\r\nStep 1169, loss: 0.00754917599260807\r\nStep 1170, loss: 0.006123295985162258\r\nStep 1171, loss: 0.008391601964831352\r\nStep 1172, loss: 0.008755280636250973\r\nStep 1173, loss: 0.00787621270865202\r\nStep 1174, loss: 0.010167556814849377\r\nStep 1175, loss: 0.008044450543820858\r\nStep 1176, loss: 0.007125321310013533\r\nStep 1177, loss: 0.009197923354804516\r\nStep 1178, loss: 0.010858401656150818\r\nStep 1179, loss: 0.00829737912863493\r\nStep 1180, loss: 0.009660663083195686\r\nStep 1181, loss: 0.008006054908037186\r\nStep 1182, loss: 0.006648952607065439\r\nStep 1183, loss: 0.008937581442296505\r\nStep 1184, loss: 0.007444542367011309\r\nStep 1185, loss: 0.007305907551199198\r\nStep 1186, loss: 0.008125263266265392\r\nStep 1187, loss: 0.007729206699877977\r\nStep 1188, loss: 0.006833734456449747\r\nStep 1189, loss: 0.008362866006791592\r\nStep 1190, loss: 0.008750674314796925\r\nStep 1191, loss: 0.007162139751017094\r\nStep 1192, loss: 0.007618994452059269\r\nStep 1193, loss: 0.0062854476273059845\r\nStep 1194, loss: 0.010187610052525997\r\nStep 1195, loss: 0.00802543479949236\r\nStep 1196, loss: 0.008903078734874725\r\nStep 1197, loss: 0.009383228607475758\r\nStep 1198, loss: 0.007098838221281767\r\nStep 1199, loss: 0.00905842985957861\r\nStep 1200, loss: 0.007517555728554726\r\nStep 1201, loss: 0.008031006902456284\r\nStep 1202, loss: 0.008288324810564518\r\nStep 1203, loss: 0.007185104303061962\r\nStep 1204, loss: 0.008664080873131752\r\nStep 1205, loss: 0.007589227519929409\r\nStep 1206, loss: 0.008468304760754108\r\nStep 1207, loss: 0.007795711047947407\r\nStep 1208, loss: 0.00795590691268444\r\nStep 1209, loss: 0.007503575179725885\r\nStep 1210, loss: 0.00678598415106535\r\nStep 1211, loss: 0.007746693212538958\r\nStep 1212, loss: 0.007528770249336958\r\nStep 1213, loss: 0.007910806685686111\r\nStep 1214, loss: 0.008487655781209469\r\nStep 1215, loss: 0.008553363382816315\r\nStep 1216, loss: 0.00829324871301651\r\nStep 1217, loss: 0.007361422758549452\r\nStep 1218, loss: 0.008829704485833645\r\nStep 1219, loss: 0.008006290532648563\r\nStep 1220, loss: 0.006867749150842428\r\nStep 1221, loss: 0.008367742411792278\r\nStep 1222, loss: 0.008380208164453506\r\nStep 1223, loss: 0.006569594144821167\r\nStep 1224, loss: 0.010115769691765308\r\nStep 1225, loss: 0.007681806106120348\r\nStep 1226, loss: 0.008582423441112041\r\nStep 1227, loss: 0.007142219692468643\r\nStep 1228, loss: 0.00791062880307436\r\nStep 1229, loss: 0.009301824495196342\r\nStep 1230, loss: 0.007939581759274006\r\nStep 1231, loss: 0.007700969930738211\r\nStep 1232, loss: 0.008206518366932869\r\nStep 1233, loss: 0.007344633806496859\r\nStep 1234, loss: 0.007652160711586475\r\nStep 1235, loss: 0.007586072664707899\r\nStep 1236, loss: 0.008145266212522984\r\nStep 1237, loss: 0.006902631372213364\r\nStep 1238, loss: 0.007209500763565302\r\nStep 1239, loss: 0.009341097436845303\r\nStep 1240, loss: 0.007731970865279436\r\nStep 1241, loss: 0.00972119253128767\r\nStep 1242, loss: 0.007510048802942038\r\nStep 1243, loss: 0.007368344813585281\r\nStep 1244, loss: 0.009518461301922798\r\nStep 1245, loss: 0.00760129326954484\r\nStep 1246, loss: 0.007287587970495224\r\nStep 1247, loss: 0.008109740912914276\r\nStep 1248, loss: 0.007578303571790457\r\nStep 1249, loss: 0.007832775823771954\r\nStep 1250, loss: 0.008860357105731964\r\nStep 1251, loss: 0.008427662774920464\r\nStep 1252, loss: 0.0073447804898023605\r\nStep 1253, loss: 0.008862867020070553\r\nStep 1254, loss: 0.008418967016041279\r\nStep 1255, loss: 0.006864132825285196\r\nStep 1256, loss: 0.008471546694636345\r\nStep 1257, loss: 0.006829363293945789\r\nStep 1258, loss: 0.007636746857315302\r\nStep 1259, loss: 0.0077097462490201\r\nStep 1260, loss: 0.008299724198877811\r\nStep 1261, loss: 0.008214874193072319\r\nStep 1262, loss: 0.009514445438981056\r\nStep 1263, loss: 0.008784919045865536\r\nStep 1264, loss: 0.009564245119690895\r\nStep 1265, loss: 0.008695069700479507\r\nStep 1266, loss: 0.007042749784886837\r\nStep 1267, loss: 0.008229956030845642\r\nStep 1268, loss: 0.009325158782303333\r\nStep 1269, loss: 0.007282247766852379\r\nStep 1270, loss: 0.008544599637389183\r\nStep 1271, loss: 0.0090555464848876\r\nStep 1272, loss: 0.0075870705768466\r\nStep 1273, loss: 0.007510579191148281\r\nStep 1274, loss: 0.006632499862462282\r\nStep 1275, loss: 0.008506187237799168\r\nStep 1276, loss: 0.008988509885966778\r\nStep 1277, loss: 0.006992874667048454\r\nStep 1278, loss: 0.006439715623855591\r\nStep 1279, loss: 0.0075194938108325005\r\nStep 1280, loss: 0.007755130063742399\r\nStep 1281, loss: 0.008549924939870834\r\nStep 1282, loss: 0.007439759094268084\r\nStep 1283, loss: 0.006237460300326347\r\nStep 1284, loss: 0.008101397193968296\r\nStep 1285, loss: 0.007658988703042269\r\nStep 1286, loss: 0.008465925231575966\r\nStep 1287, loss: 0.008804348297417164\r\nStep 1288, loss: 0.00851796381175518\r\nStep 1289, loss: 0.007892305962741375\r\nStep 1290, loss: 0.008276136592030525\r\nStep 1291, loss: 0.007690537255257368\r\nStep 1292, loss: 0.007581097073853016\r\nStep 1293, loss: 0.0071375006809830666\r\nStep 1294, loss: 0.008262865245342255\r\nStep 1295, loss: 0.006938433274626732\r\nStep 1296, loss: 0.007333381567150354\r\nStep 1297, loss: 0.008206418715417385\r\nStep 1298, loss: 0.008021431043744087\r\nStep 1299, loss: 0.008770645596086979\r\nStep 1300, loss: 0.008060443215072155\r\nStep 1301, loss: 0.0062043797224760056\r\nStep 1302, loss: 0.007236831355839968\r\nStep 1303, loss: 0.008011898957192898\r\nStep 1304, loss: 0.0067819394171237946\r\nStep 1305, loss: 0.00891581829637289\r\nStep 1306, loss: 0.008170664310455322\r\nStep 1307, loss: 0.007513105403631926\r\nStep 1308, loss: 0.007368546444922686\r\nStep 1309, loss: 0.0065758246928453445\r\nStep 1310, loss: 0.008839765563607216\r\nStep 1311, loss: 0.008590616285800934\r\nStep 1312, loss: 0.007750781252980232\r\nStep 1313, loss: 0.008137338794767857\r\nStep 1314, loss: 0.0072521851398050785\r\nStep 1315, loss: 0.007789584342390299\r\nStep 1316, loss: 0.00861946027725935\r\nStep 1317, loss: 0.00788140669465065\r\nStep 1318, loss: 0.00731404684484005\r\nStep 1319, loss: 0.007267243694514036\r\nStep 1292, loss: 0.007581097073853016\r\nStep 1293, loss: 
0.0071375006809830666\r\nStep 1294, loss: 0.008262865245342255\r\nStep 1295, loss: 0.006938433274626732\r\nStep 1296, loss: 0.007333381567150354\r\nStep 1297, loss: 0.008206418715417385\r\nStep 1298, loss: 0.008021431043744087\r\nStep 1299, loss: 0.008770645596086979\r\nStep 1300, loss: 0.008060443215072155\r\nStep 1301, loss: 0.0062043797224760056\r\nStep 1302, loss: 0.007236831355839968\r\nStep 1303, loss: 0.008011898957192898\r\nStep 1304, loss: 0.0067819394171237946\r\nStep 1305, loss: 0.00891581829637289\r\nStep 1306, loss: 0.008170664310455322\r\nStep 1307, loss: 0.007513105403631926\r\nStep 1308, loss: 0.007368546444922686\r\nStep 1309, loss: 0.0065758246928453445\r\nStep 1310, loss: 0.008839765563607216\r\nStep 1311, loss: 0.008590616285800934\r\nStep 1312, loss: 0.007750781252980232\r\nStep 1313, loss: 0.008137338794767857\r\nStep 1314, loss: 0.0072521851398050785\r\nStep 1315, loss: 0.007789584342390299\r\nStep 1316, loss: 0.00861946027725935\r\nStep 1317, loss: 0.00788140669465065\r\nStep 1212, loss: 0.007528770249336958\r\nStep 1213, loss: 0.007910806685686111\r\nStep 1214, loss: 0.008487655781209469\r\nStep 1215, loss: 0.008553363382816315\r\nStep 1216, loss: 0.00829324871301651\r\nStep 1217, loss: 0.007361422758549452\r\nStep 1218, loss: 0.008829704485833645\r\nStep 1219, loss: 0.008006290532648563\r\nStep 1220, loss: 0.006867749150842428\r\nStep 1221, loss: 0.008367742411792278\r\nStep 1222, loss: 0.008380208164453506\r\nStep 1223, loss: 0.006569594144821167\r\nStep 1224, loss: 0.010115769691765308\r\nStep 1225, loss: 0.007681806106120348\r\nStep 1226, loss: 0.008582423441112041\r\nStep 1227, loss: 0.007142219692468643\r\nStep 1228, loss: 0.00791062880307436\r\nStep 1229, loss: 0.009301824495196342\r\nStep 1230, loss: 0.007939581759274006\r\nStep 1231, loss: 0.007700969930738211\r\nStep 1232, loss: 0.008206518366932869\r\nStep 1233, loss: 0.007344633806496859\r\nStep 1234, loss: 0.007652160711586475\r\nStep 1235, loss: 0.007586072664707899\r\nStep 1236, loss: 0.008145266212522984\r\nStep 1237, loss: 0.006902631372213364\r\nStep 1209, loss: 0.007503575179725885\r\nStep 1210, loss: 0.00678598415106535\r\nStep 1211, loss: 0.007746693212538958\r\nStep 1104, loss: 0.008430581539869308\r\nStep 1105, loss: 0.00841981265693903\r\nStep 1106, loss: 0.007940137758851051\r\nStep 1107, loss: 0.007895980961620808\r\nStep 1108, loss: 0.006901422515511513\r\nStep 1109, loss: 0.008039712905883789\r\nStep 1110, loss: 0.0071457126177847385\r\nStep 1111, loss: 0.0062765637412667274\r\nStep 1112, loss: 0.006268054246902466\r\nStep 1113, loss: 0.008898375555872917\r\nStep 1114, loss: 0.007913569919764996\r\nStep 1115, loss: 0.00898384302854538\r\nStep 1116, loss: 0.009606389328837395\r\nStep 1117, loss: 0.009147465229034424\r\nStep 1118, loss: 0.009823444299399853\r\nStep 1119, loss: 0.008573556318879128\r\nStep 1120, loss: 0.008565381169319153\r\nStep 1121, loss: 0.010354341939091682\r\nStep 1122, loss: 0.007907861843705177\r\nStep 1123, loss: 0.006447137333452702\r\nStep 1124, loss: 0.007331363391131163\r\nStep 1125, loss: 0.007913784123957157\r\nStep 1126, loss: 0.0072159841656684875\r\nStep 1127, loss: 0.007822099141776562\r\nStep 1128, loss: 0.007812371477484703\r\nStep 1129, loss: 0.007498166523873806\r\nStep 1318, loss: 0.00731404684484005\r\nStep 1319, loss: 0.007267243694514036\r\nStep 1238, loss: 0.007209500763565302\r\nStep 1239, loss: 0.009341097436845303\r\nStep 1240, loss: 0.007731970865279436\r\nStep 1241, loss: 0.00972119253128767\r\nStep 1242, loss: 0.007510048802942038\r\nStep 
1243, loss: 0.007368344813585281\r\nStep 1244, loss: 0.009518461301922798\r\nStep 1245, loss: 0.00760129326954484\r\nStep 1246, loss: 0.007287587970495224\r\nStep 1247, loss: 0.008109740912914276\r\nStep 1248, loss: 0.007578303571790457\r\nStep 1249, loss: 0.007832775823771954\r\nStep 1250, loss: 0.008860357105731964\r\nStep 1251, loss: 0.008427662774920464\r\nStep 1252, loss: 0.0073447804898023605\r\nStep 1253, loss: 0.008862867020070553\r\nStep 1254, loss: 0.008418967016041279\r\nStep 1255, loss: 0.006864132825285196\r\nStep 1256, loss: 0.008471546694636345\r\nStep 1257, loss: 0.006829363293945789\r\nStep 1258, loss: 0.007636746857315302\r\nStep 1259, loss: 0.0077097462490201\r\nStep 1260, loss: 0.008299724198877811\r\nStep 1261, loss: 0.008214874193072319\r\nStep 1262, loss: 0.009514445438981056\r\nStep 1263, loss: 0.008784919045865536\r\nStep 1264, loss: 0.009564245119690895\r\nStep 1104, loss: 0.008430581539869308\r\nStep 1105, loss: 0.00841981265693903\r\nStep 1106, loss: 0.007940137758851051\r\nStep 1107, loss: 0.007895980961620808\r\nStep 1108, loss: 0.006901422515511513\r\nStep 1109, loss: 0.008039712905883789\r\nStep 1110, loss: 0.0071457126177847385\r\nStep 1111, loss: 0.0062765637412667274\r\nStep 1112, loss: 0.006268054246902466\r\nStep 1113, loss: 0.008898375555872917\r\nStep 1114, loss: 0.007913569919764996\r\nStep 1115, loss: 0.00898384302854538\r\nStep 1116, loss: 0.009606389328837395\r\nStep 1117, loss: 0.009147465229034424\r\nStep 1118, loss: 0.009823444299399853\r\nStep 1119, loss: 0.008573556318879128\r\nStep 1120, loss: 0.008565381169319153\r\nStep 1121, loss: 0.010354341939091682\r\nStep 1122, loss: 0.007907861843705177\r\nStep 1123, loss: 0.006447137333452702\r\nStep 1124, loss: 0.007331363391131163\r\nStep 1125, loss: 0.007913784123957157\r\nStep 1126, loss: 0.0072159841656684875\r\nStep 1127, loss: 0.007822099141776562\r\nStep 1128, loss: 0.007812371477484703\r\nStep 1129, loss: 0.007498166523873806\r\nStep 1130, loss: 0.009019688703119755\r\nStep 1131, loss: 0.00851128064095974\r\nStep 1132, loss: 0.009014005772769451\r\nStep 1133, loss: 0.00810469314455986\r\nStep 1134, loss: 0.008381768129765987\r\nStep 1135, loss: 0.009265664964914322\r\nStep 1136, loss: 0.007494001649320126\r\nStep 1137, loss: 0.008810305967926979\r\nStep 1138, loss: 0.009191278368234634\r\nStep 1139, loss: 0.008481101132929325\r\nStep 1140, loss: 0.007047205697745085\r\nStep 1141, loss: 0.007649500388652086\r\nStep 1142, loss: 0.007546321023255587\r\nStep 1143, loss: 0.0072618527337908745\r\nStep 1144, loss: 0.008346114307641983\r\nStep 1145, loss: 0.007447859738022089\r\nStep 1146, loss: 0.007449342869222164\r\nStep 1147, loss: 0.007476987317204475\r\nStep 1148, loss: 0.008716730400919914\r\nStep 1149, loss: 0.007617637515068054\r\nStep 1150, loss: 0.007324608042836189\r\nStep 1151, loss: 0.00871486496180296\r\nStep 1152, loss: 0.008261012844741344\r\nStep 1153, loss: 0.008820002898573875\r\nStep 1154, loss: 0.007386986631900072\r\nStep 1155, loss: 0.006287720985710621\r\nStep 1212, loss: 0.007528770249336958\r\nStep 1213, loss: 0.007910806685686111\r\nStep 1214, loss: 0.008487655781209469\r\nStep 1215, loss: 0.008553363382816315\r\nStep 1216, loss: 0.00829324871301651\r\nStep 1217, loss: 0.007361422758549452\r\nStep 1218, loss: 0.008829704485833645\r\nStep 1219, loss: 0.008006290532648563\r\nStep 1220, loss: 0.006867749150842428\r\nStep 1221, loss: 0.008367742411792278\r\nStep 1222, loss: 0.008380208164453506\r\nStep 1223, loss: 0.006569594144821167\r\nStep 1224, loss: 
0.010115769691765308\r\nStep 1225, loss: 0.007681806106120348\r\nStep 1226, loss: 0.008582423441112041\r\nStep 1227, loss: 0.007142219692468643\r\nStep 1228, loss: 0.00791062880307436\r\nStep 1229, loss: 0.009301824495196342\r\nStep 1230, loss: 0.007939581759274006\r\nStep 1231, loss: 0.007700969930738211\r\nStep 1232, loss: 0.008206518366932869\r\nStep 1233, loss: 0.007344633806496859\r\nStep 1234, loss: 0.007652160711586475\r\nStep 1235, loss: 0.007586072664707899\r\nStep 1236, loss: 0.008145266212522984\r\nStep 1237, loss: 0.006902631372213364\r\nStep 1265, loss: 0.008695069700479507\r\nStep 1266, loss: 0.007042749784886837\r\nStep 1267, loss: 0.008229956030845642\r\nStep 1268, loss: 0.009325158782303333\r\nStep 1269, loss: 0.007282247766852379\r\nStep 1270, loss: 0.008544599637389183\r\nStep 1271, loss: 0.0090555464848876\r\nStep 1272, loss: 0.0075870705768466\r\nStep 1273, loss: 0.007510579191148281\r\nStep 1274, loss: 0.006632499862462282\r\nStep 1275, loss: 0.008506187237799168\r\nStep 1276, loss: 0.008988509885966778\r\nStep 1277, loss: 0.006992874667048454\r\nStep 1278, loss: 0.006439715623855591\r\nStep 1279, loss: 0.0075194938108325005\r\nStep 1280, loss: 0.007755130063742399\r\nStep 1281, loss: 0.008549924939870834\r\nStep 1282, loss: 0.007439759094268084\r\nStep 1283, loss: 0.006237460300326347\r\nStep 1284, loss: 0.008101397193968296\r\nStep 1285, loss: 0.007658988703042269\r\nStep 1286, loss: 0.008465925231575966\r\nStep 1287, loss: 0.008804348297417164\r\nStep 1288, loss: 0.00851796381175518\r\nStep 1289, loss: 0.007892305962741375\r\nStep 1290, loss: 0.008276136592030525\r\nStep 1291, loss: 0.007690537255257368\r\nStep 1130, loss: 0.009019688703119755\r\nStep 1131, loss: 0.00851128064095974\r\nStep 1132, loss: 0.009014005772769451\r\nStep 1133, loss: 0.00810469314455986\r\nStep 1134, loss: 0.008381768129765987\r\nStep 1135, loss: 0.009265664964914322\r\nStep 1136, loss: 0.007494001649320126\r\nStep 1137, loss: 0.008810305967926979\r\nStep 1138, loss: 0.009191278368234634\r\nStep 1139, loss: 0.008481101132929325\r\nStep 1140, loss: 0.007047205697745085\r\nStep 1141, loss: 0.007649500388652086\r\nStep 1142, loss: 0.007546321023255587\r\nStep 1143, loss: 0.0072618527337908745\r\nStep 1144, loss: 0.008346114307641983\r\nStep 1145, loss: 0.007447859738022089\r\nStep 1146, loss: 0.007449342869222164\r\nStep 1147, loss: 0.007476987317204475\r\nStep 1148, loss: 0.008716730400919914\r\nStep 1149, loss: 0.007617637515068054\r\nStep 1150, loss: 0.007324608042836189\r\nStep 1151, loss: 0.00871486496180296\r\nStep 1152, loss: 0.008261012844741344\r\nStep 1153, loss: 0.008820002898573875\r\nStep 1154, loss: 0.007386986631900072\r\nStep 1155, loss: 0.006287720985710621\r\nStep 1156, loss: 0.007239687722176313\r\nStep 1157, loss: 0.007756705861538649\r\nStep 1158, loss: 0.009660148993134499\r\nStep 1159, loss: 0.00909856241196394\r\nStep 1160, loss: 0.008668644353747368\r\nStep 1161, loss: 0.009067384526133537\r\nStep 1162, loss: 0.008766302838921547\r\nStep 1163, loss: 0.00838024914264679\r\nStep 1164, loss: 0.007747810333967209\r\nStep 1165, loss: 0.008620062842965126\r\nStep 1166, loss: 0.007923033088445663\r\nStep 1167, loss: 0.00863476563245058\r\nStep 1168, loss: 0.007505552843213081\r\nStep 1169, loss: 0.00754917599260807\r\nStep 1170, loss: 0.006123295985162258\r\nStep 1171, loss: 0.008391601964831352\r\nStep 1172, loss: 0.008755280636250973\r\nStep 1173, loss: 0.00787621270865202\r\nStep 1174, loss: 0.010167556814849377\r\nStep 1175, loss: 0.008044450543820858\r\nStep 1176, loss: 
0.007125321310013533\r\nStep 1177, loss: 0.009197923354804516\r\nStep 1178, loss: 0.010858401656150818\r\nStep 1179, loss: 0.00829737912863493\r\nStep 1180, loss: 0.009660663083195686\r\nStep 1181, loss: 0.008006054908037186\r\nStep 1182, loss: 0.006648952607065439\r\nStep 1238, loss: 0.007209500763565302\r\nStep 1239, loss: 0.009341097436845303\r\nStep 1240, loss: 0.007731970865279436\r\nStep 1241, loss: 0.00972119253128767\r\nStep 1242, loss: 0.007510048802942038\r\nStep 1243, loss: 0.007368344813585281\r\nStep 1244, loss: 0.009518461301922798\r\nStep 1245, loss: 0.00760129326954484\r\nStep 1246, loss: 0.007287587970495224\r\nStep 1247, loss: 0.008109740912914276\r\nStep 1248, loss: 0.007578303571790457\r\nStep 1249, loss: 0.007832775823771954\r\nStep 1250, loss: 0.008860357105731964\r\nStep 1251, loss: 0.008427662774920464\r\nStep 1252, loss: 0.0073447804898023605\r\nStep 1253, loss: 0.008862867020070553\r\nStep 1254, loss: 0.008418967016041279\r\nStep 1255, loss: 0.006864132825285196\r\nStep 1256, loss: 0.008471546694636345\r\nStep 1257, loss: 0.006829363293945789\r\nStep 1258, loss: 0.007636746857315302\r\nStep 1259, loss: 0.0077097462490201\r\nStep 1260, loss: 0.008299724198877811\r\nStep 1261, loss: 0.008214874193072319\r\nStep 1262, loss: 0.009514445438981056\r\nStep 1263, loss: 0.008784919045865536\r\nStep 1264, loss: 0.009564245119690895\r\nStep 1292, loss: 0.007581097073853016\r\nStep 1293, loss: 0.0071375006809830666\r\nStep 1294, loss: 0.008262865245342255\r\nStep 1295, loss: 0.006938433274626732\r\nStep 1296, loss: 0.007333381567150354\r\nStep 1297, loss: 0.008206418715417385\r\nStep 1298, loss: 0.008021431043744087\r\nStep 1299, loss: 0.008770645596086979\r\nStep 1300, loss: 0.008060443215072155\r\nStep 1301, loss: 0.0062043797224760056\r\nStep 1302, loss: 0.007236831355839968\r\nStep 1303, loss: 0.008011898957192898\r\nStep 1304, loss: 0.0067819394171237946\r\nStep 1305, loss: 0.00891581829637289\r\nStep 1306, loss: 0.008170664310455322\r\nStep 1307, loss: 0.007513105403631926\r\nStep 1308, loss: 0.007368546444922686\r\nStep 1309, loss: 0.0065758246928453445\r\nStep 1310, loss: 0.008839765563607216\r\nStep 1311, loss: 0.008590616285800934\r\nStep 1312, loss: 0.007750781252980232\r\nStep 1313, loss: 0.008137338794767857\r\nStep 1314, loss: 0.0072521851398050785\r\nStep 1315, loss: 0.007789584342390299\r\nStep 1316, loss: 0.00861946027725935\r\nStep 1317, loss: 0.00788140669465065\r\nStep 1156, loss: 0.007239687722176313\r\nStep 1157, loss: 0.007756705861538649\r\nStep 1158, loss: 0.009660148993134499\r\nStep 1159, loss: 0.00909856241196394\r\nStep 1160, loss: 0.008668644353747368\r\nStep 1161, loss: 0.009067384526133537\r\nStep 1162, loss: 0.008766302838921547\r\nStep 1163, loss: 0.00838024914264679\r\nStep 1164, loss: 0.007747810333967209\r\nStep 1165, loss: 0.008620062842965126\r\nStep 1166, loss: 0.007923033088445663\r\nStep 1167, loss: 0.00863476563245058\r\nStep 1168, loss: 0.007505552843213081\r\nStep 1169, loss: 0.00754917599260807\r\nStep 1170, loss: 0.006123295985162258\r\nStep 1171, loss: 0.008391601964831352\r\nStep 1172, loss: 0.008755280636250973\r\nStep 1173, loss: 0.00787621270865202\r\nStep 1174, loss: 0.010167556814849377\r\nStep 1175, loss: 0.008044450543820858\r\nStep 1176, loss: 0.007125321310013533\r\nStep 1177, loss: 0.009197923354804516\r\nStep 1178, loss: 0.010858401656150818\r\nStep 1179, loss: 0.00829737912863493\r\nStep 1180, loss: 0.009660663083195686\r\nStep 1181, loss: 0.008006054908037186\r\nStep 1182, loss: 0.006648952607065439\r\nStep 1183, 
loss: 0.008937581442296505\r\nStep 1184, loss: 0.007444542367011309\r\nStep 1185, loss: 0.007305907551199198\r\nStep 1186, loss: 0.008125263266265392\r\nStep 1187, loss: 0.007729206699877977\r\nStep 1188, loss: 0.006833734456449747\r\nStep 1189, loss: 0.008362866006791592\r\nStep 1190, loss: 0.008750674314796925\r\nStep 1191, loss: 0.007162139751017094\r\nStep 1192, loss: 0.007618994452059269\r\nStep 1193, loss: 0.0062854476273059845\r\nStep 1194, loss: 0.010187610052525997\r\nStep 1195, loss: 0.00802543479949236\r\nStep 1196, loss: 0.008903078734874725\r\nStep 1197, loss: 0.009383228607475758\r\nStep 1198, loss: 0.007098838221281767\r\nStep 1199, loss: 0.00905842985957861\r\nStep 1200, loss: 0.007517555728554726\r\nStep 1201, loss: 0.008031006902456284\r\nStep 1202, loss: 0.008288324810564518\r\nStep 1203, loss: 0.007185104303061962\r\nStep 1204, loss: 0.008664080873131752\r\nStep 1205, loss: 0.007589227519929409\r\nStep 1206, loss: 0.008468304760754108\r\nStep 1207, loss: 0.007795711047947407\r\nStep 1208, loss: 0.00795590691268444\r\nStep 1265, loss: 0.008695069700479507\r\nStep 1266, loss: 0.007042749784886837\r\nStep 1267, loss: 0.008229956030845642\r\nStep 1268, loss: 0.009325158782303333\r\nStep 1269, loss: 0.007282247766852379\r\nStep 1270, loss: 0.008544599637389183\r\nStep 1271, loss: 0.0090555464848876\r\nStep 1272, loss: 0.0075870705768466\r\nStep 1273, loss: 0.007510579191148281\r\nStep 1274, loss: 0.006632499862462282\r\nStep 1275, loss: 0.008506187237799168\r\nStep 1276, loss: 0.008988509885966778\r\nStep 1277, loss: 0.006992874667048454\r\nStep 1278, loss: 0.006439715623855591\r\nStep 1279, loss: 0.0075194938108325005\r\nStep 1280, loss: 0.007755130063742399\r\nStep 1281, loss: 0.008549924939870834\r\nStep 1282, loss: 0.007439759094268084\r\nStep 1283, loss: 0.006237460300326347\r\nStep 1284, loss: 0.008101397193968296\r\nStep 1285, loss: 0.007658988703042269\r\nStep 1286, loss: 0.008465925231575966\r\nStep 1287, loss: 0.008804348297417164\r\nStep 1288, loss: 0.00851796381175518\r\nStep 1289, loss: 0.007892305962741375\r\nStep 1290, loss: 0.008276136592030525\r\nStep 1291, loss: 0.007690537255257368\r\nStep 1318, loss: 0.00731404684484005\r\nStep 1319, loss: 0.007267243694514036\r\nStep 1183, loss: 0.008937581442296505\r\nStep 1184, loss: 0.007444542367011309\r\nStep 1185, loss: 0.007305907551199198\r\nStep 1186, loss: 0.008125263266265392\r\nStep 1187, loss: 0.007729206699877977\r\nStep 1188, loss: 0.006833734456449747\r\nStep 1189, loss: 0.008362866006791592\r\nStep 1190, loss: 0.008750674314796925\r\nStep 1191, loss: 0.007162139751017094\r\nStep 1192, loss: 0.007618994452059269\r\nStep 1193, loss: 0.0062854476273059845\r\nStep 1194, loss: 0.010187610052525997\r\nStep 1195, loss: 0.00802543479949236\r\nStep 1196, loss: 0.008903078734874725\r\nStep 1197, loss: 0.009383228607475758\r\nStep 1198, loss: 0.007098838221281767\r\nStep 1199, loss: 0.00905842985957861\r\nStep 1200, loss: 0.007517555728554726\r\nStep 1201, loss: 0.008031006902456284\r\nStep 1202, loss: 0.008288324810564518\r\nStep 1203, loss: 0.007185104303061962\r\nStep 1204, loss: 0.008664080873131752\r\nStep 1205, loss: 0.007589227519929409\r\nStep 1206, loss: 0.008468304760754108\r\nStep 1207, loss: 0.007795711047947407\r\nStep 1208, loss: 0.00795590691268444\r\nStep 1209, loss: 0.007503575179725885\r\nStep 1210, loss: 0.00678598415106535\r\nStep 1211, loss: 0.007746693212538958\r\nStep 1292, loss: 0.007581097073853016\r\nStep 1293, loss: 0.0071375006809830666\r\nStep 1294, loss: 0.008262865245342255\r\nStep 
1295, loss: 0.006938433274626732\r\nStep 1296, loss: 0.007333381567150354\r\nStep 1297, loss: 0.008206418715417385\r\nStep 1298, loss: 0.008021431043744087\r\nStep 1299, loss: 0.008770645596086979\r\nStep 1300, loss: 0.008060443215072155\r\nStep 1301, loss: 0.0062043797224760056\r\nStep 1302, loss: 0.007236831355839968\r\nStep 1303, loss: 0.008011898957192898\r\nStep 1304, loss: 0.0067819394171237946\r\nStep 1305, loss: 0.00891581829637289\r\nStep 1306, loss: 0.008170664310455322\r\nStep 1307, loss: 0.007513105403631926\r\nStep 1308, loss: 0.007368546444922686\r\nStep 1309, loss: 0.0065758246928453445\r\nStep 1310, loss: 0.008839765563607216\r\nStep 1311, loss: 0.008590616285800934\r\nStep 1312, loss: 0.007750781252980232\r\nStep 1313, loss: 0.008137338794767857\r\nStep 1314, loss: 0.0072521851398050785\r\nStep 1315, loss: 0.007789584342390299\r\nStep 1316, loss: 0.00861946027725935\r\nStep 1317, loss: 0.00788140669465065\r\nStep 1104, loss: 0.008430581539869308\r\nStep 1105, loss: 0.00841981265693903\r\nStep 1106, loss: 0.007940137758851051\r\nStep 1107, loss: 0.007895980961620808\r\nStep 1108, loss: 0.006901422515511513\r\nStep 1109, loss: 0.008039712905883789\r\nStep 1110, loss: 0.0071457126177847385\r\nStep 1111, loss: 0.0062765637412667274\r\nStep 1112, loss: 0.006268054246902466\r\nStep 1113, loss: 0.008898375555872917\r\nStep 1114, loss: 0.007913569919764996\r\nStep 1115, loss: 0.00898384302854538\r\nStep 1116, loss: 0.009606389328837395\r\nStep 1117, loss: 0.009147465229034424\r\nStep 1118, loss: 0.009823444299399853\r\nStep 1119, loss: 0.008573556318879128\r\nStep 1120, loss: 0.008565381169319153\r\nStep 1121, loss: 0.010354341939091682\r\nStep 1122, loss: 0.007907861843705177\r\nStep 1123, loss: 0.006447137333452702\r\nStep 1124, loss: 0.007331363391131163\r\nStep 1125, loss: 0.007913784123957157\r\nStep 1126, loss: 0.0072159841656684875\r\nStep 1127, loss: 0.007822099141776562\r\nStep 1128, loss: 0.007812371477484703\r\nStep 1129, loss: 0.007498166523873806\r\nStep 1209, loss: 0.007503575179725885\r\nStep 1210, loss: 0.00678598415106535\r\nStep 1211, loss: 0.007746693212538958\r\nStep 1212, loss: 0.007528770249336958\r\nStep 1213, loss: 0.007910806685686111\r\nStep 1214, loss: 0.008487655781209469\r\nStep 1215, loss: 0.008553363382816315\r\nStep 1216, loss: 0.00829324871301651\r\nStep 1217, loss: 0.007361422758549452\r\nStep 1218, loss: 0.008829704485833645\r\nStep 1219, loss: 0.008006290532648563\r\nStep 1220, loss: 0.006867749150842428\r\nStep 1221, loss: 0.008367742411792278\r\nStep 1222, loss: 0.008380208164453506\r\nStep 1223, loss: 0.006569594144821167\r\nStep 1224, loss: 0.010115769691765308\r\nStep 1225, loss: 0.007681806106120348\r\nStep 1226, loss: 0.008582423441112041\r\nStep 1227, loss: 0.007142219692468643\r\nStep 1228, loss: 0.00791062880307436\r\nStep 1229, loss: 0.009301824495196342\r\nStep 1230, loss: 0.007939581759274006\r\nStep 1231, loss: 0.007700969930738211\r\nStep 1232, loss: 0.008206518366932869\r\nStep 1233, loss: 0.007344633806496859\r\nStep 1234, loss: 0.007652160711586475\r\nStep 1235, loss: 0.007586072664707899\r\nStep 1236, loss: 0.008145266212522984\r\nStep 1237, loss: 0.006902631372213364\r\nStep 1318, loss: 0.00731404684484005\r\nStep 1319, loss: 0.007267243694514036\r\nStep 1130, loss: 0.009019688703119755\r\nStep 1131, loss: 0.00851128064095974\r\nStep 1132, loss: 0.009014005772769451\r\nStep 1133, loss: 0.00810469314455986\r\nStep 1134, loss: 0.008381768129765987\r\nStep 1135, loss: 0.009265664964914322\r\nStep 1136, loss: 
0.007494001649320126\r\nStep 1137, loss: 0.008810305967926979\r\nStep 1138, loss: 0.009191278368234634\r\nStep 1139, loss: 0.008481101132929325\r\nStep 1140, loss: 0.007047205697745085\r\nStep 1141, loss: 0.007649500388652086\r\nStep 1142, loss: 0.007546321023255587\r\nStep 1143, loss: 0.0072618527337908745\r\nStep 1144, loss: 0.008346114307641983\r\nStep 1145, loss: 0.007447859738022089\r\nStep 1146, loss: 0.007449342869222164\r\nStep 1147, loss: 0.007476987317204475\r\nStep 1148, loss: 0.008716730400919914\r\nStep 1149, loss: 0.007617637515068054\r\nStep 1150, loss: 0.007324608042836189\r\nStep 1151, loss: 0.00871486496180296\r\nStep 1152, loss: 0.008261012844741344\r\nStep 1153, loss: 0.008820002898573875\r\nStep 1154, loss: 0.007386986631900072\r\nStep 1155, loss: 0.006287720985710621\r\nStep 1104, loss: 0.008430581539869308\r\nStep 1105, loss: 0.00841981265693903\r\nStep 1106, loss: 0.007940137758851051\r\nStep 1107, loss: 0.007895980961620808\r\nStep 1108, loss: 0.006901422515511513\r\nStep 1109, loss: 0.008039712905883789\r\nStep 1110, loss: 0.0071457126177847385\r\nStep 1111, loss: 0.0062765637412667274\r\nStep 1112, loss: 0.006268054246902466\r\nStep 1113, loss: 0.008898375555872917\r\nStep 1114, loss: 0.007913569919764996\r\nStep 1115, loss: 0.00898384302854538\r\nStep 1116, loss: 0.009606389328837395\r\nStep 1117, loss: 0.009147465229034424\r\nStep 1118, loss: 0.009823444299399853\r\nStep 1119, loss: 0.008573556318879128\r\nStep 1120, loss: 0.008565381169319153\r\nStep 1121, loss: 0.010354341939091682\r\nStep 1122, loss: 0.007907861843705177\r\nStep 1123, loss: 0.006447137333452702\r\nStep 1124, loss: 0.007331363391131163\r\nStep 1125, loss: 0.007913784123957157\r\nStep 1126, loss: 0.0072159841656684875\r\nStep 1127, loss: 0.007822099141776562\r\nStep 1128, loss: 0.007812371477484703\r\nStep 1129, loss: 0.007498166523873806\r\nStep 1238, loss: 0.007209500763565302\r\nStep 1239, loss: 0.009341097436845303\r\nStep 1240, loss: 0.007731970865279436\r\nStep 1241, loss: 0.00972119253128767\r\nStep 1242, loss: 0.007510048802942038\r\nStep 1243, loss: 0.007368344813585281\r\nStep 1244, loss: 0.009518461301922798\r\nStep 1245, loss: 0.00760129326954484\r\nStep 1246, loss: 0.007287587970495224\r\nStep 1247, loss: 0.008109740912914276\r\nStep 1248, loss: 0.007578303571790457\r\nStep 1249, loss: 0.007832775823771954\r\nStep 1250, loss: 0.008860357105731964\r\nStep 1251, loss: 0.008427662774920464\r\nStep 1252, loss: 0.0073447804898023605\r\nStep 1253, loss: 0.008862867020070553\r\nStep 1254, loss: 0.008418967016041279\r\nStep 1255, loss: 0.006864132825285196\r\nStep 1256, loss: 0.008471546694636345\r\nStep 1257, loss: 0.006829363293945789\r\nStep 1258, loss: 0.007636746857315302\r\nStep 1259, loss: 0.0077097462490201\r\nStep 1260, loss: 0.008299724198877811\r\nStep 1261, loss: 0.008214874193072319\r\nStep 1262, loss: 0.009514445438981056\r\nStep 1263, loss: 0.008784919045865536\r\nStep 1264, loss: 0.009564245119690895\r\nStep 1156, loss: 0.007239687722176313\r\nStep 1157, loss: 0.007756705861538649\r\nStep 1158, loss: 0.009660148993134499\r\nStep 1159, loss: 0.00909856241196394\r\nStep 1160, loss: 0.008668644353747368\r\nStep 1161, loss: 0.009067384526133537\r\nStep 1162, loss: 0.008766302838921547\r\nStep 1163, loss: 0.00838024914264679\r\nStep 1164, loss: 0.007747810333967209\r\nStep 1165, loss: 0.008620062842965126\r\nStep 1166, loss: 0.007923033088445663\r\nStep 1167, loss: 0.00863476563245058\r\nStep 1168, loss: 0.007505552843213081\r\nStep 1169, loss: 0.00754917599260807\r\nStep 1170, 
loss: 0.006123295985162258\r\nStep 1171, loss: 0.008391601964831352\r\nStep 1172, loss: 0.008755280636250973\r\nStep 1173, loss: 0.00787621270865202\r\nStep 1174, loss: 0.010167556814849377\r\nStep 1175, loss: 0.008044450543820858\r\nStep 1176, loss: 0.007125321310013533\r\nStep 1177, loss: 0.009197923354804516\r\nStep 1178, loss: 0.010858401656150818\r\nStep 1179, loss: 0.00829737912863493\r\nStep 1180, loss: 0.009660663083195686\r\nStep 1181, loss: 0.008006054908037186\r\nStep 1182, loss: 0.006648952607065439\r\nStep 1130, loss: 0.009019688703119755\r\nStep 1131, loss: 0.00851128064095974\r\nStep 1132, loss: 0.009014005772769451\r\nStep 1133, loss: 0.00810469314455986\r\nStep 1134, loss: 0.008381768129765987\r\nStep 1135, loss: 0.009265664964914322\r\nStep 1136, loss: 0.007494001649320126\r\nStep 1137, loss: 0.008810305967926979\r\nStep 1138, loss: 0.009191278368234634\r\nStep 1139, loss: 0.008481101132929325\r\nStep 1140, loss: 0.007047205697745085\r\nStep 1141, loss: 0.007649500388652086\r\nStep 1142, loss: 0.007546321023255587\r\nStep 1143, loss: 0.0072618527337908745\r\nStep 1144, loss: 0.008346114307641983\r\nStep 1145, loss: 0.007447859738022089\r\nStep 1146, loss: 0.007449342869222164\r\nStep 1147, loss: 0.007476987317204475\r\nStep 1148, loss: 0.008716730400919914\r\nStep 1149, loss: 0.007617637515068054\r\nStep 1150, loss: 0.007324608042836189\r\nStep 1151, loss: 0.00871486496180296\r\nStep 1152, loss: 0.008261012844741344\r\nStep 1153, loss: 0.008820002898573875\r\nStep 1154, loss: 0.007386986631900072\r\nStep 1155, loss: 0.006287720985710621\r\nStep 1265, loss: 0.008695069700479507\r\nStep 1266, loss: 0.007042749784886837\r\nStep 1267, loss: 0.008229956030845642\r\nStep 1268, loss: 0.009325158782303333\r\nStep 1269, loss: 0.007282247766852379\r\nStep 1270, loss: 0.008544599637389183\r\nStep 1271, loss: 0.0090555464848876\r\nStep 1272, loss: 0.0075870705768466\r\nStep 1273, loss: 0.007510579191148281\r\nStep 1274, loss: 0.006632499862462282\r\nStep 1275, loss: 0.008506187237799168\r\nStep 1276, loss: 0.008988509885966778\r\nStep 1277, loss: 0.006992874667048454\r\nStep 1278, loss: 0.006439715623855591\r\nStep 1279, loss: 0.0075194938108325005\r\nStep 1280, loss: 0.007755130063742399\r\nStep 1281, loss: 0.008549924939870834\r\nStep 1282, loss: 0.007439759094268084\r\nStep 1283, loss: 0.006237460300326347\r\nStep 1284, loss: 0.008101397193968296\r\nStep 1285, loss: 0.007658988703042269\r\nStep 1286, loss: 0.008465925231575966\r\nStep 1287, loss: 0.008804348297417164\r\nStep 1288, loss: 0.00851796381175518\r\nStep 1289, loss: 0.007892305962741375\r\nStep 1290, loss: 0.008276136592030525\r\nStep 1291, loss: 0.007690537255257368\r\nStep 1183, loss: 0.008937581442296505\r\nStep 1184, loss: 0.007444542367011309\r\nStep 1185, loss: 0.007305907551199198\r\nStep 1186, loss: 0.008125263266265392\r\nStep 1187, loss: 0.007729206699877977\r\nStep 1188, loss: 0.006833734456449747\r\nStep 1189, loss: 0.008362866006791592\r\nStep 1190, loss: 0.008750674314796925\r\nStep 1191, loss: 0.007162139751017094\r\nStep 1192, loss: 0.007618994452059269\r\nStep 1193, loss: 0.0062854476273059845\r\nStep 1194, loss: 0.010187610052525997\r\nStep 1195, loss: 0.00802543479949236\r\nStep 1196, loss: 0.008903078734874725\r\nStep 1197, loss: 0.009383228607475758\r\nStep 1198, loss: 0.007098838221281767\r\nStep 1199, loss: 0.00905842985957861\r\nStep 1200, loss: 0.007517555728554726\r\nStep 1201, loss: 0.008031006902456284\r\nStep 1202, loss: 0.008288324810564518\r\nStep 1203, loss: 0.007185104303061962\r\nStep 
1204, loss: 0.008664080873131752\r\nStep 1205, loss: 0.007589227519929409\r\nStep 1206, loss: 0.008468304760754108\r\nStep 1207, loss: 0.007795711047947407\r\nStep 1208, loss: 0.00795590691268444\r\nStep 1156, loss: 0.007239687722176313\r\nStep 1157, loss: 0.007756705861538649\r\nStep 1158, loss: 0.009660148993134499\r\nStep 1159, loss: 0.00909856241196394\r\nStep 1160, loss: 0.008668644353747368\r\nStep 1161, loss: 0.009067384526133537\r\nStep 1162, loss: 0.008766302838921547\r\nStep 1163, loss: 0.00838024914264679\r\nStep 1164, loss: 0.007747810333967209\r\nStep 1165, loss: 0.008620062842965126\r\nStep 1166, loss: 0.007923033088445663\r\nStep 1167, loss: 0.00863476563245058\r\nStep 1168, loss: 0.007505552843213081\r\nStep 1169, loss: 0.00754917599260807\r\nStep 1170, loss: 0.006123295985162258\r\nStep 1171, loss: 0.008391601964831352\r\nStep 1172, loss: 0.008755280636250973\r\nStep 1173, loss: 0.00787621270865202\r\nStep 1174, loss: 0.010167556814849377\r\nStep 1175, loss: 0.008044450543820858\r\nStep 1176, loss: 0.007125321310013533\r\nStep 1177, loss: 0.009197923354804516\r\nStep 1178, loss: 0.010858401656150818\r\nStep 1179, loss: 0.00829737912863493\r\nStep 1180, loss: 0.009660663083195686\r\nStep 1181, loss: 0.008006054908037186\r\nStep 1182, loss: 0.006648952607065439\r\nStep 1292, loss: 0.007581097073853016\r\nStep 1293, loss: 0.0071375006809830666\r\nStep 1294, loss: 0.008262865245342255\r\nStep 1295, loss: 0.006938433274626732\r\nStep 1296, loss: 0.007333381567150354\r\nStep 1297, loss: 0.008206418715417385\r\nStep 1298, loss: 0.008021431043744087\r\nStep 1299, loss: 0.008770645596086979\r\nStep 1300, loss: 0.008060443215072155\r\nStep 1301, loss: 0.0062043797224760056\r\nStep 1302, loss: 0.007236831355839968\r\nStep 1303, loss: 0.008011898957192898\r\nStep 1304, loss: 0.0067819394171237946\r\nStep 1305, loss: 0.00891581829637289\r\nStep 1306, loss: 0.008170664310455322\r\nStep 1307, loss: 0.007513105403631926\r\nStep 1308, loss: 0.007368546444922686\r\nStep 1309, loss: 0.0065758246928453445\r\nStep 1310, loss: 0.008839765563607216\r\nStep 1311, loss: 0.008590616285800934\r\nStep 1312, loss: 0.007750781252980232\r\nStep 1313, loss: 0.008137338794767857\r\nStep 1314, loss: 0.0072521851398050785\r\nStep 1315, loss: 0.007789584342390299\r\nStep 1316, loss: 0.00861946027725935\r\nStep 1317, loss: 0.00788140669465065\r\nStep 1209, loss: 0.007503575179725885\r\nStep 1210, loss: 0.00678598415106535\r\nStep 1211, loss: 0.007746693212538958\r\nStep 1183, loss: 0.008937581442296505\r\nStep 1184, loss: 0.007444542367011309\r\nStep 1185, loss: 0.007305907551199198\r\nStep 1186, loss: 0.008125263266265392\r\nStep 1187, loss: 0.007729206699877977\r\nStep 1188, loss: 0.006833734456449747\r\nStep 1189, loss: 0.008362866006791592\r\nStep 1190, loss: 0.008750674314796925\r\nStep 1191, loss: 0.007162139751017094\r\nStep 1192, loss: 0.007618994452059269\r\nStep 1193, loss: 0.0062854476273059845\r\nStep 1194, loss: 0.010187610052525997\r\nStep 1195, loss: 0.00802543479949236\r\nStep 1196, loss: 0.008903078734874725\r\nStep 1197, loss: 0.009383228607475758\r\nStep 1198, loss: 0.007098838221281767\r\nStep 1199, loss: 0.00905842985957861\r\nStep 1200, loss: 0.007517555728554726\r\nStep 1201, loss: 0.008031006902456284\r\nStep 1202, loss: 0.008288324810564518\r\nStep 1203, loss: 0.007185104303061962\r\nStep 1204, loss: 0.008664080873131752\r\nStep 1205, loss: 0.007589227519929409\r\nStep 1206, loss: 0.008468304760754108\r\nStep 1207, loss: 0.007795711047947407\r\nStep 1208, loss: 
0.00795590691268444\r\nStep 1318, loss: 0.00731404684484005\r\nStep 1319, loss: 0.007267243694514036\r\nStep 1212, loss: 0.007528770249336958\r\nStep 1213, loss: 0.007910806685686111\r\nStep 1214, loss: 0.008487655781209469\r\nStep 1215, loss: 0.008553363382816315\r\nStep 1216, loss: 0.00829324871301651\r\nStep 1217, loss: 0.007361422758549452\r\nStep 1218, loss: 0.008829704485833645\r\nStep 1219, loss: 0.008006290532648563\r\nStep 1220, loss: 0.006867749150842428",,terminal_output +786,2240760,"TERMINAL",0,0,"\r\nStep 1221, loss: 0.008367742411792278\r\nStep 1222, loss: 0.008380208164453506\r\nStep 1223, loss: 0.006569594144821167\r\nStep 1224, loss: 0.010115769691765308\r\nStep 1225, loss: 0.007681806106120348\r\nStep 1226, loss: 0.008582423441112041\r\nStep 1227, loss: 0.007142219692468643\r\nStep 1228, loss: 0.00791062880307436\r\nStep 1229, loss: 0.009301824495196342\r\nStep 1230, loss: 0.007939581759274006\r\nStep 1231, loss: 0.007700969930738211\r\nStep 1232, loss: 0.008206518366932869\r\nStep 1233, loss: 0.007344633806496859\r\nStep 1234, loss: 0.007652160711586475\r\nStep 1235, loss: 0.007586072664707899\r\nStep 1236, loss: 0.008145266212522984\r\nStep 1237, loss: 0.006902631372213364\r\nStep 1209, loss: 0.007503575179725885\r\nStep 1210, loss: 0.00678598415106535\r\nStep 1211, loss: 0.007746693212538958\r\nStep 1104, loss: 0.008430581539869308\r\nStep 1105, loss: 0.00841981265693903\r\nStep 1106, loss: 0.007940137758851051\r\nStep 1107, loss: 0.007895980961620808\r\nStep 1108, loss: 0.006901422515511513\r\nStep 1109, loss: 0.008039712905883789\r\nStep 1110, loss: 0.0071457126177847385\r\nStep 1111, loss: 0.0062765637412667274\r\nStep 1112, loss: 0.006268054246902466\r\nStep 1113, loss: 0.008898375555872917\r\nStep 1114, loss: 0.007913569919764996\r\nStep 1115, loss: 0.00898384302854538\r\nStep 1116, loss: 0.009606389328837395\r\nStep 1117, loss: 0.009147465229034424\r\nStep 1118, loss: 0.009823444299399853\r\nStep 1119, loss: 0.008573556318879128\r\nStep 1120, loss: 0.008565381169319153\r\nStep 1121, loss: 0.010354341939091682\r\nStep 1122, loss: 0.007907861843705177\r\nStep 1123, loss: 0.006447137333452702\r\nStep 1124, loss: 0.007331363391131163\r\nStep 1125, loss: 0.007913784123957157\r\nStep 1126, loss: 0.0072159841656684875\r\nStep 1127, loss: 0.007822099141776562\r\nStep 1128, loss: 0.007812371477484703\r\nStep 1129, loss: 0.007498166523873806\r\nStep 1238, loss: 0.007209500763565302\r\nStep 1239, loss: 0.009341097436845303\r\nStep 1240, loss: 0.007731970865279436\r\nStep 1241, loss: 0.00972119253128767\r\nStep 1242, loss: 0.007510048802942038\r\nStep 1243, loss: 0.007368344813585281\r\nStep 1244, loss: 0.009518461301922798\r\nStep 1245, loss: 0.00760129326954484\r\nStep 1246, loss: 0.007287587970495224\r\nStep 1247, loss: 0.008109740912914276\r\nStep 1248, loss: 0.007578303571790457\r\nStep 1249, loss: 0.007832775823771954\r\nStep 1250, loss: 0.008860357105731964\r\nStep 1251, loss: 0.008427662774920464\r\nStep 1252, loss: 0.0073447804898023605\r\nStep 1253, loss: 0.008862867020070553\r\nStep 1254, loss: 0.008418967016041279\r\nStep 1255, loss: 0.006864132825285196\r\nStep 1256, loss: 0.008471546694636345\r\nStep 1257, loss: 0.006829363293945789\r\nStep 1258, loss: 0.007636746857315302\r\nStep 1259, loss: 0.0077097462490201\r\nStep 1260, loss: 0.008299724198877811\r\nStep 1261, loss: 0.008214874193072319\r\nStep 1262, loss: 0.009514445438981056\r\nStep 1263, loss: 0.008784919045865536\r\nStep 1264, loss: 0.009564245119690895\r\nStep 1212, loss: 0.007528770249336958\r\nStep 
1213, loss: 0.007910806685686111\r\nStep 1214, loss: 0.008487655781209469\r\nStep 1215, loss: 0.008553363382816315\r\nStep 1216, loss: 0.00829324871301651\r\nStep 1217, loss: 0.007361422758549452\r\nStep 1218, loss: 0.008829704485833645\r\nStep 1219, loss: 0.008006290532648563\r\nStep 1220, loss: 0.006867749150842428\r\nStep 1221, loss: 0.008367742411792278\r\nStep 1222, loss: 0.008380208164453506\r\nStep 1223, loss: 0.006569594144821167\r\nStep 1224, loss: 0.010115769691765308\r\nStep 1225, loss: 0.007681806106120348\r\nStep 1226, loss: 0.008582423441112041\r\nStep 1227, loss: 0.007142219692468643\r\nStep 1228, loss: 0.00791062880307436\r\nStep 1229, loss: 0.009301824495196342\r\nStep 1230, loss: 0.007939581759274006\r\nStep 1231, loss: 0.007700969930738211\r\nStep 1232, loss: 0.008206518366932869\r\nStep 1233, loss: 0.007344633806496859\r\nStep 1234, loss: 0.007652160711586475\r\nStep 1235, loss: 0.007586072664707899\r\nStep 1236, loss: 0.008145266212522984\r\nStep 1237, loss: 0.006902631372213364\r\nStep 1130, loss: 0.009019688703119755\r\nStep 1131, loss: 0.00851128064095974\r\nStep 1132, loss: 0.009014005772769451\r\nStep 1133, loss: 0.00810469314455986\r\nStep 1134, loss: 0.008381768129765987\r\nStep 1135, loss: 0.009265664964914322\r\nStep 1136, loss: 0.007494001649320126\r\nStep 1137, loss: 0.008810305967926979\r\nStep 1138, loss: 0.009191278368234634\r\nStep 1139, loss: 0.008481101132929325\r\nStep 1140, loss: 0.007047205697745085\r\nStep 1141, loss: 0.007649500388652086\r\nStep 1142, loss: 0.007546321023255587\r\nStep 1143, loss: 0.0072618527337908745\r\nStep 1144, loss: 0.008346114307641983\r\nStep 1145, loss: 0.007447859738022089\r\nStep 1146, loss: 0.007449342869222164\r\nStep 1147, loss: 0.007476987317204475\r\nStep 1148, loss: 0.008716730400919914\r\nStep 1149, loss: 0.007617637515068054\r\nStep 1150, loss: 0.007324608042836189\r\nStep 1151, loss: 0.00871486496180296\r\nStep 1152, loss: 0.008261012844741344\r\nStep 1153, loss: 0.008820002898573875\r\nStep 1154, loss: 0.007386986631900072\r\nStep 1155, loss: 0.006287720985710621\r\nStep 1265, loss: 0.008695069700479507\r\nStep 1266, loss: 0.007042749784886837\r\nStep 1267, loss: 0.008229956030845642\r\nStep 1268, loss: 0.009325158782303333\r\nStep 1269, loss: 0.007282247766852379\r\nStep 1270, loss: 0.008544599637389183\r\nStep 1271, loss: 0.0090555464848876\r\nStep 1272, loss: 0.0075870705768466\r\nStep 1273, loss: 0.007510579191148281\r\nStep 1274, loss: 0.006632499862462282\r\nStep 1275, loss: 0.008506187237799168\r\nStep 1276, loss: 0.008988509885966778\r\nStep 1277, loss: 0.006992874667048454\r\nStep 1278, loss: 0.006439715623855591\r\nStep 1279, loss: 0.0075194938108325005\r\nStep 1280, loss: 0.007755130063742399\r\nStep 1281, loss: 0.008549924939870834\r\nStep 1282, loss: 0.007439759094268084\r\nStep 1283, loss: 0.006237460300326347\r\nStep 1284, loss: 0.008101397193968296\r\nStep 1285, loss: 0.007658988703042269\r\nStep 1286, loss: 0.008465925231575966\r\nStep 1287, loss: 0.008804348297417164\r\nStep 1288, loss: 0.00851796381175518\r\nStep 1289, loss: 0.007892305962741375\r\nStep 1290, loss: 0.008276136592030525\r\nStep 1291, loss: 0.007690537255257368\r\nStep 1238, loss: 0.007209500763565302\r\nStep 1239, loss: 0.009341097436845303\r\nStep 1240, loss: 0.007731970865279436\r\nStep 1241, loss: 0.00972119253128767\r\nStep 1242, loss: 0.007510048802942038\r\nStep 1243, loss: 0.007368344813585281\r\nStep 1244, loss: 0.009518461301922798\r\nStep 1245, loss: 0.00760129326954484\r\nStep 1246, loss: 
0.007287587970495224\r\nStep 1247, loss: 0.008109740912914276\r\nStep 1248, loss: 0.007578303571790457\r\nStep 1249, loss: 0.007832775823771954\r\nStep 1250, loss: 0.008860357105731964\r\nStep 1251, loss: 0.008427662774920464\r\nStep 1252, loss: 0.0073447804898023605\r\nStep 1253, loss: 0.008862867020070553\r\nStep 1254, loss: 0.008418967016041279\r\nStep 1255, loss: 0.006864132825285196\r\nStep 1256, loss: 0.008471546694636345\r\nStep 1257, loss: 0.006829363293945789\r\nStep 1258, loss: 0.007636746857315302\r\nStep 1259, loss: 0.0077097462490201\r\nStep 1260, loss: 0.008299724198877811\r\nStep 1261, loss: 0.008214874193072319\r\nStep 1262, loss: 0.009514445438981056\r\nStep 1263, loss: 0.008784919045865536\r\nStep 1264, loss: 0.009564245119690895\r\nStep 1156, loss: 0.007239687722176313\r\nStep 1157, loss: 0.007756705861538649\r\nStep 1158, loss: 0.009660148993134499\r\nStep 1159, loss: 0.00909856241196394\r\nStep 1160, loss: 0.008668644353747368\r\nStep 1161, loss: 0.009067384526133537\r\nStep 1162, loss: 0.008766302838921547\r\nStep 1163, loss: 0.00838024914264679\r\nStep 1164, loss: 0.007747810333967209\r\nStep 1165, loss: 0.008620062842965126\r\nStep 1166, loss: 0.007923033088445663\r\nStep 1167, loss: 0.00863476563245058\r\nStep 1168, loss: 0.007505552843213081\r\nStep 1169, loss: 0.00754917599260807\r\nStep 1170, loss: 0.006123295985162258\r\nStep 1171, loss: 0.008391601964831352\r\nStep 1172, loss: 0.008755280636250973\r\nStep 1173, loss: 0.00787621270865202\r\nStep 1174, loss: 0.010167556814849377\r\nStep 1175, loss: 0.008044450543820858\r\nStep 1176, loss: 0.007125321310013533\r\nStep 1177, loss: 0.009197923354804516\r\nStep 1178, loss: 0.010858401656150818\r\nStep 1179, loss: 0.00829737912863493\r\nStep 1180, loss: 0.009660663083195686\r\nStep 1181, loss: 0.008006054908037186\r\nStep 1182, loss: 0.006648952607065439\r\nStep 1292, loss: 0.007581097073853016\r\nStep 1293, loss: 0.0071375006809830666\r\nStep 1294, loss: 0.008262865245342255\r\nStep 1295, loss: 0.006938433274626732\r\nStep 1296, loss: 0.007333381567150354\r\nStep 1297, loss: 0.008206418715417385\r\nStep 1298, loss: 0.008021431043744087\r\nStep 1299, loss: 0.008770645596086979\r\nStep 1300, loss: 0.008060443215072155\r\nStep 1301, loss: 0.0062043797224760056\r\nStep 1302, loss: 0.007236831355839968\r\nStep 1303, loss: 0.008011898957192898\r\nStep 1304, loss: 0.0067819394171237946\r\nStep 1305, loss: 0.00891581829637289\r\nStep 1306, loss: 0.008170664310455322\r\nStep 1307, loss: 0.007513105403631926\r\nStep 1308, loss: 0.007368546444922686\r\nStep 1309, loss: 0.0065758246928453445\r\nStep 1310, loss: 0.008839765563607216\r\nStep 1311, loss: 0.008590616285800934\r\nStep 1312, loss: 0.007750781252980232\r\nStep 1313, loss: 0.008137338794767857\r\nStep 1314, loss: 0.0072521851398050785\r\nStep 1315, loss: 0.007789584342390299\r\nStep 1316, loss: 0.00861946027725935\r\nStep 1317, loss: 0.00788140669465065\r\nStep 1265, loss: 0.008695069700479507\r\nStep 1266, loss: 0.007042749784886837\r\nStep 1267, loss: 0.008229956030845642\r\nStep 1268, loss: 0.009325158782303333\r\nStep 1269, loss: 0.007282247766852379\r\nStep 1270, loss: 0.008544599637389183\r\nStep 1271, loss: 0.0090555464848876\r\nStep 1272, loss: 0.0075870705768466\r\nStep 1273, loss: 0.007510579191148281\r\nStep 1274, loss: 0.006632499862462282\r\nStep 1275, loss: 0.008506187237799168\r\nStep 1276, loss: 0.008988509885966778\r\nStep 1277, loss: 0.006992874667048454\r\nStep 1278, loss: 0.006439715623855591\r\nStep 1279, loss: 0.0075194938108325005\r\nStep 1280, 
loss: 0.007755130063742399\r\nStep 1281, loss: 0.008549924939870834\r\nStep 1282, loss: 0.007439759094268084\r\nStep 1283, loss: 0.006237460300326347\r\nStep 1284, loss: 0.008101397193968296\r\nStep 1285, loss: 0.007658988703042269\r\nStep 1286, loss: 0.008465925231575966\r\nStep 1287, loss: 0.008804348297417164\r\nStep 1288, loss: 0.00851796381175518\r\nStep 1289, loss: 0.007892305962741375\r\nStep 1290, loss: 0.008276136592030525\r\nStep 1291, loss: 0.007690537255257368\r\nStep 1183, loss: 0.008937581442296505\r\nStep 1184, loss: 0.007444542367011309\r\nStep 1185, loss: 0.007305907551199198\r\nStep 1186, loss: 0.008125263266265392\r\nStep 1187, loss: 0.007729206699877977\r\nStep 1188, loss: 0.006833734456449747\r\nStep 1189, loss: 0.008362866006791592\r\nStep 1190, loss: 0.008750674314796925\r\nStep 1191, loss: 0.007162139751017094\r\nStep 1192, loss: 0.007618994452059269\r\nStep 1193, loss: 0.0062854476273059845\r\nStep 1194, loss: 0.010187610052525997\r\nStep 1195, loss: 0.00802543479949236\r\nStep 1196, loss: 0.008903078734874725\r\nStep 1197, loss: 0.009383228607475758\r\nStep 1198, loss: 0.007098838221281767\r\nStep 1199, loss: 0.00905842985957861\r\nStep 1200, loss: 0.007517555728554726\r\nStep 1201, loss: 0.008031006902456284\r\nStep 1202, loss: 0.008288324810564518\r\nStep 1203, loss: 0.007185104303061962\r\nStep 1204, loss: 0.008664080873131752\r\nStep 1205, loss: 0.007589227519929409\r\nStep 1206, loss: 0.008468304760754108\r\nStep 1207, loss: 0.007795711047947407\r\nStep 1208, loss: 0.00795590691268444\r\nStep 1318, loss: 0.00731404684484005\r\nStep 1319, loss: 0.007267243694514036\r\nStep 1292, loss: 0.007581097073853016\r\nStep 1293, loss: 0.0071375006809830666\r\nStep 1294, loss: 0.008262865245342255\r\nStep 1295, loss: 0.006938433274626732\r\nStep 1296, loss: 0.007333381567150354\r\nStep 1297, loss: 0.008206418715417385\r\nStep 1298, loss: 0.008021431043744087\r\nStep 1299, loss: 0.008770645596086979\r\nStep 1300, loss: 0.008060443215072155\r\nStep 1301, loss: 0.0062043797224760056\r\nStep 1302, loss: 0.007236831355839968\r\nStep 1303, loss: 0.008011898957192898\r\nStep 1304, loss: 0.0067819394171237946\r\nStep 1305, loss: 0.00891581829637289\r\nStep 1306, loss: 0.008170664310455322\r\nStep 1307, loss: 0.007513105403631926\r\nStep 1308, loss: 0.007368546444922686\r\nStep 1309, loss: 0.0065758246928453445\r\nStep 1310, loss: 0.008839765563607216\r\nStep 1311, loss: 0.008590616285800934\r\nStep 1312, loss: 0.007750781252980232\r\nStep 1313, loss: 0.008137338794767857\r\nStep 1314, loss: 0.0072521851398050785\r\nStep 1315, loss: 0.007789584342390299\r\nStep 1316, loss: 0.00861946027725935\r\nStep 1317, loss: 0.00788140669465065\r\nStep 1209, loss: 0.007503575179725885\r\nStep 1210, loss: 0.00678598415106535\r\nStep 1211, loss: 0.007746693212538958\r\nStep 1212, loss: 0.007528770249336958\r\nStep 1213, loss: 0.007910806685686111\r\nStep 1214, loss: 0.008487655781209469\r\nStep 1215, loss: 0.008553363382816315\r\nStep 1216, loss: 0.00829324871301651\r\nStep 1217, loss: 0.007361422758549452\r\nStep 1218, loss: 0.008829704485833645\r\nStep 1219, loss: 0.008006290532648563\r\nStep 1220, loss: 0.006867749150842428\r\nStep 1221, loss: 0.008367742411792278\r\nStep 1222, loss: 0.008380208164453506\r\nStep 1223, loss: 0.006569594144821167\r\nStep 1224, loss: 0.010115769691765308\r\nStep 1225, loss: 0.007681806106120348\r\nStep 1226, loss: 0.008582423441112041\r\nStep 1227, loss: 0.007142219692468643\r\nStep 1228, loss: 0.00791062880307436\r\nStep 1229, loss: 
0.009301824495196342\r\nStep 1104, loss: 0.008430581539869308\r\nStep 1105, loss: 0.00841981265693903\r\nStep 1106, loss: 0.007940137758851051\r\nStep 1107, loss: 0.007895980961620808\r\nStep 1108, loss: 0.006901422515511513\r\nStep 1109, loss: 0.008039712905883789\r\nStep 1110, loss: 0.0071457126177847385\r\nStep 1111, loss: 0.0062765637412667274\r\n
Step 1112, loss: 0.006268054246902466\r\nStep 1113, loss: 0.008898375555872917\r\nStep 1114, loss: 0.007913569919764996\r\nStep 1115, loss: 0.00898384302854538\r\nStep 1116, loss: 0.009606389328837395\r\nStep 1117, loss: 0.009147465229034424\r\nStep 1118, loss: 0.009823444299399853\r\nStep 1119, loss: 0.008573556318879128\r\nStep 1120, loss: 
0.008565381169319153\r\nStep 1121, loss: 0.010354341939091682\r\nStep 1122, loss: 0.007907861843705177\r\nStep 1123, loss: 0.006447137333452702\r\nStep 1124, loss: 0.007331363391131163\r\nStep 1125, loss: 0.007913784123957157\r\nStep 1126, loss: 0.0072159841656684875\r\nStep 1127, loss: 0.007822099141776562\r\nStep 1128, loss: 0.007812371477484703\r\nStep 1129, loss: 0.007498166523873806\r\nStep 1130, loss: 0.009019688703119755\r\nStep 1131, loss: 0.00851128064095974\r\nStep 1132, loss: 0.009014005772769451\r\nStep 1133, loss: 0.00810469314455986\r\nStep 1134, loss: 0.008381768129765987\r\nStep 1135, loss: 0.009265664964914322\r\nStep 1136, loss: 0.007494001649320126\r\nStep 1137, loss: 0.008810305967926979\r\nStep 1138, loss: 0.009191278368234634\r\nStep 1139, loss: 0.008481101132929325\r\nStep 1140, loss: 0.007047205697745085\r\nStep 1141, loss: 0.007649500388652086\r\nStep 1142, loss: 0.007546321023255587\r\nStep 1143, loss: 0.0072618527337908745\r\nStep 1144, loss: 0.008346114307641983\r\nStep 1145, loss: 0.007447859738022089\r\nStep 1146, loss: 0.007449342869222164\r\nStep 1147, loss: 0.007476987317204475\r\nStep 1148, loss: 0.008716730400919914\r\nStep 1149, loss: 0.007617637515068054\r\nStep 1150, loss: 0.007324608042836189\r\nStep 1151, loss: 0.00871486496180296\r\nStep 1152, loss: 0.008261012844741344\r\nStep 1153, loss: 0.008820002898573875\r\nStep 1154, loss: 0.007386986631900072\r\nStep 1155, loss: 0.006287720985710621\r\nStep 1156, loss: 0.007239687722176313\r\nStep 1157, loss: 0.007756705861538649\r\nStep 1158, loss: 0.009660148993134499\r\nStep 1159, loss: 0.00909856241196394\r\nStep 1160, loss: 0.008668644353747368\r\nStep 1161, loss: 0.009067384526133537\r\nStep 1162, loss: 0.008766302838921547\r\nStep 1163, loss: 0.00838024914264679\r\nStep 1164, loss: 0.007747810333967209\r\nStep 1165, loss: 0.008620062842965126\r\nStep 1166, loss: 0.007923033088445663\r\nStep 1167, loss: 0.00863476563245058\r\nStep 1168, loss: 0.007505552843213081\r\nStep 1169, loss: 0.00754917599260807\r\nStep 1170, loss: 0.006123295985162258\r\nStep 1171, loss: 0.008391601964831352\r\nStep 1172, loss: 0.008755280636250973\r\nStep 1173, loss: 0.00787621270865202\r\nStep 1174, loss: 0.010167556814849377\r\nStep 1175, loss: 0.008044450543820858\r\nStep 1176, loss: 0.007125321310013533\r\nStep 1177, loss: 0.009197923354804516\r\nStep 1178, loss: 0.010858401656150818\r\nStep 1179, loss: 0.00829737912863493\r\nStep 1180, loss: 0.009660663083195686\r\nStep 1181, loss: 0.008006054908037186\r\nStep 1182, loss: 0.006648952607065439\r\nStep 1183, loss: 0.008937581442296505\r\nStep 1184, loss: 0.007444542367011309\r\nStep 1185, loss: 0.007305907551199198\r\nStep 1186, loss: 0.008125263266265392\r\nStep 1187, loss: 0.007729206699877977\r\nStep 1188, loss: 0.006833734456449747\r\nStep 1189, loss: 0.008362866006791592\r\nStep 1190, loss: 0.008750674314796925\r\nStep 1191, loss: 0.007162139751017094\r\nStep 1192, loss: 0.007618994452059269\r\nStep 1193, loss: 0.0062854476273059845\r\nStep 1194, loss: 0.010187610052525997\r\nStep 1195, loss: 0.00802543479949236\r\nStep 1196, loss: 0.008903078734874725\r\nStep 1197, loss: 0.009383228607475758\r\nStep 1198, loss: 0.007098838221281767\r\nStep 1199, loss: 0.00905842985957861\r\nStep 1200, loss: 0.007517555728554726\r\nStep 1201, loss: 0.008031006902456284\r\nStep 1202, loss: 0.008288324810564518\r\nStep 1203, loss: 0.007185104303061962\r\nStep 1204, loss: 0.008664080873131752\r\nStep 1205, loss: 0.007589227519929409\r\nStep 1206, loss: 0.008468304760754108\r\nStep 1207, 
loss: 0.007795711047947407\r\nStep 1208, loss: 0.00795590691268444\r\nStep 1209, loss: 0.007503575179725885\r\nStep 1210, loss: 0.00678598415106535\r\nStep 1211, loss: 0.007746693212538958\r\nStep 1212, loss: 0.007528770249336958\r\nStep 1213, loss: 0.007910806685686111\r\nStep 1214, loss: 0.008487655781209469\r\nStep 1215, loss: 0.008553363382816315\r\nStep 1216, loss: 0.00829324871301651\r\nStep 1217, loss: 0.007361422758549452\r\nStep 1218, loss: 0.008829704485833645\r\nStep 1219, loss: 0.008006290532648563\r\nStep 1220, loss: 0.006867749150842428\r\nStep 1221, loss: 0.008367742411792278\r\nStep 1222, loss: 0.008380208164453506\r\nStep 1223, loss: 0.006569594144821167\r\nStep 1224, loss: 0.010115769691765308\r\nStep 1225, loss: 0.007681806106120348\r\nStep 1226, loss: 0.008582423441112041\r\nStep 1227, loss: 0.007142219692468643\r\nStep 1228, loss: 0.00791062880307436\r\nStep 1229, loss: 0.009301824495196342\r\nStep 1230, loss: 0.007939581759274006\r\nStep 1231, loss: 0.007700969930738211\r\nStep 1232, loss: 0.008206518366932869\r\nStep 1233, loss: 0.007344633806496859\r\nStep 1234, loss: 0.007652160711586475\r\nStep 1235, loss: 0.007586072664707899\r\nStep 1236, loss: 0.008145266212522984\r\nStep 1237, loss: 0.006902631372213364\r\nStep 1238, loss: 0.007209500763565302\r\nStep 1239, loss: 0.009341097436845303\r\nStep 1240, loss: 0.007731970865279436\r\nStep 1241, loss: 0.00972119253128767\r\nStep 1242, loss: 0.007510048802942038\r\nStep 1243, loss: 0.007368344813585281\r\nStep 1244, loss: 0.009518461301922798\r\nStep 1245, loss: 0.00760129326954484\r\nStep 1246, loss: 0.007287587970495224\r\nStep 1247, loss: 0.008109740912914276\r\nStep 1248, loss: 0.007578303571790457\r\nStep 1249, loss: 0.007832775823771954\r\nStep 1250, loss: 0.008860357105731964\r\nStep 1251, loss: 0.008427662774920464\r\nStep 1252, loss: 0.0073447804898023605\r\nStep 1253, loss: 0.008862867020070553\r\nStep 1254, loss: 0.008418967016041279\r\nStep 1255, loss: 0.006864132825285196\r\nStep 1256, loss: 0.008471546694636345\r\nStep 1257, loss: 0.006829363293945789\r\nStep 1258, loss: 0.007636746857315302\r\nStep 1259, loss: 0.0077097462490201\r\nStep 1260, loss: 0.008299724198877811\r\nStep 1261, loss: 0.008214874193072319\r\nStep 1262, loss: 0.009514445438981056\r\nStep 1263, loss: 0.008784919045865536\r\nStep 1264, loss: 0.009564245119690895\r\nStep 1265, loss: 0.008695069700479507\r\nStep 1266, loss: 0.007042749784886837\r\nStep 1267, loss: 0.008229956030845642\r\nStep 1268, loss: 0.009325158782303333\r\nStep 1269, loss: 0.007282247766852379\r\nStep 1270, loss: 0.008544599637389183\r\nStep 1271, loss: 0.0090555464848876\r\nStep 1272, loss: 0.0075870705768466\r\nStep 1273, loss: 0.007510579191148281\r\nStep 1274, loss: 0.006632499862462282\r\nStep 1275, loss: 0.008506187237799168\r\nStep 1276, loss: 0.008988509885966778\r\nStep 1277, loss: 0.006992874667048454\r\nStep 1278, loss: 0.006439715623855591\r\nStep 1279, loss: 0.0075194938108325005\r\nStep 1280, loss: 0.007755130063742399\r\nStep 1281, loss: 0.008549924939870834\r\nStep 1282, loss: 0.007439759094268084\r\nStep 1283, loss: 0.006237460300326347\r\nStep 1284, loss: 0.008101397193968296\r\nStep 1285, loss: 0.007658988703042269\r\nStep 1286, loss: 0.008465925231575966\r\nStep 1287, loss: 0.008804348297417164\r\nStep 1288, loss: 0.00851796381175518\r\nStep 1289, loss: 0.007892305962741375\r\nStep 1290, loss: 0.008276136592030525\r\nStep 1291, loss: 0.007690537255257368\r\nStep 1292, loss: 0.007581097073853016\r\nStep 1293, loss: 0.0071375006809830666\r\nStep 
1294, loss: 0.008262865245342255\r\nStep 1295, loss: 0.006938433274626732\r\nStep 1296, loss: 0.007333381567150354\r\nStep 1297, loss: 0.008206418715417385\r\nStep 1298, loss: 0.008021431043744087\r\nStep 1299, loss: 0.008770645596086979\r\nStep 1300, loss: 0.008060443215072155\r\nStep 1301, loss: 0.0062043797224760056\r\nStep 1302, loss: 0.007236831355839968\r\nStep 1303, loss: 0.008011898957192898\r\nStep 1304, loss: 0.0067819394171237946\r\nStep 1305, loss: 0.00891581829637289\r\nStep 1306, loss: 0.008170664310455322\r\nStep 1307, loss: 0.007513105403631926\r\nStep 1308, loss: 0.007368546444922686\r\nStep 1309, loss: 0.0065758246928453445\r\nStep 1310, loss: 0.008839765563607216\r\nStep 1311, loss: 0.008590616285800934\r\nStep 1312, loss: 0.007750781252980232\r\nStep 1313, loss: 0.008137338794767857\r\nStep 1314, loss: 0.0072521851398050785\r\nStep 1315, loss: 0.007789584342390299\r\nStep 1316, loss: 0.00861946027725935\r\nStep 1317, loss: 0.00788140669465065\r\nStep 1318, loss: 0.00731404684484005\r\nStep 1319, loss: 0.007267243694514036\r\n",,terminal_output +787,2240761,"TERMINAL",0,0,"549441",,terminal_output +788,2241787,"TERMINAL",0,0,"\r72 R\t6:15\t 2\t0507,0520]7098la 2:10629,06316924interact36:15\t 1 hkn0733342\t 4 hkn[0625-0628]",,terminal_output +789,2242840,"TERMINAL",0,0,"86163",,terminal_output +790,2243878,"TERMINAL",0,0,"97274",,terminal_output +791,2244967,"TERMINAL",0,0,"2:008385",,terminal_output +792,2246093,"TERMINAL",0,0,"19496",,terminal_output +793,2247162,"TERMINAL",0,0,"srun",,terminal_focus +794,2247317,"TERMINAL",0,0,"2205207",,terminal_output +795,2247866,"TERMINAL",0,0,"c",,terminal_output +796,2247977,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +797,2248087,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +798,2248088,"TERMINAL",0,0,"31618",,terminal_output +799,2248731,"TERMINAL",0,0,"[?25l$[?25h",,terminal_output +800,2249136,"TERMINAL",0,0,"42729",,terminal_output +801,2249318,"TERMINAL",0,0,"[?25lw[?25h",,terminal_output +802,2249439,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +803,2249635,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +804,2250092,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +805,2250168,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +806,2250212,"TERMINAL",0,0,"538350",,terminal_output +807,2250240,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +808,2250549,"TERMINAL",0,0,"[?25l[?2004l\r[?25h]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data[?2004h[tum_cte0515@hkn0733 data]$ ",,terminal_output +809,2250974,"TERMINAL",0,0,"l",,terminal_output +810,2251107,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +811,2251170,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +812,2251277,"TERMINAL",0,0,"64941",,terminal_output +813,2252247,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +814,2252316,"TERMINAL",0,0,"752052",,terminal_output +815,2252317,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +816,2252459,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +817,2252511,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +818,2252661,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +819,2252732,"TERMINAL",0,0,"[?25l[?2004l\r]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared[?2004h[tum_cte0515@hkn0733 tum_ind3695-jafa_ws_shared]$ [?25h",,terminal_output +820,2252911,"TERMINAL",0,0,"l",,terminal_output +821,2252979,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +822,2253085,"TERMINAL",0,0,"[?25l[?2004l\r[?25h",,terminal_output +823,2253156,"TERMINAL",0,0,"checkpoints count_items.sh data huggingface logs 
scripts\r\n]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared[?2004h[tum_cte0515@hkn0733 tum_ind3695-jafa_ws_shared]$ ",,terminal_output +824,2253340,"TERMINAL",0,0,"86163",,terminal_output +825,2253612,"TERMINAL",0,0,"[?25lcd[?25h[?25ld[?25h",,terminal_output +826,2253763,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +827,2254365,"TERMINAL",0,0,"97274",,terminal_output +828,2254829,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +829,2254986,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +830,2255105,"TERMINAL",0,0,"gs/",,terminal_output +831,2255442,"TERMINAL",0,0,"108385",,terminal_output +832,2255558,"TERMINAL",0,0,"[?25l[?2004l\r]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs[?2004h[tum_cte0515@hkn0733 logs]$ [?25h",,terminal_output +833,2255757,"TERMINAL",0,0,"c",,terminal_output +834,2255823,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +835,2255968,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +836,2256331,"TERMINAL",0,0,"3317098",,terminal_output +837,2256485,"TERMINAL",0,0,"19496",,terminal_output +838,2256759,"TERMINAL",0,0,"3317098",,terminal_output +839,2257102,"TERMINAL",0,0,"[?25l[?2004l\rbash: cd: 3317098: No such file or directory\r\n]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs[?2004h[tum_cte0515@hkn0733 logs]$ [?25h",,terminal_output +840,2257562,"TERMINAL",0,0,"2305307",,terminal_output +841,2257925,"TERMINAL",0,0,"l",,terminal_output +842,2257996,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +843,2258085,"TERMINAL",0,0,"[?25l[?2004l\r[?25h3306965 logs_alfred logs_mihir\r\n]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs[?2004h[tum_cte0515@hkn0733 logs]$ ",,terminal_output +844,2258586,"TERMINAL",0,0,"31618",,terminal_output +845,2258908,"TERMINAL",0,0,"c",,terminal_output +846,2258977,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +847,2259080,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +848,2259143,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +849,2259316,"TERMINAL",0,0,"ogs_",,terminal_output +850,2259611,"TERMINAL",0,0,"42729",,terminal_output +851,2259794,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +852,2260249,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +853,2260375,"TERMINAL",0,0,"ihir/",,terminal_output +854,2260675,"TERMINAL",0,0,"[?25l[?2004l\r]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir[?2004h[tum_cte0515@hkn0733 logs_mihir]$ [?25h",,terminal_output +855,2260693,"TERMINAL",0,0,"53837:00",,terminal_output +856,2261262,"TERMINAL",0,0,"c",,terminal_output +857,2261325,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +858,2261473,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +859,2261679,"TERMINAL",0,0,"64941",,terminal_output +860,2262273,"TERMINAL",0,0,"3317098",,terminal_output +861,2262602,"TERMINAL",0,0,"3317098",,terminal_output +862,2262723,"TERMINAL",0,0,"763163",,terminal_output +863,2262907,"TERMINAL",0,0,"[?25l[?2004l\rbash: cd: 3317098: No such file or directory\r\n]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir[?2004h[tum_cte0515@hkn0733 logs_mihir]$ [?25h",,terminal_output +864,2263603,"TERMINAL",0,0,"l",,terminal_output +865,2263741,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +866,2263760,"TERMINAL",0,0,"97274",,terminal_output +867,2263805,"TERMINAL",0,0,"[?25l[?2004l\r[?25h",,terminal_output +868,2263923,"TERMINAL",0,0,"train_lam_minecraft_overfit_sample_3309655.log 
train_tokenizer_model_size_scaling_200M_3316020.log\r\ntrain_lam_model_size_scaling_38M_3317098.log train_tokenizer_model_size_scaling_37M_3313565.log\r\ntrain_tokenizer_minecraft_overfit_sample_3309656.log train_tokenizer_model_size_scaling_37M_3316022.log\r\ntrain_tokenizer_model_size_scaling_140M_3313562.log train_tokenizer_model_size_scaling_80M_3313564.log\r\ntrain_tokenizer_model_size_scaling_140M_3316019.log train_tokenizer_model_size_scaling_80M_3316026.log\r\ntrain_tokenizer_model_size_scaling_200M_3313563.log\r\n]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir[?2004h[tum_cte0515@hkn0733 logs_mihir]$ ",,terminal_output +869,2264832,"TERMINAL",0,0,"208385",,terminal_output +870,2265865,"TERMINAL",0,0,"19496",,terminal_output +871,2266919,"TERMINAL",0,0,"2405407",,terminal_output +872,2267976,"TERMINAL",0,0,"3317098",,terminal_output +873,2267977,"TERMINAL",0,0,"31618",,terminal_output +874,2269019,"TERMINAL",0,0,"42729",,terminal_output +875,2269709,"TERMINAL",0,0,"",,terminal_output +876,2270059,"TERMINAL",0,0,"538310",,terminal_output +877,2271124,"TERMINAL",0,0,"64941",,terminal_output +878,2272205,"TERMINAL",0,0,"754052",,terminal_output +879,2272543,"TERMINAL",0,0,"t",,terminal_output +880,2272613,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +881,2272760,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +882,2272959,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +883,2273069,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +884,2273218,"TERMINAL",0,0,"86163",,terminal_output +885,2273491,"TERMINAL",0,0,"[?25l-[?25h",,terminal_output +886,2273769,"TERMINAL",0,0,"[?25lf[?25h",,terminal_output +887,2273894,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +888,2274253,"TERMINAL",0,0,"train_lam_model_size_scaling_38M_3317098.log",,terminal_output +889,2274264,"TERMINAL",0,0,"97274",,terminal_output +890,2275232,"TERMINAL",0,0,"[?25l\rtrain_lam_model_size_scaling_38M_3317098.log\r\n[?2004l\r[?25h2025-07-04 11:12:06.295980: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-04 11:12:06.296604: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-04 11:12:06.743338: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-04 11:12:06.744497: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-04 11:12:06.744516: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-04 11:12:06.745131: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-04 11:12:07.018137: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-04 11:12:07.019274: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-04 11:12:07.019293: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-04 11:12:07.019911: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +891,2275363,"TERMINAL",0,0,"308385",,terminal_output +892,2276362,"TERMINAL",0,0,"19496",,terminal_output +893,2277413,"TERMINAL",0,0,"2505507",,terminal_output +894,2278552,"TERMINAL",0,0,"31618",,terminal_output +895,2279514,"TERMINAL",0,0,"42729",,terminal_output +896,2280601,"TERMINAL",0,0,"538320",,terminal_output +897,2281727,"TERMINAL",0,0,"64941",,terminal_output +898,2282665,"TERMINAL",0,0,"755052",,terminal_output +899,2283775,"TERMINAL",0,0,"86163",,terminal_output +900,2284799,"TERMINAL",0,0,"98385",,terminal_output +901,2285822,"TERMINAL",0,0,"419496",,terminal_output +902,2286951,"TERMINAL",0,0,"27:0057:007",,terminal_output +903,2287973,"TERMINAL",0,0,"31618",,terminal_output +904,2289010,"TERMINAL",0,0,"42729",,terminal_output +905,2290019,"TERMINAL",0,0,"538330",,terminal_output +906,2291149,"TERMINAL",0,0,"64941",,terminal_output +907,2292096,"TERMINAL",0,0,"753:0052",,terminal_output +908,2293197,"TERMINAL",0,0,"86163",,terminal_output +909,2294223,"TERMINAL",0,0,"97274",,terminal_output +910,2295223,"TERMINAL",0,0,"508385",,terminal_output +911,2296261,"TERMINAL",0,0,"19496",,terminal_output +912,2297301,"TERMINAL",0,0,"2105107",,terminal_output +913,2298355,"TERMINAL",0,0,"31618",,terminal_output +914,2299406,"TERMINAL",0,0,"42729",,terminal_output +915,2300460,"TERMINAL",0,0,"538340",,terminal_output +916,2301594,"TERMINAL",0,0,"64941",,terminal_output +917,2302617,"TERMINAL",0,0,"751052",,terminal_output +918,2303642,"TERMINAL",0,0,"86163",,terminal_output +919,2304666,"TERMINAL",0,0,"97274",,terminal_output +920,2305792,"TERMINAL",0,0,"3:008385",,terminal_output +921,2306814,"TERMINAL",0,0,"1205207",,terminal_output +922,2307798,"TERMINAL",0,0,"31618",,terminal_output +923,2308865,"TERMINAL",0,0,"42729",,terminal_output 
+924,2309989,"TERMINAL",0,0,"538350",,terminal_output +925,2311016,"TERMINAL",0,0,"64941",,terminal_output +926,2312037,"TERMINAL",0,0,"752052",,terminal_output +927,2313061,"TERMINAL",0,0,"86163",,terminal_output +928,2314188,"TERMINAL",0,0,"97274",,terminal_output +929,2315212,"TERMINAL",0,0,"108385",,terminal_output +930,2316182,"TERMINAL",0,0,"19496",,terminal_output +931,2317260,"TERMINAL",0,0,"2305307",,terminal_output +932,2318274,"TERMINAL",0,0,"31618",,terminal_output +933,2319320,"TERMINAL",0,0,"42729",,terminal_output +934,2320373,"TERMINAL",0,0,"53838:00",,terminal_output +935,2321423,"TERMINAL",0,0,"64941",,terminal_output +936,2322481,"TERMINAL",0,0,"753052",,terminal_output +937,2323609,"TERMINAL",0,0,"86163",,terminal_output +938,2324542,"TERMINAL",0,0,"97274",,terminal_output +939,2325661,"TERMINAL",0,0,"208385",,terminal_output +940,2326608,"TERMINAL",0,0,"19496",,terminal_output +941,2327709,"TERMINAL",0,0,"2405407",,terminal_output +942,2328695,"TERMINAL",0,0,"31618",,terminal_output +943,2329753,"TERMINAL",0,0,"438310",,terminal_output +944,2330879,"TERMINAL",0,0,"64941",,terminal_output +945,2331902,"TERMINAL",0,0,"754052",,terminal_output +946,2332927,"TERMINAL",0,0,"86163",,terminal_output +947,2333932,"TERMINAL",0,0,"97274",,terminal_output +948,2334978,"TERMINAL",0,0,"308385",,terminal_output +949,2336101,"TERMINAL",0,0,"19496",,terminal_output +950,2337077,"TERMINAL",0,0,"2505507",,terminal_output +951,2338158,"TERMINAL",0,0,"31618",,terminal_output +952,2339173,"TERMINAL",0,0,"42729",,terminal_output +953,2340212,"TERMINAL",0,0,"538320",,terminal_output +954,2341267,"TERMINAL",0,0,"64941",,terminal_output +955,2342352,"TERMINAL",0,0,"755052",,terminal_output +956,2343353,"TERMINAL",0,0,"86163",,terminal_output +957,2344399,"TERMINAL",0,0,"97274",,terminal_output +958,2345449,"TERMINAL",0,0,"408385",,terminal_output +959,2346501,"TERMINAL",0,0,"19496",,terminal_output +960,2347547,"TERMINAL",0,0,"28:0058:007",,terminal_output +961,2348569,"TERMINAL",0,0,"31618",,terminal_output +962,2349619,"TERMINAL",0,0,"42729",,terminal_output +963,2350746,"TERMINAL",0,0,"538330",,terminal_output +964,2351768,"TERMINAL",0,0,"64941",,terminal_output +965,2352752,"TERMINAL",0,0,"764:0163",,terminal_output +966,2353804,"TERMINAL",0,0,"97274",,terminal_output +967,2354850,"TERMINAL",0,0,"508385",,terminal_output +968,2355894,"TERMINAL",0,0,"19496",,terminal_output +969,2356946,"TERMINAL",0,0,"2105107",,terminal_output +970,2358014,"TERMINAL",0,0,"31618",,terminal_output +971,2359041,"TERMINAL",0,0,"42729",,terminal_output +972,2360062,"TERMINAL",0,0,"538340",,terminal_output +973,2361107,"TERMINAL",0,0,"64941",,terminal_output +974,2362157,"TERMINAL",0,0,"751052",,terminal_output +975,2363212,"TERMINAL",0,0,"86163",,terminal_output +976,2364278,"TERMINAL",0,0,"97274",,terminal_output +977,2365322,"TERMINAL",0,0,"4:008385",,terminal_output +978,2366350,"TERMINAL",0,0,"19496",,terminal_output +979,2367374,"TERMINAL",0,0,"2205207",,terminal_output +980,2368442,"TERMINAL",0,0,"31618",,terminal_output +981,2369484,"TERMINAL",0,0,"42729",,terminal_output +982,2370474,"TERMINAL",0,0,"538350",,terminal_output +983,2371525,"TERMINAL",0,0,"64941",,terminal_output +984,2372657,"TERMINAL",0,0,"752052",,terminal_output +985,2373683,"TERMINAL",0,0,"86163",,terminal_output +986,2374711,"TERMINAL",0,0,"97274",,terminal_output +987,2375310,"TERMINAL",0,0,"Step 1320, loss: 0.00720771960914135\r\nStep 1321, loss: 0.007883935235440731\r\nStep 1322, loss: 0.008881154470145702\r\nStep 1323, loss: 
0.00740628270432353\r\nStep 1324, loss: 0.007540864869952202\r\nStep 1325, loss: 0.006547036115080118\r\nStep 1326, loss: 0.007666699588298798\r\nStep 1327, loss: 0.007491332944482565\r\nStep 1328, loss: 0.008032630197703838\r\nStep 1329, loss: 0.008476359769701958\r\nStep 1330, loss: 0.008484517224133015\r\nStep 1331, loss: 0.00655736681073904\r\nStep 1332, loss: 0.0063698068261146545\r\nStep 1333, loss: 0.007700665388256311\r\nStep 1334, loss: 0.007622609846293926\r\nStep 1335, loss: 0.00756039097905159\r\nStep 1336, loss: 0.008597198873758316\r\nStep 1337, loss: 0.006706496700644493\r\nStep 1338, loss: 0.007001976482570171\r\nStep 1339, loss: 0.008226539008319378\r\nStep 1340, loss: 0.007194817997515202\r\nStep 1341, loss: 0.008277339860796928\r\nStep 1342, loss: 0.006961979903280735\r\nStep 1343, loss: 0.006755825597792864\r\nStep 1344, loss: 0.008541209623217583\r\nStep 1345, loss: 0.008151362650096416\r\nStep 1346, loss: 0.008100711740553379\r\nStep 1347, loss: 0.009415503591299057\r\nStep 1348, loss: 0.008234209381043911\r\nStep 1349, loss: 0.007786940783262253\r\nStep 1350, loss: 0.0067556267604231834\r\nStep 1351, loss: 0.008489692583680153\r\nStep 1352, loss: 0.007920539937913418\r\nStep 1353, loss: 0.007739189546555281\r\nStep 1354, loss: 0.00827133096754551\r\nStep 1355, loss: 0.007820883765816689\r\nStep 1356, loss: 0.009046525694429874\r\nStep 1357, loss: 0.00889072846621275\r\nStep 1358, loss: 0.007753264158964157\r\nStep 1359, loss: 0.008024019189178944\r\nStep 1360, loss: 0.008456743322312832\r\nStep 1361, loss: 0.007238385733217001\r\nStep 1362, loss: 0.009621286764740944\r\nStep 1363, loss: 0.008689873851835728\r\nStep 1364, loss: 0.008108659647405148\r\nStep 1365, loss: 0.007891128771007061\r\nStep 1366, loss: 0.006648567039519548\r\nStep 1367, loss: 0.008842199109494686\r\nStep 1368, loss: 0.00787885207682848\r\nStep 1369, loss: 0.006834093946963549\r\nStep 1370, loss: 0.00898378249257803\r\nStep 1371, loss: 0.008220483548939228\r\nStep 1372, loss: 0.008445450104773045\r\nStep 1373, loss: 0.008019383996725082\r\nStep 1374, loss: 0.007668774574995041\r\nStep 1375, loss: 0.0075411139987409115\r\nStep 1376, loss: 0.006785766687244177\r\nStep 1377, loss: 0.007337130140513182\r\nStep 1378, loss: 0.007655171677470207\r\nStep 1379, loss: 0.006915426347404718\r\nStep 1380, loss: 0.007157100364565849\r\nStep 1381, loss: 0.007576079573482275\r\nStep 1382, loss: 0.007275465410202742\r\nStep 1383, loss: 0.007859570905566216\r\nStep 1384, loss: 0.007486606016755104\r\nStep 1385, loss: 0.009112083353102207\r\nStep 1386, loss: 0.01130132470279932\r\nStep 1387, loss: 0.008678866550326347\r\nStep 1388, loss: 0.008469712920486927\r\nStep 1389, loss: 0.007986035197973251\r\nStep 1390, loss: 0.007438951637595892\r\nStep 1391, loss: 0.008237932808697224\r\nStep 1392, loss: 0.007058887276798487\r\nStep 1393, loss: 0.006121563725173473\r\nStep 1394, loss: 0.007023328449577093\r\nStep 1395, loss: 0.009202481247484684\r\nStep 1396, loss: 0.006773974746465683\r\nStep 1397, loss: 0.007511608302593231\r\nStep 1398, loss: 0.0079543087631464\r\nStep 1399, loss: 0.008306686766445637\r\nStep 1400, loss: 0.007004190236330032\r\nStep 1401, loss: 0.007067244499921799\r\nStep 1402, loss: 0.008911756798624992\r\nStep 1403, loss: 0.008294312283396721\r\nStep 1404, loss: 0.007922514341771603\r\nStep 1405, loss: 0.008271293714642525\r\nStep 1406, loss: 0.008591042831540108\r\nStep 1407, loss: 0.007726442068815231\r\nStep 1408, loss: 0.007411696016788483\r\nStep 1409, loss: 0.00755075179040432\r\nStep 1410, 
loss: 0.007561638019979\r\nStep 1411, loss: 0.008104151114821434\r\nStep 1412, loss: 0.008793777786195278\r\nStep 1413, loss: 0.007909945212304592\r\nStep 1414, loss: 0.008457985706627369\r\nStep 1415, loss: 0.005678194109350443\r\nStep 1416, loss: 0.0075266011990606785\r\nStep 1417, loss: 0.008897745981812477\r\nStep 1418, loss: 0.00698827626183629\r\nStep 1419, loss: 0.008560094982385635\r\nStep 1420, loss: 0.00774866808205843\r\nStep 1421, loss: 0.00849932711571455\r\nStep 1422, loss: 0.007702742703258991\r\nStep 1423, loss: 0.00743652880191803\r\nStep 1424, loss: 0.007662967778742313\r\nStep 1425, loss: 0.007685701362788677\r\nStep 1426, loss: 0.008688081987202168\r\n
Step 1427, loss: 0.00800800696015358\r\nStep 1428, loss: 0.007720023859292269\r\nStep 1429, loss: 0.009424170479178429\r\nStep 1430, loss: 0.008792435750365257\r\nStep 1431, loss: 0.008004057221114635\r\nStep 1432, loss: 0.006983859930187464\r\nStep 1433, loss: 0.007000492885708809\r\nStep 1434, loss: 0.008304187096655369\r\nStep 1435, loss: 0.007322118151932955\r\nStep 1436, loss: 0.007619250565767288\r\nStep 1437, loss: 0.007775019388645887\r\nStep 1438, loss: 0.008904464542865753\r\nStep 1439, loss: 0.008660362102091312\r\nStep 1440, loss: 0.0072564673610031605\r\nStep 1441, loss: 0.008629596792161465\r\nStep 1442, loss: 0.008559372276067734\r\n
Step 1443, loss: 0.009303729049861431\r\nStep 1444, loss: 0.007258622441440821\r\nStep 1445, loss: 0.008753298781812191\r\nStep 1446, loss: 0.0072275446727871895\r\nStep 1447, loss: 0.008680860511958599\r\nStep 1448, loss: 0.006690030451864004\r\nStep 1449, loss: 0.007440764456987381\r\nStep 1450, loss: 0.008073128759860992\r\nStep 1451, loss: 0.006983441766351461\r\nStep 1452, loss: 0.0064823199063539505\r\nStep 1453, loss: 0.007557818200439215\r\nStep 1454, loss: 0.007969280704855919\r\nStep 1455, loss: 0.008277376182377338\r\nStep 1456, loss: 0.008277270942926407\r\nStep 1457, loss: 0.008309218101203442\r\nStep 1458, loss: 0.007829679176211357\r\n
Step 1459, loss: 0.006761828903108835\r\nStep 1460, loss: 0.009594653733074665\r\nStep 1461, loss: 0.00914834812283516\r\nStep 1462, loss: 0.008355977013707161\r\nStep 1463, loss: 0.008831902407109737\r\nStep 1464, loss: 0.009627453982830048\r\nStep 1465, loss: 0.006862392649054527\r\nStep 1466, loss: 0.007331472355872393\r\nStep 1467, loss: 0.008676938712596893\r\nStep 1468, loss: 0.007690990809351206\r\nStep 1469, loss: 0.006529787555336952\r\nStep 1470, loss: 0.007124866358935833\r\nStep 1471, loss: 0.007046773564070463\r\nStep 1472, loss: 0.008099162951111794\r\nStep 1473, loss: 0.007711583282798529\r\nStep 1474, loss: 0.00813211314380169\r\n
Step 1475, loss: 0.007400526199489832\r\nStep 1476, loss: 0.00715376203879714\r\nStep 1477, loss: 0.00896035972982645\r\nStep 1478, loss: 0.007030337117612362\r\nStep 1479, loss: 0.008727632462978363\r\nStep 1480, loss: 0.00776367262005806\r\nStep 1481, loss: 0.007565171457827091\r\nStep 1482, loss: 0.007057382259517908\r\nStep 1483, loss: 0.008310321718454361\r\nStep 1484, loss: 0.008377046324312687\r\nStep 1485, loss: 0.008779951371252537\r\nStep 1486, loss: 0.008347886614501476\r\nStep 1487, loss: 0.007677553221583366\r\nStep 1488, loss: 0.006510563660413027\r\nStep 1489, loss: 0.00869457982480526\r\nStep 1490, loss: 0.007162831258028746\r\n
Step 1491, loss: 0.008387118577957153\r\nStep 1492, loss: 0.007167611736804247\r\nStep 1493, loss: 0.007728440221399069\r\nStep 1494, loss: 0.006670766975730658\r\nStep 1495, loss: 0.007737570907920599\r\nStep 1496, loss: 0.008742470294237137\r\nStep 1497, loss: 0.00723178219050169\r\nStep 1498, loss: 0.009606396779417992\r\nStep 1499, loss: 0.008293060585856438\r\nStep 1500, loss: 0.007404019590467215\r\nStep 1501, loss: 0.008258557878434658\r\nStep 1502, loss: 0.008233203552663326\r\nStep 1503, loss: 0.0076472461223602295\r\nStep 1504, loss: 0.008197000250220299\r\nStep 1505, loss: 0.008064262568950653\r\nStep 1506, loss: 0.008342066779732704\r\n
Step 1507, loss: 0.008036400191485882\r\nStep 1508, loss: 0.006755773909389973\r\nStep 1509, loss: 0.008095656521618366\r\nStep 1510, loss: 0.007921221666038036\r\nStep 1511, loss: 0.008781630545854568\r\nStep 1512, loss: 0.0077179791405797005\r\nStep 1513, loss: 0.008915404789149761\r\nStep 1514, loss: 0.007048477418720722\r\nStep 1515, loss: 0.008523005992174149\r\nStep 1516, loss: 0.007729056756943464\r\nStep 1517, loss: 0.00937560573220253\r\nStep 1518, loss: 0.008599952794611454\r\nStep 1519, loss: 0.007923164404928684\r\nStep 1520, loss: 0.00739239202812314\r\nStep 1521, loss: 0.007268124725669622\r\nStep 1522, loss: 0.007549547124654055\r\n
Step 1523, loss: 0.0073455385863780975\r\nStep 1524, loss: 0.006058149971067905\r\nStep 1525, loss: 0.007222222629934549\r\nStep 1526, loss: 0.008520731702446938\r\nStep 1527, loss: 0.006562237627804279\r\nStep 1528, loss: 0.007870600558817387\r\nStep 1529, loss: 0.007636326365172863\r\nStep 1530, loss: 0.008687810972332954\r\nStep 1531, loss: 0.0074821473099291325\r\nStep 1532, loss: 0.007979928515851498\r\nStep 1533, loss: 0.00925440900027752\r\nStep 1534, loss: 0.008057350292801857\r\nStep 1535, loss: 0.007195985876023769\r\nStep 1347, loss: 0.009415503591299057\r\nStep 1348, loss: 0.008234209381043911\r\nStep 1349, loss: 0.007786940783262253\r\nStep 1350, loss: 
0.0067556267604231834\r\nStep 1351, loss: 0.008489692583680153\r\nStep 1352, loss: 0.007920539937913418\r\nStep 1353, loss: 0.007739189546555281\r\nStep 1354, loss: 0.00827133096754551\r\nStep 1355, loss: 0.007820883765816689\r\nStep 1356, loss: 0.009046525694429874\r\nStep 1357, loss: 0.00889072846621275\r\nStep 1358, loss: 0.007753264158964157\r\nStep 1359, loss: 0.008024019189178944\r\nStep 1360, loss: 0.008456743322312832\r\nStep 1361, loss: 0.007238385733217001\r\nStep 1362, loss: 0.009621286764740944\r\nStep 1363, loss: 0.008689873851835728\r\nStep 1364, loss: 0.008108659647405148\r\nStep 1365, loss: 0.007891128771007061\r\nStep 1366, loss: 0.006648567039519548\r\nStep 1367, loss: 0.008842199109494686\r\nStep 1368, loss: 0.00787885207682848\r\nStep 1369, loss: 0.006834093946963549\r\nStep 1370, loss: 0.00898378249257803\r\nStep 1371, loss: 0.008220483548939228\r\nStep 1372, loss: 0.008445450104773045\r\nStep 1373, loss: 0.008019383996725082\r\nStep 1428, loss: 0.007720023859292269\r\nStep 1429, loss: 0.009424170479178429\r\nStep 1430, loss: 0.008792435750365257\r\nStep 1431, loss: 0.008004057221114635\r\nStep 1432, loss: 0.006983859930187464\r\nStep 1433, loss: 0.007000492885708809\r\nStep 1434, loss: 0.008304187096655369\r\nStep 1435, loss: 0.007322118151932955\r\nStep 1436, loss: 0.007619250565767288\r\nStep 1437, loss: 0.007775019388645887\r\nStep 1438, loss: 0.008904464542865753\r\nStep 1439, loss: 0.008660362102091312\r\nStep 1440, loss: 0.0072564673610031605\r\nStep 1441, loss: 0.008629596792161465\r\nStep 1442, loss: 0.008559372276067734\r\nStep 1443, loss: 0.009303729049861431\r\nStep 1444, loss: 0.007258622441440821\r\nStep 1445, loss: 0.008753298781812191\r\nStep 1446, loss: 0.0072275446727871895\r\nStep 1447, loss: 0.008680860511958599\r\nStep 1448, loss: 0.006690030451864004\r\nStep 1449, loss: 0.007440764456987381\r\nStep 1450, loss: 0.008073128759860992\r\nStep 1451, loss: 0.006983441766351461\r\nStep 1452, loss: 0.0064823199063539505\r\nStep 1453, loss: 0.007557818200439215\r\nStep 1533, loss: 0.00925440900027752\r\nStep 1534, loss: 0.008057350292801857\r\nStep 1374, loss: 0.007668774574995041\r\nStep 1375, loss: 0.0075411139987409115\r\nStep 1376, loss: 0.006785766687244177\r\nStep 1377, loss: 0.007337130140513182\r\nStep 1378, loss: 0.007655171677470207\r\nStep 1379, loss: 0.006915426347404718\r\nStep 1380, loss: 0.007157100364565849\r\nStep 1381, loss: 0.007576079573482275\r\nStep 1382, loss: 0.007275465410202742\r\nStep 1383, loss: 0.007859570905566216\r\nStep 1384, loss: 0.007486606016755104\r\nStep 1385, loss: 0.009112083353102207\r\nStep 1386, loss: 0.01130132470279932\r\nStep 1387, loss: 0.008678866550326347\r\nStep 1388, loss: 0.008469712920486927\r\nStep 1389, loss: 0.007986035197973251\r\nStep 1390, loss: 0.007438951637595892\r\nStep 1391, loss: 0.008237932808697224\r\nStep 1392, loss: 0.007058887276798487\r\nStep 1393, loss: 0.006121563725173473\r\nStep 1394, loss: 0.007023328449577093\r\nStep 1395, loss: 0.009202481247484684\r\nStep 1396, loss: 0.006773974746465683\r\nStep 1397, loss: 0.007511608302593231\r\nStep 1398, loss: 0.0079543087631464\r\nStep 1399, loss: 0.008306686766445637\r\nStep 1454, loss: 0.007969280704855919\r\nStep 1455, loss: 0.008277376182377338\r\nStep 1456, loss: 0.008277270942926407\r\nStep 1457, loss: 0.008309218101203442\r\nStep 1458, loss: 0.007829679176211357\r\nStep 1459, loss: 0.006761828903108835\r\nStep 1460, loss: 0.009594653733074665\r\nStep 1461, loss: 0.00914834812283516\r\nStep 1462, loss: 0.008355977013707161\r\nStep 
1463, loss: 0.008831902407109737\r\nStep 1464, loss: 0.009627453982830048\r\nStep 1465, loss: 0.006862392649054527\r\nStep 1466, loss: 0.007331472355872393\r\nStep 1467, loss: 0.008676938712596893\r\nStep 1468, loss: 0.007690990809351206\r\nStep 1469, loss: 0.006529787555336952\r\nStep 1470, loss: 0.007124866358935833\r\nStep 1471, loss: 0.007046773564070463\r\nStep 1472, loss: 0.008099162951111794\r\nStep 1473, loss: 0.007711583282798529\r\nStep 1474, loss: 0.00813211314380169\r\nStep 1475, loss: 0.007400526199489832\r\nStep 1476, loss: 0.00715376203879714\r\nStep 1477, loss: 0.00896035972982645\r\nStep 1478, loss: 0.007030337117612362\r\nStep 1479, loss: 0.008727632462978363\r\nStep 1480, loss: 0.00776367262005806\r\nStep 1535, loss: 0.007195985876023769\r\nStep 1400, loss: 0.007004190236330032\r\nStep 1401, loss: 0.007067244499921799\r\nStep 1402, loss: 0.008911756798624992\r\nStep 1403, loss: 0.008294312283396721\r\nStep 1404, loss: 0.007922514341771603\r\nStep 1405, loss: 0.008271293714642525\r\nStep 1406, loss: 0.008591042831540108\r\nStep 1407, loss: 0.007726442068815231\r\nStep 1408, loss: 0.007411696016788483\r\nStep 1409, loss: 0.00755075179040432\r\nStep 1410, loss: 0.007561638019979\r\nStep 1411, loss: 0.008104151114821434\r\nStep 1412, loss: 0.008793777786195278\r\nStep 1413, loss: 0.007909945212304592\r\nStep 1414, loss: 0.008457985706627369\r\nStep 1415, loss: 0.005678194109350443\r\nStep 1416, loss: 0.0075266011990606785\r\nStep 1417, loss: 0.008897745981812477\r\nStep 1418, loss: 0.00698827626183629\r\nStep 1419, loss: 0.008560094982385635\r\nStep 1420, loss: 0.00774866808205843\r\nStep 1421, loss: 0.00849932711571455\r\nStep 1422, loss: 0.007702742703258991\r\nStep 1423, loss: 0.00743652880191803\r\nStep 1424, loss: 0.007662967778742313\r\nStep 1425, loss: 0.007685701362788677\r\nStep 1426, loss: 0.008688081987202168\r\nStep 1481, loss: 0.007565171457827091\r\nStep 1482, loss: 0.007057382259517908\r\nStep 1483, loss: 0.008310321718454361\r\nStep 1484, loss: 0.008377046324312687\r\nStep 1485, loss: 0.008779951371252537\r\nStep 1486, loss: 0.008347886614501476\r\nStep 1487, loss: 0.007677553221583366\r\nStep 1488, loss: 0.006510563660413027\r\nStep 1489, loss: 0.00869457982480526\r\nStep 1490, loss: 0.007162831258028746\r\nStep 1491, loss: 0.008387118577957153\r\nStep 1492, loss: 0.007167611736804247\r\nStep 1493, loss: 0.007728440221399069\r\nStep 1494, loss: 0.006670766975730658\r\nStep 1495, loss: 0.007737570907920599\r\nStep 1496, loss: 0.008742470294237137\r\nStep 1497, loss: 0.00723178219050169\r\nStep 1498, loss: 0.009606396779417992\r\nStep 1499, loss: 0.008293060585856438\r\nStep 1500, loss: 0.007404019590467215\r\nStep 1501, loss: 0.008258557878434658\r\nStep 1502, loss: 0.008233203552663326\r\nStep 1503, loss: 0.0076472461223602295\r\nStep 1504, loss: 0.008197000250220299\r\nStep 1505, loss: 0.008064262568950653\r\nStep 1506, loss: 0.008342066779732704\r\nStep 1320, loss: 0.00720771960914135\r\nStep 1321, loss: 0.007883935235440731\r\nStep 1322, loss: 0.008881154470145702\r\nStep 1323, loss: 0.00740628270432353\r\nStep 1324, loss: 0.007540864869952202\r\nStep 1325, loss: 0.006547036115080118\r\nStep 1326, loss: 0.007666699588298798\r\nStep 1327, loss: 0.007491332944482565\r\nStep 1328, loss: 0.008032630197703838\r\nStep 1329, loss: 0.008476359769701958\r\nStep 1330, loss: 0.008484517224133015\r\nStep 1331, loss: 0.00655736681073904\r\nStep 1332, loss: 0.0063698068261146545\r\nStep 1333, loss: 0.007700665388256311\r\nStep 1334, loss: 0.007622609846293926\r\nStep 
1335, loss: 0.00756039097905159\r\nStep 1336, loss: 0.008597198873758316\r\nStep 1337, loss: 0.006706496700644493\r\nStep 1338, loss: 0.007001976482570171\r\nStep 1339, loss: 0.008226539008319378\r\nStep 1340, loss: 0.007194817997515202\r\nStep 1341, loss: 0.008277339860796928\r\nStep 1342, loss: 0.006961979903280735\r\nStep 1343, loss: 0.006755825597792864\r\nStep 1344, loss: 0.008541209623217583\r\nStep 1345, loss: 0.008151362650096416\r\nStep 1346, loss: 0.008100711740553379\r\nStep 1427, loss: 0.00800800696015358\r\nStep 1507, loss: 0.008036400191485882\r\nStep 1508, loss: 0.006755773909389973\r\nStep 1509, loss: 0.008095656521618366\r\nStep 1510, loss: 0.007921221666038036\r\nStep 1511, loss: 0.008781630545854568\r\nStep 1512, loss: 0.0077179791405797005\r\nStep 1513, loss: 0.008915404789149761\r\nStep 1514, loss: 0.007048477418720722\r\nStep 1515, loss: 0.008523005992174149\r\nStep 1516, loss: 0.007729056756943464\r\nStep 1517, loss: 0.00937560573220253\r\nStep 1518, loss: 0.008599952794611454\r\nStep 1519, loss: 0.007923164404928684\r\nStep 1520, loss: 0.00739239202812314\r\nStep 1521, loss: 0.007268124725669622\r\nStep 1522, loss: 0.007549547124654055\r\nStep 1523, loss: 0.0073455385863780975\r\nStep 1524, loss: 0.006058149971067905\r\nStep 1525, loss: 0.007222222629934549\r\nStep 1526, loss: 0.008520731702446938\r\nStep 1527, loss: 0.006562237627804279\r\nStep 1528, loss: 0.007870600558817387\r\nStep 1529, loss: 0.007636326365172863\r\nStep 1530, loss: 0.008687810972332954\r\nStep 1531, loss: 0.0074821473099291325\r\nStep 1532, loss: 0.007979928515851498\r\nStep 1347, loss: 0.009415503591299057\r\nStep 1348, loss: 0.008234209381043911\r\nStep 1349, loss: 0.007786940783262253\r\nStep 1350, loss: 0.0067556267604231834\r\nStep 1351, loss: 0.008489692583680153\r\nStep 1352, loss: 0.007920539937913418\r\nStep 1353, loss: 0.007739189546555281\r\nStep 1354, loss: 0.00827133096754551\r\nStep 1355, loss: 0.007820883765816689\r\nStep 1356, loss: 0.009046525694429874\r\nStep 1357, loss: 0.00889072846621275\r\nStep 1358, loss: 0.007753264158964157\r\nStep 1359, loss: 0.008024019189178944\r\nStep 1360, loss: 0.008456743322312832\r\nStep 1361, loss: 0.007238385733217001\r\nStep 1362, loss: 0.009621286764740944\r\nStep 1363, loss: 0.008689873851835728\r\nStep 1364, loss: 0.008108659647405148\r\nStep 1365, loss: 0.007891128771007061\r\nStep 1366, loss: 0.006648567039519548\r\nStep 1367, loss: 0.008842199109494686\r\nStep 1368, loss: 0.00787885207682848\r\nStep 1369, loss: 0.006834093946963549\r\nStep 1370, loss: 0.00898378249257803\r\nStep 1371, loss: 0.008220483548939228\r\nStep 1372, loss: 0.008445450104773045\r\nStep 1373, loss: 0.008019383996725082\r\nStep 1320, loss: 0.00720771960914135\r\nStep 1321, loss: 0.007883935235440731\r\nStep 1322, loss: 0.008881154470145702\r\nStep 1323, loss: 0.00740628270432353\r\nStep 1324, loss: 0.007540864869952202\r\nStep 1325, loss: 0.006547036115080118\r\nStep 1326, loss: 0.007666699588298798\r\nStep 1327, loss: 0.007491332944482565\r\nStep 1328, loss: 0.008032630197703838\r\nStep 1329, loss: 0.008476359769701958\r\nStep 1330, loss: 0.008484517224133015\r\nStep 1331, loss: 0.00655736681073904\r\nStep 1332, loss: 0.0063698068261146545\r\nStep 1333, loss: 0.007700665388256311\r\nStep 1334, loss: 0.007622609846293926\r\nStep 1335, loss: 0.00756039097905159\r\nStep 1336, loss: 0.008597198873758316\r\nStep 1337, loss: 0.006706496700644493\r\nStep 1338, loss: 0.007001976482570171\r\nStep 1339, loss: 0.008226539008319378\r\nStep 1340, loss: 
0.007194817997515202\r\nStep 1341, loss: 0.008277339860796928\r\nStep 1342, loss: 0.006961979903280735\r\nStep 1343, loss: 0.006755825597792864\r\nStep 1344, loss: 0.008541209623217583\r\nStep 1345, loss: 0.008151362650096416\r\nStep 1346, loss: 0.008100711740553379\r\nStep 1533, loss: 0.00925440900027752\r\nStep 1534, loss: 0.008057350292801857\r\nStep 1374, loss: 0.007668774574995041\r\nStep 1375, loss: 0.0075411139987409115\r\nStep 1376, loss: 0.006785766687244177\r\nStep 1377, loss: 0.007337130140513182\r\nStep 1378, loss: 0.007655171677470207\r\nStep 1379, loss: 0.006915426347404718\r\nStep 1380, loss: 0.007157100364565849\r\nStep 1381, loss: 0.007576079573482275\r\nStep 1382, loss: 0.007275465410202742\r\nStep 1383, loss: 0.007859570905566216\r\nStep 1384, loss: 0.007486606016755104\r\nStep 1385, loss: 0.009112083353102207\r\nStep 1386, loss: 0.01130132470279932\r\nStep 1387, loss: 0.008678866550326347\r\nStep 1388, loss: 0.008469712920486927\r\nStep 1389, loss: 0.007986035197973251\r\nStep 1390, loss: 0.007438951637595892\r\nStep 1391, loss: 0.008237932808697224\r\nStep 1392, loss: 0.007058887276798487\r\nStep 1393, loss: 0.006121563725173473\r\nStep 1394, loss: 0.007023328449577093\r\nStep 1395, loss: 0.009202481247484684\r\nStep 1396, loss: 0.006773974746465683\r\nStep 1397, loss: 0.007511608302593231\r\nStep 1398, loss: 0.0079543087631464\r\nStep 1399, loss: 0.008306686766445637\r\nStep 1347, loss: 0.009415503591299057\r\nStep 1348, loss: 0.008234209381043911\r\nStep 1349, loss: 0.007786940783262253\r\nStep 1350, loss: 0.0067556267604231834\r\nStep 1351, loss: 0.008489692583680153\r\nStep 1352, loss: 0.007920539937913418\r\nStep 1353, loss: 0.007739189546555281\r\nStep 1354, loss: 0.00827133096754551\r\nStep 1355, loss: 0.007820883765816689\r\nStep 1356, loss: 0.009046525694429874\r\nStep 1357, loss: 0.00889072846621275\r\nStep 1358, loss: 0.007753264158964157\r\nStep 1359, loss: 0.008024019189178944\r\nStep 1360, loss: 0.008456743322312832\r\nStep 1361, loss: 0.007238385733217001\r\nStep 1362, loss: 0.009621286764740944\r\nStep 1363, loss: 0.008689873851835728\r\nStep 1364, loss: 0.008108659647405148\r\nStep 1365, loss: 0.007891128771007061\r\nStep 1366, loss: 0.006648567039519548\r\nStep 1367, loss: 0.008842199109494686\r\nStep 1368, loss: 0.00787885207682848\r\nStep 1369, loss: 0.006834093946963549\r\nStep 1370, loss: 0.00898378249257803\r\nStep 1371, loss: 0.008220483548939228\r\nStep 1372, loss: 0.008445450104773045\r\nStep 1373, loss: 0.008019383996725082\r\nStep 1535, loss: 0.007195985876023769\r\nStep 1400, loss: 0.007004190236330032\r\nStep 1401, loss: 0.007067244499921799\r\nStep 1402, loss: 0.008911756798624992\r\nStep 1403, loss: 0.008294312283396721\r\nStep 1404, loss: 0.007922514341771603\r\nStep 1405, loss: 0.008271293714642525\r\nStep 1406, loss: 0.008591042831540108\r\nStep 1407, loss: 0.007726442068815231\r\nStep 1408, loss: 0.007411696016788483\r\nStep 1409, loss: 0.00755075179040432\r\nStep 1410, loss: 0.007561638019979\r\nStep 1411, loss: 0.008104151114821434\r\nStep 1412, loss: 0.008793777786195278\r\nStep 1413, loss: 0.007909945212304592\r\nStep 1414, loss: 0.008457985706627369\r\nStep 1415, loss: 0.005678194109350443\r\nStep 1416, loss: 0.0075266011990606785\r\nStep 1417, loss: 0.008897745981812477\r\nStep 1418, loss: 0.00698827626183629\r\nStep 1419, loss: 0.008560094982385635\r\nStep 1420, loss: 0.00774866808205843\r\nStep 1421, loss: 0.00849932711571455\r\nStep 1422, loss: 0.007702742703258991\r\nStep 1423, loss: 0.00743652880191803\r\nStep 1424, loss: 
0.007662967778742313\r\nStep 1425, loss: 0.007685701362788677\r\nStep 1426, loss: 0.008688081987202168\r\nStep 1374, loss: 0.007668774574995041\r\nStep 1375, loss: 0.0075411139987409115\r\nStep 1376, loss: 0.006785766687244177\r\nStep 1377, loss: 0.007337130140513182\r\nStep 1378, loss: 0.007655171677470207\r\nStep 1379, loss: 0.006915426347404718\r\nStep 1380, loss: 0.007157100364565849\r\nStep 1381, loss: 0.007576079573482275\r\nStep 1382, loss: 0.007275465410202742\r\nStep 1383, loss: 0.007859570905566216\r\nStep 1384, loss: 0.007486606016755104\r\nStep 1385, loss: 0.009112083353102207\r\nStep 1386, loss: 0.01130132470279932\r\nStep 1387, loss: 0.008678866550326347\r\nStep 1388, loss: 0.008469712920486927\r\nStep 1389, loss: 0.007986035197973251\r\nStep 1390, loss: 0.007438951637595892\r\nStep 1391, loss: 0.008237932808697224\r\nStep 1392, loss: 0.007058887276798487\r\nStep 1393, loss: 0.006121563725173473\r\nStep 1394, loss: 0.007023328449577093\r\nStep 1395, loss: 0.009202481247484684\r\nStep 1396, loss: 0.006773974746465683\r\nStep 1397, loss: 0.007511608302593231\r\nStep 1398, loss: 0.0079543087631464\r\nStep 1399, loss: 0.008306686766445637\r\nStep 1427, loss: 0.00800800696015358\r\nStep 1400, loss: 0.007004190236330032\r\nStep 1401, loss: 0.007067244499921799\r\nStep 1402, loss: 0.008911756798624992\r\nStep 1403, loss: 0.008294312283396721\r\nStep 1404, loss: 0.007922514341771603\r\nStep 1405, loss: 0.008271293714642525\r\nStep 1406, loss: 0.008591042831540108\r\nStep 1407, loss: 0.007726442068815231\r\nStep 1408, loss: 0.007411696016788483\r\nStep 1409, loss: 0.00755075179040432\r\nStep 1410, loss: 0.007561638019979\r\nStep 1411, loss: 0.008104151114821434\r\nStep 1412, loss: 0.008793777786195278\r\nStep 1413, loss: 0.007909945212304592\r\nStep 1414, loss: 0.008457985706627369\r\nStep 1415, loss: 0.005678194109350443\r\nStep 1416, loss: 0.0075266011990606785\r\nStep 1417, loss: 0.008897745981812477\r\nStep 1418, loss: 0.00698827626183629\r\nStep 1419, loss: 0.008560094982385635\r\nStep 1420, loss: 0.00774866808205843\r\nStep 1421, loss: 0.00849932711571455\r\nStep 1422, loss: 0.007702742703258991\r\nStep 1423, loss: 0.00743652880191803\r\nStep 1424, loss: 0.007662967778742313\r\nStep 1425, loss: 0.007685701362788677\r\nStep 1426, loss: 0.008688081987202168\r\nStep 1320, loss: 0.00720771960914135\r\nStep 1321, loss: 0.007883935235440731\r\nStep 1322, loss: 0.008881154470145702\r\nStep 1323, loss: 0.00740628270432353\r\nStep 1324, loss: 0.007540864869952202\r\nStep 1325, loss: 0.006547036115080118\r\nStep 1326, loss: 0.007666699588298798\r\nStep 1327, loss: 0.007491332944482565\r\nStep 1328, loss: 0.008032630197703838\r\nStep 1329, loss: 0.008476359769701958\r\nStep 1330, loss: 0.008484517224133015\r\nStep 1331, loss: 0.00655736681073904\r\nStep 1332, loss: 0.0063698068261146545\r\nStep 1333, loss: 0.007700665388256311\r\nStep 1334, loss: 0.007622609846293926\r\nStep 1335, loss: 0.00756039097905159\r\nStep 1336, loss: 0.008597198873758316\r\nStep 1337, loss: 0.006706496700644493\r\nStep 1338, loss: 0.007001976482570171\r\nStep 1339, loss: 0.008226539008319378\r\nStep 1340, loss: 0.007194817997515202\r\nStep 1341, loss: 0.008277339860796928\r\nStep 1342, loss: 0.006961979903280735\r\nStep 1343, loss: 0.006755825597792864\r\nStep 1344, loss: 0.008541209623217583\r\nStep 1345, loss: 0.008151362650096416\r\nStep 1346, loss: 0.008100711740553379\r\nStep 1427, loss: 0.00800800696015358\r\nStep 1320, loss: 0.00720771960914135\r\nStep 1321, loss: 0.007883935235440731\r\nStep 1322, loss: 
0.008881154470145702\r\nStep 1323, loss: 0.00740628270432353\r\nStep 1324, loss: 0.007540864869952202\r\nStep 1325, loss: 0.006547036115080118\r\nStep 1326, loss: 0.007666699588298798\r\nStep 1327, loss: 0.007491332944482565\r\nStep 1328, loss: 0.008032630197703838\r\nStep 1329, loss: 0.008476359769701958\r\nStep 1330, loss: 0.008484517224133015\r\nStep 1331, loss: 0.00655736681073904\r\nStep 1332, loss: 0.0063698068261146545\r\nStep 1333, loss: 0.007700665388256311\r\nStep 1334, loss: 0.007622609846293926\r\nStep 1335, loss: 0.00756039097905159\r\nStep 1336, loss: 0.008597198873758316\r\nStep 1337, loss: 0.006706496700644493\r\nStep 1338, loss: 0.007001976482570171\r\nStep 1339, loss: 0.008226539008319378\r\nStep 1340, loss: 0.007194817997515202\r\nStep 1341, loss: 0.008277339860796928\r\nStep 1342, loss: 0.006961979903280735\r\nStep 1343, loss: 0.006755825597792864\r\nStep 1344, loss: 0.008541209623217583\r\nStep 1345, loss: 0.008151362650096416\r\nStep 1346, loss: 0.008100711740553379\r\nStep 1347, loss: 0.009415503591299057\r\nStep 1348, loss: 0.008234209381043911\r\nStep 1349, loss: 0.007786940783262253\r\nStep 1350, loss: 0.0067556267604231834\r\nStep 1351, loss: 0.008489692583680153\r\nStep 1352, loss: 0.007920539937913418\r\nStep 1353, loss: 0.007739189546555281\r\nStep 1354, loss: 0.00827133096754551\r\nStep 1355, loss: 0.007820883765816689\r\nStep 1356, loss: 0.009046525694429874\r\nStep 1357, loss: 0.00889072846621275\r\nStep 1358, loss: 0.007753264158964157\r\nStep 1359, loss: 0.008024019189178944\r\nStep 1360, loss: 0.008456743322312832\r\nStep 1361, loss: 0.007238385733217001\r\nStep 1362, loss: 0.009621286764740944\r\nStep 1363, loss: 0.008689873851835728\r\nStep 1364, loss: 0.008108659647405148\r\nStep 1365, loss: 0.007891128771007061\r\nStep 1366, loss: 0.006648567039519548\r\nStep 1367, loss: 0.008842199109494686\r\nStep 1368, loss: 0.00787885207682848\r\nStep 1369, loss: 0.006834093946963549\r\nStep 1370, loss: 0.00898378249257803\r\nStep 1371, loss: 0.008220483548939228\r\nStep 1372, loss: 0.008445450104773045\r\nStep 1373, loss: 0.008019383996725082\r\nStep 1428, loss: 0.007720023859292269\r\nStep 1429, loss: 0.009424170479178429\r\nStep 1430, loss: 0.008792435750365257\r\nStep 1431, loss: 0.008004057221114635\r\nStep 1432, loss: 0.006983859930187464\r\nStep 1433, loss: 0.007000492885708809\r\nStep 1434, loss: 0.008304187096655369\r\nStep 1435, loss: 0.007322118151932955\r\nStep 1436, loss: 0.007619250565767288\r\nStep 1437, loss: 0.007775019388645887\r\nStep 1438, loss: 0.008904464542865753\r\nStep 1439, loss: 0.008660362102091312\r\nStep 1440, loss: 0.0072564673610031605\r\nStep 1441, loss: 0.008629596792161465\r\nStep 1442, loss: 0.008559372276067734\r\nStep 1443, loss: 0.009303729049861431\r\nStep 1444, loss: 0.007258622441440821\r\nStep 1445, loss: 0.008753298781812191\r\nStep 1446, loss: 0.0072275446727871895\r\nStep 1447, loss: 0.008680860511958599\r\nStep 1448, loss: 0.006690030451864004\r\nStep 1449, loss: 0.007440764456987381\r\nStep 1450, loss: 0.008073128759860992\r\nStep 1451, loss: 0.006983441766351461\r\nStep 1452, loss: 0.0064823199063539505\r\nStep 1453, loss: 0.007557818200439215\r\nStep 1347, loss: 0.009415503591299057\r\nStep 1348, loss: 0.008234209381043911\r\nStep 1349, loss: 0.007786940783262253\r\nStep 1350, loss: 0.0067556267604231834\r\nStep 1351, loss: 0.008489692583680153\r\nStep 1352, loss: 0.007920539937913418\r\nStep 1353, loss: 0.007739189546555281\r\nStep 1354, loss: 0.00827133096754551\r\nStep 1355, loss: 0.007820883765816689\r\nStep 
1356, loss: 0.009046525694429874\r\nStep 1357, loss: 0.00889072846621275\r\nStep 1358, loss: 0.007753264158964157\r\nStep 1359, loss: 0.008024019189178944\r\nStep 1360, loss: 0.008456743322312832\r\nStep 1361, loss: 0.007238385733217001\r\nStep 1362, loss: 0.009621286764740944\r\nStep 1363, loss: 0.008689873851835728\r\nStep 1364, loss: 0.008108659647405148\r\nStep 1365, loss: 0.007891128771007061\r\nStep 1366, loss: 0.006648567039519548\r\nStep 1367, loss: 0.008842199109494686\r\nStep 1368, loss: 0.00787885207682848\r\nStep 1369, loss: 0.006834093946963549\r\nStep 1370, loss: 0.00898378249257803\r\nStep 1371, loss: 0.008220483548939228\r\nStep 1372, loss: 0.008445450104773045\r\nStep 1373, loss: 0.008019383996725082\r\nStep 1374, loss: 0.007668774574995041\r\nStep 1375, loss: 0.0075411139987409115\r\nStep 1376, loss: 0.006785766687244177\r\nStep 1377, loss: 0.007337130140513182\r\nStep 1378, loss: 0.007655171677470207\r\nStep 1379, loss: 0.006915426347404718\r\nStep 1380, loss: 0.007157100364565849\r\nStep 1381, loss: 0.007576079573482275\r\nStep 1382, loss: 0.007275465410202742\r\nStep 1383, loss: 0.007859570905566216\r\nStep 1384, loss: 0.007486606016755104\r\nStep 1385, loss: 0.009112083353102207\r\nStep 1386, loss: 0.01130132470279932\r\nStep 1387, loss: 0.008678866550326347\r\nStep 1388, loss: 0.008469712920486927\r\nStep 1389, loss: 0.007986035197973251\r\nStep 1390, loss: 0.007438951637595892\r\nStep 1391, loss: 0.008237932808697224\r\nStep 1392, loss: 0.007058887276798487\r\nStep 1393, loss: 0.006121563725173473\r\nStep 1394, loss: 0.007023328449577093\r\nStep 1395, loss: 0.009202481247484684\r\nStep 1396, loss: 0.006773974746465683\r\nStep 1397, loss: 0.007511608302593231\r\nStep 1398, loss: 0.0079543087631464\r\nStep 1399, loss: 0.008306686766445637\r\nStep 1454, loss: 0.007969280704855919\r\nStep 1455, loss: 0.008277376182377338\r\nStep 1456, loss: 0.008277270942926407\r\nStep 1457, loss: 0.008309218101203442\r\nStep 1458, loss: 0.007829679176211357\r\nStep 1459, loss: 0.006761828903108835\r\nStep 1460, loss: 0.009594653733074665\r\nStep 1461, loss: 0.00914834812283516\r\nStep 1462, loss: 0.008355977013707161\r\nStep 1463, loss: 0.008831902407109737\r\nStep 1464, loss: 0.009627453982830048\r\nStep 1465, loss: 0.006862392649054527\r\nStep 1466, loss: 0.007331472355872393\r\nStep 1467, loss: 0.008676938712596893\r\nStep 1468, loss: 0.007690990809351206\r\nStep 1469, loss: 0.006529787555336952\r\nStep 1470, loss: 0.007124866358935833\r\nStep 1471, loss: 0.007046773564070463\r\nStep 1472, loss: 0.008099162951111794\r\nStep 1473, loss: 0.007711583282798529\r\nStep 1474, loss: 0.00813211314380169\r\nStep 1475, loss: 0.007400526199489832\r\nStep 1476, loss: 0.00715376203879714\r\nStep 1477, loss: 0.00896035972982645\r\nStep 1478, loss: 0.007030337117612362\r\nStep 1479, loss: 0.008727632462978363\r\nStep 1480, loss: 0.00776367262005806\r\nStep 1374, loss: 0.007668774574995041\r\nStep 1375, loss: 0.0075411139987409115\r\nStep 1376, loss: 0.006785766687244177\r\nStep 1377, loss: 0.007337130140513182\r\nStep 1378, loss: 0.007655171677470207\r\nStep 1379, loss: 0.006915426347404718\r\nStep 1380, loss: 0.007157100364565849\r\nStep 1381, loss: 0.007576079573482275\r\nStep 1382, loss: 0.007275465410202742\r\nStep 1383, loss: 0.007859570905566216\r\nStep 1384, loss: 0.007486606016755104\r\nStep 1385, loss: 0.009112083353102207\r\nStep 1386, loss: 0.01130132470279932\r\nStep 1387, loss: 0.008678866550326347\r\nStep 1388, loss: 0.008469712920486927\r\nStep 1389, loss: 
0.007986035197973251\r\nStep 1390, loss: 0.007438951637595892\r\nStep 1391, loss: 0.008237932808697224\r\nStep 1392, loss: 0.007058887276798487\r\nStep 1393, loss: 0.006121563725173473\r\nStep 1394, loss: 0.007023328449577093\r\nStep 1395, loss: 0.009202481247484684\r\nStep 1396, loss: 0.006773974746465683\r\nStep 1397, loss: 0.007511608302593231\r\nStep 1398, loss: 0.0079543087631464\r\nStep 1399, loss: 0.008306686766445637\r\nStep 1400, loss: 0.007004190236330032\r\nStep 1401, loss: 0.007067244499921799\r\nStep 1402, loss: 0.008911756798624992\r\nStep 1403, loss: 0.008294312283396721\r\nStep 1404, loss: 0.007922514341771603\r\nStep 1405, loss: 0.008271293714642525\r\nStep 1406, loss: 0.008591042831540108\r\nStep 1407, loss: 0.007726442068815231\r\nStep 1408, loss: 0.007411696016788483\r\nStep 1409, loss: 0.00755075179040432\r\nStep 1410, loss: 0.007561638019979\r\nStep 1411, loss: 0.008104151114821434\r\nStep 1412, loss: 0.008793777786195278\r\nStep 1413, loss: 0.007909945212304592\r\nStep 1414, loss: 0.008457985706627369\r\nStep 1415, loss: 0.005678194109350443\r\nStep 1416, loss: 0.0075266011990606785\r\nStep 1417, loss: 0.008897745981812477\r\nStep 1418, loss: 0.00698827626183629\r\nStep 1419, loss: 0.008560094982385635\r\nStep 1420, loss: 0.00774866808205843\r\nStep 1421, loss: 0.00849932711571455\r\nStep 1422, loss: 0.007702742703258991\r\nStep 1423, loss: 0.00743652880191803\r\nStep 1424, loss: 0.007662967778742313\r\nStep 1425, loss: 0.007685701362788677\r\nStep 1426, loss: 0.008688081987202168\r\nStep 1481, loss: 0.007565171457827091\r\nStep 1482, loss: 0.007057382259517908\r\nStep 1483, loss: 0.008310321718454361\r\nStep 1484, loss: 0.008377046324312687\r\nStep 1485, loss: 0.008779951371252537\r\nStep 1486, loss: 0.008347886614501476\r\nStep 1487, loss: 0.007677553221583366\r\nStep 1488, loss: 0.006510563660413027\r\nStep 1489, loss: 0.00869457982480526\r\nStep 1490, loss: 0.007162831258028746\r\nStep 1491, loss: 0.008387118577957153\r\nStep 1492, loss: 0.007167611736804247\r\nStep 1493, loss: 0.007728440221399069\r\nStep 1494, loss: 0.006670766975730658\r\nStep 1495, loss: 0.007737570907920599\r\nStep 1496, loss: 0.008742470294237137\r\nStep 1497, loss: 0.00723178219050169\r\nStep 1498, loss: 0.009606396779417992\r\nStep 1499, loss: 0.008293060585856438\r\nStep 1500, loss: 0.007404019590467215\r\nStep 1501, loss: 0.008258557878434658\r\nStep 1502, loss: 0.008233203552663326\r\nStep 1503, loss: 0.0076472461223602295\r\nStep 1504, loss: 0.008197000250220299\r\nStep 1505, loss: 0.008064262568950653\r\nStep 1506, loss: 0.008342066779732704\r\nStep 1400, loss: 0.007004190236330032\r\nStep 1401, loss: 0.007067244499921799\r\nStep 1402, loss: 0.008911756798624992\r\nStep 1403, loss: 0.008294312283396721\r\nStep 1404, loss: 0.007922514341771603\r\nStep 1405, loss: 0.008271293714642525\r\nStep 1406, loss: 0.008591042831540108\r\nStep 1407, loss: 0.007726442068815231\r\nStep 1408, loss: 0.007411696016788483\r\nStep 1409, loss: 0.00755075179040432\r\nStep 1410, loss: 0.007561638019979\r\nStep 1411, loss: 0.008104151114821434\r\nStep 1412, loss: 0.008793777786195278\r\nStep 1413, loss: 0.007909945212304592\r\nStep 1414, loss: 0.008457985706627369\r\nStep 1415, loss: 0.005678194109350443\r\nStep 1416, loss: 0.0075266011990606785\r\nStep 1417, loss: 0.008897745981812477\r\nStep 1418, loss: 0.00698827626183629\r\nStep 1419, loss: 0.008560094982385635\r\nStep 1420, loss: 0.00774866808205843\r\nStep 1421, loss: 0.00849932711571455\r\nStep 1422, loss: 0.007702742703258991\r\nStep 1423, loss: 
0.00743652880191803\r\nStep 1424, loss: 0.007662967778742313\r\nStep 1425, loss: 0.007685701362788677\r\nStep 1426, loss: 0.008688081987202168\r\nStep 1427, loss: 0.00800800696015358\r\nStep 1507, loss: 0.008036400191485882\r\nStep 1508, loss: 0.006755773909389973\r\nStep 1509, loss: 0.008095656521618366\r\nStep 1510, loss: 0.007921221666038036\r\nStep 1511, loss: 0.008781630545854568\r\nStep 1512, loss: 0.0077179791405797005\r\nStep 1513, loss: 0.008915404789149761\r\nStep 1514, loss: 0.007048477418720722\r\nStep 1515, loss: 0.008523005992174149\r\nStep 1516, loss: 0.007729056756943464\r\nStep 1517, loss: 0.00937560573220253\r\nStep 1518, loss: 0.008599952794611454\r\nStep 1519, loss: 0.007923164404928684\r\nStep 1520, loss: 0.00739239202812314\r\nStep 1521, loss: 0.007268124725669622\r\nStep 1522, loss: 0.007549547124654055\r\nStep 1523, loss: 0.0073455385863780975\r\nStep 1524, loss: 0.006058149971067905\r\nStep 1525, loss: 0.007222222629934549\r\nStep 1526, loss: 0.008520731702446938\r\nStep 1527, loss: 0.006562237627804279\r\nStep 1528, loss: 0.007870600558817387\r\nStep 1529, loss: 0.007636326365172863\r\nStep 1530, loss: 0.008687810972332954\r\nStep 1531, loss: 0.0074821473099291325\r\nStep 1532, loss: 0.007979928515851498\r\nStep 1427, loss: 0.00800800696015358\r\nStep 1428, loss: 0.007720023859292269\r\nStep 1429, loss: 0.009424170479178429\r\nStep 1430, loss: 0.008792435750365257\r\nStep 1431, loss: 0.008004057221114635\r\nStep 1432, loss: 0.006983859930187464\r\nStep 1433, loss: 0.007000492885708809\r\nStep 1434, loss: 0.008304187096655369\r\nStep 1435, loss: 0.007322118151932955\r\nStep 1436, loss: 0.007619250565767288\r\nStep 1437, loss: 0.007775019388645887\r\nStep 1438, loss: 0.008904464542865753\r\nStep 1439, loss: 0.008660362102091312\r\nStep 1440, loss: 0.0072564673610031605\r\nStep 1441, loss: 0.008629596792161465\r\nStep 1442, loss: 0.008559372276067734\r\nStep 1443, loss: 0.009303729049861431\r\nStep 1444, loss: 0.007258622441440821\r\nStep 1445, loss: 0.008753298781812191\r\nStep 1446, loss: 0.0072275446727871895\r\nStep 1447, loss: 0.008680860511958599\r\nStep 1448, loss: 0.006690030451864004\r\nStep 1449, loss: 0.007440764456987381\r\nStep 1450, loss: 0.008073128759860992\r\nStep 1451, loss: 0.006983441766351461\r\nStep 1452, loss: 0.0064823199063539505\r\nStep 1453, loss: 0.007557818200439215\r\nStep 1533, loss: 0.00925440900027752\r\nStep 1534, loss: 0.008057350292801857\r\nStep 1428, loss: 0.007720023859292269\r\nStep 1429, loss: 0.009424170479178429\r\nStep 1430, loss: 0.008792435750365257\r\nStep 1431, loss: 0.008004057221114635\r\nStep 1432, loss: 0.006983859930187464\r\nStep 1433, loss: 0.007000492885708809\r\nStep 1434, loss: 0.008304187096655369\r\nStep 1435, loss: 0.007322118151932955\r\nStep 1436, loss: 0.007619250565767288\r\nStep 1437, loss: 0.007775019388645887\r\nStep 1438, loss: 0.008904464542865753\r\nStep 1439, loss: 0.008660362102091312\r\nStep 1440, loss: 0.0072564673610031605\r\nStep 1441, loss: 0.008629596792161465\r\nStep 1442, loss: 0.008559372276067734\r\nStep 1443, loss: 0.009303729049861431\r\nStep 1444, loss: 0.007258622441440821\r\nStep 1445, loss: 0.008753298781812191\r\nStep 1446, loss: 0.0072275446727871895\r\nStep 1447, loss: 0.008680860511958599\r\nStep 1448, loss: 0.006690030451864004\r\nStep 1449, loss: 0.007440764456987381\r\nStep 1450, loss: 0.008073128759860992\r\nStep 1451, loss: 0.006983441766351461\r\nStep 1452, loss: 0.0064823199063539505\r\nStep 1453, loss: 0.007557818200439215\r\nStep 1454, loss: 
0.007969280704855919\r\nStep 1455, loss: 0.008277376182377338\r\nStep 1456, loss: 0.008277270942926407\r\nStep 1457, loss: 0.008309218101203442\r\nStep 1458, loss: 0.007829679176211357\r\nStep 1459, loss: 0.006761828903108835\r\nStep 1460, loss: 0.009594653733074665\r\nStep 1461, loss: 0.00914834812283516\r\nStep 1462, loss: 0.008355977013707161\r\nStep 1463, loss: 0.008831902407109737\r\nStep 1464, loss: 0.009627453982830048\r\nStep 1465, loss: 0.006862392649054527\r\nStep 1466, loss: 0.007331472355872393\r\nStep 1467, loss: 0.008676938712596893\r\nStep 1468, loss: 0.007690990809351206\r\nStep 1469, loss: 0.006529787555336952\r\nStep 1470, loss: 0.007124866358935833\r\nStep 1471, loss: 0.007046773564070463\r\nStep 1472, loss: 0.008099162951111794\r\nStep 1473, loss: 0.007711583282798529\r\nStep 1474, loss: 0.00813211314380169\r\nStep 1475, loss: 0.007400526199489832\r\nStep 1476, loss: 0.00715376203879714\r\nStep 1477, loss: 0.00896035972982645\r\nStep 1478, loss: 0.007030337117612362\r\nStep 1479, loss: 0.008727632462978363\r\nStep 1480, loss: 0.00776367262005806\r\nStep 1320, loss: 0.00720771960914135\r\nStep 1321, loss: 0.007883935235440731\r\nStep 1322, loss: 0.008881154470145702\r\nStep 1323, loss: 0.00740628270432353\r\nStep 1324, loss: 0.007540864869952202\r\nStep 1325, loss: 0.006547036115080118\r\nStep 1326, loss: 0.007666699588298798\r\nStep 1327, loss: 0.007491332944482565\r\nStep 1328, loss: 0.008032630197703838\r\nStep 1329, loss: 0.008476359769701958\r\nStep 1330, loss: 0.008484517224133015\r\nStep 1331, loss: 0.00655736681073904\r\nStep 1332, loss: 0.0063698068261146545\r\nStep 1333, loss: 0.007700665388256311\r\nStep 1334, loss: 0.007622609846293926\r\nStep 1335, loss: 0.00756039097905159\r\nStep 1336, loss: 0.008597198873758316\r\nStep 1337, loss: 0.006706496700644493\r\nStep 1338, loss: 0.007001976482570171\r\nStep 1339, loss: 0.008226539008319378\r\nStep 1340, loss: 0.007194817997515202\r\nStep 1341, loss: 0.008277339860796928\r\nStep 1342, loss: 0.006961979903280735\r\nStep 1343, loss: 0.006755825597792864\r\nStep 1344, loss: 0.008541209623217583\r\nStep 1345, loss: 0.008151362650096416\r\nStep 1346, loss: 0.008100711740553379\r\nStep 1454, loss: 0.007969280704855919\r\nStep 1455, loss: 0.008277376182377338\r\nStep 1456, loss: 0.008277270942926407\r\nStep 1457, loss: 0.008309218101203442\r\nStep 1458, loss: 0.007829679176211357\r\nStep 1459, loss: 0.006761828903108835\r\nStep 1460, loss: 0.009594653733074665\r\nStep 1461, loss: 0.00914834812283516\r\nStep 1462, loss: 0.008355977013707161\r\nStep 1463, loss: 0.008831902407109737\r\nStep 1464, loss: 0.009627453982830048\r\nStep 1465, loss: 0.006862392649054527\r\nStep 1466, loss: 0.007331472355872393\r\nStep 1467, loss: 0.008676938712596893\r\nStep 1468, loss: 0.007690990809351206\r\nStep 1469, loss: 0.006529787555336952\r\nStep 1470, loss: 0.007124866358935833\r\nStep 1471, loss: 0.007046773564070463\r\nStep 1472, loss: 0.008099162951111794\r\nStep 1473, loss: 0.007711583282798529\r\nStep 1474, loss: 0.00813211314380169\r\nStep 1475, loss: 0.007400526199489832\r\nStep 1476, loss: 0.00715376203879714\r\nStep 1477, loss: 0.00896035972982645\r\nStep 1478, loss: 0.007030337117612362\r\nStep 1479, loss: 0.008727632462978363\r\nStep 1480, loss: 0.00776367262005806\r\nStep 1481, loss: 0.007565171457827091\r\nStep 1482, loss: 0.007057382259517908\r\nStep 1483, loss: 0.008310321718454361\r\nStep 1484, loss: 0.008377046324312687\r\nStep 1485, loss: 0.008779951371252537\r\nStep 1486, loss: 0.008347886614501476\r\nStep 1487, loss: 
0.007677553221583366\r\nStep 1488, loss: 0.006510563660413027\r\nStep 1489, loss: 0.00869457982480526\r\nStep 1490, loss: 0.007162831258028746\r\nStep 1491, loss: 0.008387118577957153\r\nStep 1492, loss: 0.007167611736804247\r\nStep 1493, loss: 0.007728440221399069\r\nStep 1494, loss: 0.006670766975730658\r\nStep 1495, loss: 0.007737570907920599\r\nStep 1496, loss: 0.008742470294237137\r\nStep 1497, loss: 0.00723178219050169\r\nStep 1498, loss: 0.009606396779417992\r\nStep 1499, loss: 0.008293060585856438\r\nStep 1500, loss: 0.007404019590467215\r\nStep 1501, loss: 0.008258557878434658\r\nStep 1502, loss: 0.008233203552663326\r\nStep 1503, loss: 0.0076472461223602295\r\nStep 1504, loss: 0.008197000250220299\r\nStep 1505, loss: 0.008064262568950653\r\nStep 1506, loss: 0.008342066779732704\r\nStep 1347, loss: 0.009415503591299057\r\nStep 1348, loss: 0.008234209381043911\r\nStep 1349, loss: 0.007786940783262253\r\nStep 1350, loss: 0.0067556267604231834\r\nStep 1351, loss: 0.008489692583680153\r\nStep 1352, loss: 0.007920539937913418\r\nStep 1353, loss: 0.007739189546555281\r\nStep 1354, loss: 0.00827133096754551\r\nStep 1355, loss: 0.007820883765816689\r\nStep 1356, loss: 0.009046525694429874\r\nStep 1357, loss: 0.00889072846621275\r\nStep 1358, loss: 0.007753264158964157\r\nStep 1359, loss: 0.008024019189178944\r\nStep 1360, loss: 0.008456743322312832\r\nStep 1361, loss: 0.007238385733217001\r\nStep 1362, loss: 0.009621286764740944\r\nStep 1363, loss: 0.008689873851835728\r\nStep 1364, loss: 0.008108659647405148\r\nStep 1365, loss: 0.007891128771007061\r\nStep 1366, loss: 0.006648567039519548\r\nStep 1367, loss: 0.008842199109494686\r\nStep 1368, loss: 0.00787885207682848\r\nStep 1369, loss: 0.006834093946963549\r\nStep 1370, loss: 0.00898378249257803\r\nStep 1371, loss: 0.008220483548939228\r\nStep 1372, loss: 0.008445450104773045\r\nStep 1373, loss: 0.008019383996725082\r\nStep 1320, loss: 0.00720771960914135\r\nStep 1321, loss: 0.007883935235440731\r\nStep 1322, loss: 0.008881154470145702\r\nStep 1323, loss: 0.00740628270432353\r\nStep 1324, loss: 0.007540864869952202\r\nStep 1325, loss: 0.006547036115080118\r\nStep 1326, loss: 0.007666699588298798\r\nStep 1327, loss: 0.007491332944482565\r\nStep 1328, loss: 0.008032630197703838\r\nStep 1329, loss: 0.008476359769701958\r\nStep 1330, loss: 0.008484517224133015\r\nStep 1331, loss: 0.00655736681073904\r\nStep 1332, loss: 0.0063698068261146545\r\nStep 1333, loss: 0.007700665388256311\r\nStep 1334, loss: 0.007622609846293926\r\nStep 1335, loss: 0.00756039097905159\r\nStep 1336, loss: 0.008597198873758316\r\nStep 1337, loss: 0.006706496700644493\r\nStep 1338, loss: 0.007001976482570171\r\nStep 1339, loss: 0.008226539008319378\r\nStep 1340, loss: 0.007194817997515202\r\nStep 1341, loss: 0.008277339860796928\r\nStep 1342, loss: 0.006961979903280735\r\nStep 1343, loss: 0.006755825597792864\r\nStep 1344, loss: 0.008541209623217583\r\nStep 1345, loss: 0.008151362650096416\r\nStep 1346, loss: 0.008100711740553379\r\nStep 1481, loss: 0.007565171457827091\r\nStep 1482, loss: 0.007057382259517908\r\nStep 1483, loss: 0.008310321718454361\r\nStep 1484, loss: 0.008377046324312687\r\nStep 1485, loss: 0.008779951371252537\r\nStep 1486, loss: 0.008347886614501476\r\nStep 1487, loss: 0.007677553221583366\r\nStep 1488, loss: 0.006510563660413027\r\nStep 1489, loss: 0.00869457982480526\r\nStep 1490, loss: 0.007162831258028746\r\nStep 1491, loss: 0.008387118577957153\r\nStep 1492, loss: 0.007167611736804247\r\nStep 1493, loss: 0.007728440221399069\r\nStep 1494, 
loss: 0.006670766975730658\r\nStep 1495, loss: 0.007737570907920599\r\nStep 1496, loss: 0.008742470294237137\r\nStep 1497, loss: 0.00723178219050169\r\nStep 1498, loss: 0.009606396779417992\r\nStep 1499, loss: 0.008293060585856438\r\nStep 1500, loss: 0.007404019590467215\r\nStep 1501, loss: 0.008258557878434658\r\nStep 1502, loss: 0.008233203552663326\r\nStep 1503, loss: 0.0076472461223602295\r\nStep 1504, loss: 0.008197000250220299\r\nStep 1505, loss: 0.008064262568950653\r\nStep 1506, loss: 0.008342066779732704\r\nStep 1507, loss: 0.008036400191485882\r\nStep 1508, loss: 0.006755773909389973\r\nStep 1509, loss: 0.008095656521618366\r\nStep 1510, loss: 0.007921221666038036\r\nStep 1511, loss: 0.008781630545854568\r\nStep 1512, loss: 0.0077179791405797005\r\nStep 1513, loss: 0.008915404789149761\r\nStep 1514, loss: 0.007048477418720722\r\nStep 1515, loss: 0.008523005992174149\r\nStep 1516, loss: 0.007729056756943464\r\nStep 1517, loss: 0.00937560573220253\r\nStep 1518, loss: 0.008599952794611454\r\nStep 1519, loss: 0.007923164404928684\r\nStep 1520, loss: 0.00739239202812314\r\nStep 1521, loss: 0.007268124725669622\r\nStep 1522, loss: 0.007549547124654055\r\nStep 1523, loss: 0.0073455385863780975\r\nStep 1524, loss: 0.006058149971067905\r\nStep 1525, loss: 0.007222222629934549\r\nStep 1526, loss: 0.008520731702446938\r\nStep 1527, loss: 0.006562237627804279\r\nStep 1528, loss: 0.007870600558817387\r\nStep 1529, loss: 0.007636326365172863\r\nStep 1530, loss: 0.008687810972332954\r\nStep 1531, loss: 0.0074821473099291325\r\nStep 1532, loss: 0.007979928515851498\r\nStep 1374, loss: 0.007668774574995041\r\nStep 1375, loss: 0.0075411139987409115\r\nStep 1376, loss: 0.006785766687244177\r\nStep 1377, loss: 0.007337130140513182\r\nStep 1378, loss: 0.007655171677470207\r\nStep 1379, loss: 0.006915426347404718\r\nStep 1380, loss: 0.007157100364565849\r\nStep 1381, loss: 0.007576079573482275\r\nStep 1382, loss: 0.007275465410202742\r\nStep 1383, loss: 0.007859570905566216\r\nStep 1384, loss: 0.007486606016755104\r\nStep 1385, loss: 0.009112083353102207\r\nStep 1386, loss: 0.01130132470279932\r\nStep 1387, loss: 0.008678866550326347\r\nStep 1388, loss: 0.008469712920486927\r\nStep 1389, loss: 0.007986035197973251\r\nStep 1390, loss: 0.007438951637595892\r\nStep 1391, loss: 0.008237932808697224\r\nStep 1392, loss: 0.007058887276798487\r\nStep 1393, loss: 0.006121563725173473\r\nStep 1394, loss: 0.007023328449577093\r\nStep 1395, loss: 0.009202481247484684\r\nStep 1396, loss: 0.006773974746465683\r\nStep 1397, loss: 0.007511608302593231\r\nStep 1398, loss: 0.0079543087631464\r\nStep 1399, loss: 0.008306686766445637\r\nStep 1347, loss: 0.009415503591299057\r\nStep 1348, loss: 0.008234209381043911\r\nStep 1349, loss: 0.007786940783262253\r\nStep 1350, loss: 0.0067556267604231834\r\nStep 1351, loss: 0.008489692583680153\r\nStep 1352, loss: 0.007920539937913418\r\nStep 1353, loss: 0.007739189546555281\r\nStep 1354, loss: 0.00827133096754551\r\nStep 1355, loss: 0.007820883765816689\r\nStep 1356, loss: 0.009046525694429874\r\nStep 1357, loss: 0.00889072846621275\r\nStep 1358, loss: 0.007753264158964157\r\nStep 1359, loss: 0.008024019189178944\r\nStep 1360, loss: 0.008456743322312832\r\nStep 1361, loss: 0.007238385733217001\r\nStep 1362, loss: 0.009621286764740944\r\nStep 1363, loss: 0.008689873851835728\r\nStep 1364, loss: 0.008108659647405148\r\nStep 1365, loss: 0.007891128771007061\r\nStep 1366, loss: 0.006648567039519548\r\nStep 1367, loss: 0.008842199109494686\r\nStep 1368, loss: 
0.00787885207682848\r\nStep 1369, loss: 0.006834093946963549\r\nStep 1370, loss: 0.00898378249257803\r\nStep 1371, loss: 0.008220483548939228\r\nStep 1372, loss: 0.008445450104773045\r\nStep 1373, loss: 0.008019383996725082\r\nStep 1507, loss: 0.008036400191485882\r\nStep 1508, loss: 0.006755773909389973\r\nStep 1509, loss: 0.008095656521618366\r\nStep 1510, loss: 0.007921221666038036\r\nStep 1511, loss: 0.008781630545854568\r\nStep 1512, loss: 0.0077179791405797005\r\nStep 1513, loss: 0.008915404789149761\r\nStep 1514, loss: 0.007048477418720722\r\nStep 1515, loss: 0.008523005992174149\r\nStep 1516, loss: 0.007729056756943464\r\nStep 1517, loss: 0.00937560573220253\r\nStep 1518, loss: 0.008599952794611454\r\nStep 1519, loss: 0.007923164404928684\r\nStep 1520, loss: 0.00739239202812314\r\nStep 1521, loss: 0.007268124725669622\r\nStep 1522, loss: 0.007549547124654055\r\nStep 1523, loss: 0.0073455385863780975\r\nStep 1524, loss: 0.006058149971067905\r\nStep 1525, loss: 0.007222222629934549\r\nStep 1526, loss: 0.008520731702446938\r\nStep 1527, loss: 0.006562237627804279\r\nStep 1528, loss: 0.007870600558817387\r\nStep 1529, loss: 0.007636326365172863\r\nStep 1530, loss: 0.008687810972332954\r\nStep 1531, loss: 0.0074821473099291325\r\nStep 1532, loss: 0.007979928515851498\r\nStep 1533, loss: 0.00925440900027752\r\nStep 1534, loss: 0.008057350292801857\r\nStep 1400, loss: 0.007004190236330032\r\nStep 1401, loss: 0.007067244499921799\r\nStep 1402, loss: 0.008911756798624992\r\nStep 1403, loss: 0.008294312283396721\r\nStep 1404, loss: 0.007922514341771603\r\nStep 1405, loss: 0.008271293714642525\r\nStep 1406, loss: 0.008591042831540108\r\nStep 1407, loss: 0.007726442068815231\r\nStep 1408, loss: 0.007411696016788483\r\nStep 1409, loss: 0.00755075179040432\r\nStep 1410, loss: 0.007561638019979\r\nStep 1411, loss: 0.008104151114821434\r\nStep 1412, loss: 0.008793777786195278\r\nStep 1413, loss: 0.007909945212304592\r\nStep 1414, loss: 0.008457985706627369\r\nStep 1415, loss: 0.005678194109350443\r\nStep 1416, loss: 0.0075266011990606785\r\nStep 1417, loss: 0.008897745981812477\r\nStep 1418, loss: 0.00698827626183629\r\nStep 1419, loss: 0.008560094982385635\r\nStep 1420, loss: 0.00774866808205843\r\nStep 1421, loss: 0.00849932711571455\r\nStep 1422, loss: 0.007702742703258991\r\nStep 1423, loss: 0.00743652880191803\r\nStep 1424, loss: 0.007662967778742313\r\nStep 1425, loss: 0.007685701362788677\r\nStep 1426, loss: 0.008688081987202168\r\nStep 1374, loss: 0.007668774574995041\r\nStep 1375, loss: 0.0075411139987409115\r\nStep 1376, loss: 0.006785766687244177\r\nStep 1377, loss: 0.007337130140513182\r\nStep 1378, loss: 0.007655171677470207\r\nStep 1379, loss: 0.006915426347404718\r\nStep 1380, loss: 0.007157100364565849\r\nStep 1381, loss: 0.007576079573482275\r\nStep 1382, loss: 0.007275465410202742\r\nStep 1383, loss: 0.007859570905566216\r\nStep 1384, loss: 0.007486606016755104\r\nStep 1385, loss: 0.009112083353102207\r\nStep 1386, loss: 0.01130132470279932\r\nStep 1387, loss: 0.008678866550326347\r\nStep 1388, loss: 0.008469712920486927\r\nStep 1389, loss: 0.007986035197973251\r\nStep 1390, loss: 0.007438951637595892\r\nStep 1391, loss: 0.008237932808697224\r\nStep 1392, loss: 0.007058887276798487\r\nStep 1393, loss: 0.006121563725173473\r\nStep 1394, loss: 0.007023328449577093\r\nStep 1395, loss: 0.009202481247484684\r\nStep 1396, loss: 0.006773974746465683\r\nStep 1397, loss: 0.007511608302593231\r\nStep 1398, loss: 0.0079543087631464\r\nStep 1399, loss: 0.008306686766445637\r\nStep 1533, 
loss: 0.00925440900027752\r\nStep 1534, loss: 0.008057350292801857\r\nStep 1535, loss: 0.007195985876023769\r\n",,terminal_output
+1012,2399366,"TERMINAL",0,0,"Running on 8 devices.\r\nCounting all components: ['action_in', 'encoder', 'vq', 'action_up', 'patch_up', 'decoder']\r\nParameter counts:\r\n{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\r\nStarting training from step 0...\r\n[terminal progress-counter redraw fragments removed; interleaved per-step losses from several concurrently launched, identical training runs collapsed; the same Step 0-218 values are retained once in the sequential log below]\r\nStep 200, loss: 
0.010594911873340607\r\nStep 201, loss: 0.0118786059319973\r\nStep 202, loss: 0.012351179495453835\r\nStep 203, loss: 0.012053634971380234\r\nStep 204, loss: 0.01243047509342432\r\nStep 205, loss: 0.012431791052222252\r\nStep 206, loss: 0.012530690059065819\r\nStep 207, loss: 0.009664309211075306\r\nStep 208, loss: 0.01006846223026514\r\nStep 209, loss: 0.010861100628972054\r\nStep 210, loss: 0.011857938952744007\r\nStep 211, loss: 0.01253808755427599\r\nStep 212, loss: 0.011201965622603893\r\nStep 213, loss: 0.012819544412195683\r\nStep 214, loss: 0.010710623115301132\r\nStep 215, loss: 0.011954248882830143\r\nStep 216, loss: 0.012150179594755173\r\nStep 217, loss: 0.012084055691957474\r\nStep 218, loss: 0.013123934157192707\r\n[a further identical run's complete Step 0-218 log, byte-for-byte the same as the surrounding output, omitted]\r\nRunning on 8 devices.\r\nCounting all components: ['action_in', 'encoder', 'vq', 'action_up', 'patch_up', 'decoder']\r\nParameter counts:\r\n{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\r\nStarting training from step 0...\r\nStep 0, loss: 0.3309202492237091\r\nStep 1, loss: 0.27616071701049805\r\nStep 2, loss: 0.23833505809307098\r\nStep 3, loss: 0.20353123545646667\r\nStep 4, loss: 0.18312352895736694\r\nStep 5, loss: 0.16567449271678925\r\nStep 6, loss: 0.1496116816997528\r\nStep 7, loss: 0.1397426575422287\r\nStep 8, loss: 0.1301795393228531\r\nStep 9, loss: 0.12435252219438553\r\nStep 10, loss: 0.11484285444021225\r\nStep 11, loss: 0.11342452466487885\r\nStep 12, loss: 0.10503888875246048\r\nStep 13, loss: 0.09828095138072968\r\nStep 14, loss: 0.10296234488487244\r\nStep 15, loss: 0.09688253700733185\r\nStep 16, loss: 0.09001464396715164\r\nStep 17, loss: 0.08598079532384872\r\nStep 18, loss: 0.08745668828487396\r\nStep 19, loss: 0.0775802731513977\r\nStep 
20, loss: 0.08166162669658661\r\nStep 21, loss: 0.07300376892089844\r\nStep 22, loss: 0.07714924216270447\r\nStep 23, loss: 0.07810085266828537\r\nStep 24, loss: 0.0794411152601242\r\nStep 25, loss: 0.07307913899421692\r\nStep 26, loss: 0.07097838819026947\r\nStep 27, loss: 0.06554681807756424\r\nStep 28, loss: 0.065010204911232\r\nStep 29, loss: 0.06810400635004044\r\nStep 30, loss: 0.06351493299007416\r\nStep 31, loss: 0.057667750865221024\r\nStep 32, loss: 0.062011126428842545\r\nStep 33, loss: 0.0609549917280674\r\nStep 34, loss: 0.06150144338607788\r\nStep 35, loss: 0.057500164955854416\r\nStep 36, loss: 0.05634911358356476\r\nStep 37, loss: 0.054869506508111954\r\nStep 38, loss: 0.05245821550488472\r\nStep 39, loss: 0.05741355940699577\r\nStep 40, loss: 0.05354347452521324\r\nStep 41, loss: 0.05392030254006386\r\nStep 42, loss: 0.05377781391143799\r\nStep 43, loss: 0.04958712309598923\r\nStep 44, loss: 0.05107380822300911\r\nStep 45, loss: 0.05087074637413025\r\nStep 46, loss: 0.0468355156481266\r\nStep 47, loss: 0.051542893052101135\r\nStep 48, loss: 0.0509573332965374\r\nStep 49, loss: 0.0446152463555336\r\nStep 50, loss: 0.041748303920030594\r\nStep 51, loss: 0.04902645945549011\r\nStep 52, loss: 0.047454170882701874\r\nStep 53, loss: 0.040505826473236084\r\nStep 54, loss: 0.03718667849898338\r\nStep 55, loss: 0.03961941972374916\r\nStep 56, loss: 0.03947916626930237\r\nStep 57, loss: 0.03693949803709984\r\nStep 58, loss: 0.03782138228416443\r\nStep 59, loss: 0.03795648366212845\r\nStep 60, loss: 0.03371633216738701\r\nStep 61, loss: 0.044886138290166855\r\nStep 62, loss: 0.038592565804719925\r\nStep 63, loss: 0.0360867939889431\r\nStep 64, loss: 0.036005713045597076\r\nStep 65, loss: 0.034621596336364746\r\nStep 66, loss: 0.03188462182879448\r\nStep 67, loss: 0.029557932168245316\r\nStep 68, loss: 0.0318872295320034\r\nStep 69, loss: 0.030645810067653656\r\nStep 70, loss: 0.028607860207557678\r\nStep 71, loss: 0.03495610132813454\r\nStep 72, loss: 0.029537387192249298\r\nStep 73, loss: 0.033071547746658325\r\nStep 74, loss: 0.03534964472055435\r\nStep 75, loss: 0.03360242769122124\r\nStep 76, loss: 0.028890511021018028\r\nStep 77, loss: 0.029018152505159378\r\nStep 78, loss: 0.029858632013201714\r\nStep 79, loss: 0.02790442295372486\r\nStep 80, loss: 0.030148373916745186\r\nStep 81, loss: 0.02741095796227455\r\nStep 82, loss: 0.02769358456134796\r\nStep 83, loss: 0.03266967460513115\r\nStep 84, loss: 0.030720101669430733\r\nStep 85, loss: 0.02795012854039669\r\nStep 86, loss: 0.027471110224723816\r\nStep 87, loss: 0.028233686462044716\r\nStep 88, loss: 0.026210511103272438\r\nStep 89, loss: 0.027847111225128174\r\nStep 90, loss: 0.02239195443689823\r\nStep 91, loss: 0.02604525536298752\r\nStep 92, loss: 0.022576235234737396\r\nStep 93, loss: 0.025205545127391815\r\nStep 94, loss: 0.022538412362337112\r\nStep 95, loss: 0.023945016786456108\r\nStep 96, loss: 0.02386571280658245\r\nStep 97, loss: 0.02088065817952156\r\nStep 98, loss: 0.02455727569758892\r\nStep 99, loss: 0.024158356711268425\r\nStep 100, loss: 0.02200816199183464\r\nStep 101, loss: 0.023882700130343437\r\nStep 102, loss: 0.023078344762325287\r\nStep 103, loss: 0.021429037675261497\r\nStep 104, loss: 0.02174082212150097\r\nStep 105, loss: 0.023439496755599976\r\nStep 106, loss: 0.02335542067885399\r\nStep 107, loss: 0.020467158406972885\r\nStep 108, loss: 0.023992128670215607\r\nStep 109, loss: 0.02077864110469818\r\nStep 110, loss: 0.022285621613264084\r\nStep 111, loss: 0.021920684725046158\r\nStep 112, loss: 
0.020692449063062668\r\nStep 113, loss: 0.018958691507577896\r\nStep 114, loss: 0.02061193436384201\r\nStep 115, loss: 0.020444221794605255\r\nStep 116, loss: 0.02078094705939293\r\nStep 117, loss: 0.019729148596525192\r\nStep 118, loss: 0.020966283977031708\r\nStep 119, loss: 0.017715569585561752\r\nStep 120, loss: 0.018365614116191864\r\nStep 121, loss: 0.01908261515200138\r\nStep 122, loss: 0.019502971321344376\r\nStep 123, loss: 0.017858631908893585\r\nStep 124, loss: 0.015927297994494438\r\nStep 125, loss: 0.019642645493149757\r\nStep 126, loss: 0.018544591963291168\r\nStep 127, loss: 0.015933632850646973\r\nStep 128, loss: 0.01618993654847145\r\nStep 129, loss: 0.016362672671675682\r\nStep 130, loss: 0.015333064831793308\r\nStep 131, loss: 0.016816189512610435\r\nStep 132, loss: 0.0164976604282856\r\nStep 133, loss: 0.016223587095737457\r\nStep 134, loss: 0.017775867134332657\r\nStep 135, loss: 0.015498277731239796\r\nStep 136, loss: 0.015683045610785484\r\nStep 137, loss: 0.01383606344461441\r\nStep 138, loss: 0.01694605126976967\r\nStep 139, loss: 0.017274845391511917\r\nStep 140, loss: 0.015150913037359715\r\nStep 141, loss: 0.015680069103837013\r\nStep 142, loss: 0.014308595098555088\r\nStep 143, loss: 0.015988625586032867\r\nStep 144, loss: 0.01708797924220562\r\nStep 145, loss: 0.015064721927046776\r\nStep 146, loss: 0.015196548774838448\r\nStep 147, loss: 0.015233844518661499\r\nStep 148, loss: 0.015244445763528347\r\nStep 149, loss: 0.013996983878314495\r\nStep 150, loss: 0.013958653435111046\r\nStep 151, loss: 0.01511000283062458\r\nStep 152, loss: 0.013374081812798977\r\nStep 153, loss: 0.014145979657769203\r\nStep 154, loss: 0.015165048651397228\r\nStep 155, loss: 0.014963418245315552\r\nStep 156, loss: 0.015931718051433563\r\nStep 157, loss: 0.015752702951431274\r\nStep 158, loss: 0.01360904797911644\r\nStep 159, loss: 0.015732955187559128\r\nStep 160, loss: 0.016268953680992126\r\nStep 161, loss: 0.012776114046573639\r\nStep 162, loss: 0.014002346433699131\r\nStep 163, loss: 0.016474682837724686\r\nStep 164, loss: 0.014050696976482868\r\nStep 165, loss: 0.015429678373038769\r\nStep 166, loss: 0.014365949667990208\r\nStep 167, loss: 0.014085184782743454\r\nStep 168, loss: 0.015173561871051788\r\nStep 169, loss: 0.014162395149469376\r\nStep 170, loss: 0.014322423376142979\r\nStep 171, loss: 0.013332856819033623\r\nStep 172, loss: 0.012761498801410198\r\nStep 173, loss: 0.01492702029645443\r\nStep 174, loss: 0.013961599208414555\r\nStep 175, loss: 0.012958317995071411\r\nStep 176, loss: 0.015379429794847965\r\nStep 177, loss: 0.014328060671687126\r\nStep 178, loss: 0.014507713727653027\r\nStep 179, loss: 0.013650377281010151\r\nStep 180, loss: 0.013451213017106056\r\nStep 181, loss: 0.012091542594134808\r\nStep 182, loss: 0.013077723793685436\r\nStep 183, loss: 0.01345899235457182\r\nStep 184, loss: 0.01339371595531702\r\nStep 185, loss: 0.011480382643640041\r\nStep 186, loss: 0.01359980646520853\r\nStep 187, loss: 0.01263586338609457\r\nStep 188, loss: 0.012540409341454506\r\nStep 189, loss: 0.013751196675002575\r\nStep 190, loss: 0.012528365477919579\r\nStep 191, loss: 0.01227360125631094\r\nStep 192, loss: 0.012847634963691235\r\nStep 193, loss: 0.011203471571207047\r\nStep 194, loss: 0.013195750303566456\r\nStep 195, loss: 0.012723291292786598\r\nStep 196, loss: 0.010990786366164684\r\nStep 197, loss: 0.009713550098240376\r\nStep 198, loss: 0.011404153890907764\r\nStep 199, loss: 0.010602226480841637\r\nStep 200, loss: 0.010594911873340607\r\nStep 201, loss: 
0.0118786059319973\r\nStep 202, loss: 0.012351179495453835\r\nStep 203, loss: 0.012053634971380234\r\nStep 204, loss: 0.01243047509342432\r\nStep 205, loss: 0.012431791052222252\r\nStep 206, loss: 0.012530690059065819\r\nStep 207, loss: 0.009664309211075306\r\nStep 208, loss: 0.01006846223026514\r\nStep 209, loss: 0.010861100628972054\r\nStep 210, loss: 0.011857938952744007\r\nStep 211, loss: 0.01253808755427599\r\nStep 212, loss: 0.011201965622603893\r\nStep 213, loss: 0.012819544412195683\r\nStep 214, loss: 0.010710623115301132\r\nStep 215, loss: 0.011954248882830143\r\nStep 216, loss: 0.012150179594755173\r\nStep 217, loss: 0.012084055691957474\r\nStep 218, loss: 0.013123934157192707\r\n",,terminal_output +1013,2399797,"TERMINAL",0,0,"438320",,terminal_output +1014,2400805,"TERMINAL",0,0,"64941",,terminal_output +1015,2401843,"TERMINAL",0,0,"755052",,terminal_output +1016,2402834,"TERMINAL",0,0,"86163",,terminal_output +1017,2403894,"TERMINAL",0,0,"97274",,terminal_output +1018,2404915,"TERMINAL",0,0,"408385",,terminal_output +1019,2405957,"TERMINAL",0,0,"19496",,terminal_output +1020,2407020,"TERMINAL",0,0,"29:0059:007",,terminal_output +1021,2408094,"TERMINAL",0,0,"31618",,terminal_output +1022,2409089,"TERMINAL",0,0,"42729",,terminal_output +1023,2410130,"TERMINAL",0,0,"538330",,terminal_output +1024,2411175,"TERMINAL",0,0,"64941",,terminal_output +1025,2412221,"TERMINAL",0,0,"755:0052",,terminal_output +1026,2413255,"TERMINAL",0,0,"86163",,terminal_output +1027,2414301,"TERMINAL",0,0,"97274",,terminal_output +1028,2415347,"TERMINAL",0,0,"508385",,terminal_output +1029,2416394,"TERMINAL",0,0,"19496",,terminal_output +1030,2417422,"TERMINAL",0,0,"2105107",,terminal_output +1031,2418534,"TERMINAL",0,0,"31618",,terminal_output +1032,2419510,"TERMINAL",0,0,"42729",,terminal_output +1033,2420555,"TERMINAL",0,0,"538340",,terminal_output +1034,2421733,"TERMINAL",0,0,"64941",,terminal_output +1035,2422733,"TERMINAL",0,0,"751052",,terminal_output +1036,2423757,"TERMINAL",0,0,"86163",,terminal_output +1037,2424755,"TERMINAL",0,0,"98385",,terminal_output +1038,2425909,"TERMINAL",0,0,"5:019496",,terminal_output +1039,2426935,"TERMINAL",0,0,"2205207",,terminal_output +1040,2427954,"TERMINAL",0,0,"31618",,terminal_output +1041,2428957,"TERMINAL",0,0,"42729",,terminal_output +1042,2430010,"TERMINAL",0,0,"538350",,terminal_output +1043,2431063,"TERMINAL",0,0,"64941",,terminal_output +1044,2432164,"TERMINAL",0,0,"752052",,terminal_output +1045,2433178,"TERMINAL",0,0,"86163",,terminal_output +1046,2434199,"TERMINAL",0,0,"97274",,terminal_output +1047,2435244,"TERMINAL",0,0,"108385",,terminal_output +1048,2436304,"TERMINAL",0,0,"19496",,terminal_output +1049,2437376,"TERMINAL",0,0,"2305307",,terminal_output +1050,2438403,"TERMINAL",0,0,"31618",,terminal_output +1051,2439430,"TERMINAL",0,0,"42729",,terminal_output +1052,2440462,"TERMINAL",0,0,"538340:00",,terminal_output +1053,2441512,"TERMINAL",0,0,"64941",,terminal_output +1054,2442554,"TERMINAL",0,0,"753052",,terminal_output +1055,2443647,"TERMINAL",0,0,"86163",,terminal_output +1056,2444749,"TERMINAL",0,0,"97274",,terminal_output +1057,2445775,"TERMINAL",0,0,"208385",,terminal_output +1058,2446763,"TERMINAL",0,0,"1405407",,terminal_output +1059,2447823,"TERMINAL",0,0,"31618",,terminal_output +1060,2448844,"TERMINAL",0,0,"42729",,terminal_output +1061,2449971,"TERMINAL",0,0,"538310",,terminal_output +1062,2451014,"TERMINAL",0,0,"64941",,terminal_output +1063,2452020,"TERMINAL",0,0,"754052",,terminal_output 
+1110,2500291,"TERMINAL",0,0,"Step 219, loss: 0.012402343563735485\r\nStep 220, loss: 0.011631792411208153\r\nStep 221, loss: 0.011584474705159664\r\nStep 222, loss: 0.01097952201962471\r\nStep 223, loss: 0.012712574563920498\r\nStep 224, loss: 0.010229759849607944\r\nStep 225, loss: 0.011558798141777515\r\nStep 226, loss: 0.010583264753222466\r\nStep 227, loss: 0.010705210268497467\r\nStep 228, loss: 0.0116357933729887\r\nStep 229, loss: 0.010814939625561237\r\nStep 230, loss: 0.010558605194091797\r\nStep 231, loss: 0.009908916428685188\r\nStep 232, loss: 0.011581497266888618\r\nStep 233, loss: 0.011802499182522297\r\nStep 234, loss: 0.011465274728834629\r\nStep 235, loss: 0.012010223232209682\r\nStep 236, loss: 0.010722370818257332\r\nStep 237, loss: 0.010274405591189861\r\nStep 238, loss: 0.011278481222689152\r\nStep 239, loss: 0.010846936143934727\r\nStep 240, loss: 0.01259419322013855\r\nStep 241, loss: 0.012422946281731129\r\nStep 242, loss: 0.013480153866112232\r\nStep 243, loss: 0.01149264071136713\r\nStep 244, loss: 0.012327280826866627\r\nStep 245, loss: 0.011576535180211067\r\nStep 246, loss: 0.011629262007772923\r\nStep 247, loss: 0.012494875118136406\r\nStep 248, loss: 0.011843645945191383\r\nStep 249, loss: 0.012646166607737541\r\nStep 250, loss: 0.011376439593732357\r\nStep 251, loss: 0.011232168413698673\r\nStep 252, loss: 0.011057781055569649\r\nStep 253, loss: 0.010992903262376785\r\nStep 254, loss: 0.011301021091639996\r\nStep 255, loss: 0.01089110691100359\r\nStep 256, loss: 0.0103179682046175\r\nStep 257, loss: 0.010743721388280392\r\nStep 258, loss: 0.010402617044746876\r\nStep 259, loss: 0.010314985178411007\r\nStep 260, loss: 0.010662691667675972\r\nStep 261, loss: 0.011964893899857998\r\nStep 262, loss: 0.011444470845162868\r\nStep 263, loss: 0.010359481908380985\r\nStep 264, loss: 0.011071950197219849\r\nStep 265, loss: 0.010385474190115929\r\nStep 266, loss: 0.010786756873130798\r\nStep 267, loss: 0.009867917746305466\r\nStep 268, loss: 0.011203785426914692\r\nStep 269, loss: 0.011108278296887875\r\nStep 270, loss: 0.011300038546323776\r\nStep 271, loss: 0.010646146722137928\r\nStep 272, loss: 0.010200191289186478\r\nStep 273, loss: 0.010808263905346394\r\nStep 274, loss: 0.011265968903899193\r\nStep 275, loss: 0.010250569321215153\r\nStep 276, loss: 0.008533592335879803\r\nStep 277, loss: 0.009490706957876682\r\nStep 278, loss: 0.009791299700737\r\nStep 279, loss: 0.01029213983565569\r\nStep 280, loss: 0.011753804981708527\r\nStep 281, loss: 0.00971137173473835\r\nStep 282, loss: 0.012008721940219402\r\nStep 283, loss: 0.011298278346657753\r\nStep 284, loss: 0.011328218504786491\r\nStep 285, loss: 0.011197622865438461\r\nStep 286, loss: 0.010864348150789738\r\nStep 287, loss: 0.010775812901556492\r\nStep 288, loss: 0.010691273957490921\r\nStep 289, loss: 0.009966089390218258\r\nStep 290, loss: 0.010224885307252407\r\nStep 291, loss: 0.009930402971804142\r\nStep 292, loss: 0.010686405934393406\r\nStep 293, loss: 0.011255526915192604\r\nStep 294, loss: 0.010371316224336624\r\nStep 295, loss: 0.009607769548892975\r\nStep 296, loss: 0.010344200767576694\r\nStep 297, loss: 0.010129978880286217\r\nStep 298, loss: 0.010164660401642323\r\nStep 299, loss: 0.010222935117781162\r\nStep 300, loss: 0.01120209414511919\r\nStep 301, loss: 0.009847558103501797\r\nStep 302, loss: 0.011294144205749035\r\nStep 303, loss: 0.010222863405942917\r\nStep 304, loss: 0.009446301497519016\r\nStep 305, loss: 0.008409097790718079\r\nStep 306, loss: 0.00865502841770649\r\nStep 307, loss: 0.009920651093125343\r\nStep 308, loss: 0.009057370945811272\r\nStep 309, loss: 0.010928399860858917\r\nStep 310, loss: 0.010560085996985435\r\nStep 311, loss: 0.009174142964184284\r\nStep 312, loss: 0.008631434291601181\r\nStep 313, loss: 0.010008814744651318\r\nStep 314, loss: 0.00838238000869751\r\nStep 315, loss: 0.008831451646983624\r\nStep 316, loss: 0.00959621462970972\r\nStep 317, loss: 0.009931536391377449\r\nStep 318, loss: 0.009141447022557259\r\nStep 319, loss: 0.009971554391086102\r\nStep 320, loss: 0.009367017075419426\r\nStep 321, loss: 0.009710235521197319\r\nStep 322, loss: 0.010341423563659191\r\nStep 323, loss: 0.01046605221927166\r\nStep 324, loss: 0.010934630408883095\r\nStep 325, loss: 0.009343990124762058\r\nStep 326, loss: 0.010263442061841488\r\nStep 327, loss: 0.00978117436170578\r\nStep 328, loss: 0.011806083843111992\r\nStep 329, loss: 0.010895607993006706\r\nStep 330, loss: 0.008183012716472149\r\nStep 331, loss: 0.009584175422787666\r\nStep 332, loss: 0.009803099557757378\r\nStep 333, loss: 0.008213256485760212\r\nStep 334, loss: 0.00965780857950449\r\nStep 335, loss: 0.008655296638607979\r\nStep 336, loss: 0.01056174747645855\r\nStep 337, loss: 0.008038876578211784\r\nStep 338, loss: 0.01079742144793272\r\nStep 339, loss: 0.008959968574345112\r\nStep 340, loss: 0.00852648913860321\r\nStep 341, loss: 0.008913596160709858\r\nStep 342, loss: 0.008657476864755154\r\nStep 343, loss: 0.008968519978225231\r\nStep 344, loss: 0.009772698394954205\r\nStep 345, loss: 0.009810158982872963\r\nStep 346, loss: 0.010945646092295647\r\nStep 347, loss: 0.010864058509469032\r\nStep 348, loss: 0.00884501077234745\r\nStep 349, loss: 0.010526054538786411\r\nStep 350, loss: 0.0088763777166605\r\nStep 351, loss: 0.009007164277136326\r\nStep 352, loss: 0.008891397155821323\r\nStep 353, loss: 0.0092351408675313\r\nStep 354, loss: 0.01059066690504551\r\nStep 355, loss: 0.010139535181224346\r\nStep 356, loss: 0.010005930438637733\r\nStep 357, loss: 0.008771003223955631\r\nStep 358, loss: 0.008067444898188114\r\nStep 359, loss: 0.008601492270827293\r\nStep 360, loss: 0.009435067884624004\r\nStep 361, loss: 0.009560937993228436\r\nStep 362, loss: 0.008613449521362782\r\nStep 363, loss: 0.008572123013436794\r\nStep 364, loss: 0.009219275787472725\r\nStep 365, loss: 0.01010737195611\r\nStep 366, loss: 0.008756442926824093\r\nStep 367, loss: 0.010213833302259445\r\nStep 368, loss: 0.00893139187246561\r\nStep 369, loss: 0.009876120835542679\r\nStep 370, loss: 0.009222878143191338\r\nStep 371, loss: 0.009084029123187065\r\nStep 372, loss: 0.009581203572452068\r\nStep 373, loss: 0.0099358931183815\r\nStep 374, loss: 0.008505059406161308\r\nStep 375, loss: 0.009633081965148449\r\nStep 376, loss: 0.00966871902346611\r\nStep 377, loss: 0.008310618810355663\r\nStep 378, loss: 0.008672129362821579\r\nStep 379, loss: 0.007983251474797726\r\nStep 380, loss: 0.009981289505958557\r\nStep 381, loss: 0.008883092552423477\r\nStep 382, loss: 0.009355833753943443\r\nStep 383, loss: 0.008394931443035603\r\nStep 384, loss: 0.011625821702182293\r\nStep 385, loss: 0.008715483359992504\r\nStep 386, loss: 0.00883227027952671\r\nStep 387, loss: 0.009458016604185104\r\nStep 388, loss: 0.009332780726253986\r\nStep 389, loss: 0.009258671663701534\r\nStep 390, loss: 0.009851478971540928\r\nStep 391, loss: 0.009628290310502052\r\nStep 392, loss: 0.007471262477338314\r\nStep 393, loss: 0.009414088912308216\r\nStep 394, loss: 0.009029863402247429\r\nStep 395, loss: 0.008434828370809555\r\nStep 396, loss: 0.009456830099225044\r\nStep 397, loss: 0.008712932467460632\r\nStep 398, loss: 0.009513208642601967\r\nStep 399, loss: 0.008333476260304451\r\nStep 400, loss: 0.007227268535643816\r\nStep 401, loss: 0.008709742687642574\r\nStep 402, loss: 0.008780714124441147\r\nStep 403, loss: 0.009415601380169392\r\nStep 404, loss: 0.009301510639488697\r\nStep 405, loss: 0.008122518658638\r\nStep 406, loss: 0.008958345279097557\r\nStep 407, loss: 0.01046680472791195\r\nStep 408, loss: 0.0080665722489357\r\nStep 409, loss: 0.009830777533352375\r\nStep 410, loss: 0.009133830666542053\r\nStep 411, loss: 0.008492544293403625\r\nStep 412, loss: 0.007803461514413357\r\nStep 413, loss: 0.009163353592157364\r\nStep 414, loss: 0.010654965415596962\r\nStep 415, loss: 0.010023774579167366\r\nStep 416, loss: 0.008814546279609203\r\nStep 417, loss: 0.00868968479335308\r\nStep 418, loss: 0.009255973622202873\r\nStep 419, loss: 0.008926976472139359\r\nStep 420, loss: 0.009140003472566605\r\nStep 421, loss: 0.010159704834222794\r\nStep 422, loss: 0.008238519541919231\r\nStep 423, loss: 0.008558676578104496\r\nStep 424, loss: 0.008526754565536976\r\nStep 425, loss: 0.010752619244158268\r\nStep 426, loss: 0.009448438882827759\r\nStep 427, loss: 0.008833286352455616\r\nStep 428, loss: 0.009218545630574226\r\nStep 429, loss: 0.009204461239278316\r\nStep 430, loss: 0.008507666178047657\r\nStep 431, loss: 0.008917532861232758\r\nStep 432, loss: 0.008146978914737701\r\nStep 433, loss: 0.008149527944624424\r\nStep 434, loss: 0.008935561403632164\r\nStep 435, loss: 0.009877092204988003\r\nStep 436, loss: 0.0077535370364785194\r\nStep 437, loss: 0.009797759354114532\r\nStep 438, loss: 0.008172751404345036\r\nStep 439, loss: 0.008792747743427753\r\nStep 440, loss: 0.008777088485658169\r\nStep 219, loss: 0.012402343563735485\r\nStep 220, loss: 
0.011631792411208153\r\nStep 221, loss: 0.011584474705159664\r\nStep 222, loss: 0.01097952201962471\r\nStep 223, loss: 0.012712574563920498\r\nStep 224, loss: 0.010229759849607944\r\nStep 225, loss: 0.011558798141777515\r\nStep 226, loss: 0.010583264753222466\r\nStep 227, loss: 0.010705210268497467\r\nStep 228, loss: 0.0116357933729887\r\nStep 229, loss: 0.010814939625561237\r\nStep 230, loss: 0.010558605194091797\r\nStep 231, loss: 0.009908916428685188\r\nStep 232, loss: 0.011581497266888618\r\nStep 233, loss: 0.011802499182522297\r\nStep 234, loss: 0.011465274728834629\r\nStep 235, loss: 0.012010223232209682\r\nStep 236, loss: 0.010722370818257332\r\nStep 237, loss: 0.010274405591189861\r\nStep 238, loss: 0.011278481222689152\r\nStep 239, loss: 0.010846936143934727\r\nStep 240, loss: 0.01259419322013855\r\nStep 241, loss: 0.012422946281731129\r\nStep 242, loss: 0.013480153866112232\r\nStep 243, loss: 0.01149264071136713\r\nStep 244, loss: 0.012327280826866627\r\nStep 245, loss: 0.011576535180211067\r\nStep 246, loss: 0.011629262007772923\r\nStep 247, loss: 0.012494875118136406\r\nStep 248, loss: 0.011843645945191383\r\nStep 249, loss: 0.012646166607737541\r\nStep 250, loss: 0.011376439593732357\r\nStep 251, loss: 0.011232168413698673\r\nStep 252, loss: 0.011057781055569649\r\nStep 253, loss: 0.010992903262376785\r\nStep 254, loss: 0.011301021091639996\r\nStep 255, loss: 0.01089110691100359\r\nStep 256, loss: 0.0103179682046175\r\nStep 257, loss: 0.010743721388280392\r\nStep 258, loss: 0.010402617044746876\r\nStep 259, loss: 0.010314985178411007\r\nStep 260, loss: 0.010662691667675972\r\nStep 261, loss: 0.011964893899857998\r\nStep 262, loss: 0.011444470845162868\r\nStep 263, loss: 0.010359481908380985\r\nStep 264, loss: 0.011071950197219849\r\nStep 265, loss: 0.010385474190115929\r\nStep 266, loss: 0.010786756873130798\r\nStep 267, loss: 0.009867917746305466\r\nStep 268, loss: 0.011203785426914692\r\nStep 269, loss: 0.011108278296887875\r\nStep 270, loss: 0.011300038546323776\r\nStep 271, loss: 0.010646146722137928\r\nStep 272, loss: 0.010200191289186478\r\nStep 246, loss: 0.011629262007772923\r\nStep 247, loss: 0.012494875118136406\r\nStep 248, loss: 0.011843645945191383\r\nStep 249, loss: 0.012646166607737541\r\nStep 250, loss: 0.011376439593732357\r\nStep 251, loss: 0.011232168413698673\r\nStep 252, loss: 0.011057781055569649\r\nStep 253, loss: 0.010992903262376785\r\nStep 254, loss: 0.011301021091639996\r\nStep 255, loss: 0.01089110691100359\r\nStep 256, loss: 0.0103179682046175\r\nStep 257, loss: 0.010743721388280392\r\nStep 258, loss: 0.010402617044746876\r\nStep 259, loss: 0.010314985178411007\r\nStep 260, loss: 0.010662691667675972\r\nStep 261, loss: 0.011964893899857998\r\nStep 262, loss: 0.011444470845162868\r\nStep 263, loss: 0.010359481908380985\r\nStep 264, loss: 0.011071950197219849\r\nStep 265, loss: 0.010385474190115929\r\nStep 266, loss: 0.010786756873130798\r\nStep 267, loss: 0.009867917746305466\r\nStep 268, loss: 0.011203785426914692\r\nStep 269, loss: 0.011108278296887875\r\nStep 270, loss: 0.011300038546323776\r\nStep 271, loss: 0.010646146722137928\r\nStep 272, loss: 0.010200191289186478\r\nStep 273, loss: 0.010808263905346394\r\nStep 274, loss: 0.011265968903899193\r\nStep 275, loss: 0.010250569321215153\r\nStep 276, loss: 0.008533592335879803\r\nStep 277, loss: 0.009490706957876682\r\nStep 278, loss: 0.009791299700737\r\nStep 279, loss: 0.01029213983565569\r\nStep 280, loss: 0.011753804981708527\r\nStep 281, loss: 0.00971137173473835\r\nStep 282, loss: 
0.012008721940219402\r\nStep 283, loss: 0.011298278346657753\r\nStep 284, loss: 0.011328218504786491\r\nStep 285, loss: 0.011197622865438461\r\nStep 286, loss: 0.010864348150789738\r\nStep 287, loss: 0.010775812901556492\r\nStep 288, loss: 0.010691273957490921\r\nStep 289, loss: 0.009966089390218258\r\nStep 290, loss: 0.010224885307252407\r\nStep 291, loss: 0.009930402971804142\r\nStep 292, loss: 0.010686405934393406\r\nStep 293, loss: 0.011255526915192604\r\nStep 294, loss: 0.010371316224336624\r\nStep 295, loss: 0.009607769548892975\r\nStep 296, loss: 0.010344200767576694\r\nStep 297, loss: 0.010129978880286217\r\nStep 298, loss: 0.010164660401642323\r\nStep 299, loss: 0.010222935117781162\r\nStep 273, loss: 0.010808263905346394\r\nStep 274, loss: 0.011265968903899193\r\nStep 275, loss: 0.010250569321215153\r\nStep 276, loss: 0.008533592335879803\r\nStep 277, loss: 0.009490706957876682\r\nStep 278, loss: 0.009791299700737\r\nStep 279, loss: 0.01029213983565569\r\nStep 280, loss: 0.011753804981708527\r\nStep 281, loss: 0.00971137173473835\r\nStep 282, loss: 0.012008721940219402\r\nStep 283, loss: 0.011298278346657753\r\nStep 284, loss: 0.011328218504786491\r\nStep 285, loss: 0.011197622865438461\r\nStep 286, loss: 0.010864348150789738\r\nStep 287, loss: 0.010775812901556492\r\nStep 288, loss: 0.010691273957490921\r\nStep 289, loss: 0.009966089390218258\r\nStep 290, loss: 0.010224885307252407\r\nStep 291, loss: 0.009930402971804142\r\nStep 292, loss: 0.010686405934393406\r\nStep 293, loss: 0.011255526915192604\r\nStep 294, loss: 0.010371316224336624\r\nStep 295, loss: 0.009607769548892975\r\nStep 296, loss: 0.010344200767576694\r\nStep 297, loss: 0.010129978880286217\r\nStep 298, loss: 0.010164660401642323\r\nStep 299, loss: 0.010222935117781162\r\nStep 300, loss: 0.01120209414511919\r\nStep 301, loss: 0.009847558103501797\r\nStep 302, loss: 0.011294144205749035\r\nStep 303, loss: 0.010222863405942917\r\nStep 304, loss: 0.009446301497519016\r\nStep 305, loss: 0.008409097790718079\r\nStep 306, loss: 0.00865502841770649\r\nStep 307, loss: 0.009920651093125343\r\nStep 308, loss: 0.009057370945811272\r\nStep 309, loss: 0.010928399860858917\r\nStep 310, loss: 0.010560085996985435\r\nStep 311, loss: 0.009174142964184284\r\nStep 312, loss: 0.008631434291601181\r\nStep 313, loss: 0.010008814744651318\r\nStep 314, loss: 0.00838238000869751\r\nStep 315, loss: 0.008831451646983624\r\nStep 316, loss: 0.00959621462970972\r\nStep 317, loss: 0.009931536391377449\r\nStep 318, loss: 0.009141447022557259\r\nStep 319, loss: 0.009971554391086102\r\nStep 320, loss: 0.009367017075419426\r\nStep 321, loss: 0.009710235521197319\r\nStep 322, loss: 0.010341423563659191\r\nStep 323, loss: 0.01046605221927166\r\nStep 324, loss: 0.010934630408883095\r\nStep 325, loss: 0.009343990124762058\r\nStep 326, loss: 0.010263442061841488\r\nStep 300, loss: 0.01120209414511919\r\nStep 301, loss: 0.009847558103501797\r\nStep 302, loss: 0.011294144205749035\r\nStep 303, loss: 0.010222863405942917\r\nStep 304, loss: 0.009446301497519016\r\nStep 305, loss: 0.008409097790718079\r\nStep 306, loss: 0.00865502841770649\r\nStep 307, loss: 0.009920651093125343\r\nStep 308, loss: 0.009057370945811272\r\nStep 309, loss: 0.010928399860858917\r\nStep 310, loss: 0.010560085996985435\r\nStep 311, loss: 0.009174142964184284\r\nStep 312, loss: 0.008631434291601181\r\nStep 313, loss: 0.010008814744651318\r\nStep 314, loss: 0.00838238000869751\r\nStep 315, loss: 0.008831451646983624\r\nStep 316, loss: 0.00959621462970972\r\nStep 317, loss: 
0.009931536391377449\r\nStep 318, loss: 0.009141447022557259\r\nStep 319, loss: 0.009971554391086102\r\nStep 320, loss: 0.009367017075419426\r\nStep 321, loss: 0.009710235521197319\r\nStep 322, loss: 0.010341423563659191\r\nStep 323, loss: 0.01046605221927166\r\nStep 324, loss: 0.010934630408883095\r\nStep 325, loss: 0.009343990124762058\r\nStep 326, loss: 0.010263442061841488\r\nStep 327, loss: 0.00978117436170578\r\nStep 328, loss: 0.011806083843111992\r\nStep 329, loss: 0.010895607993006706\r\nStep 327, loss: 0.00978117436170578\r\nStep 328, loss: 0.011806083843111992\r\nStep 329, loss: 0.010895607993006706\r\nStep 330, loss: 0.008183012716472149\r\nStep 331, loss: 0.009584175422787666\r\nStep 332, loss: 0.009803099557757378\r\nStep 333, loss: 0.008213256485760212\r\nStep 334, loss: 0.00965780857950449\r\nStep 335, loss: 0.008655296638607979\r\nStep 336, loss: 0.01056174747645855\r\nStep 337, loss: 0.008038876578211784\r\nStep 338, loss: 0.01079742144793272\r\nStep 339, loss: 0.008959968574345112\r\nStep 340, loss: 0.00852648913860321\r\nStep 341, loss: 0.008913596160709858\r\nStep 342, loss: 0.008657476864755154\r\nStep 343, loss: 0.008968519978225231\r\nStep 344, loss: 0.009772698394954205\r\nStep 345, loss: 0.009810158982872963\r\nStep 346, loss: 0.010945646092295647\r\nStep 347, loss: 0.010864058509469032\r\nStep 348, loss: 0.00884501077234745\r\nStep 349, loss: 0.010526054538786411\r\nStep 350, loss: 0.0088763777166605\r\nStep 351, loss: 0.009007164277136326\r\nStep 352, loss: 0.008891397155821323\r\nStep 353, loss: 0.0092351408675313\r\nStep 354, loss: 0.01059066690504551\r\nStep 355, loss: 0.010139535181224346\r\nStep 356, loss: 0.010005930438637733\r\nStep 330, loss: 0.008183012716472149\r\nStep 331, loss: 0.009584175422787666\r\nStep 332, loss: 0.009803099557757378\r\nStep 333, loss: 0.008213256485760212\r\nStep 334, loss: 0.00965780857950449\r\nStep 335, loss: 0.008655296638607979\r\nStep 336, loss: 0.01056174747645855\r\nStep 337, loss: 0.008038876578211784\r\nStep 338, loss: 0.01079742144793272\r\nStep 339, loss: 0.008959968574345112\r\nStep 340, loss: 0.00852648913860321\r\nStep 341, loss: 0.008913596160709858\r\nStep 342, loss: 0.008657476864755154\r\nStep 343, loss: 0.008968519978225231\r\nStep 344, loss: 0.009772698394954205\r\nStep 345, loss: 0.009810158982872963\r\nStep 346, loss: 0.010945646092295647\r\nStep 347, loss: 0.010864058509469032\r\nStep 348, loss: 0.00884501077234745\r\nStep 349, loss: 0.010526054538786411\r\nStep 350, loss: 0.0088763777166605\r\nStep 351, loss: 0.009007164277136326\r\nStep 352, loss: 0.008891397155821323\r\nStep 353, loss: 0.0092351408675313\r\nStep 354, loss: 0.01059066690504551\r\nStep 355, loss: 0.010139535181224346\r\nStep 356, loss: 0.010005930438637733\r\nStep 357, loss: 0.008771003223955631\r\nStep 358, loss: 0.008067444898188114\r\nStep 359, loss: 0.008601492270827293\r\nStep 360, loss: 0.009435067884624004\r\nStep 361, loss: 0.009560937993228436\r\nStep 362, loss: 0.008613449521362782\r\nStep 363, loss: 0.008572123013436794\r\nStep 364, loss: 0.009219275787472725\r\nStep 365, loss: 0.01010737195611\r\nStep 366, loss: 0.008756442926824093\r\nStep 367, loss: 0.010213833302259445\r\nStep 368, loss: 0.00893139187246561\r\nStep 369, loss: 0.009876120835542679\r\nStep 370, loss: 0.009222878143191338\r\nStep 371, loss: 0.009084029123187065\r\nStep 372, loss: 0.009581203572452068\r\nStep 373, loss: 0.0099358931183815\r\nStep 374, loss: 0.008505059406161308\r\nStep 375, loss: 0.009633081965148449\r\nStep 376, loss: 
0.00966871902346611\r\nStep 377, loss: 0.008310618810355663\r\nStep 378, loss: 0.008672129362821579\r\nStep 379, loss: 0.007983251474797726\r\nStep 380, loss: 0.009981289505958557\r\nStep 381, loss: 0.008883092552423477\r\nStep 382, loss: 0.009355833753943443\r\nStep 383, loss: 0.008394931443035603\r\nStep 357, loss: 0.008771003223955631\r\nStep 358, loss: 0.008067444898188114\r\nStep 359, loss: 0.008601492270827293\r\nStep 360, loss: 0.009435067884624004\r\nStep 361, loss: 0.009560937993228436\r\nStep 362, loss: 0.008613449521362782\r\nStep 363, loss: 0.008572123013436794\r\nStep 364, loss: 0.009219275787472725\r\nStep 365, loss: 0.01010737195611\r\nStep 366, loss: 0.008756442926824093\r\nStep 367, loss: 0.010213833302259445\r\nStep 368, loss: 0.00893139187246561\r\nStep 369, loss: 0.009876120835542679\r\nStep 370, loss: 0.009222878143191338\r\nStep 371, loss: 0.009084029123187065\r\nStep 372, loss: 0.009581203572452068\r\nStep 373, loss: 0.0099358931183815\r\nStep 374, loss: 0.008505059406161308\r\nStep 375, loss: 0.009633081965148449\r\nStep 376, loss: 0.00966871902346611\r\nStep 377, loss: 0.008310618810355663\r\nStep 378, loss: 0.008672129362821579\r\nStep 379, loss: 0.007983251474797726\r\nStep 380, loss: 0.009981289505958557\r\nStep 381, loss: 0.008883092552423477\r\nStep 382, loss: 0.009355833753943443\r\nStep 383, loss: 0.008394931443035603\r\nStep 384, loss: 0.011625821702182293\r\nStep 385, loss: 0.008715483359992504\r\nStep 386, loss: 0.00883227027952671\r\nStep 387, loss: 0.009458016604185104\r\nStep 388, loss: 0.009332780726253986\r\nStep 389, loss: 0.009258671663701534\r\nStep 390, loss: 0.009851478971540928\r\nStep 391, loss: 0.009628290310502052\r\nStep 392, loss: 0.007471262477338314\r\nStep 393, loss: 0.009414088912308216\r\nStep 394, loss: 0.009029863402247429\r\nStep 395, loss: 0.008434828370809555\r\nStep 396, loss: 0.009456830099225044\r\nStep 397, loss: 0.008712932467460632\r\nStep 398, loss: 0.009513208642601967\r\nStep 399, loss: 0.008333476260304451\r\nStep 400, loss: 0.007227268535643816\r\nStep 401, loss: 0.008709742687642574\r\nStep 402, loss: 0.008780714124441147\r\nStep 403, loss: 0.009415601380169392\r\nStep 404, loss: 0.009301510639488697\r\nStep 405, loss: 0.008122518658638\r\nStep 406, loss: 0.008958345279097557\r\nStep 407, loss: 0.01046680472791195\r\nStep 408, loss: 0.0080665722489357\r\nStep 409, loss: 0.009830777533352375\r\nStep 410, loss: 0.009133830666542053\r\nStep 384, loss: 0.011625821702182293\r\nStep 385, loss: 0.008715483359992504\r\nStep 386, loss: 0.00883227027952671\r\nStep 387, loss: 0.009458016604185104\r\nStep 388, loss: 0.009332780726253986\r\nStep 389, loss: 0.009258671663701534\r\nStep 390, loss: 0.009851478971540928\r\nStep 391, loss: 0.009628290310502052\r\nStep 392, loss: 0.007471262477338314\r\nStep 393, loss: 0.009414088912308216\r\nStep 394, loss: 0.009029863402247429\r\nStep 395, loss: 0.008434828370809555\r\nStep 396, loss: 0.009456830099225044\r\nStep 397, loss: 0.008712932467460632\r\nStep 398, loss: 0.009513208642601967\r\nStep 399, loss: 0.008333476260304451\r\nStep 400, loss: 0.007227268535643816\r\nStep 401, loss: 0.008709742687642574\r\nStep 402, loss: 0.008780714124441147\r\nStep 403, loss: 0.009415601380169392\r\nStep 404, loss: 0.009301510639488697\r\nStep 405, loss: 0.008122518658638\r\nStep 406, loss: 0.008958345279097557\r\nStep 407, loss: 0.01046680472791195\r\nStep 408, loss: 0.0080665722489357\r\nStep 409, loss: 0.009830777533352375\r\nStep 410, loss: 0.009133830666542053\r\nStep 411, loss: 
Step 440, loss: 0.008777088485658169\r\n",,terminal_output
+1111,2501060,"TERMINAL",0,0,"64941",,terminal_output
+1112,2502094,"TERMINAL",0,0,"753052",,terminal_output
+1113,2503424,"TERMINAL",0,0,"86163",,terminal_output
+1114,2504244,"TERMINAL",0,0,"97274",,terminal_output
+1115,2505212,"TERMINAL",0,0,"208385",,terminal_output
+1116,2505983,"TERMINAL",0,0,"srun",,terminal_focus
+1117,2506231,"TERMINAL",0,0,"19496",,terminal_output
+1118,2507325,"TERMINAL",0,0,"2405407",,terminal_output
+1119,2507883,"TERMINAL",0,0,"srun",,terminal_focus
+1120,2508302,"TERMINAL",0,0,"31618",,terminal_output
+1121,2509388,"TERMINAL",0,0,"42729",,terminal_output
+1122,2509402,"TERMINAL",0,0,"Step 1536, loss: 0.008187836036086082
\r\n[... training-log output elided: steps 1537-1751 appear several times each, interleaved out of order, likely multiplexed output from parallel srun tasks; losses range roughly 0.006-0.010 ...]\r\nStep 1644, loss: 0.0074112084694206715\r\nStep 1645, loss: 0.008350981399416924\r\nStep 1646, 
loss: 0.007672564126551151\r\nStep 1647, loss: 0.007430119439959526\r\nStep 1648, loss: 0.008122129365801811\r\nStep 1649, loss: 0.006787996273487806\r\nStep 1650, loss: 0.007542396429926157\r\nStep 1651, loss: 0.00665294099599123\r\nStep 1652, loss: 0.006335591431707144\r\nStep 1653, loss: 0.007724990602582693\r\nStep 1654, loss: 0.008031480945646763\r\nStep 1655, loss: 0.007781133521348238\r\nStep 1656, loss: 0.007816992700099945\r\nStep 1657, loss: 0.008315594866871834\r\nStep 1658, loss: 0.008215798065066338\r\nStep 1659, loss: 0.008099790662527084\r\nStep 1660, loss: 0.006514322943985462\r\nStep 1661, loss: 0.007386818993836641\r\nStep 1662, loss: 0.008341656066477299\r\nStep 1663, loss: 0.008426396176218987\r\nStep 1664, loss: 0.008052345365285873\r\nStep 1665, loss: 0.007347847800701857\r\nStep 1666, loss: 0.007858898490667343\r\nStep 1667, loss: 0.007327028084546328\r\nStep 1668, loss: 0.008239862509071827\r\nStep 1669, loss: 0.007635383401066065\r\nStep 1696, loss: 0.006903140340000391\r\nStep 1697, loss: 0.00847446359694004\r\nStep 1698, loss: 0.007219031453132629\r\nStep 1699, loss: 0.008547683246433735\r\nStep 1700, loss: 0.008718534372746944\r\nStep 1701, loss: 0.009107892401516438\r\nStep 1702, loss: 0.008554563857614994\r\nStep 1703, loss: 0.009697475470602512\r\nStep 1704, loss: 0.008394952863454819\r\nStep 1705, loss: 0.007848896086215973\r\nStep 1706, loss: 0.007483388297259808\r\nStep 1707, loss: 0.005993971601128578\r\nStep 1708, loss: 0.00765452953055501\r\nStep 1709, loss: 0.0065749953500926495\r\nStep 1710, loss: 0.007273221854120493\r\nStep 1711, loss: 0.006847929209470749\r\nStep 1712, loss: 0.006090110633522272\r\nStep 1713, loss: 0.007094529457390308\r\nStep 1714, loss: 0.008160137571394444\r\nStep 1715, loss: 0.007716684136539698\r\nStep 1716, loss: 0.007477016653865576\r\nStep 1717, loss: 0.009491547010838985\r\nStep 1718, loss: 0.007434906903654337\r\nStep 1719, loss: 0.008541461080312729\r\nStep 1720, loss: 0.007313624955713749\r\nStep 1721, loss: 0.00781959667801857\r\nStep 1644, loss: 0.0074112084694206715\r\nStep 1645, loss: 0.008350981399416924\r\nStep 1646, loss: 0.007672564126551151\r\nStep 1647, loss: 0.007430119439959526\r\nStep 1648, loss: 0.008122129365801811\r\nStep 1649, loss: 0.006787996273487806\r\nStep 1650, loss: 0.007542396429926157\r\nStep 1651, loss: 0.00665294099599123\r\nStep 1652, loss: 0.006335591431707144\r\nStep 1653, loss: 0.007724990602582693\r\nStep 1654, loss: 0.008031480945646763\r\nStep 1655, loss: 0.007781133521348238\r\nStep 1656, loss: 0.007816992700099945\r\nStep 1657, loss: 0.008315594866871834\r\nStep 1658, loss: 0.008215798065066338\r\nStep 1659, loss: 0.008099790662527084\r\nStep 1660, loss: 0.006514322943985462\r\nStep 1661, loss: 0.007386818993836641\r\nStep 1662, loss: 0.008341656066477299\r\nStep 1663, loss: 0.008426396176218987\r\nStep 1664, loss: 0.008052345365285873\r\nStep 1665, loss: 0.007347847800701857\r\nStep 1666, loss: 0.007858898490667343\r\nStep 1667, loss: 0.007327028084546328\r\nStep 1668, loss: 0.008239862509071827\r\nStep 1669, loss: 0.007635383401066065\r\nStep 1563, loss: 0.009440598078072071\r\nStep 1564, loss: 0.007657089736312628\r\nStep 1565, loss: 0.006977619603276253\r\nStep 1566, loss: 0.006828180514276028\r\nStep 1567, loss: 0.006950976327061653\r\nStep 1568, loss: 0.005743010435253382\r\nStep 1569, loss: 0.007142997346818447\r\nStep 1570, loss: 0.008083772845566273\r\nStep 1571, loss: 0.008059779182076454\r\nStep 1572, loss: 0.008990857750177383\r\nStep 1573, loss: 
0.007011147681623697\r\nStep 1574, loss: 0.007031850516796112\r\nStep 1575, loss: 0.007916480302810669\r\nStep 1576, loss: 0.006887739058583975\r\nStep 1577, loss: 0.006685403175652027\r\nStep 1578, loss: 0.008958718739449978\r\nStep 1579, loss: 0.008453777059912682\r\nStep 1580, loss: 0.008035468868911266\r\nStep 1581, loss: 0.008207778446376324\r\nStep 1582, loss: 0.007178385276347399\r\nStep 1583, loss: 0.008020363748073578\r\nStep 1584, loss: 0.006817690096795559\r\nStep 1585, loss: 0.0069619882851839066\r\nStep 1586, loss: 0.007296544499695301\r\nStep 1587, loss: 0.007718900218605995\r\nStep 1588, loss: 0.00755611015483737\r\nStep 1670, loss: 0.008939513936638832\r\nStep 1671, loss: 0.00767308846116066\r\nStep 1672, loss: 0.006550142541527748\r\nStep 1673, loss: 0.006795782130211592\r\nStep 1674, loss: 0.008795519359409809\r\nStep 1675, loss: 0.008797761052846909\r\nStep 1676, loss: 0.006749370601028204\r\nStep 1677, loss: 0.007756649050861597\r\nStep 1678, loss: 0.008998443372547626\r\nStep 1679, loss: 0.007440191227942705\r\nStep 1680, loss: 0.007901267148554325\r\nStep 1681, loss: 0.008502920158207417\r\nStep 1682, loss: 0.008505829609930515\r\nStep 1683, loss: 0.006411525886505842\r\nStep 1684, loss: 0.008588363416492939\r\nStep 1685, loss: 0.00692173233255744\r\nStep 1686, loss: 0.007136263884603977\r\nStep 1687, loss: 0.007504676003009081\r\nStep 1688, loss: 0.007309808861464262\r\nStep 1689, loss: 0.008141886442899704\r\nStep 1690, loss: 0.007988592609763145\r\nStep 1691, loss: 0.008273339830338955\r\nStep 1692, loss: 0.006870232056826353\r\nStep 1693, loss: 0.0068495869636535645\r\nStep 1694, loss: 0.007694709580391645\r\nStep 1695, loss: 0.009599804878234863\r\nStep 1722, loss: 0.008003121241927147\r\nStep 1723, loss: 0.008829144760966301\r\nStep 1724, loss: 0.0074747479520738125\r\nStep 1725, loss: 0.007735308725386858\r\nStep 1726, loss: 0.008236423134803772\r\nStep 1727, loss: 0.007879878394305706\r\nStep 1728, loss: 0.009238236583769321\r\nStep 1729, loss: 0.009845210239291191\r\nStep 1730, loss: 0.008259519003331661\r\nStep 1731, loss: 0.007162179797887802\r\nStep 1732, loss: 0.008448491804301739\r\nStep 1733, loss: 0.008390865288674831\r\nStep 1734, loss: 0.006807361263781786\r\nStep 1735, loss: 0.008280194364488125\r\nStep 1736, loss: 0.006797317881137133\r\nStep 1737, loss: 0.007668196689337492\r\nStep 1738, loss: 0.007283037528395653\r\nStep 1739, loss: 0.00719024334102869\r\nStep 1740, loss: 0.007517328951507807\r\nStep 1741, loss: 0.0072776442393660545\r\nStep 1742, loss: 0.009215869009494781\r\nStep 1743, loss: 0.007592868525534868\r\nStep 1744, loss: 0.007078573107719421\r\nStep 1745, loss: 0.007053877227008343\r\nStep 1746, loss: 0.007802892010658979\r\nStep 1747, loss: 0.007612156216055155\r\nStep 1670, loss: 0.008939513936638832\r\nStep 1671, loss: 0.00767308846116066\r\nStep 1672, loss: 0.006550142541527748\r\nStep 1673, loss: 0.006795782130211592\r\nStep 1674, loss: 0.008795519359409809\r\nStep 1675, loss: 0.008797761052846909\r\nStep 1676, loss: 0.006749370601028204\r\nStep 1677, loss: 0.007756649050861597\r\nStep 1678, loss: 0.008998443372547626\r\nStep 1679, loss: 0.007440191227942705\r\nStep 1680, loss: 0.007901267148554325\r\nStep 1681, loss: 0.008502920158207417\r\nStep 1682, loss: 0.008505829609930515\r\nStep 1683, loss: 0.006411525886505842\r\nStep 1684, loss: 0.008588363416492939\r\nStep 1685, loss: 0.00692173233255744\r\nStep 1686, loss: 0.007136263884603977\r\nStep 1687, loss: 0.007504676003009081\r\nStep 1688, loss: 0.007309808861464262\r\nStep 
1689, loss: 0.008141886442899704\r\nStep 1690, loss: 0.007988592609763145\r\nStep 1691, loss: 0.008273339830338955\r\nStep 1692, loss: 0.006870232056826353\r\nStep 1693, loss: 0.0068495869636535645\r\nStep 1694, loss: 0.007694709580391645\r\nStep 1695, loss: 0.009599804878234863\r\nStep 1589, loss: 0.008712777867913246\r\nStep 1590, loss: 0.007497219368815422\r\nStep 1591, loss: 0.008043506182730198\r\nStep 1592, loss: 0.006751270033419132\r\nStep 1593, loss: 0.008551674894988537\r\nStep 1594, loss: 0.007587277330458164\r\nStep 1595, loss: 0.0077667939476668835\r\nStep 1596, loss: 0.008603490889072418\r\nStep 1597, loss: 0.007895995862782001\r\nStep 1598, loss: 0.00929008424282074\r\nStep 1599, loss: 0.009897058829665184\r\nStep 1600, loss: 0.007759366650134325\r\nStep 1601, loss: 0.008193353191018105\r\nStep 1602, loss: 0.007737089414149523\r\nStep 1603, loss: 0.006492974702268839\r\nStep 1604, loss: 0.008825495839118958\r\nStep 1605, loss: 0.008608585223555565\r\nStep 1606, loss: 0.007923366501927376\r\nStep 1607, loss: 0.00665367441251874\r\nStep 1608, loss: 0.00696725957095623\r\nStep 1609, loss: 0.006957065314054489\r\nStep 1610, loss: 0.00696487445384264\r\nStep 1611, loss: 0.007112245075404644\r\nStep 1612, loss: 0.006711696740239859\r\nStep 1613, loss: 0.00747537100687623\r\nStep 1614, loss: 0.008746178820729256\r\nStep 1615, loss: 0.008868562988936901\r\nStep 1696, loss: 0.006903140340000391\r\nStep 1697, loss: 0.00847446359694004\r\nStep 1698, loss: 0.007219031453132629\r\nStep 1699, loss: 0.008547683246433735\r\nStep 1700, loss: 0.008718534372746944\r\nStep 1701, loss: 0.009107892401516438\r\nStep 1702, loss: 0.008554563857614994\r\nStep 1703, loss: 0.009697475470602512\r\nStep 1704, loss: 0.008394952863454819\r\nStep 1705, loss: 0.007848896086215973\r\nStep 1706, loss: 0.007483388297259808\r\nStep 1707, loss: 0.005993971601128578\r\nStep 1708, loss: 0.00765452953055501\r\nStep 1709, loss: 0.0065749953500926495\r\nStep 1710, loss: 0.007273221854120493\r\nStep 1711, loss: 0.006847929209470749\r\nStep 1712, loss: 0.006090110633522272\r\nStep 1713, loss: 0.007094529457390308\r\nStep 1714, loss: 0.008160137571394444\r\nStep 1715, loss: 0.007716684136539698\r\nStep 1716, loss: 0.007477016653865576\r\nStep 1717, loss: 0.009491547010838985\r\nStep 1718, loss: 0.007434906903654337\r\nStep 1719, loss: 0.008541461080312729\r\nStep 1720, loss: 0.007313624955713749\r\nStep 1721, loss: 0.00781959667801857\r\nStep 1748, loss: 0.00802230928093195\r\nStep 1749, loss: 0.009651273488998413\r\nStep 1750, loss: 0.007929863408207893\r\nStep 1696, loss: 0.006903140340000391\r\nStep 1697, loss: 0.00847446359694004\r\nStep 1698, loss: 0.007219031453132629\r\nStep 1699, loss: 0.008547683246433735\r\nStep 1700, loss: 0.008718534372746944\r\nStep 1701, loss: 0.009107892401516438\r\nStep 1702, loss: 0.008554563857614994\r\nStep 1703, loss: 0.009697475470602512\r\nStep 1704, loss: 0.008394952863454819\r\nStep 1705, loss: 0.007848896086215973\r\nStep 1706, loss: 0.007483388297259808\r\nStep 1707, loss: 0.005993971601128578\r\nStep 1708, loss: 0.00765452953055501\r\nStep 1709, loss: 0.0065749953500926495\r\nStep 1710, loss: 0.007273221854120493\r\nStep 1711, loss: 0.006847929209470749\r\nStep 1712, loss: 0.006090110633522272\r\nStep 1713, loss: 0.007094529457390308\r\nStep 1714, loss: 0.008160137571394444\r\nStep 1715, loss: 0.007716684136539698\r\nStep 1716, loss: 0.007477016653865576\r\nStep 1717, loss: 0.009491547010838985\r\nStep 1718, loss: 0.007434906903654337\r\nStep 1719, loss: 
0.008541461080312729\r\nStep 1720, loss: 0.007313624955713749\r\nStep 1721, loss: 0.00781959667801857\r\nStep 1616, loss: 0.007783957291394472\r\nStep 1617, loss: 0.007451385725289583\r\nStep 1618, loss: 0.00733732245862484\r\nStep 1619, loss: 0.008677585050463676\r\nStep 1620, loss: 0.006870839279145002\r\nStep 1621, loss: 0.008756528608500957\r\nStep 1622, loss: 0.008265580050647259\r\nStep 1623, loss: 0.007021893281489611\r\nStep 1624, loss: 0.00679453881457448\r\nStep 1625, loss: 0.00711372634395957\r\nStep 1626, loss: 0.008378872647881508\r\nStep 1627, loss: 0.009000335820019245\r\nStep 1628, loss: 0.007025549653917551\r\nStep 1629, loss: 0.008529024198651314\r\nStep 1630, loss: 0.008187185041606426\r\nStep 1631, loss: 0.009054225869476795\r\nStep 1632, loss: 0.008002746850252151\r\nStep 1633, loss: 0.008577891625463963\r\nStep 1634, loss: 0.00878284964710474\r\nStep 1635, loss: 0.008703123778104782\r\nStep 1636, loss: 0.008663534186780453\r\nStep 1637, loss: 0.008749880827963352\r\nStep 1638, loss: 0.00996770616620779\r\nStep 1639, loss: 0.008074612356722355\r\nStep 1640, loss: 0.009668786078691483\r\nStep 1641, loss: 0.010203409940004349\r\nStep 1642, loss: 0.009471501223742962\r\nStep 1722, loss: 0.008003121241927147\r\nStep 1723, loss: 0.008829144760966301\r\nStep 1724, loss: 0.0074747479520738125\r\nStep 1725, loss: 0.007735308725386858\r\nStep 1726, loss: 0.008236423134803772\r\nStep 1727, loss: 0.007879878394305706\r\nStep 1728, loss: 0.009238236583769321\r\nStep 1729, loss: 0.009845210239291191\r\nStep 1730, loss: 0.008259519003331661\r\nStep 1731, loss: 0.007162179797887802\r\nStep 1732, loss: 0.008448491804301739\r\nStep 1733, loss: 0.008390865288674831\r\nStep 1734, loss: 0.006807361263781786\r\nStep 1735, loss: 0.008280194364488125\r\nStep 1736, loss: 0.006797317881137133\r\nStep 1737, loss: 0.007668196689337492\r\nStep 1738, loss: 0.007283037528395653\r\nStep 1739, loss: 0.00719024334102869\r\nStep 1740, loss: 0.007517328951507807\r\nStep 1741, loss: 0.0072776442393660545\r\nStep 1742, loss: 0.009215869009494781\r\nStep 1743, loss: 0.007592868525534868\r\nStep 1744, loss: 0.007078573107719421\r\nStep 1745, loss: 0.007053877227008343\r\nStep 1746, loss: 0.007802892010658979\r\nStep 1747, loss: 0.007612156216055155\r\nStep 1751, loss: 0.007350419647991657\r\nStep 1722, loss: 0.008003121241927147\r\nStep 1723, loss: 0.008829144760966301\r\nStep 1724, loss: 0.0074747479520738125\r\nStep 1725, loss: 0.007735308725386858\r\nStep 1726, loss: 0.008236423134803772\r\nStep 1727, loss: 0.007879878394305706\r\nStep 1728, loss: 0.009238236583769321\r\nStep 1729, loss: 0.009845210239291191\r\nStep 1730, loss: 0.008259519003331661\r\nStep 1731, loss: 0.007162179797887802\r\nStep 1732, loss: 0.008448491804301739\r\nStep 1733, loss: 0.008390865288674831\r\nStep 1734, loss: 0.006807361263781786\r\nStep 1735, loss: 0.008280194364488125\r\nStep 1736, loss: 0.006797317881137133\r\nStep 1737, loss: 0.007668196689337492\r\nStep 1738, loss: 0.007283037528395653\r\nStep 1739, loss: 0.00719024334102869\r\nStep 1740, loss: 0.007517328951507807\r\nStep 1741, loss: 0.0072776442393660545\r\nStep 1742, loss: 0.009215869009494781\r\nStep 1743, loss: 0.007592868525534868\r\nStep 1744, loss: 0.007078573107719421\r\nStep 1745, loss: 0.007053877227008343\r\nStep 1746, loss: 0.007802892010658979\r\nStep 1747, loss: 0.007612156216055155\r\nStep 1643, loss: 0.007027697283774614\r\nStep 1748, loss: 0.00802230928093195\r\nStep 1749, loss: 0.009651273488998413\r\nStep 1750, loss: 0.007929863408207893\r\nStep 
1536, loss: 0.008187836036086082\r\nStep 1537, loss: 0.005992586724460125\r\nStep 1538, loss: 0.0075413235463202\r\nStep 1539, loss: 0.0077062686905264854\r\nStep 1540, loss: 0.007754159159958363\r\nStep 1541, loss: 0.00818092841655016\r\nStep 1542, loss: 0.00720906862989068\r\nStep 1543, loss: 0.009623878635466099\r\nStep 1544, loss: 0.008170845918357372\r\nStep 1545, loss: 0.008935060352087021\r\nStep 1546, loss: 0.00749565614387393\r\nStep 1547, loss: 0.008688186295330524\r\nStep 1548, loss: 0.00886282417923212\r\nStep 1549, loss: 0.007526679430156946\r\nStep 1550, loss: 0.006964836735278368\r\nStep 1551, loss: 0.008580098859965801\r\nStep 1552, loss: 0.00906881783157587\r\nStep 1553, loss: 0.008019582368433475\r\nStep 1554, loss: 0.007595897652208805\r\nStep 1555, loss: 0.008318536914885044\r\nStep 1556, loss: 0.008528546430170536\r\nStep 1557, loss: 0.008759227581322193\r\nStep 1558, loss: 0.008838594891130924\r\nStep 1559, loss: 0.006689106114208698\r\nStep 1560, loss: 0.006475722882896662\r\nStep 1561, loss: 0.006674910895526409\r\nStep 1562, loss: 0.008995377458631992\r\nStep 1748, loss: 0.00802230928093195\r\nStep 1749, loss: 0.009651273488998413\r\nStep 1750, loss: 0.007929863408207893\r\nStep 1644, loss: 0.0074112084694206715\r\nStep 1645, loss: 0.008350981399416924\r\nStep 1646, loss: 0.007672564126551151\r\nStep 1647, loss: 0.007430119439959526\r\nStep 1648, loss: 0.008122129365801811\r\nStep 1649, loss: 0.006787996273487806\r\nStep 1650, loss: 0.007542396429926157\r\nStep 1651, loss: 0.00665294099599123\r\nStep 1652, loss: 0.006335591431707144\r\nStep 1653, loss: 0.007724990602582693\r\nStep 1654, loss: 0.008031480945646763\r\nStep 1655, loss: 0.007781133521348238\r\nStep 1656, loss: 0.007816992700099945\r\nStep 1657, loss: 0.008315594866871834\r\nStep 1658, loss: 0.008215798065066338\r\nStep 1659, loss: 0.008099790662527084\r\nStep 1660, loss: 0.006514322943985462\r\nStep 1661, loss: 0.007386818993836641\r\nStep 1662, loss: 0.008341656066477299\r\nStep 1663, loss: 0.008426396176218987\r\nStep 1664, loss: 0.008052345365285873\r\nStep 1665, loss: 0.007347847800701857\r\nStep 1666, loss: 0.007858898490667343\r\nStep 1667, loss: 0.007327028084546328\r\nStep 1668, loss: 0.008239862509071827\r\nStep 1669, loss: 0.007635383401066065\r\nStep 1751, loss: 0.007350419647991657\r\nStep 1563, loss: 0.009440598078072071\r\nStep 1564, loss: 0.007657089736312628\r\nStep 1565, loss: 0.006977619603276253\r\nStep 1566, loss: 0.006828180514276028\r\nStep 1567, loss: 0.006950976327061653\r\nStep 1568, loss: 0.005743010435253382\r\nStep 1569, loss: 0.007142997346818447\r\nStep 1570, loss: 0.008083772845566273\r\nStep 1571, loss: 0.008059779182076454\r\nStep 1572, loss: 0.008990857750177383\r\nStep 1573, loss: 0.007011147681623697\r\nStep 1574, loss: 0.007031850516796112\r\nStep 1575, loss: 0.007916480302810669\r\nStep 1576, loss: 0.006887739058583975\r\nStep 1577, loss: 0.006685403175652027\r\nStep 1578, loss: 0.008958718739449978\r\nStep 1579, loss: 0.008453777059912682\r\nStep 1580, loss: 0.008035468868911266\r\nStep 1581, loss: 0.008207778446376324\r\nStep 1582, loss: 0.007178385276347399\r\nStep 1583, loss: 0.008020363748073578\r\nStep 1584, loss: 0.006817690096795559\r\nStep 1585, loss: 0.0069619882851839066\r\nStep 1586, loss: 0.007296544499695301\r\nStep 1587, loss: 0.007718900218605995\r\nStep 1588, loss: 0.00755611015483737\r\nStep 1644, loss: 0.0074112084694206715\r\nStep 1645, loss: 0.008350981399416924\r\nStep 1646, loss: 0.007672564126551151\r\nStep 1647, loss: 
0.007430119439959526\r\nStep 1648, loss: 0.008122129365801811\r\nStep 1649, loss: 0.006787996273487806\r\nStep 1650, loss: 0.007542396429926157\r\nStep 1651, loss: 0.00665294099599123\r\nStep 1652, loss: 0.006335591431707144\r\nStep 1653, loss: 0.007724990602582693\r\nStep 1654, loss: 0.008031480945646763\r\nStep 1655, loss: 0.007781133521348238\r\nStep 1656, loss: 0.007816992700099945\r\nStep 1657, loss: 0.008315594866871834\r\nStep 1658, loss: 0.008215798065066338\r\nStep 1659, loss: 0.008099790662527084\r\nStep 1660, loss: 0.006514322943985462\r\nStep 1661, loss: 0.007386818993836641\r\nStep 1662, loss: 0.008341656066477299\r\nStep 1663, loss: 0.008426396176218987\r\nStep 1664, loss: 0.008052345365285873\r\nStep 1665, loss: 0.007347847800701857\r\nStep 1666, loss: 0.007858898490667343\r\nStep 1667, loss: 0.007327028084546328\r\nStep 1668, loss: 0.008239862509071827\r\nStep 1669, loss: 0.007635383401066065\r\nStep 1670, loss: 0.008939513936638832\r\nStep 1671, loss: 0.00767308846116066\r\nStep 1672, loss: 0.006550142541527748\r\nStep 1673, loss: 0.006795782130211592\r\nStep 1674, loss: 0.008795519359409809\r\nStep 1675, loss: 0.008797761052846909\r\nStep 1676, loss: 0.006749370601028204\r\nStep 1677, loss: 0.007756649050861597\r\nStep 1678, loss: 0.008998443372547626\r\nStep 1679, loss: 0.007440191227942705\r\nStep 1680, loss: 0.007901267148554325\r\nStep 1681, loss: 0.008502920158207417\r\nStep 1682, loss: 0.008505829609930515\r\nStep 1683, loss: 0.006411525886505842\r\nStep 1684, loss: 0.008588363416492939\r\nStep 1685, loss: 0.00692173233255744\r\nStep 1686, loss: 0.007136263884603977\r\nStep 1687, loss: 0.007504676003009081\r\nStep 1688, loss: 0.007309808861464262\r\nStep 1689, loss: 0.008141886442899704\r\nStep 1690, loss: 0.007988592609763145\r\nStep 1691, loss: 0.008273339830338955\r\nStep 1692, loss: 0.006870232056826353\r\nStep 1693, loss: 0.0068495869636535645\r\nStep 1694, loss: 0.007694709580391645\r\nStep 1695, loss: 0.009599804878234863\r\nStep 1536, loss: 0.008187836036086082\r\nStep 1537, loss: 0.005992586724460125\r\nStep 1538, loss: 0.0075413235463202\r\nStep 1539, loss: 0.0077062686905264854\r\nStep 1540, loss: 0.007754159159958363\r\nStep 1541, loss: 0.00818092841655016\r\nStep 1542, loss: 0.00720906862989068\r\nStep 1543, loss: 0.009623878635466099\r\nStep 1544, loss: 0.008170845918357372\r\nStep 1545, loss: 0.008935060352087021\r\nStep 1546, loss: 0.00749565614387393\r\nStep 1547, loss: 0.008688186295330524\r\nStep 1548, loss: 0.00886282417923212\r\nStep 1549, loss: 0.007526679430156946\r\nStep 1550, loss: 0.006964836735278368\r\nStep 1551, loss: 0.008580098859965801\r\nStep 1552, loss: 0.00906881783157587\r\nStep 1553, loss: 0.008019582368433475\r\nStep 1554, loss: 0.007595897652208805\r\nStep 1555, loss: 0.008318536914885044\r\nStep 1556, loss: 0.008528546430170536\r\nStep 1557, loss: 0.008759227581322193\r\nStep 1558, loss: 0.008838594891130924\r\nStep 1559, loss: 0.006689106114208698\r\nStep 1560, loss: 0.006475722882896662\r\nStep 1561, loss: 0.006674910895526409\r\nStep 1562, loss: 0.008995377458631992\r\nStep 1589, loss: 0.008712777867913246\r\nStep 1590, loss: 0.007497219368815422\r\nStep 1591, loss: 0.008043506182730198\r\nStep 1592, loss: 0.006751270033419132\r\nStep 1593, loss: 0.008551674894988537\r\nStep 1594, loss: 0.007587277330458164\r\nStep 1595, loss: 0.0077667939476668835\r\nStep 1596, loss: 0.008603490889072418\r\nStep 1597, loss: 0.007895995862782001\r\nStep 1598, loss: 0.00929008424282074\r\nStep 1599, loss: 0.009897058829665184\r\nStep 1600, 
loss: 0.007759366650134325\r\nStep 1601, loss: 0.008193353191018105\r\nStep 1602, loss: 0.007737089414149523\r\nStep 1603, loss: 0.006492974702268839\r\nStep 1604, loss: 0.008825495839118958\r\nStep 1605, loss: 0.008608585223555565\r\nStep 1606, loss: 0.007923366501927376\r\nStep 1607, loss: 0.00665367441251874\r\nStep 1608, loss: 0.00696725957095623\r\nStep 1609, loss: 0.006957065314054489\r\nStep 1610, loss: 0.00696487445384264\r\nStep 1611, loss: 0.007112245075404644\r\nStep 1612, loss: 0.006711696740239859\r\nStep 1613, loss: 0.00747537100687623\r\nStep 1614, loss: 0.008746178820729256\r\nStep 1615, loss: 0.008868562988936901\r\nStep 1670, loss: 0.008939513936638832\r\nStep 1671, loss: 0.00767308846116066\r\nStep 1672, loss: 0.006550142541527748\r\nStep 1673, loss: 0.006795782130211592\r\nStep 1674, loss: 0.008795519359409809\r\nStep 1675, loss: 0.008797761052846909\r\nStep 1676, loss: 0.006749370601028204\r\nStep 1677, loss: 0.007756649050861597\r\nStep 1678, loss: 0.008998443372547626\r\nStep 1679, loss: 0.007440191227942705\r\nStep 1680, loss: 0.007901267148554325\r\nStep 1681, loss: 0.008502920158207417\r\nStep 1682, loss: 0.008505829609930515\r\nStep 1683, loss: 0.006411525886505842\r\nStep 1684, loss: 0.008588363416492939\r\nStep 1685, loss: 0.00692173233255744\r\nStep 1686, loss: 0.007136263884603977\r\nStep 1687, loss: 0.007504676003009081\r\nStep 1688, loss: 0.007309808861464262\r\nStep 1689, loss: 0.008141886442899704\r\nStep 1690, loss: 0.007988592609763145\r\nStep 1691, loss: 0.008273339830338955\r\nStep 1692, loss: 0.006870232056826353\r\nStep 1693, loss: 0.0068495869636535645\r\nStep 1694, loss: 0.007694709580391645\r\nStep 1695, loss: 0.009599804878234863\r\nStep 1696, loss: 0.006903140340000391\r\nStep 1697, loss: 0.00847446359694004\r\nStep 1698, loss: 0.007219031453132629\r\nStep 1699, loss: 0.008547683246433735\r\nStep 1700, loss: 0.008718534372746944\r\nStep 1701, loss: 0.009107892401516438\r\nStep 1702, loss: 0.008554563857614994\r\nStep 1703, loss: 0.009697475470602512\r\nStep 1704, loss: 0.008394952863454819\r\nStep 1705, loss: 0.007848896086215973\r\nStep 1706, loss: 0.007483388297259808\r\nStep 1707, loss: 0.005993971601128578\r\nStep 1708, loss: 0.00765452953055501\r\nStep 1709, loss: 0.0065749953500926495\r\nStep 1710, loss: 0.007273221854120493\r\nStep 1711, loss: 0.006847929209470749\r\nStep 1712, loss: 0.006090110633522272\r\nStep 1713, loss: 0.007094529457390308\r\nStep 1714, loss: 0.008160137571394444\r\nStep 1715, loss: 0.007716684136539698\r\nStep 1716, loss: 0.007477016653865576\r\nStep 1717, loss: 0.009491547010838985\r\nStep 1718, loss: 0.007434906903654337\r\nStep 1719, loss: 0.008541461080312729\r\nStep 1720, loss: 0.007313624955713749\r\nStep 1721, loss: 0.00781959667801857\r\nStep 1563, loss: 0.009440598078072071\r\nStep 1564, loss: 0.007657089736312628\r\nStep 1565, loss: 0.006977619603276253\r\nStep 1566, loss: 0.006828180514276028\r\nStep 1567, loss: 0.006950976327061653\r\nStep 1568, loss: 0.005743010435253382\r\nStep 1569, loss: 0.007142997346818447\r\nStep 1570, loss: 0.008083772845566273\r\nStep 1571, loss: 0.008059779182076454\r\nStep 1572, loss: 0.008990857750177383\r\nStep 1573, loss: 0.007011147681623697\r\nStep 1574, loss: 0.007031850516796112\r\nStep 1575, loss: 0.007916480302810669\r\nStep 1576, loss: 0.006887739058583975\r\nStep 1577, loss: 0.006685403175652027\r\nStep 1578, loss: 0.008958718739449978\r\nStep 1579, loss: 0.008453777059912682\r\nStep 1580, loss: 0.008035468868911266\r\nStep 1581, loss: 0.008207778446376324\r\nStep 
1582, loss: 0.007178385276347399\r\nStep 1583, loss: 0.008020363748073578\r\nStep 1584, loss: 0.006817690096795559\r\nStep 1585, loss: 0.0069619882851839066\r\nStep 1586, loss: 0.007296544499695301\r\nStep 1587, loss: 0.007718900218605995\r\nStep 1588, loss: 0.00755611015483737\r\nStep 1616, loss: 0.007783957291394472\r\nStep 1617, loss: 0.007451385725289583\r\nStep 1618, loss: 0.00733732245862484\r\nStep 1619, loss: 0.008677585050463676\r\nStep 1620, loss: 0.006870839279145002\r\nStep 1621, loss: 0.008756528608500957\r\nStep 1622, loss: 0.008265580050647259\r\nStep 1623, loss: 0.007021893281489611\r\nStep 1624, loss: 0.00679453881457448\r\nStep 1625, loss: 0.00711372634395957\r\nStep 1626, loss: 0.008378872647881508\r\nStep 1627, loss: 0.009000335820019245\r\nStep 1628, loss: 0.007025549653917551\r\nStep 1629, loss: 0.008529024198651314\r\nStep 1630, loss: 0.008187185041606426\r\nStep 1631, loss: 0.009054225869476795\r\nStep 1632, loss: 0.008002746850252151\r\nStep 1633, loss: 0.008577891625463963\r\nStep 1634, loss: 0.00878284964710474\r\nStep 1635, loss: 0.008703123778104782\r\nStep 1636, loss: 0.008663534186780453\r\nStep 1637, loss: 0.008749880827963352\r\nStep 1638, loss: 0.00996770616620779\r\nStep 1639, loss: 0.008074612356722355\r\nStep 1640, loss: 0.009668786078691483\r\nStep 1641, loss: 0.010203409940004349\r\nStep 1642, loss: 0.009471501223742962\r\nStep 1696, loss: 0.006903140340000391\r\nStep 1697, loss: 0.00847446359694004\r\nStep 1698, loss: 0.007219031453132629\r\nStep 1699, loss: 0.008547683246433735\r\nStep 1700, loss: 0.008718534372746944\r\nStep 1701, loss: 0.009107892401516438\r\nStep 1702, loss: 0.008554563857614994\r\nStep 1703, loss: 0.009697475470602512\r\nStep 1704, loss: 0.008394952863454819\r\nStep 1705, loss: 0.007848896086215973\r\nStep 1706, loss: 0.007483388297259808\r\nStep 1707, loss: 0.005993971601128578\r\nStep 1708, loss: 0.00765452953055501\r\nStep 1709, loss: 0.0065749953500926495\r\nStep 1710, loss: 0.007273221854120493\r\nStep 1711, loss: 0.006847929209470749\r\nStep 1712, loss: 0.006090110633522272\r\nStep 1713, loss: 0.007094529457390308\r\nStep 1714, loss: 0.008160137571394444\r\nStep 1715, loss: 0.007716684136539698\r\nStep 1716, loss: 0.007477016653865576\r\nStep 1717, loss: 0.009491547010838985\r\nStep 1718, loss: 0.007434906903654337\r\nStep 1719, loss: 0.008541461080312729\r\nStep 1720, loss: 0.007313624955713749\r\nStep 1721, loss: 0.00781959667801857\r\nStep 1722, loss: 0.008003121241927147\r\nStep 1723, loss: 0.008829144760966301\r\nStep 1724, loss: 0.0074747479520738125\r\nStep 1725, loss: 0.007735308725386858\r\nStep 1726, loss: 0.008236423134803772\r\nStep 1727, loss: 0.007879878394305706\r\nStep 1728, loss: 0.009238236583769321\r\nStep 1729, loss: 0.009845210239291191\r\nStep 1730, loss: 0.008259519003331661\r\nStep 1731, loss: 0.007162179797887802\r\nStep 1732, loss: 0.008448491804301739\r\nStep 1733, loss: 0.008390865288674831\r\nStep 1734, loss: 0.006807361263781786\r\nStep 1735, loss: 0.008280194364488125\r\nStep 1736, loss: 0.006797317881137133\r\nStep 1737, loss: 0.007668196689337492\r\nStep 1738, loss: 0.007283037528395653\r\nStep 1739, loss: 0.00719024334102869\r\nStep 1740, loss: 0.007517328951507807\r\nStep 1741, loss: 0.0072776442393660545\r\nStep 1742, loss: 0.009215869009494781\r\nStep 1743, loss: 0.007592868525534868\r\nStep 1744, loss: 0.007078573107719421\r\nStep 1745, loss: 0.007053877227008343\r\nStep 1746, loss: 0.007802892010658979\r\nStep 1747, loss: 0.007612156216055155\r\nStep 1589, loss: 
0.008712777867913246\r\nStep 1590, loss: 0.007497219368815422\r\nStep 1591, loss: 0.008043506182730198\r\nStep 1592, loss: 0.006751270033419132\r\nStep 1593, loss: 0.008551674894988537\r\nStep 1594, loss: 0.007587277330458164\r\nStep 1595, loss: 0.0077667939476668835\r\nStep 1596, loss: 0.008603490889072418\r\nStep 1597, loss: 0.007895995862782001\r\nStep 1598, loss: 0.00929008424282074\r\nStep 1599, loss: 0.009897058829665184\r\nStep 1600, loss: 0.007759366650134325\r\nStep 1601, loss: 0.008193353191018105\r\nStep 1602, loss: 0.007737089414149523\r\nStep 1603, loss: 0.006492974702268839\r\nStep 1604, loss: 0.008825495839118958\r\nStep 1605, loss: 0.008608585223555565\r\nStep 1606, loss: 0.007923366501927376\r\nStep 1607, loss: 0.00665367441251874\r\nStep 1608, loss: 0.00696725957095623\r\nStep 1609, loss: 0.006957065314054489\r\nStep 1610, loss: 0.00696487445384264\r\nStep 1611, loss: 0.007112245075404644\r\nStep 1612, loss: 0.006711696740239859\r\nStep 1613, loss: 0.00747537100687623\r\nStep 1614, loss: 0.008746178820729256\r\nStep 1615, loss: 0.008868562988936901\r\nStep 1643, loss: 0.007027697283774614\r\nStep 1722, loss: 0.008003121241927147\r\nStep 1723, loss: 0.008829144760966301\r\nStep 1724, loss: 0.0074747479520738125\r\nStep 1725, loss: 0.007735308725386858\r\nStep 1726, loss: 0.008236423134803772\r\nStep 1727, loss: 0.007879878394305706\r\nStep 1728, loss: 0.009238236583769321\r\nStep 1729, loss: 0.009845210239291191\r\nStep 1730, loss: 0.008259519003331661\r\nStep 1731, loss: 0.007162179797887802\r\nStep 1732, loss: 0.008448491804301739\r\nStep 1733, loss: 0.008390865288674831\r\nStep 1734, loss: 0.006807361263781786\r\nStep 1735, loss: 0.008280194364488125\r\nStep 1736, loss: 0.006797317881137133\r\nStep 1737, loss: 0.007668196689337492\r\nStep 1738, loss: 0.007283037528395653\r\nStep 1739, loss: 0.00719024334102869\r\nStep 1740, loss: 0.007517328951507807\r\nStep 1741, loss: 0.0072776442393660545\r\nStep 1742, loss: 0.009215869009494781\r\nStep 1743, loss: 0.007592868525534868\r\nStep 1744, loss: 0.007078573107719421\r\nStep 1745, loss: 0.007053877227008343\r\nStep 1746, loss: 0.007802892010658979\r\nStep 1747, loss: 0.007612156216055155\r\nStep 1748, loss: 0.00802230928093195\r\nStep 1749, loss: 0.009651273488998413\r\nStep 1750, loss: 0.007929863408207893\r\nStep 1616, loss: 0.007783957291394472\r\nStep 1617, loss: 0.007451385725289583\r\nStep 1618, loss: 0.00733732245862484\r\nStep 1619, loss: 0.008677585050463676\r\nStep 1620, loss: 0.006870839279145002\r\nStep 1621, loss: 0.008756528608500957\r\nStep 1622, loss: 0.008265580050647259\r\nStep 1623, loss: 0.007021893281489611\r\nStep 1624, loss: 0.00679453881457448\r\nStep 1625, loss: 0.00711372634395957\r\nStep 1626, loss: 0.008378872647881508\r\nStep 1627, loss: 0.009000335820019245\r\nStep 1628, loss: 0.007025549653917551\r\nStep 1629, loss: 0.008529024198651314\r\nStep 1630, loss: 0.008187185041606426\r\nStep 1631, loss: 0.009054225869476795\r\nStep 1632, loss: 0.008002746850252151\r\nStep 1633, loss: 0.008577891625463963\r\nStep 1634, loss: 0.00878284964710474\r\nStep 1635, loss: 0.008703123778104782\r\nStep 1636, loss: 0.008663534186780453\r\nStep 1637, loss: 0.008749880827963352\r\nStep 1638, loss: 0.00996770616620779\r\nStep 1639, loss: 0.008074612356722355\r\nStep 1640, loss: 0.009668786078691483\r\nStep 1641, loss: 0.010203409940004349\r\nStep 1642, loss: 0.009471501223742962\r\nStep 1644, loss: 0.0074112084694206715\r\nStep 1645, loss: 0.008350981399416924\r\nStep 1646, loss: 0.007672564126551151\r\nStep 1647, 
loss: 0.007430119439959526\r\nStep 1648, loss: 0.008122129365801811\r\nStep 1649, loss: 0.006787996273487806\r\nStep 1650, loss: 0.007542396429926157\r\nStep 1651, loss: 0.00665294099599123\r\nStep 1652, loss: 0.006335591431707144\r\nStep 1653, loss: 0.007724990602582693\r\nStep 1654, loss: 0.008031480945646763\r\nStep 1655, loss: 0.007781133521348238\r\nStep 1656, loss: 0.007816992700099945\r\nStep 1657, loss: 0.008315594866871834\r\nStep 1658, loss: 0.008215798065066338\r\nStep 1659, loss: 0.008099790662527084\r\nStep 1660, loss: 0.006514322943985462\r\nStep 1661, loss: 0.007386818993836641\r\nStep 1662, loss: 0.008341656066477299\r\nStep 1663, loss: 0.008426396176218987\r\nStep 1664, loss: 0.008052345365285873\r\nStep 1665, loss: 0.007347847800701857\r\nStep 1666, loss: 0.007858898490667343\r\nStep 1667, loss: 0.007327028084546328\r\nStep 1668, loss: 0.008239862509071827\r\nStep 1669, loss: 0.007635383401066065\r\nStep 1748, loss: 0.00802230928093195\r\nStep 1749, loss: 0.009651273488998413\r\nStep 1750, loss: 0.007929863408207893\r\nStep 1644, loss: 0.0074112084694206715\r\nStep 1645, loss: 0.008350981399416924\r\nStep 1646, loss: 0.007672564126551151\r\nStep 1647, loss: 0.007430119439959526\r\nStep 1648, loss: 0.008122129365801811\r\nStep 1649, loss: 0.006787996273487806\r\nStep 1650, loss: 0.007542396429926157\r\nStep 1651, loss: 0.00665294099599123\r\nStep 1652, loss: 0.006335591431707144\r\nStep 1653, loss: 0.007724990602582693\r\nStep 1654, loss: 0.008031480945646763\r\nStep 1655, loss: 0.007781133521348238\r\nStep 1656, loss: 0.007816992700099945\r\nStep 1657, loss: 0.008315594866871834\r\nStep 1658, loss: 0.008215798065066338\r\nStep 1659, loss: 0.008099790662527084\r\nStep 1660, loss: 0.006514322943985462\r\nStep 1661, loss: 0.007386818993836641\r\nStep 1662, loss: 0.008341656066477299\r\nStep 1663, loss: 0.008426396176218987\r\nStep 1664, loss: 0.008052345365285873\r\nStep 1665, loss: 0.007347847800701857\r\nStep 1666, loss: 0.007858898490667343\r\nStep 1667, loss: 0.007327028084546328\r\nStep 1668, loss: 0.008239862509071827\r\nStep 1669, loss: 0.007635383401066065\r\nStep 1643, loss: 0.007027697283774614\r\nStep 1670, loss: 0.008939513936638832\r\nStep 1671, loss: 0.00767308846116066\r\nStep 1672, loss: 0.006550142541527748\r\nStep 1673, loss: 0.006795782130211592\r\nStep 1674, loss: 0.008795519359409809\r\nStep 1675, loss: 0.008797761052846909\r\nStep 1676, loss: 0.006749370601028204\r\nStep 1677, loss: 0.007756649050861597\r\nStep 1678, loss: 0.008998443372547626\r\nStep 1679, loss: 0.007440191227942705\r\nStep 1680, loss: 0.007901267148554325\r\nStep 1681, loss: 0.008502920158207417\r\nStep 1682, loss: 0.008505829609930515\r\nStep 1683, loss: 0.006411525886505842\r\nStep 1684, loss: 0.008588363416492939\r\nStep 1685, loss: 0.00692173233255744\r\nStep 1686, loss: 0.007136263884603977\r\nStep 1687, loss: 0.007504676003009081\r\nStep 1688, loss: 0.007309808861464262\r\nStep 1689, loss: 0.008141886442899704\r\nStep 1690, loss: 0.007988592609763145\r\nStep 1691, loss: 0.008273339830338955\r\nStep 1692, loss: 0.006870232056826353\r\nStep 1693, loss: 0.0068495869636535645\r\nStep 1694, loss: 0.007694709580391645\r\nStep 1695, loss: 0.009599804878234863\r\nStep 1751, loss: 0.007350419647991657\r\nStep 1670, loss: 0.008939513936638832\r\nStep 1671, loss: 0.00767308846116066\r\nStep 1672, loss: 0.006550142541527748\r\nStep 1673, loss: 0.006795782130211592\r\nStep 1674, loss: 0.008795519359409809\r\nStep 1675, loss: 0.008797761052846909\r\nStep 1676, loss: 
0.006749370601028204\r\nStep 1677, loss: 0.007756649050861597\r\nStep 1678, loss: 0.008998443372547626\r\nStep 1679, loss: 0.007440191227942705\r\nStep 1680, loss: 0.007901267148554325\r\nStep 1681, loss: 0.008502920158207417\r\nStep 1682, loss: 0.008505829609930515\r\nStep 1683, loss: 0.006411525886505842\r\nStep 1684, loss: 0.008588363416492939\r\nStep 1685, loss: 0.00692173233255744\r\nStep 1686, loss: 0.007136263884603977\r\nStep 1687, loss: 0.007504676003009081\r\nStep 1688, loss: 0.007309808861464262\r\nStep 1689, loss: 0.008141886442899704\r\nStep 1690, loss: 0.007988592609763145\r\nStep 1691, loss: 0.008273339830338955\r\nStep 1692, loss: 0.006870232056826353\r\nStep 1693, loss: 0.0068495869636535645\r\nStep 1694, loss: 0.007694709580391645\r\nStep 1695, loss: 0.009599804878234863\r\nStep 1644, loss: 0.0074112084694206715\r\nStep 1645, loss: 0.008350981399416924\r\nStep 1646, loss: 0.007672564126551151\r\nStep 1647, loss: 0.007430119439959526\r\nStep 1648, loss: 0.008122129365801811\r\nStep 1649, loss: 0.006787996273487806\r\nStep 1650, loss: 0.007542396429926157\r\nStep 1651, loss: 0.00665294099599123\r\nStep 1652, loss: 0.006335591431707144\r\nStep 1653, loss: 0.007724990602582693\r\nStep 1654, loss: 0.008031480945646763\r\nStep 1655, loss: 0.007781133521348238\r\nStep 1656, loss: 0.007816992700099945\r\nStep 1657, loss: 0.008315594866871834\r\nStep 1658, loss: 0.008215798065066338\r\nStep 1659, loss: 0.008099790662527084\r\nStep 1660, loss: 0.006514322943985462\r\nStep 1661, loss: 0.007386818993836641\r\nStep 1662, loss: 0.008341656066477299\r\nStep 1663, loss: 0.008426396176218987\r\nStep 1664, loss: 0.008052345365285873\r\nStep 1665, loss: 0.007347847800701857\r\nStep 1666, loss: 0.007858898490667343\r\nStep 1667, loss: 0.007327028084546328\r\nStep 1668, loss: 0.008239862509071827\r\nStep 1669, loss: 0.007635383401066065\r\nStep 1696, loss: 0.006903140340000391\r\nStep 1697, loss: 0.00847446359694004\r\nStep 1698, loss: 0.007219031453132629\r\nStep 1699, loss: 0.008547683246433735\r\nStep 1700, loss: 0.008718534372746944\r\nStep 1701, loss: 0.009107892401516438\r\nStep 1702, loss: 0.008554563857614994\r\nStep 1703, loss: 0.009697475470602512\r\nStep 1704, loss: 0.008394952863454819\r\nStep 1705, loss: 0.007848896086215973\r\nStep 1706, loss: 0.007483388297259808\r\nStep 1707, loss: 0.005993971601128578\r\nStep 1708, loss: 0.00765452953055501\r\nStep 1709, loss: 0.0065749953500926495\r\nStep 1710, loss: 0.007273221854120493\r\nStep 1711, loss: 0.006847929209470749\r\nStep 1712, loss: 0.006090110633522272\r\nStep 1713, loss: 0.007094529457390308\r\nStep 1714, loss: 0.008160137571394444\r\nStep 1715, loss: 0.007716684136539698\r\nStep 1716, loss: 0.007477016653865576\r\nStep 1717, loss: 0.009491547010838985\r\nStep 1718, loss: 0.007434906903654337\r\nStep 1719, loss: 0.008541461080312729\r\nStep 1720, loss: 0.007313624955713749\r\nStep 1721, loss: 0.00781959667801857\r\nStep 1751, loss: 0.007350419647991657\r\nStep 1696, loss: 0.006903140340000391\r\nStep 1697, loss: 0.00847446359694004\r\nStep 1698, loss: 0.007219031453132629\r\nStep 1699, loss: 0.008547683246433735\r\nStep 1700, loss: 0.008718534372746944\r\nStep 1701, loss: 0.009107892401516438\r\nStep 1702, loss: 0.008554563857614994\r\nStep 1703, loss: 0.009697475470602512\r\nStep 1704, loss: 0.008394952863454819\r\nStep 1705, loss: 0.007848896086215973\r\nStep 1706, loss: 0.007483388297259808\r\nStep 1707, loss: 0.005993971601128578\r\nStep 1708, loss: 0.00765452953055501\r\nStep 1709, loss: 0.0065749953500926495\r\nStep 
1710, loss: 0.007273221854120493\r\nStep 1711, loss: 0.006847929209470749\r\nStep 1712, loss: 0.006090110633522272\r\nStep 1713, loss: 0.007094529457390308\r\nStep 1714, loss: 0.008160137571394444\r\nStep 1715, loss: 0.007716684136539698\r\nStep 1716, loss: 0.007477016653865576\r\nStep 1717, loss: 0.009491547010838985\r\nStep 1718, loss: 0.007434906903654337\r\nStep 1719, loss: 0.008541461080312729\r\nStep 1720, loss: 0.007313624955713749\r\nStep 1721, loss: 0.00781959667801857\r\nStep 1670, loss: 0.008939513936638832\r\nStep 1671, loss: 0.00767308846116066\r\nStep 1672, loss: 0.006550142541527748\r\nStep 1673, loss: 0.006795782130211592\r\nStep 1674, loss: 0.008795519359409809\r\nStep 1675, loss: 0.008797761052846909\r\nStep 1676, loss: 0.006749370601028204\r\nStep 1677, loss: 0.007756649050861597\r\nStep 1678, loss: 0.008998443372547626\r\nStep 1679, loss: 0.007440191227942705\r\nStep 1680, loss: 0.007901267148554325\r\nStep 1681, loss: 0.008502920158207417\r\nStep 1682, loss: 0.008505829609930515\r\nStep 1683, loss: 0.006411525886505842\r\nStep 1684, loss: 0.008588363416492939\r\nStep 1685, loss: 0.00692173233255744\r\nStep 1686, loss: 0.007136263884603977\r\nStep 1687, loss: 0.007504676003009081\r\nStep 1688, loss: 0.007309808861464262\r\nStep 1689, loss: 0.008141886442899704\r\nStep 1690, loss: 0.007988592609763145\r\nStep 1691, loss: 0.008273339830338955\r\nStep 1692, loss: 0.006870232056826353\r\nStep 1693, loss: 0.0068495869636535645\r\nStep 1694, loss: 0.007694709580391645\r\nStep 1695, loss: 0.009599804878234863\r\nStep 1722, loss: 0.008003121241927147\r\nStep 1723, loss: 0.008829144760966301\r\nStep 1724, loss: 0.0074747479520738125\r\nStep 1725, loss: 0.007735308725386858\r\nStep 1726, loss: 0.008236423134803772\r\nStep 1727, loss: 0.007879878394305706\r\nStep 1728, loss: 0.009238236583769321\r\nStep 1729, loss: 0.009845210239291191\r\nStep 1730, loss: 0.008259519003331661\r\nStep 1731, loss: 0.007162179797887802\r\nStep 1732, loss: 0.008448491804301739\r\nStep 1733, loss: 0.008390865288674831\r\nStep 1734, loss: 0.006807361263781786\r\nStep 1735, loss: 0.008280194364488125\r\nStep 1736, loss: 0.006797317881137133\r\nStep 1737, loss: 0.007668196689337492\r\nStep 1738, loss: 0.007283037528395653\r\nStep 1739, loss: 0.00719024334102869\r\nStep 1740, loss: 0.007517328951507807\r\nStep 1741, loss: 0.0072776442393660545\r\nStep 1742, loss: 0.009215869009494781\r\nStep 1743, loss: 0.007592868525534868\r\nStep 1744, loss: 0.007078573107719421\r\nStep 1745, loss: 0.007053877227008343\r\nStep 1746, loss: 0.007802892010658979\r\nStep 1747, loss: 0.007612156216055155\r\nStep 1536, loss: 0.008187836036086082\r\nStep 1537, loss: 0.005992586724460125\r\nStep 1538, loss: 0.0075413235463202\r\nStep 1539, loss: 0.0077062686905264854\r\nStep 1540, loss: 0.007754159159958363\r\nStep 1541, loss: 0.00818092841655016\r\nStep 1542, loss: 0.00720906862989068\r\nStep 1543, loss: 0.009623878635466099\r\nStep 1544, loss: 0.008170845918357372\r\nStep 1545, loss: 0.008935060352087021\r\nStep 1546, loss: 0.00749565614387393\r\nStep 1547, loss: 0.008688186295330524\r\nStep 1548, loss: 0.00886282417923212\r\nStep 1549, loss: 0.007526679430156946\r\nStep 1550, loss: 0.006964836735278368\r\nStep 1551, loss: 0.008580098859965801\r\nStep 1552, loss: 0.00906881783157587\r\nStep 1553, loss: 0.008019582368433475\r\nStep 1554, loss: 0.007595897652208805\r\nStep 1555, loss: 0.008318536914885044\r\nStep 1556, loss: 0.008528546430170536\r\nStep 1557, loss: 0.008759227581322193\r\nStep 1558, loss: 
0.008838594891130924\r\nStep 1559, loss: 0.006689106114208698\r\nStep 1560, loss: 0.006475722882896662\r\nStep 1561, loss: 0.006674910895526409\r\nStep 1562, loss: 0.008995377458631992\r\nStep 1722, loss: 0.008003121241927147\r\nStep 1723, loss: 0.008829144760966301\r\nStep 1724, loss: 0.0074747479520738125\r\nStep 1725, loss: 0.007735308725386858\r\nStep 1726, loss: 0.008236423134803772\r\nStep 1727, loss: 0.007879878394305706\r\nStep 1728, loss: 0.009238236583769321\r\nStep 1729, loss: 0.009845210239291191\r\nStep 1730, loss: 0.008259519003331661\r\nStep 1731, loss: 0.007162179797887802\r\nStep 1732, loss: 0.008448491804301739\r\nStep 1733, loss: 0.008390865288674831\r\nStep 1734, loss: 0.006807361263781786\r\nStep 1735, loss: 0.008280194364488125\r\nStep 1736, loss: 0.006797317881137133\r\nStep 1737, loss: 0.007668196689337492\r\nStep 1738, loss: 0.007283037528395653\r\nStep 1739, loss: 0.00719024334102869\r\nStep 1740, loss: 0.007517328951507807\r\nStep 1741, loss: 0.0072776442393660545\r\nStep 1742, loss: 0.009215869009494781\r\nStep 1743, loss: 0.007592868525534868\r\nStep 1744, loss: 0.007078573107719421\r\nStep 1745, loss: 0.007053877227008343\r\nStep 1746, loss: 0.007802892010658979\r\nStep 1747, loss: 0.007612156216055155\r\nStep 1696, loss: 0.006903140340000391\r\nStep 1697, loss: 0.00847446359694004\r\nStep 1698, loss: 0.007219031453132629\r\nStep 1699, loss: 0.008547683246433735\r\nStep 1700, loss: 0.008718534372746944\r\nStep 1701, loss: 0.009107892401516438\r\nStep 1702, loss: 0.008554563857614994\r\nStep 1703, loss: 0.009697475470602512\r\nStep 1704, loss: 0.008394952863454819\r\nStep 1705, loss: 0.007848896086215973\r\nStep 1706, loss: 0.007483388297259808\r\nStep 1707, loss: 0.005993971601128578\r\nStep 1708, loss: 0.00765452953055501\r\nStep 1709, loss: 0.0065749953500926495\r\nStep 1710, loss: 0.007273221854120493\r\nStep 1711, loss: 0.006847929209470749\r\nStep 1712, loss: 0.006090110633522272\r\nStep 1713, loss: 0.007094529457390308\r\nStep 1714, loss: 0.008160137571394444\r\nStep 1715, loss: 0.007716684136539698\r\nStep 1716, loss: 0.007477016653865576\r\nStep 1717, loss: 0.009491547010838985\r\nStep 1718, loss: 0.007434906903654337\r\nStep 1719, loss: 0.008541461080312729\r\nStep 1720, loss: 0.007313624955713749\r\nStep 1721, loss: 0.00781959667801857\r\nStep 1748, loss: 0.00802230928093195\r\nStep 1749, loss: 0.009651273488998413\r\nStep 1750, loss: 0.007929863408207893\r\nStep 1748, loss: 0.00802230928093195\r\nStep 1749, loss: 0.009651273488998413\r\nStep 1750, loss: 0.007929863408207893\r\nStep 1722, loss: 0.008003121241927147\r\nStep 1723, loss: 0.008829144760966301\r\nStep 1724, loss: 0.0074747479520738125\r\nStep 1725, loss: 0.007735308725386858\r\nStep 1726, loss: 0.008236423134803772\r\nStep 1727, loss: 0.007879878394305706\r\nStep 1728, loss: 0.009238236583769321\r\nStep 1729, loss: 0.009845210239291191\r\nStep 1730, loss: 0.008259519003331661\r\nStep 1731, loss: 0.007162179797887802\r\nStep 1732, loss: 0.008448491804301739\r\nStep 1733, loss: 0.008390865288674831\r\nStep 1734, loss: 0.006807361263781786\r\nStep 1735, loss: 0.008280194364488125\r\nStep 1736, loss: 0.006797317881137133\r\nStep 1737, loss: 0.007668196689337492\r\nStep 1738, loss: 0.007283037528395653\r\nStep 1739, loss: 0.00719024334102869\r\nStep 1740, loss: 0.007517328951507807\r\nStep 1741, loss: 0.0072776442393660545\r\nStep 1742, loss: 0.009215869009494781\r\nStep 1743, loss: 0.007592868525534868\r\nStep 1744, loss: 0.007078573107719421\r\nStep 1745, loss: 0.007053877227008343\r\nStep 
1746, loss: 0.007802892010658979\r\nStep 1747, loss: 0.007612156216055155\r\nStep 1748, loss: 0.00802230928093195\r\nStep 1749, loss: 0.009651273488998413\r\nStep 1750, loss: 0.007929863408207893\r\nStep 1751, loss: 0.007350419647991657\r\n[terminal redraw residue: the Step 1536-1751 loss log was re-echoed and interleaved several times here; unique losses range from ~0.0057 (Step 1568) to ~0.0102 (Step 1641)]\r\n",,terminal_output
+1123,2509461,"TERMINAL",0,0,"Step 1688, loss: 0.007309808861464262\r\nStep 1689, loss: 0.008141886442899704\r\nStep 1690, loss: 0.007988592609763145\r\nStep 1691, loss: 0.008273339830338955\r\nStep 1692, loss: 0.006870232056826353\r\nStep 1693, loss: 0.0068495869636535645\r\nStep 1694, loss: 0.007694709580391645\r\nStep 1695, loss: 0.009599804878234863\r\nStep 1748, loss: 0.00802230928093195\r\nStep 1749, loss: 0.009651273488998413\r\nStep 1750, loss: 0.007929863408207893\r\nStep 1696, loss: 0.006903140340000391\r\nStep 1697, loss: 0.00847446359694004\r\nStep 1698, loss: 0.007219031453132629\r\nStep 1699, loss: 0.008547683246433735\r\nStep 1700, loss: 0.008718534372746944\r\nStep 1701, loss: 0.009107892401516438\r\nStep 1702, loss: 0.008554563857614994\r\nStep 1703, loss: 0.009697475470602512\r\nStep 1704, loss: 0.008394952863454819\r\nStep 1705, loss: 0.007848896086215973\r\nStep 1706, loss: 0.007483388297259808\r\nStep 1707, loss: 0.005993971601128578\r\nStep 1708, loss: 0.00765452953055501\r\nStep 1709, loss: 0.0065749953500926495\r\nStep 1710, loss: 0.007273221854120493\r\nStep 1711, loss: 0.006847929209470749\r\nStep 1712, loss: 0.006090110633522272\r\nStep 1713, loss: 0.007094529457390308\r\nStep 1714, loss: 0.008160137571394444\r\nStep 1715, loss: 0.007716684136539698\r\nStep 1716, loss: 0.007477016653865576\r\nStep 1717, loss: 0.009491547010838985\r\nStep 1718, loss: 0.007434906903654337\r\nStep 1719, loss: 0.008541461080312729\r\nStep 1720, loss: 0.007313624955713749\r\nStep 1721, loss: 0.00781959667801857\r\nStep 1536, loss: 0.008187836036086082\r\nStep 1537, loss: 0.005992586724460125\r\nStep 1538, loss: 0.0075413235463202\r\nStep 1539, loss: 0.0077062686905264854\r\nStep 1540, loss: 0.007754159159958363\r\nStep 1541, loss: 0.00818092841655016\r\nStep 1542, loss: 0.00720906862989068\r\nStep 1543, loss: 0.009623878635466099\r\nStep 1544, loss: 0.008170845918357372\r\nStep 1545, loss: 0.008935060352087021\r\nStep 1546, loss: 0.00749565614387393\r\nStep 1547, loss: 0.008688186295330524\r\nStep 1548, loss: 0.00886282417923212\r\nStep 1549, loss: 0.007526679430156946\r\nStep 1550, loss: 0.006964836735278368\r\nStep 1551, loss: 0.008580098859965801\r\nStep 1552, loss: 0.00906881783157587\r\nStep 1553, loss: 0.008019582368433475\r\nStep 1554, loss: 0.007595897652208805\r\nStep 1555, loss: 0.008318536914885044\r\nStep 1556, loss: 0.008528546430170536\r\nStep 1557, loss: 0.008759227581322193\r\nStep 1558, loss: 0.008838594891130924\r\nStep 1559, loss: 0.006689106114208698\r\nStep 1560, loss: 0.006475722882896662\r\nStep 1561, loss: 0.006674910895526409\r\nStep 1562, loss: 0.008995377458631992\r\nStep 1722, loss: 0.008003121241927147\r\nStep 1723, loss: 0.008829144760966301\r\nStep 1724, loss: 0.0074747479520738125\r\nStep 1725, loss: 0.007735308725386858\r\nStep 1726, loss: 0.008236423134803772\r\nStep 1727, loss: 0.007879878394305706\r\nStep 1728, loss: 0.009238236583769321\r\nStep 1729, loss: 0.009845210239291191\r\nStep 1730, loss: 0.008259519003331661\r\nStep 1731, loss: 0.007162179797887802\r\nStep 1732, loss: 0.008448491804301739\r\nStep 1733, loss: 0.008390865288674831\r\nStep 1734, loss: 0.006807361263781786\r\nStep 1735, loss: 0.008280194364488125\r\nStep 1736, loss: 0.006797317881137133\r\nStep 1737, loss: 0.007668196689337492\r\nStep 1738, loss: 0.007283037528395653\r\nStep 1739, loss: 0.00719024334102869\r\nStep 1740, loss: 0.007517328951507807\r\nStep 1741, loss: 0.0072776442393660545\r\nStep 1742, loss: 0.009215869009494781\r\nStep 1743, loss: 0.007592868525534868\r\nStep 
1744, loss: 0.007078573107719421\r\nStep 1745, loss: 0.007053877227008343\r\nStep 1746, loss: 0.007802892010658979\r\nStep 1747, loss: 0.007612156216055155\r\nStep 1563, loss: 0.009440598078072071\r\nStep 1564, loss: 0.007657089736312628\r\nStep 1565, loss: 0.006977619603276253\r\nStep 1566, loss: 0.006828180514276028\r\nStep 1567, loss: 0.006950976327061653\r\nStep 1568, loss: 0.005743010435253382\r\nStep 1569, loss: 0.007142997346818447\r\nStep 1570, loss: 0.008083772845566273\r\nStep 1571, loss: 0.008059779182076454\r\nStep 1572, loss: 0.008990857750177383\r\nStep 1573, loss: 0.007011147681623697\r\nStep 1574, loss: 0.007031850516796112\r\nStep 1575, loss: 0.007916480302810669\r\nStep 1576, loss: 0.006887739058583975\r\nStep 1577, loss: 0.006685403175652027\r\nStep 1578, loss: 0.008958718739449978\r\nStep 1579, loss: 0.008453777059912682\r\nStep 1580, loss: 0.008035468868911266\r\nStep 1581, loss: 0.008207778446376324\r\nStep 1582, loss: 0.007178385276347399\r\nStep 1583, loss: 0.008020363748073578\r\nStep 1584, loss: 0.006817690096795559\r\nStep 1585, loss: 0.0069619882851839066\r\nStep 1586, loss: 0.007296544499695301\r\nStep 1587, loss: 0.007718900218605995\r\nStep 1588, loss: 0.00755611015483737\r\nStep 1748, loss: 0.00802230928093195\r\nStep 1749, loss: 0.009651273488998413\r\nStep 1750, loss: 0.007929863408207893\r\nStep 1589, loss: 0.008712777867913246\r\nStep 1590, loss: 0.007497219368815422\r\nStep 1591, loss: 0.008043506182730198\r\nStep 1592, loss: 0.006751270033419132\r\nStep 1593, loss: 0.008551674894988537\r\nStep 1594, loss: 0.007587277330458164\r\nStep 1595, loss: 0.0077667939476668835\r\nStep 1596, loss: 0.008603490889072418\r\nStep 1597, loss: 0.007895995862782001\r\nStep 1598, loss: 0.00929008424282074\r\nStep 1599, loss: 0.009897058829665184\r\nStep 1600, loss: 0.007759366650134325\r\nStep 1601, loss: 0.008193353191018105\r\nStep 1602, loss: 0.007737089414149523\r\nStep 1603, loss: 0.006492974702268839\r\nStep 1604, loss: 0.008825495839118958\r\nStep 1605, loss: 0.008608585223555565\r\nStep 1606, loss: 0.007923366501927376\r\nStep 1607, loss: 0.00665367441251874\r\nStep 1608, loss: 0.00696725957095623\r\nStep 1609, loss: 0.006957065314054489\r\nStep 1610, loss: 0.00696487445384264\r\nStep 1611, loss: 0.007112245075404644\r\nStep 1612, loss: 0.006711696740239859\r\nStep 1613, loss: 0.00747537100687623\r\nStep 1614, loss: 0.008746178820729256\r\nStep 1615, loss: 0.008868562988936901\r\nStep 1751, loss: 0.007350419647991657\r\nStep 1616, loss: 0.007783957291394472\r\nStep 1617, loss: 0.007451385725289583\r\nStep 1618, loss: 0.00733732245862484\r\nStep 1619, loss: 0.008677585050463676\r\nStep 1620, loss: 0.006870839279145002\r\nStep 1621, loss: 0.008756528608500957\r\nStep 1622, loss: 0.008265580050647259\r\nStep 1623, loss: 0.007021893281489611\r\nStep 1624, loss: 0.00679453881457448\r\nStep 1625, loss: 0.00711372634395957\r\nStep 1626, loss: 0.008378872647881508\r\nStep 1627, loss: 0.009000335820019245\r\nStep 1628, loss: 0.007025549653917551\r\nStep 1629, loss: 0.008529024198651314\r\nStep 1630, loss: 0.008187185041606426\r\nStep 1631, loss: 0.009054225869476795\r\nStep 1632, loss: 0.008002746850252151\r\nStep 1633, loss: 0.008577891625463963\r\nStep 1634, loss: 0.00878284964710474\r\nStep 1635, loss: 0.008703123778104782\r\nStep 1636, loss: 0.008663534186780453\r\nStep 1637, loss: 0.008749880827963352\r\nStep 1638, loss: 0.00996770616620779\r\nStep 1639, loss: 0.008074612356722355\r\nStep 1640, loss: 0.009668786078691483\r\nStep 1641, loss: 
0.010203409940004349\r\nStep 1642, loss: 0.009471501223742962\r\nStep 1536, loss: 0.008187836036086082\r\nStep 1537, loss: 0.005992586724460125\r\nStep 1538, loss: 0.0075413235463202\r\nStep 1539, loss: 0.0077062686905264854\r\nStep 1540, loss: 0.007754159159958363\r\nStep 1541, loss: 0.00818092841655016\r\nStep 1542, loss: 0.00720906862989068\r\nStep 1543, loss: 0.009623878635466099\r\nStep 1544, loss: 0.008170845918357372\r\nStep 1545, loss: 0.008935060352087021\r\nStep 1546, loss: 0.00749565614387393\r\nStep 1547, loss: 0.008688186295330524\r\nStep 1548, loss: 0.00886282417923212\r\nStep 1549, loss: 0.007526679430156946\r\nStep 1550, loss: 0.006964836735278368\r\nStep 1551, loss: 0.008580098859965801\r\nStep 1552, loss: 0.00906881783157587\r\nStep 1553, loss: 0.008019582368433475\r\nStep 1554, loss: 0.007595897652208805\r\nStep 1555, loss: 0.008318536914885044\r\nStep 1556, loss: 0.008528546430170536\r\nStep 1557, loss: 0.008759227581322193\r\nStep 1558, loss: 0.008838594891130924\r\nStep 1559, loss: 0.006689106114208698\r\nStep 1560, loss: 0.006475722882896662\r\nStep 1561, loss: 0.006674910895526409\r\nStep 1562, loss: 0.008995377458631992\r\nStep 1643, loss: 0.007027697283774614\r\nStep 1563, loss: 0.009440598078072071\r\nStep 1564, loss: 0.007657089736312628\r\nStep 1565, loss: 0.006977619603276253\r\nStep 1566, loss: 0.006828180514276028\r\nStep 1567, loss: 0.006950976327061653\r\nStep 1568, loss: 0.005743010435253382\r\nStep 1569, loss: 0.007142997346818447\r\nStep 1570, loss: 0.008083772845566273\r\nStep 1571, loss: 0.008059779182076454\r\nStep 1572, loss: 0.008990857750177383\r\nStep 1573, loss: 0.007011147681623697\r\nStep 1574, loss: 0.007031850516796112\r\nStep 1575, loss: 0.007916480302810669\r\nStep 1576, loss: 0.006887739058583975\r\nStep 1577, loss: 0.006685403175652027\r\nStep 1578, loss: 0.008958718739449978\r\nStep 1579, loss: 0.008453777059912682\r\nStep 1580, loss: 0.008035468868911266\r\nStep 1581, loss: 0.008207778446376324\r\nStep 1582, loss: 0.007178385276347399\r\nStep 1583, loss: 0.008020363748073578\r\nStep 1584, loss: 0.006817690096795559\r\nStep 1585, loss: 0.0069619882851839066\r\nStep 1586, loss: 0.007296544499695301\r\nStep 1587, loss: 0.007718900218605995\r\nStep 1588, loss: 0.00755611015483737\r\nStep 1751, loss: 0.007350419647991657\r\nStep 1589, loss: 0.008712777867913246\r\nStep 1590, loss: 0.007497219368815422\r\nStep 1591, loss: 0.008043506182730198\r\nStep 1592, loss: 0.006751270033419132\r\nStep 1593, loss: 0.008551674894988537\r\nStep 1594, loss: 0.007587277330458164\r\nStep 1595, loss: 0.0077667939476668835\r\nStep 1596, loss: 0.008603490889072418\r\nStep 1597, loss: 0.007895995862782001\r\nStep 1598, loss: 0.00929008424282074\r\nStep 1599, loss: 0.009897058829665184\r\nStep 1600, loss: 0.007759366650134325\r\nStep 1601, loss: 0.008193353191018105\r\nStep 1602, loss: 0.007737089414149523\r\nStep 1603, loss: 0.006492974702268839\r\nStep 1604, loss: 0.008825495839118958\r\nStep 1605, loss: 0.008608585223555565\r\nStep 1606, loss: 0.007923366501927376\r\nStep 1607, loss: 0.00665367441251874\r\nStep 1608, loss: 0.00696725957095623\r\nStep 1609, loss: 0.006957065314054489\r\nStep 1610, loss: 0.00696487445384264\r\nStep 1611, loss: 0.007112245075404644\r\nStep 1612, loss: 0.006711696740239859\r\nStep 1613, loss: 0.00747537100687623\r\nStep 1614, loss: 0.008746178820729256\r\nStep 1615, loss: 0.008868562988936901\r\nStep 1644, loss: 0.0074112084694206715\r\nStep 1645, loss: 0.008350981399416924\r\nStep 1646, loss: 0.007672564126551151\r\nStep 1647, 
loss: 0.007430119439959526\r\nStep 1648, loss: 0.008122129365801811\r\nStep 1649, loss: 0.006787996273487806\r\nStep 1650, loss: 0.007542396429926157\r\nStep 1651, loss: 0.00665294099599123\r\nStep 1652, loss: 0.006335591431707144\r\nStep 1653, loss: 0.007724990602582693\r\nStep 1654, loss: 0.008031480945646763\r\nStep 1655, loss: 0.007781133521348238\r\nStep 1656, loss: 0.007816992700099945\r\nStep 1657, loss: 0.008315594866871834\r\nStep 1658, loss: 0.008215798065066338\r\nStep 1659, loss: 0.008099790662527084\r\nStep 1660, loss: 0.006514322943985462\r\nStep 1661, loss: 0.007386818993836641\r\nStep 1662, loss: 0.008341656066477299\r\nStep 1663, loss: 0.008426396176218987\r\nStep 1664, loss: 0.008052345365285873\r\nStep 1665, loss: 0.007347847800701857\r\nStep 1666, loss: 0.007858898490667343\r\nStep 1667, loss: 0.007327028084546328\r\nStep 1668, loss: 0.008239862509071827\r\nStep 1669, loss: 0.007635383401066065\r\nStep 1616, loss: 0.007783957291394472\r\nStep 1617, loss: 0.007451385725289583\r\nStep 1618, loss: 0.00733732245862484\r\nStep 1619, loss: 0.008677585050463676\r\nStep 1620, loss: 0.006870839279145002\r\nStep 1621, loss: 0.008756528608500957\r\nStep 1622, loss: 0.008265580050647259\r\nStep 1623, loss: 0.007021893281489611\r\nStep 1624, loss: 0.00679453881457448\r\nStep 1625, loss: 0.00711372634395957\r\nStep 1626, loss: 0.008378872647881508\r\nStep 1627, loss: 0.009000335820019245\r\nStep 1628, loss: 0.007025549653917551\r\nStep 1629, loss: 0.008529024198651314\r\nStep 1630, loss: 0.008187185041606426\r\nStep 1631, loss: 0.009054225869476795\r\nStep 1632, loss: 0.008002746850252151\r\nStep 1633, loss: 0.008577891625463963\r\nStep 1634, loss: 0.00878284964710474\r\nStep 1635, loss: 0.008703123778104782\r\nStep 1636, loss: 0.008663534186780453\r\nStep 1637, loss: 0.008749880827963352\r\nStep 1638, loss: 0.00996770616620779\r\nStep 1639, loss: 0.008074612356722355\r\nStep 1640, loss: 0.009668786078691483\r\nStep 1641, loss: 0.010203409940004349\r\nStep 1642, loss: 0.009471501223742962\r\nStep 1670, loss: 0.008939513936638832\r\nStep 1671, loss: 0.00767308846116066\r\nStep 1672, loss: 0.006550142541527748\r\nStep 1673, loss: 0.006795782130211592\r\nStep 1674, loss: 0.008795519359409809\r\nStep 1675, loss: 0.008797761052846909\r\nStep 1676, loss: 0.006749370601028204\r\nStep 1677, loss: 0.007756649050861597\r\nStep 1678, loss: 0.008998443372547626\r\nStep 1679, loss: 0.007440191227942705\r\nStep 1680, loss: 0.007901267148554325\r\nStep 1681, loss: 0.008502920158207417\r\nStep 1682, loss: 0.008505829609930515\r\nStep 1683, loss: 0.006411525886505842\r\nStep 1684, loss: 0.008588363416492939\r\nStep 1685, loss: 0.00692173233255744\r\nStep 1686, loss: 0.007136263884603977\r\nStep 1687, loss: 0.007504676003009081\r\nStep 1688, loss: 0.007309808861464262\r\nStep 1689, loss: 0.008141886442899704\r\nStep 1690, loss: 0.007988592609763145\r\nStep 1691, loss: 0.008273339830338955\r\nStep 1692, loss: 0.006870232056826353\r\nStep 1693, loss: 0.0068495869636535645\r\nStep 1694, loss: 0.007694709580391645\r\nStep 1695, loss: 0.009599804878234863\r\nStep 1643, loss: 0.007027697283774614\r\nStep 1696, loss: 0.006903140340000391\r\nStep 1697, loss: 0.00847446359694004\r\nStep 1698, loss: 0.007219031453132629\r\nStep 1699, loss: 0.008547683246433735\r\nStep 1700, loss: 0.008718534372746944\r\nStep 1701, loss: 0.009107892401516438\r\nStep 1702, loss: 0.008554563857614994\r\nStep 1703, loss: 0.009697475470602512\r\nStep 1704, loss: 0.008394952863454819\r\nStep 1705, loss: 0.007848896086215973\r\nStep 
1706, loss: 0.007483388297259808\r\nStep 1707, loss: 0.005993971601128578\r\nStep 1708, loss: 0.00765452953055501\r\nStep 1709, loss: 0.0065749953500926495\r\nStep 1710, loss: 0.007273221854120493\r\nStep 1711, loss: 0.006847929209470749\r\nStep 1712, loss: 0.006090110633522272\r\nStep 1713, loss: 0.007094529457390308\r\nStep 1714, loss: 0.008160137571394444\r\nStep 1715, loss: 0.007716684136539698\r\nStep 1716, loss: 0.007477016653865576\r\nStep 1717, loss: 0.009491547010838985\r\nStep 1718, loss: 0.007434906903654337\r\nStep 1719, loss: 0.008541461080312729\r\nStep 1720, loss: 0.007313624955713749\r\nStep 1721, loss: 0.00781959667801857\r\nStep 1644, loss: 0.0074112084694206715\r\nStep 1645, loss: 0.008350981399416924\r\nStep 1646, loss: 0.007672564126551151\r\nStep 1647, loss: 0.007430119439959526\r\nStep 1648, loss: 0.008122129365801811\r\nStep 1649, loss: 0.006787996273487806\r\nStep 1650, loss: 0.007542396429926157\r\nStep 1651, loss: 0.00665294099599123\r\nStep 1652, loss: 0.006335591431707144\r\nStep 1653, loss: 0.007724990602582693\r\nStep 1654, loss: 0.008031480945646763\r\nStep 1655, loss: 0.007781133521348238\r\nStep 1656, loss: 0.007816992700099945\r\nStep 1657, loss: 0.008315594866871834\r\nStep 1658, loss: 0.008215798065066338\r\nStep 1659, loss: 0.008099790662527084\r\nStep 1660, loss: 0.006514322943985462\r\nStep 1661, loss: 0.007386818993836641\r\nStep 1662, loss: 0.008341656066477299\r\nStep 1663, loss: 0.008426396176218987\r\nStep 1664, loss: 0.008052345365285873\r\nStep 1665, loss: 0.007347847800701857\r\nStep 1666, loss: 0.007858898490667343\r\nStep 1667, loss: 0.007327028084546328\r\nStep 1668, loss: 0.008239862509071827\r\nStep 1669, loss: 0.007635383401066065\r\nStep 1722, loss: 0.008003121241927147\r\nStep 1723, loss: 0.008829144760966301\r\nStep 1724, loss: 0.0074747479520738125\r\nStep 1725, loss: 0.007735308725386858\r\nStep 1726, loss: 0.008236423134803772\r\nStep 1727, loss: 0.007879878394305706\r\nStep 1728, loss: 0.009238236583769321\r\nStep 1729, loss: 0.009845210239291191\r\nStep 1730, loss: 0.008259519003331661\r\nStep 1731, loss: 0.007162179797887802\r\nStep 1732, loss: 0.008448491804301739\r\nStep 1733, loss: 0.008390865288674831\r\nStep 1734, loss: 0.006807361263781786\r\nStep 1735, loss: 0.008280194364488125\r\nStep 1736, loss: 0.006797317881137133\r\nStep 1737, loss: 0.007668196689337492\r\nStep 1738, loss: 0.007283037528395653\r\nStep 1739, loss: 0.00719024334102869\r\nStep 1740, loss: 0.007517328951507807\r\nStep 1741, loss: 0.0072776442393660545\r\nStep 1742, loss: 0.009215869009494781\r\nStep 1743, loss: 0.007592868525534868\r\nStep 1744, loss: 0.007078573107719421\r\nStep 1745, loss: 0.007053877227008343\r\nStep 1746, loss: 0.007802892010658979\r\nStep 1747, loss: 0.007612156216055155\r\nStep 1670, loss: 0.008939513936638832\r\nStep 1671, loss: 0.00767308846116066\r\nStep 1672, loss: 0.006550142541527748\r\nStep 1673, loss: 0.006795782130211592\r\nStep 1674, loss: 0.008795519359409809\r\nStep 1675, loss: 0.008797761052846909\r\nStep 1676, loss: 0.006749370601028204\r\nStep 1677, loss: 0.007756649050861597\r\nStep 1678, loss: 0.008998443372547626\r\nStep 1679, loss: 0.007440191227942705\r\nStep 1680, loss: 0.007901267148554325\r\nStep 1681, loss: 0.008502920158207417\r\nStep 1682, loss: 0.008505829609930515\r\nStep 1683, loss: 0.006411525886505842\r\nStep 1684, loss: 0.008588363416492939\r\nStep 1685, loss: 0.00692173233255744\r\nStep 1686, loss: 0.007136263884603977\r\nStep 1687, loss: 0.007504676003009081\r\nStep 1688, loss: 
0.007309808861464262\r\nStep 1689, loss: 0.008141886442899704\r\nStep 1690, loss: 0.007988592609763145\r\nStep 1691, loss: 0.008273339830338955\r\nStep 1692, loss: 0.006870232056826353\r\nStep 1693, loss: 0.0068495869636535645\r\nStep 1694, loss: 0.007694709580391645\r\nStep 1695, loss: 0.009599804878234863\r\nStep 1536, loss: 0.008187836036086082\r\nStep 1537, loss: 0.005992586724460125\r\nStep 1538, loss: 0.0075413235463202\r\nStep 1539, loss: 0.0077062686905264854\r\nStep 1540, loss: 0.007754159159958363\r\nStep 1541, loss: 0.00818092841655016\r\nStep 1542, loss: 0.00720906862989068\r\nStep 1543, loss: 0.009623878635466099\r\nStep 1544, loss: 0.008170845918357372\r\nStep 1545, loss: 0.008935060352087021\r\nStep 1546, loss: 0.00749565614387393\r\nStep 1547, loss: 0.008688186295330524\r\nStep 1548, loss: 0.00886282417923212\r\nStep 1549, loss: 0.007526679430156946\r\nStep 1550, loss: 0.006964836735278368\r\nStep 1551, loss: 0.008580098859965801\r\nStep 1552, loss: 0.00906881783157587\r\nStep 1553, loss: 0.008019582368433475\r\nStep 1554, loss: 0.007595897652208805\r\nStep 1555, loss: 0.008318536914885044\r\nStep 1556, loss: 0.008528546430170536\r\nStep 1557, loss: 0.008759227581322193\r\nStep 1558, loss: 0.008838594891130924\r\nStep 1559, loss: 0.006689106114208698\r\nStep 1560, loss: 0.006475722882896662\r\nStep 1561, loss: 0.006674910895526409\r\nStep 1562, loss: 0.008995377458631992\r\nStep 1748, loss: 0.00802230928093195\r\nStep 1749, loss: 0.009651273488998413\r\nStep 1750, loss: 0.007929863408207893\r\nStep 1696, loss: 0.006903140340000391\r\nStep 1697, loss: 0.00847446359694004\r\nStep 1698, loss: 0.007219031453132629\r\nStep 1699, loss: 0.008547683246433735\r\nStep 1700, loss: 0.008718534372746944\r\nStep 1701, loss: 0.009107892401516438\r\nStep 1702, loss: 0.008554563857614994\r\nStep 1703, loss: 0.009697475470602512\r\nStep 1704, loss: 0.008394952863454819\r\nStep 1705, loss: 0.007848896086215973\r\nStep 1706, loss: 0.007483388297259808\r\nStep 1707, loss: 0.005993971601128578\r\nStep 1708, loss: 0.00765452953055501\r\nStep 1709, loss: 0.0065749953500926495\r\nStep 1710, loss: 0.007273221854120493\r\nStep 1711, loss: 0.006847929209470749\r\nStep 1712, loss: 0.006090110633522272\r\nStep 1713, loss: 0.007094529457390308\r\nStep 1714, loss: 0.008160137571394444\r\nStep 1715, loss: 0.007716684136539698\r\nStep 1716, loss: 0.007477016653865576\r\nStep 1717, loss: 0.009491547010838985\r\nStep 1718, loss: 0.007434906903654337\r\nStep 1719, loss: 0.008541461080312729\r\nStep 1720, loss: 0.007313624955713749\r\nStep 1721, loss: 0.00781959667801857\r\nStep 1563, loss: 0.009440598078072071\r\nStep 1564, loss: 0.007657089736312628\r\nStep 1565, loss: 0.006977619603276253\r\nStep 1566, loss: 0.006828180514276028\r\nStep 1567, loss: 0.006950976327061653\r\nStep 1568, loss: 0.005743010435253382\r\nStep 1569, loss: 0.007142997346818447\r\nStep 1570, loss: 0.008083772845566273\r\nStep 1571, loss: 0.008059779182076454\r\nStep 1572, loss: 0.008990857750177383\r\nStep 1573, loss: 0.007011147681623697\r\nStep 1574, loss: 0.007031850516796112\r\nStep 1575, loss: 0.007916480302810669\r\nStep 1576, loss: 0.006887739058583975\r\nStep 1577, loss: 0.006685403175652027\r\nStep 1578, loss: 0.008958718739449978\r\nStep 1579, loss: 0.008453777059912682\r\nStep 1580, loss: 0.008035468868911266\r\nStep 1581, loss: 0.008207778446376324\r\nStep 1582, loss: 0.007178385276347399\r\nStep 1583, loss: 0.008020363748073578\r\nStep 1584, loss: 0.006817690096795559\r\nStep 1585, loss: 0.0069619882851839066\r\nStep 1586, 
loss: 0.007296544499695301\r\nStep 1587, loss: 0.007718900218605995\r\nStep 1588, loss: 0.00755611015483737\r\nStep 1751, loss: 0.007350419647991657\r\nStep 1722, loss: 0.008003121241927147\r\nStep 1723, loss: 0.008829144760966301\r\nStep 1724, loss: 0.0074747479520738125\r\nStep 1725, loss: 0.007735308725386858\r\nStep 1726, loss: 0.008236423134803772\r\nStep 1727, loss: 0.007879878394305706\r\nStep 1728, loss: 0.009238236583769321\r\nStep 1729, loss: 0.009845210239291191\r\nStep 1730, loss: 0.008259519003331661\r\nStep 1731, loss: 0.007162179797887802\r\nStep 1732, loss: 0.008448491804301739\r\nStep 1733, loss: 0.008390865288674831\r\nStep 1734, loss: 0.006807361263781786\r\nStep 1735, loss: 0.008280194364488125\r\nStep 1736, loss: 0.006797317881137133\r\nStep 1737, loss: 0.007668196689337492\r\nStep 1738, loss: 0.007283037528395653\r\nStep 1739, loss: 0.00719024334102869\r\nStep 1740, loss: 0.007517328951507807\r\nStep 1741, loss: 0.0072776442393660545\r\nStep 1742, loss: 0.009215869009494781\r\nStep 1743, loss: 0.007592868525534868\r\nStep 1744, loss: 0.007078573107719421\r\nStep 1745, loss: 0.007053877227008343\r\nStep 1746, loss: 0.007802892010658979\r\nStep 1747, loss: 0.007612156216055155\r\nStep 1589, loss: 0.008712777867913246\r\nStep 1590, loss: 0.007497219368815422\r\nStep 1591, loss: 0.008043506182730198\r\nStep 1592, loss: 0.006751270033419132\r\nStep 1593, loss: 0.008551674894988537\r\nStep 1594, loss: 0.007587277330458164\r\nStep 1595, loss: 0.0077667939476668835\r\nStep 1596, loss: 0.008603490889072418\r\nStep 1597, loss: 0.007895995862782001\r\nStep 1598, loss: 0.00929008424282074\r\nStep 1599, loss: 0.009897058829665184\r\nStep 1600, loss: 0.007759366650134325\r\nStep 1601, loss: 0.008193353191018105\r\nStep 1602, loss: 0.007737089414149523\r\nStep 1603, loss: 0.006492974702268839\r\nStep 1604, loss: 0.008825495839118958\r\nStep 1605, loss: 0.008608585223555565\r\nStep 1606, loss: 0.007923366501927376\r\nStep 1607, loss: 0.00665367441251874\r\nStep 1608, loss: 0.00696725957095623\r\nStep 1609, loss: 0.006957065314054489\r\nStep 1610, loss: 0.00696487445384264\r\nStep 1611, loss: 0.007112245075404644\r\nStep 1612, loss: 0.006711696740239859\r\nStep 1613, loss: 0.00747537100687623\r\nStep 1614, loss: 0.008746178820729256\r\nStep 1615, loss: 0.008868562988936901\r\nStep 1748, loss: 0.00802230928093195\r\nStep 1749, loss: 0.009651273488998413\r\nStep 1750, loss: 0.007929863408207893\r\nStep 1616, loss: 0.007783957291394472\r\nStep 1617, loss: 0.007451385725289583\r\nStep 1618, loss: 0.00733732245862484\r\nStep 1619, loss: 0.008677585050463676\r\nStep 1620, loss: 0.006870839279145002\r\nStep 1621, loss: 0.008756528608500957\r\nStep 1622, loss: 0.008265580050647259\r\nStep 1623, loss: 0.007021893281489611\r\nStep 1624, loss: 0.00679453881457448\r\nStep 1625, loss: 0.00711372634395957\r\nStep 1626, loss: 0.008378872647881508\r\nStep 1627, loss: 0.009000335820019245\r\nStep 1628, loss: 0.007025549653917551\r\nStep 1629, loss: 0.008529024198651314\r\nStep 1630, loss: 0.008187185041606426\r\nStep 1631, loss: 0.009054225869476795\r\nStep 1632, loss: 0.008002746850252151\r\nStep 1633, loss: 0.008577891625463963\r\nStep 1634, loss: 0.00878284964710474\r\nStep 1635, loss: 0.008703123778104782\r\nStep 1636, loss: 0.008663534186780453\r\nStep 1637, loss: 0.008749880827963352\r\nStep 1638, loss: 0.00996770616620779\r\nStep 1639, loss: 0.008074612356722355\r\nStep 1640, loss: 0.009668786078691483\r\nStep 1641, loss: 0.010203409940004349\r\nStep 1642, loss: 0.009471501223742962\r\nStep 
1751, loss: 0.007350419647991657\r\nStep 1643, loss: 0.007027697283774614\r\nStep 1644, loss: 0.0074112084694206715\r\nStep 1645, loss: 0.008350981399416924\r\nStep 1646, loss: 0.007672564126551151\r\nStep 1647, loss: 0.007430119439959526\r\nStep 1648, loss: 0.008122129365801811\r\nStep 1649, loss: 0.006787996273487806\r\nStep 1650, loss: 0.007542396429926157\r\nStep 1651, loss: 0.00665294099599123\r\nStep 1652, loss: 0.006335591431707144\r\nStep 1653, loss: 0.007724990602582693\r\nStep 1654, loss: 0.008031480945646763\r\nStep 1655, loss: 0.007781133521348238\r\nStep 1656, loss: 0.007816992700099945\r\nStep 1657, loss: 0.008315594866871834\r\nStep 1658, loss: 0.008215798065066338\r\nStep 1659, loss: 0.008099790662527084\r\nStep 1660, loss: 0.006514322943985462\r\nStep 1661, loss: 0.007386818993836641\r\nStep 1662, loss: 0.008341656066477299\r\nStep 1663, loss: 0.008426396176218987\r\nStep 1664, loss: 0.008052345365285873\r\nStep 1665, loss: 0.007347847800701857\r\nStep 1666, loss: 0.007858898490667343\r\nStep 1667, loss: 0.007327028084546328\r\nStep 1668, loss: 0.008239862509071827\r\nStep 1669, loss: 0.007635383401066065\r\nStep 1670, loss: 0.008939513936638832\r\nStep 1671, loss: 0.00767308846116066\r\nStep 1672, loss: 0.006550142541527748\r\nStep 1673, loss: 0.006795782130211592\r\nStep 1674, loss: 0.008795519359409809\r\nStep 1675, loss: 0.008797761052846909\r\nStep 1676, loss: 0.006749370601028204\r\nStep 1677, loss: 0.007756649050861597\r\nStep 1678, loss: 0.008998443372547626\r\nStep 1679, loss: 0.007440191227942705\r\nStep 1680, loss: 0.007901267148554325\r\nStep 1681, loss: 0.008502920158207417\r\nStep 1682, loss: 0.008505829609930515\r\nStep 1683, loss: 0.006411525886505842\r\nStep 1684, loss: 0.008588363416492939\r\nStep 1685, loss: 0.00692173233255744\r\nStep 1686, loss: 0.007136263884603977\r\nStep 1687, loss: 0.007504676003009081\r\nStep 1688, loss: 0.007309808861464262\r\nStep 1689, loss: 0.008141886442899704\r\nStep 1690, loss: 0.007988592609763145\r\nStep 1691, loss: 0.008273339830338955\r\nStep 1692, loss: 0.006870232056826353\r\nStep 1693, loss: 0.0068495869636535645\r\nStep 1694, loss: 0.007694709580391645\r\nStep 1695, loss: 0.009599804878234863\r\nStep 1696, loss: 0.006903140340000391\r\nStep 1697, loss: 0.00847446359694004\r\nStep 1698, loss: 0.007219031453132629\r\nStep 1699, loss: 0.008547683246433735\r\nStep 1700, loss: 0.008718534372746944\r\nStep 1701, loss: 0.009107892401516438\r\nStep 1702, loss: 0.008554563857614994\r\nStep 1703, loss: 0.009697475470602512\r\nStep 1704, loss: 0.008394952863454819\r\nStep 1705, loss: 0.007848896086215973\r\nStep 1706, loss: 0.007483388297259808\r\nStep 1707, loss: 0.005993971601128578\r\nStep 1708, loss: 0.00765452953055501\r\nStep 1709, loss: 0.0065749953500926495\r\nStep 1710, loss: 0.007273221854120493\r\nStep 1711, loss: 0.006847929209470749\r\nStep 1712, loss: 0.006090110633522272\r\nStep 1713, loss: 0.007094529457390308\r\nStep 1714, loss: 0.008160137571394444\r\nStep 1715, loss: 0.007716684136539698\r\nStep 1716, loss: 0.007477016653865576\r\nStep 1717, loss: 0.009491547010838985\r\nStep 1718, loss: 0.007434906903654337\r\nStep 1719, loss: 0.008541461080312729\r\nStep 1720, loss: 0.007313624955713749\r\nStep 1721, loss: 0.00781959667801857\r\nStep 1722, loss: 0.008003121241927147\r\nStep 1723, loss: 0.008829144760966301\r\nStep 1724, loss: 0.0074747479520738125\r\nStep 1725, loss: 0.007735308725386858\r\nStep 1726, loss: 0.008236423134803772\r\nStep 1727, loss: 0.007879878394305706\r\nStep 1728, loss: 
0.009238236583769321\r\nStep 1729, loss: 0.009845210239291191\r\nStep 1730, loss: 0.008259519003331661\r\nStep 1731, loss: 0.007162179797887802\r\nStep 1732, loss: 0.008448491804301739\r\nStep 1733, loss: 0.008390865288674831\r\nStep 1734, loss: 0.006807361263781786\r\nStep 1735, loss: 0.008280194364488125\r\nStep 1736, loss: 0.006797317881137133\r\nStep 1737, loss: 0.007668196689337492\r\nStep 1738, loss: 0.007283037528395653\r\nStep 1739, loss: 0.00719024334102869\r\nStep 1740, loss: 0.007517328951507807\r\nStep 1741, loss: 0.0072776442393660545\r\nStep 1742, loss: 0.009215869009494781\r\nStep 1743, loss: 0.007592868525534868\r\nStep 1744, loss: 0.007078573107719421\r\nStep 1745, loss: 0.007053877227008343\r\nStep 1746, loss: 0.007802892010658979\r\nStep 1747, loss: 0.007612156216055155\r\nStep 1748, loss: 0.00802230928093195\r\nStep 1749, loss: 0.009651273488998413\r\nStep 1750, loss: 0.007929863408207893\r\nStep 1751, loss: 0.007350419647991657\r\nStep 1536, loss: 0.008187836036086082\r\nStep 1537, loss: 0.005992586724460125\r\nStep 1538, loss: 0.0075413235463202\r\nStep 1539, loss: 0.0077062686905264854\r\nStep 1540, loss: 0.007754159159958363\r\nStep 1541, loss: 0.00818092841655016\r\nStep 1542, loss: 0.00720906862989068\r\nStep 1543, loss: 0.009623878635466099\r\nStep 1544, loss: 0.008170845918357372\r\nStep 1545, loss: 0.008935060352087021\r\nStep 1546, loss: 0.00749565614387393\r\nStep 1547, loss: 0.008688186295330524\r\nStep 1548, loss: 0.00886282417923212\r\nStep 1549, loss: 0.007526679430156946\r\nStep 1550, loss: 0.006964836735278368\r\nStep 1551, loss: 0.008580098859965801\r\nStep 1552, loss: 0.00906881783157587\r\nStep 1553, loss: 0.008019582368433475\r\nStep 1554, loss: 0.007595897652208805\r\nStep 1555, loss: 0.008318536914885044\r\nStep 1556, loss: 0.008528546430170536\r\nStep 1557, loss: 0.008759227581322193\r\nStep 1558, loss: 0.008838594891130924\r\nStep 1559, loss: 0.006689106114208698\r\nStep 1560, loss: 0.006475722882896662\r\nStep 1561, loss: 0.006674910895526409\r\nStep 1562, loss: 0.008995377458631992\r\nStep 1563, loss: 0.009440598078072071\r\nStep 1564, loss: 0.007657089736312628\r\nStep 1565, loss: 0.006977619603276253\r\nStep 1566, loss: 0.006828180514276028\r\nStep 1567, loss: 0.006950976327061653\r\nStep 1568, loss: 0.005743010435253382\r\nStep 1569, loss: 0.007142997346818447\r\nStep 1570, loss: 0.008083772845566273\r\nStep 1571, loss: 0.008059779182076454\r\nStep 1572, loss: 0.008990857750177383\r\nStep 1573, loss: 0.007011147681623697\r\nStep 1574, loss: 0.007031850516796112\r\nStep 1575, loss: 0.007916480302810669\r\nStep 1576, loss: 0.006887739058583975\r\nStep 1577, loss: 0.006685403175652027\r\nStep 1578, loss: 0.008958718739449978\r\nStep 1579, loss: 0.008453777059912682\r\nStep 1580, loss: 0.008035468868911266\r\nStep 1581, loss: 0.008207778446376324\r\nStep 1582, loss: 0.007178385276347399\r\nStep 1583, loss: 0.008020363748073578\r\nStep 1584, loss: 0.006817690096795559\r\nStep 1585, loss: 0.0069619882851839066\r\nStep 1586, loss: 0.007296544499695301\r\nStep 1587, loss: 0.007718900218605995\r\nStep 1588, loss: 0.00755611015483737\r\nStep 1589, loss: 0.008712777867913246\r\nStep 1590, loss: 0.007497219368815422\r\nStep 1591, loss: 0.008043506182730198\r\nStep 1592, loss: 0.006751270033419132\r\nStep 1593, loss: 0.008551674894988537\r\nStep 1594, loss: 0.007587277330458164\r\nStep 1595, loss: 0.0077667939476668835\r\nStep 1596, loss: 0.008603490889072418\r\nStep 1597, loss: 0.007895995862782001\r\nStep 1598, loss: 0.00929008424282074\r\nStep 1599, 
loss: 0.009897058829665184\r\nStep 1600, loss: 0.007759366650134325\r\nStep 1601, loss: 0.008193353191018105\r\nStep 1602, loss: 0.007737089414149523\r\nStep 1603, loss: 0.006492974702268839\r\nStep 1604, loss: 0.008825495839118958\r\nStep 1605, loss: 0.008608585223555565\r\nStep 1606, loss: 0.007923366501927376\r\nStep 1607, loss: 0.00665367441251874\r\nStep 1608, loss: 0.00696725957095623\r\nStep 1609, loss: 0.006957065314054489\r\nStep 1610, loss: 0.00696487445384264\r\nStep 1611, loss: 0.007112245075404644\r\nStep 1612, loss: 0.006711696740239859\r\nStep 1613, loss: 0.00747537100687623\r\nStep 1614, loss: 0.008746178820729256\r\nStep 1615, loss: 0.008868562988936901\r\nStep 1616, loss: 0.007783957291394472\r\nStep 1617, loss: 0.007451385725289583\r\nStep 1618, loss: 0.00733732245862484\r\nStep 1619, loss: 0.008677585050463676\r\nStep 1620, loss: 0.006870839279145002\r\nStep 1621, loss: 0.008756528608500957\r\nStep 1622, loss: 0.008265580050647259\r\nStep 1623, loss: 0.007021893281489611\r\nStep 1624, loss: 0.00679453881457448\r\nStep 1625, loss: 0.00711372634395957\r\nStep 1626, loss: 0.008378872647881508\r\nStep 1627, loss: 0.009000335820019245\r\nStep 1628, loss: 0.007025549653917551\r\nStep 1629, loss: 0.008529024198651314\r\nStep 1630, loss: 0.008187185041606426\r\nStep 1631, loss: 0.009054225869476795\r\nStep 1632, loss: 0.008002746850252151\r\nStep 1633, loss: 0.008577891625463963\r\nStep 1634, loss: 0.00878284964710474\r\nStep 1635, loss: 0.008703123778104782\r\nStep 1636, loss: 0.008663534186780453\r\nStep 1637, loss: 0.008749880827963352\r\nStep 1638, loss: 0.00996770616620779\r\nStep 1639, loss: 0.008074612356722355\r\nStep 1640, loss: 0.009668786078691483\r\nStep 1641, loss: 0.010203409940004349\r\nStep 1642, loss: 0.009471501223742962\r\nStep 1643, loss: 0.007027697283774614\r\nStep 1644, loss: 0.0074112084694206715\r\nStep 1645, loss: 0.008350981399416924\r\nStep 1646, loss: 0.007672564126551151\r\nStep 1647, loss: 0.007430119439959526\r\nStep 1648, loss: 0.008122129365801811\r\nStep 1649, loss: 0.006787996273487806\r\nStep 1650, loss: 0.007542396429926157\r\nStep 1651, loss: 0.00665294099599123\r\nStep 1652, loss: 0.006335591431707144\r\nStep 1653, loss: 0.007724990602582693\r\nStep 1654, loss: 0.008031480945646763\r\nStep 1655, loss: 0.007781133521348238\r\nStep 1656, loss: 0.007816992700099945\r\nStep 1657, loss: 0.008315594866871834\r\nStep 1658, loss: 0.008215798065066338\r\nStep 1659, loss: 0.008099790662527084\r\nStep 1660, loss: 0.006514322943985462\r\nStep 1661, loss: 0.007386818993836641\r\nStep 1662, loss: 0.008341656066477299\r\nStep 1663, loss: 0.008426396176218987\r\nStep 1664, loss: 0.008052345365285873\r\nStep 1665, loss: 0.007347847800701857\r\nStep 1666, loss: 0.007858898490667343\r\nStep 1667, loss: 0.007327028084546328\r\nStep 1668, loss: 0.008239862509071827\r\nStep 1669, loss: 0.007635383401066065\r\nStep 1670, loss: 0.008939513936638832\r\nStep 1671, loss: 0.00767308846116066\r\nStep 1672, loss: 0.006550142541527748\r\nStep 1673, loss: 0.006795782130211592\r\nStep 1674, loss: 0.008795519359409809\r\nStep 1675, loss: 0.008797761052846909\r\nStep 1676, loss: 0.006749370601028204\r\nStep 1677, loss: 0.007756649050861597\r\nStep 1678, loss: 0.008998443372547626\r\nStep 1679, loss: 0.007440191227942705\r\nStep 1680, loss: 0.007901267148554325\r\nStep 1681, loss: 0.008502920158207417\r\nStep 1682, loss: 0.008505829609930515\r\nStep 1683, loss: 0.006411525886505842\r\nStep 1684, loss: 0.008588363416492939\r\nStep 1685, loss: 0.00692173233255744\r\nStep 
1686, loss: 0.007136263884603977\r\nStep 1687, loss: 0.007504676003009081\r\nStep 1688, loss: 0.007309808861464262\r\nStep 1689, loss: 0.008141886442899704\r\nStep 1690, loss: 0.007988592609763145\r\nStep 1691, loss: 0.008273339830338955\r\nStep 1692, loss: 0.006870232056826353\r\nStep 1693, loss: 0.0068495869636535645\r\nStep 1694, loss: 0.007694709580391645\r\nStep 1695, loss: 0.009599804878234863\r\nStep 1696, loss: 0.006903140340000391\r\nStep 1697, loss: 0.00847446359694004\r\nStep 1698, loss: 0.007219031453132629\r\nStep 1699, loss: 0.008547683246433735\r\nStep 1700, loss: 0.008718534372746944\r\nStep 1701, loss: 0.009107892401516438\r\nStep 1702, loss: 0.008554563857614994\r\nStep 1703, loss: 0.009697475470602512\r\nStep 1704, loss: 0.008394952863454819\r\nStep 1705, loss: 0.007848896086215973\r\nStep 1706, loss: 0.007483388297259808\r\nStep 1707, loss: 0.005993971601128578\r\nStep 1708, loss: 0.00765452953055501\r\nStep 1709, loss: 0.0065749953500926495\r\nStep 1710, loss: 0.007273221854120493\r\nStep 1711, loss: 0.006847929209470749\r\nStep 1712, loss: 0.006090110633522272\r\nStep 1713, loss: 0.007094529457390308\r\nStep 1714, loss: 0.008160137571394444\r\nStep 1715, loss: 0.007716684136539698\r\nStep 1716, loss: 0.007477016653865576\r\nStep 1717, loss: 0.009491547010838985\r\nStep 1718, loss: 0.007434906903654337\r\nStep 1719, loss: 0.008541461080312729\r\nStep 1720, loss: 0.007313624955713749\r\nStep 1721, loss: 0.00781959667801857\r\nStep 1722, loss: 0.008003121241927147\r\nStep 1723, loss: 0.008829144760966301\r\nStep 1724, loss: 0.0074747479520738125\r\nStep 1725, loss: 0.007735308725386858\r\nStep 1726, loss: 0.008236423134803772\r\nStep 1727, loss: 0.007879878394305706\r\nStep 1728, loss: 0.009238236583769321\r\nStep 1729, loss: 0.009845210239291191\r\nStep 1730, loss: 0.008259519003331661\r\nStep 1731, loss: 0.007162179797887802\r\nStep 1732, loss: 0.008448491804301739\r\nStep 1733, loss: 0.008390865288674831\r\nStep 1734, loss: 0.006807361263781786\r\nStep 1735, loss: 0.008280194364488125\r\nStep 1736, loss: 0.006797317881137133\r\nStep 1737, loss: 0.007668196689337492\r\nStep 1738, loss: 0.007283037528395653\r\nStep 1739, loss: 0.00719024334102869\r\nStep 1740, loss: 0.007517328951507807\r\nStep 1741, loss: 0.0072776442393660545\r\nStep 1742, loss: 0.009215869009494781\r\nStep 1743, loss: 0.007592868525534868\r\nStep 1744, loss: 0.007078573107719421\r\nStep 1745, loss: 0.007053877227008343\r\nStep 1746, loss: 0.007802892010658979\r\nStep 1747, loss: 0.007612156216055155\r\nStep 1748, loss: 0.00802230928093195\r\nStep 1749, loss: 0.009651273488998413\r\nStep 1750, loss: 0.007929863408207893\r\nStep 1751, loss: 0.007350419647991657\r\n",,terminal_output +1124,2509470,"TERMINAL",0,0,"^C\r\n]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir[?2004h[tum_cte0515@hkn0733 logs_mihir]$ ",,terminal_output +1125,2510381,"TERMINAL",0,0,"538310",,terminal_output +1126,2511425,"TERMINAL",0,0,"64941",,terminal_output +1127,2512474,"TERMINAL",0,0,"754052",,terminal_output +1128,2513525,"TERMINAL",0,0,"86163",,terminal_output +1129,2514575,"TERMINAL",0,0,"97274",,terminal_output +1130,2515623,"TERMINAL",0,0,"308385",,terminal_output +1131,2516674,"TERMINAL",0,0,"19496",,terminal_output +1132,2517796,"TERMINAL",0,0,"2505507",,terminal_output +1133,2518855,"TERMINAL",0,0,"32729",,terminal_output +1134,2519782,"TERMINAL",0,0,"538320",,terminal_output +1135,2520825,"TERMINAL",0,0,"64941",,terminal_output 
+1136,2521963,"TERMINAL",0,0,"755052",,terminal_output +1137,2522947,"TERMINAL",0,0,"86163",,terminal_output +1138,2523988,"TERMINAL",0,0,"97274",,terminal_output +1139,2525134,"TERMINAL",0,0,"408385",,terminal_output +1140,2526076,"TERMINAL",0,0,"19496",,terminal_output +1141,2527117,"TERMINAL",0,0,"21:0051:007",,terminal_output +1142,2528206,"TERMINAL",0,0,"31618",,terminal_output +1143,2529230,"TERMINAL",0,0,"42729",,terminal_output +1144,2530239,"TERMINAL",0,0,"538330",,terminal_output +1145,2531288,"TERMINAL",0,0,"64941",,terminal_output +1146,2532418,"TERMINAL",0,0,"757:0052",,terminal_output +1147,2533383,"TERMINAL",0,0,"86163",,terminal_output +1148,2534438,"TERMINAL",0,0,"97274",,terminal_output +1149,2535495,"TERMINAL",0,0,"508385",,terminal_output +1150,2536810,"TERMINAL",0,0,"19496",,terminal_output +1151,2537626,"TERMINAL",0,0,"2105107",,terminal_output +1152,2538753,"TERMINAL",0,0,"31618",,terminal_output +1153,2539707,"TERMINAL",0,0,"42729",,terminal_output +1154,2540739,"TERMINAL",0,0,"549441",,terminal_output +1155,2541783,"TERMINAL",0,0,"751052",,terminal_output +1156,2542836,"TERMINAL",0,0,"86163",,terminal_output +1157,2543978,"TERMINAL",0,0,"97274",,terminal_output +1158,2544999,"TERMINAL",0,0,"7:008385",,terminal_output +1159,2545967,"TERMINAL",0,0,"19496",,terminal_output +1160,2547050,"TERMINAL",0,0,"2205207",,terminal_output +1161,2548070,"TERMINAL",0,0,"31618",,terminal_output +1162,2549096,"TERMINAL",0,0,"42729",,terminal_output +1163,2550223,"TERMINAL",0,0,"538350",,terminal_output +1164,2551259,"TERMINAL",0,0,"64941",,terminal_output +1165,2552245,"TERMINAL",0,0,"752052",,terminal_output +1166,2553296,"TERMINAL",0,0,"86163",,terminal_output +1167,2554421,"TERMINAL",0,0,"97274",,terminal_output +1168,2555380,"TERMINAL",0,0,"108385",,terminal_output +1169,2556408,"TERMINAL",0,0,"19496",,terminal_output +1170,2557439,"TERMINAL",0,0,"2305307",,terminal_output +1171,2558480,"TERMINAL",0,0,"31618",,terminal_output +1172,2559506,"TERMINAL",0,0,"42729",,terminal_output +1173,2560565,"TERMINAL",0,0,"53832:00",,terminal_output +1174,2561692,"TERMINAL",0,0,"64941",,terminal_output +1175,2562644,"TERMINAL",0,0,"753052",,terminal_output +1176,2563679,"TERMINAL",0,0,"86163",,terminal_output +1177,2564764,"TERMINAL",0,0,"98385",,terminal_output +1178,2565776,"TERMINAL",0,0,"219496",,terminal_output +1179,2566835,"TERMINAL",0,0,"2405407",,terminal_output +1180,2567939,"TERMINAL",0,0,"31618",,terminal_output +1181,2568906,"TERMINAL",0,0,"42729",,terminal_output +1182,2569987,"TERMINAL",0,0,"538310",,terminal_output +1183,2571012,"TERMINAL",0,0,"64941",,terminal_output +1184,2572077,"TERMINAL",0,0,"754052",,terminal_output +1185,2573086,"TERMINAL",0,0,"86163",,terminal_output +1186,2574196,"TERMINAL",0,0,"97274",,terminal_output +1187,2575229,"TERMINAL",0,0,"308385",,terminal_output +1188,2576231,"TERMINAL",0,0,"19496",,terminal_output +1189,2577266,"TERMINAL",0,0,"2505507",,terminal_output +1190,2578290,"TERMINAL",0,0,"31618",,terminal_output +1191,2579421,"TERMINAL",0,0,"42729",,terminal_output +1192,2580384,"TERMINAL",0,0,"538320",,terminal_output +1193,2581431,"TERMINAL",0,0,"64941",,terminal_output +1194,2582468,"TERMINAL",0,0,"755052",,terminal_output +1195,2583524,"TERMINAL",0,0,"86163",,terminal_output +1196,2584575,"TERMINAL",0,0,"97274",,terminal_output +1197,2585654,"TERMINAL",0,0,"408385",,terminal_output +1198,2586660,"TERMINAL",0,0,"19496",,terminal_output +1199,2587806,"TERMINAL",0,0,"22:0052:007",,terminal_output 
+1200,2588830,"TERMINAL",0,0,"32729",,terminal_output +1201,2589854,"TERMINAL",0,0,"538330",,terminal_output +1202,2590978,"TERMINAL",0,0,"64941",,terminal_output +1203,2592001,"TERMINAL",0,0,"758:0052",,terminal_output +1204,2593024,"TERMINAL",0,0,"86163",,terminal_output +1205,2594031,"TERMINAL",0,0,"97274",,terminal_output +1206,2595213,"TERMINAL",0,0,"508385",,terminal_output +1207,2596202,"TERMINAL",0,0,"19496",,terminal_output +1208,2597171,"TERMINAL",0,0,"2105107",,terminal_output +1209,2598250,"TERMINAL",0,0,"31618",,terminal_output +1210,2599278,"TERMINAL",0,0,"42729",,terminal_output +1211,2600331,"TERMINAL",0,0,"538340",,terminal_output +1212,2601426,"TERMINAL",0,0,"64941",,terminal_output +1213,2602418,"TERMINAL",0,0,"751052",,terminal_output +1214,2603474,"TERMINAL",0,0,"86163",,terminal_output +1215,2604524,"TERMINAL",0,0,"97274",,terminal_output +1216,2605564,"TERMINAL",0,0,"8:008385",,terminal_output +1217,2606614,"TERMINAL",0,0,"19496",,terminal_output +1218,2607662,"TERMINAL",0,0,"2205207",,terminal_output +1219,2608798,"TERMINAL",0,0,"32729",,terminal_output +1220,2609820,"TERMINAL",0,0,"538350",,terminal_output +1221,2610845,"TERMINAL",0,0,"64941",,terminal_output +1222,2611869,"TERMINAL",0,0,"752052",,terminal_output +1223,2612900,"TERMINAL",0,0,"86163",,terminal_output +1224,2613953,"TERMINAL",0,0,"97274",,terminal_output +1225,2615042,"TERMINAL",0,0,"108385",,terminal_output +1226,2616068,"TERMINAL",0,0,"19496",,terminal_output +1227,2617138,"TERMINAL",0,0,"2305307",,terminal_output +1228,2618144,"TERMINAL",0,0,"31618",,terminal_output +1229,2619197,"TERMINAL",0,0,"42729",,terminal_output +1230,2620245,"TERMINAL",0,0,"53833:00",,terminal_output +1231,2621295,"TERMINAL",0,0,"64941",,terminal_output +1232,2622415,"TERMINAL",0,0,"753052",,terminal_output +1233,2623392,"TERMINAL",0,0,"86163",,terminal_output +1234,2624418,"TERMINAL",0,0,"97274",,terminal_output +1235,2625458,"TERMINAL",0,0,"208385",,terminal_output +1236,2626508,"TERMINAL",0,0,"19496",,terminal_output +1237,2627639,"TERMINAL",0,0,"2405407",,terminal_output +1238,2628662,"TERMINAL",0,0,"31618",,terminal_output +1239,2629657,"TERMINAL",0,0,"42729",,terminal_output +1240,2630813,"TERMINAL",0,0,"538310",,terminal_output +1241,2631756,"TERMINAL",0,0,"654052",,terminal_output +1242,2632862,"TERMINAL",0,0,"86163",,terminal_output +1243,2633889,"TERMINAL",0,0,"97274",,terminal_output +1244,2634901,"TERMINAL",0,0,"308385",,terminal_output +1245,2636033,"TERMINAL",0,0,"19496",,terminal_output +1246,2637092,"TERMINAL",0,0,"2505507",,terminal_output +1247,2638051,"TERMINAL",0,0,"31618",,terminal_output +1248,2639107,"TERMINAL",0,0,"42729",,terminal_output +1249,2640150,"TERMINAL",0,0,"538320",,terminal_output +1250,2641263,"TERMINAL",0,0,"64941",,terminal_output +1251,2642243,"TERMINAL",0,0,"755052",,terminal_output +1252,2643304,"TERMINAL",0,0,"86163",,terminal_output +1253,2644430,"TERMINAL",0,0,"97274",,terminal_output +1254,2645369,"TERMINAL",0,0,"408385",,terminal_output +1255,2646483,"TERMINAL",0,0,"19496",,terminal_output +1256,2647504,"TERMINAL",0,0,"23:0053:007",,terminal_output +1257,2648522,"TERMINAL",0,0,"31618",,terminal_output +1258,2649659,"TERMINAL",0,0,"42729",,terminal_output +1259,2650627,"TERMINAL",0,0,"538330",,terminal_output +1260,2651675,"TERMINAL",0,0,"64941",,terminal_output +1261,2652720,"TERMINAL",0,0,"769:0163",,terminal_output +1262,2653854,"TERMINAL",0,0,"97274",,terminal_output +1263,2654833,"TERMINAL",0,0,"508385",,terminal_output +1264,2655886,"TERMINAL",0,0,"19496",,terminal_output 
+1265,2656053,"TERMINAL",0,0,"Step 1752, loss: 0.007587113883346319\r\nStep 1753, loss: 0.008723670616745949\r\nStep 1754, loss: 0.009975233115255833\r\nStep 1755, loss: 0.007778756320476532\r\nStep 1756, loss: 0.007096508052200079\r\nStep 1757, loss: 0.007639630697667599\r\nStep 1758, loss: 0.007171492092311382\r\nStep 1759, loss: 0.007648677099496126\r\nStep 1760, loss: 0.007515218574553728\r\nStep 1761, loss: 0.007691469043493271\r\nStep 1762, loss: 0.007892817258834839\r\nStep 1763, loss: 0.008219906128942966\r\nStep 1764, loss: 0.008599373511970043\r\nStep 1765, loss: 0.0069121867418289185\r\nStep 1766, loss: 0.008407720364630222\r\nStep 1767, loss: 0.008464629761874676\r\nStep 1768, loss: 0.00861058197915554\r\nStep 1769, loss: 0.00802692398428917\r\nStep 1770, loss: 0.00904843769967556\r\nStep 1771, loss: 0.007677456829696894\r\nStep 1772, loss: 0.008123278617858887\r\nStep 1773, loss: 0.007608410436660051\r\nStep 1774, loss: 0.008423535153269768\r\nStep 1775, loss: 0.007830347865819931\r\nStep 1776, loss: 0.008523247204720974\r\nStep 1777, loss: 0.00800322275608778\r\nStep 1778, loss: 0.0074024987407028675\r\nStep 1779, loss: 0.007988659664988518\r\nStep 1780, loss: 0.008393983356654644\r\nStep 1781, loss: 0.00781427975744009\r\nStep 1782, loss: 0.007399311289191246\r\nStep 1783, loss: 0.007245731074362993\r\nStep 1784, loss: 0.006771579850465059\r\nStep 1785, loss: 0.007609602529555559\r\nStep 1786, loss: 0.008843266405165195\r\nStep 1787, loss: 0.008325469680130482\r\nStep 1788, loss: 0.008557874709367752\r\nStep 1789, loss: 0.008673692122101784\r\nStep 1790, loss: 0.008132481016218662\r\nStep 1791, loss: 0.007657380308955908\r\nStep 1792, loss: 0.008321262896060944\r\nStep 1793, loss: 0.006629099138081074\r\nStep 1794, loss: 0.0077498299069702625\r\nStep 1795, loss: 0.010428356938064098\r\nStep 1796, loss: 0.008140066638588905\r\nStep 1797, loss: 0.008285797201097012\r\nStep 1798, loss: 0.007590371649712324\r\nStep 1799, loss: 0.009107510559260845\r\nStep 1800, loss: 0.008384736254811287\r\nStep 1801, loss: 0.009379260241985321\r\nStep 1802, loss: 0.008292770944535732\r\nStep 1803, loss: 0.0074447146616876125\r\nStep 1804, loss: 0.0068242489360272884\r\nStep 1805, loss: 0.006491672247648239\r\nStep 1806, loss: 0.007680016104131937\r\nStep 1807, loss: 0.007407679222524166\r\nStep 1808, loss: 0.0068879080936312675\r\nStep 1809, loss: 0.00631563039496541\r\nStep 1810, loss: 0.007973327301442623\r\nStep 1811, loss: 0.0069587049074471\r\nStep 1812, loss: 0.007649719249457121\r\nStep 1813, loss: 0.007414981722831726\r\nStep 1814, loss: 0.00741694588214159\r\nStep 1815, loss: 0.007430074270814657\r\nStep 1816, loss: 0.010052385739982128\r\nStep 1817, loss: 0.008906478993594646\r\nStep 1818, loss: 0.008217480033636093\r\nStep 1819, loss: 0.00826902687549591\r\nStep 1820, loss: 0.008634312078356743\r\nStep 1821, loss: 0.00724594434723258\r\nStep 1822, loss: 0.009606032632291317\r\nStep 1823, loss: 0.006943860556930304\r\nStep 1824, loss: 0.008477195166051388\r\nStep 1825, loss: 0.008026336319744587\r\nStep 1826, loss: 0.007603360805660486\r\nStep 1827, loss: 0.007352927699685097\r\nStep 1828, loss: 0.008573167957365513\r\nStep 1829, loss: 0.007389913313090801\r\nStep 1830, loss: 0.00866385456174612\r\nStep 1831, loss: 0.007425487972795963\r\nStep 1832, loss: 0.008493616245687008\r\nStep 1833, loss: 0.008756037801504135\r\nStep 1834, loss: 0.007766157388687134\r\nStep 1835, loss: 0.006558957975357771\r\nStep 1836, loss: 0.007619110401719809\r\nStep 1837, loss: 0.007933076471090317\r\nStep 
1838, loss: 0.00734346779063344\r\nStep 1839, loss: 0.006797607056796551\r\nStep 1840, loss: 0.007456270512193441\r\nStep 1841, loss: 0.007458324544131756\r\nStep 1842, loss: 0.008603651076555252\r\nStep 1843, loss: 0.008413057774305344\r\nStep 1844, loss: 0.008658475242555141\r\nStep 1845, loss: 0.008845366537570953\r\nStep 1846, loss: 0.008642880246043205\r\nStep 1847, loss: 0.007546432316303253\r\nStep 1848, loss: 0.008352362550795078\r\nStep 1849, loss: 0.0066724251955747604\r\nStep 1850, loss: 0.006749963387846947\r\nStep 1851, loss: 0.008008335717022419\r\nStep 1852, loss: 0.006626685615628958\r\nStep 1853, loss: 0.008494824171066284\r\nStep 1854, loss: 0.0074630845338106155\r\nStep 1855, loss: 0.007691007107496262\r\nStep 1856, loss: 0.005994731094688177\r\nStep 1857, loss: 0.008315281011164188\r\nStep 1858, loss: 0.006639814004302025\r\nStep 1859, loss: 0.007950042374432087\r\nStep 1860, loss: 0.007355806417763233\r\nStep 1861, loss: 0.007078413851559162\r\nStep 1862, loss: 0.008967281319200993\r\nStep 1863, loss: 0.007560933008790016\r\nStep 1864, loss: 0.006890495773404837\r\nStep 1865, loss: 0.007506961468607187\r\nStep 1866, loss: 0.008352764882147312\r\nStep 1867, loss: 0.007868250831961632\r\nStep 1868, loss: 0.008403055369853973\r\nStep 1869, loss: 0.008983764797449112\r\nStep 1870, loss: 0.00930242519825697\r\nStep 1871, loss: 0.008386524394154549\r\nStep 1872, loss: 0.00835971999913454\r\nStep 1873, loss: 0.00684413593262434\r\nStep 1874, loss: 0.0075506907887756824\r\nStep 1875, loss: 0.006223222240805626\r\nStep 1876, loss: 0.007349558174610138\r\nStep 1877, loss: 0.007400050293654203\r\nStep 1878, loss: 0.005997596774250269\r\nStep 1879, loss: 0.008886735886335373\r\nStep 1880, loss: 0.008929897099733353\r\nStep 1881, loss: 0.007534346543252468\r\nStep 1882, loss: 0.007405397482216358\r\nStep 1883, loss: 0.008107206784188747\r\nStep 1884, loss: 0.007917147129774094\r\nStep 1885, loss: 0.007139333989471197\r\nStep 1886, loss: 0.007105696480721235\r\nStep 1887, loss: 0.007592954207211733\r\nStep 1888, loss: 0.009718130342662334\r\nStep 1889, loss: 0.007253255695104599\r\nStep 1890, loss: 0.007049209903925657\r\nStep 1891, loss: 0.008213263005018234\r\nStep 1892, loss: 0.009460396133363247\r\nStep 1893, loss: 0.0073393480852246284\r\nStep 1894, loss: 0.00922479759901762\r\nStep 1895, loss: 0.009373532608151436\r\nStep 1896, loss: 0.006594881881028414\r\nStep 1897, loss: 0.006855552550405264\r\nStep 1898, loss: 0.007560144644230604\r\nStep 1899, loss: 0.007514113560318947\r\nStep 1900, loss: 0.008570155128836632\r\nStep 1901, loss: 0.008249602280557156\r\nStep 1902, loss: 0.007385370787233114\r\nStep 1903, loss: 0.008846917189657688\r\nStep 1904, loss: 0.006955833174288273\r\nStep 1905, loss: 0.007100945338606834\r\nStep 1906, loss: 0.007264026906341314\r\nStep 1907, loss: 0.008413623087108135\r\nStep 1908, loss: 0.007373266853392124\r\nStep 1909, loss: 0.006482332944869995\r\nStep 1910, loss: 0.008299317210912704\r\nStep 1911, loss: 0.00674647418782115\r\nStep 1912, loss: 0.008334393613040447\r\nStep 1913, loss: 0.00773186469450593\r\nStep 1914, loss: 0.008393315598368645\r\nStep 1915, loss: 0.007578670978546143\r\nStep 1916, loss: 0.007013033144176006\r\nStep 1917, loss: 0.008415376767516136\r\nStep 1918, loss: 0.008439400233328342\r\nStep 1919, loss: 0.00830706488341093\r\nStep 1920, loss: 0.0058112675324082375\r\nStep 1921, loss: 0.007347228471189737\r\nStep 1922, loss: 0.0078077660873532295\r\nStep 1923, loss: 0.007486520800739527\r\nStep 1924, loss: 
0.008071406744420528\r\nStep 1925, loss: 0.008356905542314053\r\nStep 1926, loss: 0.008094314485788345\r\nStep 1927, loss: 0.0075823478400707245\r\nStep 1928, loss: 0.0072280946187675\r\nStep 1929, loss: 0.006720804609358311\r\nStep 1930, loss: 0.00876590982079506\r\nStep 1931, loss: 0.009002909064292908\r\nStep 1932, loss: 0.008650872856378555\r\nStep 1933, loss: 0.008123574778437614\r\nStep 1934, loss: 0.008499816991388798\r\nStep 1935, loss: 0.00849961768835783\r\nStep 1936, loss: 0.007840009406208992\r\nStep 1937, loss: 0.010489179752767086\r\nStep 1938, loss: 0.007857481949031353\r\nStep 1939, loss: 0.008718672208487988\r\nStep 1940, loss: 0.007200952153652906\r\nStep 1941, loss: 0.007538785692304373\r\nStep 1942, loss: 0.006881448905915022\r\nStep 1943, loss: 0.00803673267364502\r\nStep 1944, loss: 0.006382018327713013\r\nStep 1945, loss: 0.007706017699092627\r\nStep 1946, loss: 0.008537651039659977\r\nStep 1947, loss: 0.007243881933391094\r\nStep 1948, loss: 0.007188852410763502\r\nStep 1949, loss: 0.0070786611177027225\r\nStep 1950, loss: 0.006413279101252556\r\nStep 1951, loss: 0.00776925403624773\r\nStep 1952, loss: 0.00772223761305213\r\nStep 1953, loss: 0.008617812767624855\r\nStep 1954, loss: 0.007393602281808853\r\nStep 1955, loss: 0.009221965447068214\r\nStep 1956, loss: 0.007050357758998871\r\nStep 1957, loss: 0.007142876274883747\r\nStep 1958, loss: 0.007543647196143866\r\nStep 1959, loss: 0.008612647652626038\r\nStep 1960, loss: 0.008025288581848145\r\nStep 1961, loss: 0.0069267600774765015\r\nStep 1962, loss: 0.006813779007643461\r\nStep 1963, loss: 0.0068598221987485886\r\nStep 1964, loss: 0.007313370238989592\r\nStep 1965, loss: 0.006166697479784489\r\nStep 1966, loss: 0.007302760612219572\r\nStep 1823, loss: 
0.006943860556930304\r\nStep 1824, loss: 0.008477195166051388\r\nStep 1825, loss: 0.008026336319744587\r\nStep 1826, loss: 0.007603360805660486\r\nStep 1827, loss: 0.007352927699685097\r\nStep 1828, loss: 0.008573167957365513\r\nStep 1829, loss: 0.007389913313090801\r\nStep 1830, loss: 0.00866385456174612\r\nStep 1885, loss: 0.007139333989471197\r\nStep 1886, loss: 0.007105696480721235\r\nStep 1887, loss: 0.007592954207211733\r\nStep 1888, loss: 0.009718130342662334\r\nStep 1889, loss: 0.007253255695104599\r\nStep 1890, loss: 0.007049209903925657\r\nStep 1891, loss: 0.008213263005018234\r\nStep 1892, loss: 0.009460396133363247\r\nStep 1893, loss: 0.0073393480852246284\r\nStep 1894, loss: 0.00922479759901762\r\nStep 1895, loss: 0.009373532608151436\r\nStep 1896, loss: 0.006594881881028414\r\nStep 1897, loss: 0.006855552550405264\r\nStep 1898, loss: 0.007560144644230604\r\nStep 1899, loss: 0.007514113560318947\r\nStep 1900, loss: 0.008570155128836632\r\nStep 1901, loss: 0.008249602280557156\r\nStep 1902, loss: 0.007385370787233114\r\nStep 1903, loss: 0.008846917189657688\r\nStep 1904, loss: 0.006955833174288273\r\nStep 1905, loss: 0.007100945338606834\r\nStep 1906, loss: 0.007264026906341314\r\nStep 1907, loss: 0.008413623087108135\r\nStep 1908, loss: 0.007373266853392124\r\nStep 1909, loss: 0.006482332944869995\r\nStep 1910, loss: 0.008299317210912704\r\nStep 1831, loss: 0.007425487972795963\r\nStep 1832, loss: 0.008493616245687008\r\nStep 1833, loss: 0.008756037801504135\r\nStep 1834, loss: 0.007766157388687134\r\nStep 1835, loss: 0.006558957975357771\r\nStep 1836, loss: 0.007619110401719809\r\nStep 1837, loss: 0.007933076471090317\r\nStep 1838, loss: 0.00734346779063344\r\nStep 1839, loss: 0.006797607056796551\r\nStep 1840, loss: 0.007456270512193441\r\nStep 1841, loss: 0.007458324544131756\r\nStep 1842, loss: 0.008603651076555252\r\nStep 1843, loss: 0.008413057774305344\r\nStep 1844, loss: 0.008658475242555141\r\nStep 1845, loss: 0.008845366537570953\r\nStep 1846, loss: 0.008642880246043205\r\nStep 1847, loss: 0.007546432316303253\r\nStep 1848, loss: 0.008352362550795078\r\nStep 1849, loss: 0.0066724251955747604\r\nStep 1850, loss: 0.006749963387846947\r\nStep 1851, loss: 0.008008335717022419\r\nStep 1852, loss: 0.006626685615628958\r\nStep 1853, loss: 0.008494824171066284\r\nStep 1854, loss: 0.0074630845338106155\r\nStep 1855, loss: 0.007691007107496262\r\nStep 1856, loss: 0.005994731094688177\r\nStep 1911, loss: 0.00674647418782115\r\nStep 1912, loss: 0.008334393613040447\r\nStep 1913, loss: 0.00773186469450593\r\nStep 1914, loss: 0.008393315598368645\r\nStep 1915, loss: 0.007578670978546143\r\nStep 1916, loss: 0.007013033144176006\r\nStep 1917, loss: 0.008415376767516136\r\nStep 1918, loss: 0.008439400233328342\r\nStep 1919, loss: 0.00830706488341093\r\nStep 1920, loss: 0.0058112675324082375\r\nStep 1921, loss: 0.007347228471189737\r\nStep 1922, loss: 0.0078077660873532295\r\nStep 1923, loss: 0.007486520800739527\r\nStep 1924, loss: 0.008071406744420528\r\nStep 1925, loss: 0.008356905542314053\r\nStep 1926, loss: 0.008094314485788345\r\nStep 1927, loss: 0.0075823478400707245\r\nStep 1928, loss: 0.0072280946187675\r\nStep 1929, loss: 0.006720804609358311\r\nStep 1930, loss: 0.00876590982079506\r\nStep 1931, loss: 0.009002909064292908\r\nStep 1932, loss: 0.008650872856378555\r\nStep 1933, loss: 0.008123574778437614\r\nStep 1934, loss: 0.008499816991388798\r\nStep 1935, loss: 0.00849961768835783\r\nStep 1936, loss: 0.007840009406208992\r\nStep 1937, loss: 0.010489179752767086\r\nStep 
1857, loss: 0.008315281011164188\r\nStep 1858, loss: 0.006639814004302025\r\nStep 1938, loss: 0.007857481949031353\r\nStep 1939, loss: 0.008718672208487988\r\nStep 1940, loss: 0.007200952153652906\r\nStep 1941, loss: 0.007538785692304373\r\nStep 1942, loss: 0.006881448905915022\r\nStep 1943, loss: 0.00803673267364502\r\nStep 1944, loss: 0.006382018327713013\r\nStep 1945, loss: 0.007706017699092627\r\nStep 1946, loss: 0.008537651039659977\r\nStep 1947, loss: 0.007243881933391094\r\nStep 1948, loss: 0.007188852410763502\r\nStep 1949, loss: 0.0070786611177027225\r\nStep 1950, loss: 0.006413279101252556\r\nStep 1951, loss: 0.00776925403624773\r\nStep 1952, loss: 0.00772223761305213\r\nStep 1953, loss: 0.008617812767624855\r\nStep 1954, loss: 0.007393602281808853\r\nStep 1955, loss: 0.009221965447068214\r\nStep 1956, loss: 0.007050357758998871\r\nStep 1957, loss: 0.007142876274883747\r\nStep 1958, loss: 0.007543647196143866\r\nStep 1959, loss: 0.008612647652626038\r\nStep 1960, loss: 0.008025288581848145\r\nStep 1961, loss: 0.0069267600774765015\r\nStep 1962, loss: 0.006813779007643461\r\nStep 1963, loss: 0.0068598221987485886\r\nStep 1859, loss: 0.007950042374432087\r\nStep 1860, loss: 0.007355806417763233\r\nStep 1861, loss: 0.007078413851559162\r\nStep 1862, loss: 0.008967281319200993\r\nStep 1863, loss: 0.007560933008790016\r\nStep 1864, loss: 0.006890495773404837\r\nStep 1865, loss: 0.007506961468607187\r\nStep 1866, loss: 0.008352764882147312\r\nStep 1867, loss: 0.007868250831961632\r\nStep 1868, loss: 0.008403055369853973\r\nStep 1869, loss: 0.008983764797449112\r\nStep 1870, loss: 0.00930242519825697\r\nStep 1871, loss: 0.008386524394154549\r\nStep 1872, loss: 0.00835971999913454\r\nStep 1873, loss: 0.00684413593262434\r\nStep 1874, loss: 0.0075506907887756824\r\nStep 1875, loss: 0.006223222240805626\r\nStep 1876, loss: 0.007349558174610138\r\nStep 1877, loss: 0.007400050293654203\r\nStep 1878, loss: 0.005997596774250269\r\nStep 1879, loss: 0.008886735886335373\r\nStep 1880, loss: 0.008929897099733353\r\nStep 1881, loss: 0.007534346543252468\r\nStep 1882, loss: 0.007405397482216358\r\nStep 1883, loss: 0.008107206784188747\r\nStep 1884, loss: 0.007917147129774094\r\nStep 1964, loss: 0.007313370238989592\r\nStep 1965, loss: 0.006166697479784489\r\nStep 1885, loss: 0.007139333989471197\r\nStep 1886, loss: 0.007105696480721235\r\nStep 1887, loss: 0.007592954207211733\r\nStep 1888, loss: 0.009718130342662334\r\nStep 1889, loss: 0.007253255695104599\r\nStep 1890, loss: 0.007049209903925657\r\nStep 1891, loss: 0.008213263005018234\r\nStep 1892, loss: 0.009460396133363247\r\nStep 1893, loss: 0.0073393480852246284\r\nStep 1894, loss: 0.00922479759901762\r\nStep 1895, loss: 0.009373532608151436\r\nStep 1896, loss: 0.006594881881028414\r\nStep 1897, loss: 0.006855552550405264\r\nStep 1898, loss: 0.007560144644230604\r\nStep 1899, loss: 0.007514113560318947\r\nStep 1900, loss: 0.008570155128836632\r\nStep 1901, loss: 0.008249602280557156\r\nStep 1902, loss: 0.007385370787233114\r\nStep 1903, loss: 0.008846917189657688\r\nStep 1904, loss: 0.006955833174288273\r\nStep 1905, loss: 0.007100945338606834\r\nStep 1906, loss: 0.007264026906341314\r\nStep 1907, loss: 0.008413623087108135\r\nStep 1908, loss: 0.007373266853392124\r\nStep 1909, loss: 0.006482332944869995\r\nStep 1910, loss: 0.008299317210912704\r\nStep 1859, loss: 0.007950042374432087\r\nStep 1860, loss: 0.007355806417763233\r\nStep 1861, loss: 0.007078413851559162\r\nStep 1862, loss: 0.008967281319200993\r\nStep 1863, loss: 
0.007560933008790016\r\nStep 1864, loss: 0.006890495773404837\r\nStep 1865, loss: 0.007506961468607187\r\nStep 1866, loss: 0.008352764882147312\r\nStep 1867, loss: 0.007868250831961632\r\nStep 1868, loss: 0.008403055369853973\r\nStep 1869, loss: 0.008983764797449112\r\nStep 1870, loss: 0.00930242519825697\r\nStep 1871, loss: 0.008386524394154549\r\nStep 1872, loss: 0.00835971999913454\r\nStep 1873, loss: 0.00684413593262434\r\nStep 1874, loss: 0.0075506907887756824\r\nStep 1875, loss: 0.006223222240805626\r\nStep 1876, loss: 0.007349558174610138\r\nStep 1877, loss: 0.007400050293654203\r\nStep 1878, loss: 0.005997596774250269\r\nStep 1879, loss: 0.008886735886335373\r\nStep 1880, loss: 0.008929897099733353\r\nStep 1881, loss: 0.007534346543252468\r\nStep 1882, loss: 0.007405397482216358\r\nStep 1883, loss: 0.008107206784188747\r\nStep 1884, loss: 0.007917147129774094\r\nStep 1911, loss: 0.00674647418782115\r\nStep 1912, loss: 0.008334393613040447\r\nStep 1913, loss: 0.00773186469450593\r\nStep 1914, loss: 0.008393315598368645\r\nStep 1915, loss: 0.007578670978546143\r\nStep 1916, loss: 0.007013033144176006\r\nStep 1917, loss: 0.008415376767516136\r\nStep 1918, loss: 0.008439400233328342\r\nStep 1919, loss: 0.00830706488341093\r\nStep 1920, loss: 0.0058112675324082375\r\nStep 1921, loss: 0.007347228471189737\r\nStep 1922, loss: 0.0078077660873532295\r\nStep 1923, loss: 0.007486520800739527\r\nStep 1924, loss: 0.008071406744420528\r\nStep 1925, loss: 0.008356905542314053\r\nStep 1926, loss: 0.008094314485788345\r\nStep 1927, loss: 0.0075823478400707245\r\nStep 1928, loss: 0.0072280946187675\r\nStep 1929, loss: 0.006720804609358311\r\nStep 1930, loss: 0.00876590982079506\r\nStep 1931, loss: 0.009002909064292908\r\nStep 1932, loss: 0.008650872856378555\r\nStep 1933, loss: 0.008123574778437614\r\nStep 1934, loss: 0.008499816991388798\r\nStep 1935, loss: 0.00849961768835783\r\nStep 1936, loss: 0.007840009406208992\r\nStep 1937, loss: 0.010489179752767086\r\nStep 1885, loss: 0.007139333989471197\r\nStep 1886, loss: 0.007105696480721235\r\nStep 1887, loss: 0.007592954207211733\r\nStep 1888, loss: 0.009718130342662334\r\nStep 1889, loss: 0.007253255695104599\r\nStep 1890, loss: 0.007049209903925657\r\nStep 1891, loss: 0.008213263005018234\r\nStep 1892, loss: 0.009460396133363247\r\nStep 1893, loss: 0.0073393480852246284\r\nStep 1894, loss: 0.00922479759901762\r\nStep 1895, loss: 0.009373532608151436\r\nStep 1896, loss: 0.006594881881028414\r\nStep 1897, loss: 0.006855552550405264\r\nStep 1898, loss: 0.007560144644230604\r\nStep 1899, loss: 0.007514113560318947\r\nStep 1900, loss: 0.008570155128836632\r\nStep 1901, loss: 0.008249602280557156\r\nStep 1902, loss: 0.007385370787233114\r\nStep 1903, loss: 0.008846917189657688\r\nStep 1904, loss: 0.006955833174288273\r\nStep 1905, loss: 0.007100945338606834\r\nStep 1906, loss: 0.007264026906341314\r\nStep 1907, loss: 0.008413623087108135\r\nStep 1908, loss: 0.007373266853392124\r\nStep 1909, loss: 0.006482332944869995\r\nStep 1910, loss: 0.008299317210912704\r\nStep 1938, loss: 0.007857481949031353\r\nStep 1939, loss: 0.008718672208487988\r\nStep 1940, loss: 0.007200952153652906\r\nStep 1941, loss: 0.007538785692304373\r\nStep 1942, loss: 0.006881448905915022\r\nStep 1943, loss: 0.00803673267364502\r\nStep 1944, loss: 0.006382018327713013\r\nStep 1945, loss: 0.007706017699092627\r\nStep 1946, loss: 0.008537651039659977\r\nStep 1947, loss: 0.007243881933391094\r\nStep 1948, loss: 0.007188852410763502\r\nStep 1949, loss: 0.0070786611177027225\r\nStep 1950, 
loss: 0.006413279101252556\r\nStep 1951, loss: 0.00776925403624773\r\nStep 1952, loss: 0.00772223761305213\r\nStep 1953, loss: 0.008617812767624855\r\nStep 1954, loss: 0.007393602281808853\r\nStep 1955, loss: 0.009221965447068214\r\nStep 1956, loss: 0.007050357758998871\r\nStep 1957, loss: 0.007142876274883747\r\nStep 1958, loss: 0.007543647196143866\r\nStep 1959, loss: 0.008612647652626038\r\nStep 1960, loss: 0.008025288581848145\r\nStep 1961, loss: 0.0069267600774765015\r\nStep 1962, loss: 0.006813779007643461\r\nStep 1963, loss: 0.0068598221987485886\r\nStep 1911, loss: 0.00674647418782115\r\nStep 1912, loss: 0.008334393613040447\r\nStep 1913, loss: 0.00773186469450593\r\nStep 1914, loss: 0.008393315598368645\r\nStep 1915, loss: 0.007578670978546143\r\nStep 1916, loss: 0.007013033144176006\r\nStep 1917, loss: 0.008415376767516136\r\nStep 1918, loss: 0.008439400233328342\r\nStep 1919, loss: 0.00830706488341093\r\nStep 1920, loss: 0.0058112675324082375\r\nStep 1921, loss: 0.007347228471189737\r\nStep 1922, loss: 0.0078077660873532295\r\nStep 1923, loss: 0.007486520800739527\r\nStep 1924, loss: 0.008071406744420528\r\nStep 1925, loss: 0.008356905542314053\r\nStep 1926, loss: 0.008094314485788345\r\nStep 1927, loss: 0.0075823478400707245\r\nStep 1928, loss: 0.0072280946187675\r\nStep 1929, loss: 0.006720804609358311\r\nStep 1930, loss: 0.00876590982079506\r\nStep 1931, loss: 0.009002909064292908\r\nStep 1932, loss: 0.008650872856378555\r\nStep 1933, loss: 0.008123574778437614\r\nStep 1934, loss: 0.008499816991388798\r\nStep 1935, loss: 0.00849961768835783\r\nStep 1936, loss: 0.007840009406208992\r\nStep 1937, loss: 0.010489179752767086\r\nStep 1964, loss: 0.007313370238989592\r\nStep 1965, loss: 0.006166697479784489\r\nStep 1938, loss: 0.007857481949031353\r\nStep 1939, loss: 0.008718672208487988\r\nStep 1940, loss: 0.007200952153652906\r\nStep 1941, loss: 0.007538785692304373\r\nStep 1942, loss: 0.006881448905915022\r\nStep 1943, loss: 0.00803673267364502\r\nStep 1944, loss: 0.006382018327713013\r\nStep 1945, loss: 0.007706017699092627\r\nStep 1946, loss: 0.008537651039659977\r\nStep 1947, loss: 0.007243881933391094\r\nStep 1948, loss: 0.007188852410763502\r\nStep 1949, loss: 0.0070786611177027225\r\nStep 1950, loss: 0.006413279101252556\r\nStep 1951, loss: 0.00776925403624773\r\nStep 1952, loss: 0.00772223761305213\r\nStep 1953, loss: 0.008617812767624855\r\nStep 1954, loss: 0.007393602281808853\r\nStep 1955, loss: 0.009221965447068214\r\nStep 1956, loss: 0.007050357758998871\r\nStep 1957, loss: 0.007142876274883747\r\nStep 1958, loss: 0.007543647196143866\r\nStep 1959, loss: 0.008612647652626038\r\nStep 1960, loss: 0.008025288581848145\r\nStep 1961, loss: 0.0069267600774765015\r\nStep 1962, loss: 0.006813779007643461\r\nStep 1963, loss: 0.0068598221987485886\r\nStep 1859, loss: 0.007950042374432087\r\nStep 1860, loss: 0.007355806417763233\r\nStep 1861, loss: 0.007078413851559162\r\nStep 1862, loss: 0.008967281319200993\r\nStep 1863, loss: 0.007560933008790016\r\nStep 1864, loss: 0.006890495773404837\r\nStep 1865, loss: 0.007506961468607187\r\nStep 1866, loss: 0.008352764882147312\r\nStep 1867, loss: 0.007868250831961632\r\nStep 1868, loss: 0.008403055369853973\r\nStep 1869, loss: 0.008983764797449112\r\nStep 1870, loss: 0.00930242519825697\r\nStep 1871, loss: 0.008386524394154549\r\nStep 1872, loss: 0.00835971999913454\r\nStep 1873, loss: 0.00684413593262434\r\nStep 1874, loss: 0.0075506907887756824\r\nStep 1875, loss: 0.006223222240805626\r\nStep 1876, loss: 0.007349558174610138\r\nStep 
1877, loss: 0.007400050293654203\r\nStep 1878, loss: 0.005997596774250269\r\nStep 1879, loss: 0.008886735886335373\r\nStep 1880, loss: 0.008929897099733353\r\nStep 1881, loss: 0.007534346543252468\r\nStep 1882, loss: 0.007405397482216358\r\nStep 1883, loss: 0.008107206784188747\r\nStep 1884, loss: 0.007917147129774094\r\nStep 1964, loss: 0.007313370238989592\r\nStep 1965, loss: 0.006166697479784489\r\nStep 1885, loss: 0.007139333989471197\r\nStep 1886, loss: 0.007105696480721235\r\nStep 1887, loss: 0.007592954207211733\r\nStep 1888, loss: 0.009718130342662334\r\nStep 1889, loss: 0.007253255695104599\r\nStep 1890, loss: 0.007049209903925657\r\nStep 1891, loss: 0.008213263005018234\r\nStep 1892, loss: 0.009460396133363247\r\nStep 1893, loss: 0.0073393480852246284\r\nStep 1894, loss: 0.00922479759901762\r\nStep 1895, loss: 0.009373532608151436\r\nStep 1896, loss: 0.006594881881028414\r\nStep 1897, loss: 0.006855552550405264\r\nStep 1898, loss: 0.007560144644230604\r\nStep 1899, loss: 0.007514113560318947\r\nStep 1900, loss: 0.008570155128836632\r\nStep 1901, loss: 0.008249602280557156\r\nStep 1902, loss: 0.007385370787233114\r\nStep 1903, loss: 0.008846917189657688\r\nStep 1904, loss: 0.006955833174288273\r\nStep 1905, loss: 0.007100945338606834\r\nStep 1906, loss: 0.007264026906341314\r\nStep 1907, loss: 0.008413623087108135\r\nStep 1908, loss: 0.007373266853392124\r\nStep 1909, loss: 0.006482332944869995\r\nStep 1910, loss: 0.008299317210912704\r\nStep 1966, loss: 0.007302760612219572\r\nStep 1911, loss: 0.00674647418782115\r\nStep 1912, loss: 0.008334393613040447\r\nStep 1913, loss: 0.00773186469450593\r\nStep 1914, loss: 0.008393315598368645\r\nStep 1915, loss: 0.007578670978546143\r\nStep 1916, loss: 0.007013033144176006\r\nStep 1917, loss: 0.008415376767516136\r\nStep 1918, loss: 0.008439400233328342\r\nStep 1919, loss: 0.00830706488341093\r\nStep 1920, loss: 0.0058112675324082375\r\nStep 1921, loss: 0.007347228471189737\r\nStep 1922, loss: 0.0078077660873532295\r\nStep 1923, loss: 0.007486520800739527\r\nStep 1924, loss: 0.008071406744420528\r\nStep 1925, loss: 0.008356905542314053\r\nStep 1926, loss: 0.008094314485788345\r\nStep 1927, loss: 0.0075823478400707245\r\nStep 1928, loss: 0.0072280946187675\r\nStep 1929, loss: 0.006720804609358311\r\nStep 1930, loss: 0.00876590982079506\r\nStep 1931, loss: 0.009002909064292908\r\nStep 1932, loss: 0.008650872856378555\r\nStep 1933, loss: 0.008123574778437614\r\nStep 1934, loss: 0.008499816991388798\r\nStep 1935, loss: 0.00849961768835783\r\nStep 1936, loss: 0.007840009406208992\r\nStep 1937, loss: 0.010489179752767086\r\nStep 1966, loss: 0.007302760612219572\r\nStep 1938, loss: 0.007857481949031353\r\nStep 1939, loss: 0.008718672208487988\r\nStep 1940, loss: 0.007200952153652906\r\nStep 1941, loss: 0.007538785692304373\r\nStep 1942, loss: 0.006881448905915022\r\nStep 1943, loss: 0.00803673267364502\r\nStep 1944, loss: 0.006382018327713013\r\nStep 1945, loss: 0.007706017699092627\r\nStep 1946, loss: 0.008537651039659977\r\nStep 1947, loss: 0.007243881933391094\r\nStep 1948, loss: 0.007188852410763502\r\nStep 1949, loss: 0.0070786611177027225\r\nStep 1950, loss: 0.006413279101252556\r\nStep 1951, loss: 0.00776925403624773\r\nStep 1952, loss: 0.00772223761305213\r\nStep 1953, loss: 0.008617812767624855\r\nStep 1954, loss: 0.007393602281808853\r\nStep 1955, loss: 0.009221965447068214\r\nStep 1956, loss: 0.007050357758998871\r\nStep 1957, loss: 0.007142876274883747\r\nStep 1958, loss: 0.007543647196143866\r\nStep 1959, loss: 
0.008612647652626038\r\nStep 1960, loss: 0.008025288581848145\r\nStep 1961, loss: 0.0069267600774765015\r\nStep 1962, loss: 0.006813779007643461\r\nStep 1963, loss: 0.0068598221987485886\r\nStep 1752, loss: 0.007587113883346319\r\nStep 1753, loss: 0.008723670616745949\r\nStep 1754, loss: 0.009975233115255833\r\nStep 1755, loss: 0.007778756320476532\r\nStep 1756, loss: 0.007096508052200079\r\nStep 1757, loss: 0.007639630697667599\r\nStep 1758, loss: 0.007171492092311382\r\nStep 1759, loss: 0.007648677099496126\r\nStep 1760, loss: 0.007515218574553728\r\nStep 1761, loss: 0.007691469043493271\r\nStep 1762, loss: 0.007892817258834839\r\nStep 1763, loss: 0.008219906128942966\r\nStep 1764, loss: 0.008599373511970043\r\nStep 1765, loss: 0.0069121867418289185\r\nStep 1766, loss: 0.008407720364630222\r\nStep 1767, loss: 0.008464629761874676\r\nStep 1768, loss: 0.00861058197915554\r\nStep 1769, loss: 0.00802692398428917\r\nStep 1770, loss: 0.00904843769967556\r\nStep 1771, loss: 0.007677456829696894\r\nStep 1772, loss: 0.008123278617858887\r\nStep 1773, loss: 0.007608410436660051\r\nStep 1774, loss: 0.008423535153269768\r\nStep 1775, loss: 0.007830347865819931\r\nStep 1776, loss: 0.008523247204720974\r\nStep 1777, loss: 0.00800322275608778\r\nStep 1964, loss: 0.007313370238989592\r\nStep 1965, loss: 0.006166697479784489\r\nStep 1778, loss: 0.0074024987407028675\r\nStep 1779, loss: 0.007988659664988518\r\nStep 1780, loss: 0.008393983356654644\r\nStep 1781, loss: 0.00781427975744009\r\nStep 1782, loss: 0.007399311289191246\r\nStep 1783, loss: 0.007245731074362993\r\nStep 1784, loss: 0.006771579850465059\r\nStep 1785, loss: 0.007609602529555559\r\nStep 1786, loss: 0.008843266405165195\r\nStep 1787, loss: 0.008325469680130482\r\nStep 1788, loss: 0.008557874709367752\r\nStep 1789, loss: 0.008673692122101784\r\nStep 1790, loss: 0.008132481016218662\r\nStep 1791, loss: 0.007657380308955908\r\nStep 1792, loss: 0.008321262896060944\r\nStep 1793, loss: 0.006629099138081074\r\nStep 1794, loss: 0.0077498299069702625\r\nStep 1795, loss: 0.010428356938064098\r\nStep 1796, loss: 0.008140066638588905\r\nStep 1797, loss: 0.008285797201097012\r\nStep 1798, loss: 0.007590371649712324\r\nStep 1799, loss: 0.009107510559260845\r\nStep 1800, loss: 0.008384736254811287\r\nStep 1801, loss: 0.009379260241985321\r\nStep 1802, loss: 0.008292770944535732\r\nStep 1803, loss: 0.0074447146616876125\r\nStep 1859, loss: 0.007950042374432087\r\nStep 1860, loss: 0.007355806417763233\r\nStep 1861, loss: 0.007078413851559162\r\nStep 1862, loss: 0.008967281319200993\r\nStep 1863, loss: 0.007560933008790016\r\nStep 1864, loss: 0.006890495773404837\r\nStep 1865, loss: 0.007506961468607187\r\nStep 1866, loss: 0.008352764882147312\r\nStep 1867, loss: 0.007868250831961632\r\nStep 1868, loss: 0.008403055369853973\r\nStep 1869, loss: 0.008983764797449112\r\nStep 1870, loss: 0.00930242519825697\r\nStep 1871, loss: 0.008386524394154549\r\nStep 1872, loss: 0.00835971999913454\r\nStep 1873, loss: 0.00684413593262434\r\nStep 1874, loss: 0.0075506907887756824\r\nStep 1875, loss: 0.006223222240805626\r\nStep 1876, loss: 0.007349558174610138\r\nStep 1877, loss: 0.007400050293654203\r\nStep 1878, loss: 0.005997596774250269\r\nStep 1879, loss: 0.008886735886335373\r\nStep 1880, loss: 0.008929897099733353\r\nStep 1881, loss: 0.007534346543252468\r\nStep 1882, loss: 0.007405397482216358\r\nStep 1883, loss: 0.008107206784188747\r\nStep 1884, loss: 0.007917147129774094\r\nStep 1804, loss: 0.0068242489360272884\r\nStep 1805, loss: 0.006491672247648239\r\nStep 
1806, loss: 0.007680016104131937\r\nStep 1807, loss: 0.007407679222524166\r\nStep 1808, loss: 0.0068879080936312675\r\nStep 1809, loss: 0.00631563039496541\r\nStep 1810, loss: 0.007973327301442623\r\nStep 1811, loss: 0.0069587049074471\r\nStep 1812, loss: 0.007649719249457121\r\nStep 1813, loss: 0.007414981722831726\r\nStep 1814, loss: 0.00741694588214159\r\nStep 1815, loss: 0.007430074270814657\r\nStep 1816, loss: 0.010052385739982128\r\nStep 1817, loss: 0.008906478993594646\r\nStep 1818, loss: 0.008217480033636093\r\nStep 1819, loss: 0.00826902687549591\r\nStep 1820, loss: 0.008634312078356743\r\nStep 1821, loss: 0.00724594434723258\r\nStep 1822, loss: 0.009606032632291317\r\nStep 1823, loss: 0.006943860556930304\r\nStep 1824, loss: 0.008477195166051388\r\nStep 1825, loss: 0.008026336319744587\r\nStep 1826, loss: 0.007603360805660486\r\nStep 1827, loss: 0.007352927699685097\r\nStep 1828, loss: 0.008573167957365513\r\nStep 1829, loss: 0.007389913313090801\r\nStep 1830, loss: 0.00866385456174612\r\nStep 1885, loss: 0.007139333989471197\r\nStep 1886, loss: 0.007105696480721235\r\nStep 1887, loss: 0.007592954207211733\r\nStep 1888, loss: 0.009718130342662334\r\nStep 1889, loss: 0.007253255695104599\r\nStep 1890, loss: 0.007049209903925657\r\nStep 1891, loss: 0.008213263005018234\r\nStep 1892, loss: 0.009460396133363247\r\nStep 1893, loss: 0.0073393480852246284\r\nStep 1894, loss: 0.00922479759901762\r\nStep 1895, loss: 0.009373532608151436\r\nStep 1896, loss: 0.006594881881028414\r\nStep 1897, loss: 0.006855552550405264\r\nStep 1898, loss: 0.007560144644230604\r\nStep 1899, loss: 0.007514113560318947\r\nStep 1900, loss: 0.008570155128836632\r\nStep 1901, loss: 0.008249602280557156\r\nStep 1902, loss: 0.007385370787233114\r\nStep 1903, loss: 0.008846917189657688\r\nStep 1904, loss: 0.006955833174288273\r\nStep 1905, loss: 0.007100945338606834\r\nStep 1906, loss: 0.007264026906341314\r\nStep 1907, loss: 0.008413623087108135\r\nStep 1908, loss: 0.007373266853392124\r\nStep 1909, loss: 0.006482332944869995\r\nStep 1910, loss: 0.008299317210912704\r\nStep 1831, loss: 0.007425487972795963\r\nStep 1832, loss: 0.008493616245687008\r\nStep 1833, loss: 0.008756037801504135\r\nStep 1834, loss: 0.007766157388687134\r\nStep 1835, loss: 0.006558957975357771\r\nStep 1836, loss: 0.007619110401719809\r\nStep 1837, loss: 0.007933076471090317\r\nStep 1838, loss: 0.00734346779063344\r\nStep 1839, loss: 0.006797607056796551\r\nStep 1840, loss: 0.007456270512193441\r\nStep 1841, loss: 0.007458324544131756\r\nStep 1842, loss: 0.008603651076555252\r\nStep 1843, loss: 0.008413057774305344\r\nStep 1844, loss: 0.008658475242555141\r\nStep 1845, loss: 0.008845366537570953\r\nStep 1846, loss: 0.008642880246043205\r\nStep 1847, loss: 0.007546432316303253\r\nStep 1848, loss: 0.008352362550795078\r\nStep 1849, loss: 0.0066724251955747604\r\nStep 1850, loss: 0.006749963387846947\r\nStep 1851, loss: 0.008008335717022419\r\nStep 1852, loss: 0.006626685615628958\r\nStep 1853, loss: 0.008494824171066284\r\nStep 1854, loss: 0.0074630845338106155\r\nStep 1855, loss: 0.007691007107496262\r\nStep 1856, loss: 0.005994731094688177\r\nStep 1911, loss: 0.00674647418782115\r\nStep 1912, loss: 0.008334393613040447\r\nStep 1913, loss: 0.00773186469450593\r\nStep 1914, loss: 0.008393315598368645\r\nStep 1915, loss: 0.007578670978546143\r\nStep 1916, loss: 0.007013033144176006\r\nStep 1917, loss: 0.008415376767516136\r\nStep 1918, loss: 0.008439400233328342\r\nStep 1919, loss: 0.00830706488341093\r\nStep 1920, loss: 
0.0058112675324082375\r\nStep 1921, loss: 0.007347228471189737\r\nStep 1922, loss: 0.0078077660873532295\r\nStep 1923, loss: 0.007486520800739527\r\nStep 1924, loss: 0.008071406744420528\r\nStep 1925, loss: 0.008356905542314053\r\nStep 1926, loss: 0.008094314485788345\r\nStep 1927, loss: 0.0075823478400707245\r\nStep 1928, loss: 0.0072280946187675\r\nStep 1929, loss: 0.006720804609358311\r\nStep 1930, loss: 0.00876590982079506\r\nStep 1931, loss: 0.009002909064292908\r\nStep 1932, loss: 0.008650872856378555\r\nStep 1933, loss: 0.008123574778437614\r\nStep 1934, loss: 0.008499816991388798\r\nStep 1935, loss: 0.00849961768835783\r\nStep 1936, loss: 0.007840009406208992\r\nStep 1937, loss: 0.010489179752767086\r\nStep 1857, loss: 0.008315281011164188\r\nStep 1858, loss: 0.006639814004302025\r\nStep 1938, loss: 0.007857481949031353\r\nStep 1939, loss: 0.008718672208487988\r\nStep 1940, loss: 0.007200952153652906\r\nStep 1941, loss: 0.007538785692304373\r\nStep 1942, loss: 0.006881448905915022\r\nStep 1943, loss: 0.00803673267364502\r\nStep 1944, loss: 0.006382018327713013\r\nStep 1945, loss: 0.007706017699092627\r\nStep 1946, loss: 0.008537651039659977\r\nStep 1947, loss: 0.007243881933391094\r\nStep 1948, loss: 0.007188852410763502\r\nStep 1949, loss: 0.0070786611177027225\r\nStep 1950, loss: 0.006413279101252556\r\nStep 1951, loss: 0.00776925403624773\r\nStep 1952, loss: 0.00772223761305213\r\nStep 1953, loss: 0.008617812767624855\r\nStep 1954, loss: 0.007393602281808853\r\nStep 1955, loss: 0.009221965447068214\r\nStep 1956, loss: 0.007050357758998871\r\nStep 1957, loss: 0.007142876274883747\r\nStep 1958, loss: 0.007543647196143866\r\nStep 1959, loss: 0.008612647652626038\r\nStep 1960, loss: 0.008025288581848145\r\nStep 1961, loss: 0.0069267600774765015\r\nStep 1962, loss: 0.006813779007643461\r\nStep 1963, loss: 0.0068598221987485886\r\nStep 1859, loss: 0.007950042374432087\r\nStep 1860, loss: 0.007355806417763233\r\nStep 1861, loss: 0.007078413851559162\r\nStep 1862, loss: 0.008967281319200993\r\nStep 1863, loss: 0.007560933008790016\r\nStep 1864, loss: 0.006890495773404837\r\nStep 1865, loss: 0.007506961468607187\r\nStep 1866, loss: 0.008352764882147312\r\nStep 1867, loss: 0.007868250831961632\r\nStep 1868, loss: 0.008403055369853973\r\nStep 1869, loss: 0.008983764797449112\r\nStep 1870, loss: 0.00930242519825697\r\nStep 1871, loss: 0.008386524394154549\r\nStep 1872, loss: 0.00835971999913454\r\nStep 1873, loss: 0.00684413593262434\r\nStep 1874, loss: 0.0075506907887756824\r\nStep 1875, loss: 0.006223222240805626\r\nStep 1876, loss: 0.007349558174610138\r\nStep 1877, loss: 0.007400050293654203\r\nStep 1878, loss: 0.005997596774250269\r\nStep 1879, loss: 0.008886735886335373\r\nStep 1880, loss: 0.008929897099733353\r\nStep 1881, loss: 0.007534346543252468\r\nStep 1882, loss: 0.007405397482216358\r\nStep 1883, loss: 0.008107206784188747\r\nStep 1884, loss: 0.007917147129774094\r\nStep 1964, loss: 0.007313370238989592\r\nStep 1965, loss: 0.006166697479784489\r\nStep 1885, loss: 0.007139333989471197\r\nStep 1886, loss: 0.007105696480721235\r\nStep 1887, loss: 0.007592954207211733\r\nStep 1888, loss: 0.009718130342662334\r\nStep 1889, loss: 0.007253255695104599\r\nStep 1890, loss: 0.007049209903925657\r\nStep 1891, loss: 0.008213263005018234\r\nStep 1892, loss: 0.009460396133363247\r\nStep 1893, loss: 0.0073393480852246284\r\nStep 1894, loss: 0.00922479759901762\r\nStep 1895, loss: 0.009373532608151436\r\nStep 1896, loss: 0.006594881881028414\r\nStep 1897, loss: 0.006855552550405264\r\nStep 
1898, loss: 0.007560144644230604\r\nStep 1899, loss: 0.007514113560318947\r\nStep 1900, loss: 0.008570155128836632\r\nStep 1901, loss: 0.008249602280557156\r\nStep 1902, loss: 0.007385370787233114\r\nStep 1903, loss: 0.008846917189657688\r\nStep 1904, loss: 0.006955833174288273\r\nStep 1905, loss: 0.007100945338606834\r\nStep 1906, loss: 0.007264026906341314\r\nStep 1907, loss: 0.008413623087108135\r\nStep 1908, loss: 0.007373266853392124\r\nStep 1909, loss: 0.006482332944869995\r\nStep 1910, loss: 0.008299317210912704\r\nStep 1966, loss: 0.007302760612219572\r\nStep 1911, loss: 0.00674647418782115\r\nStep 1912, loss: 0.008334393613040447\r\nStep 1913, loss: 0.00773186469450593\r\nStep 1914, loss: 0.008393315598368645\r\nStep 1915, loss: 0.007578670978546143\r\nStep 1916, loss: 0.007013033144176006\r\nStep 1917, loss: 0.008415376767516136\r\nStep 1918, loss: 0.008439400233328342\r\nStep 1919, loss: 0.00830706488341093\r\nStep 1920, loss: 0.0058112675324082375\r\nStep 1921, loss: 0.007347228471189737\r\nStep 1922, loss: 0.0078077660873532295\r\nStep 1923, loss: 0.007486520800739527\r\nStep 1924, loss: 0.008071406744420528\r\nStep 1925, loss: 0.008356905542314053\r\nStep 1926, loss: 0.008094314485788345\r\n",,terminal_output +1266,2656105,"TERMINAL",0,0,"Step 1927, loss: 0.0075823478400707245\r\nStep 1928, loss: 0.0072280946187675\r\nStep 1929, loss: 0.006720804609358311\r\nStep 1930, loss: 0.00876590982079506\r\nStep 1931, loss: 0.009002909064292908\r\nStep 1932, loss: 0.008650872856378555\r\nStep 1933, loss: 0.008123574778437614\r\nStep 1934, loss: 0.008499816991388798\r\nStep 1935, loss: 0.00849961768835783\r\nStep 1936, loss: 0.007840009406208992\r\nStep 1937, loss: 0.010489179752767086\r\nStep 1752, loss: 0.007587113883346319\r\nStep 1753, loss: 0.008723670616745949\r\nStep 1754, loss: 0.009975233115255833\r\nStep 1755, loss: 0.007778756320476532\r\nStep 1756, loss: 0.007096508052200079\r\nStep 1757, loss: 0.007639630697667599\r\nStep 1758, loss: 0.007171492092311382\r\nStep 1759, loss: 0.007648677099496126\r\nStep 1760, loss: 0.007515218574553728\r\nStep 1761, loss: 0.007691469043493271\r\nStep 1762, loss: 0.007892817258834839\r\nStep 1763, loss: 0.008219906128942966\r\nStep 1764, loss: 0.008599373511970043\r\nStep 1765, loss: 0.0069121867418289185\r\nStep 1766, loss: 0.008407720364630222\r\nStep 1767, loss: 0.008464629761874676\r\nStep 1768, loss: 0.00861058197915554\r\nStep 1769, loss: 0.00802692398428917\r\nStep 1770, loss: 0.00904843769967556\r\nStep 1771, loss: 0.007677456829696894\r\nStep 1772, loss: 0.008123278617858887\r\nStep 1773, loss: 0.007608410436660051\r\nStep 1774, loss: 0.008423535153269768\r\nStep 1775, loss: 0.007830347865819931\r\nStep 1776, loss: 0.008523247204720974\r\nStep 1777, loss: 0.00800322275608778\r\nStep 1938, loss: 0.007857481949031353\r\nStep 1939, loss: 0.008718672208487988\r\nStep 1940, loss: 0.007200952153652906\r\nStep 1941, loss: 0.007538785692304373\r\nStep 1942, loss: 0.006881448905915022\r\nStep 1943, loss: 0.00803673267364502\r\nStep 1944, loss: 0.006382018327713013\r\nStep 1945, loss: 0.007706017699092627\r\nStep 1946, loss: 0.008537651039659977\r\nStep 1947, loss: 0.007243881933391094\r\nStep 1948, loss: 0.007188852410763502\r\nStep 1949, loss: 0.0070786611177027225\r\nStep 1950, loss: 0.006413279101252556\r\nStep 1951, loss: 0.00776925403624773\r\nStep 1952, loss: 0.00772223761305213\r\nStep 1953, loss: 0.008617812767624855\r\nStep 1954, loss: 0.007393602281808853\r\nStep 1955, loss: 0.009221965447068214\r\nStep 1956, loss: 
0.007050357758998871\r\nStep 1957, loss: 0.007142876274883747\r\nStep 1958, loss: 0.007543647196143866\r\nStep 1959, loss: 0.008612647652626038\r\nStep 1960, loss: 0.008025288581848145\r\nStep 1961, loss: 0.0069267600774765015\r\nStep 1962, loss: 0.006813779007643461\r\nStep 1963, loss: 0.0068598221987485886\r\nStep 1778, loss: 0.0074024987407028675\r\nStep 1779, loss: 0.007988659664988518\r\nStep 1780, loss: 0.008393983356654644\r\nStep 1781, loss: 0.00781427975744009\r\nStep 1782, loss: 0.007399311289191246\r\nStep 1783, loss: 0.007245731074362993\r\nStep 1784, loss: 0.006771579850465059\r\nStep 1785, loss: 0.007609602529555559\r\nStep 1786, loss: 0.008843266405165195\r\nStep 1787, loss: 0.008325469680130482\r\nStep 1788, loss: 0.008557874709367752\r\nStep 1789, loss: 0.008673692122101784\r\nStep 1790, loss: 0.008132481016218662\r\nStep 1791, loss: 0.007657380308955908\r\nStep 1792, loss: 0.008321262896060944\r\nStep 1793, loss: 0.006629099138081074\r\nStep 1794, loss: 0.0077498299069702625\r\nStep 1795, loss: 0.010428356938064098\r\nStep 1796, loss: 0.008140066638588905\r\nStep 1797, loss: 0.008285797201097012\r\nStep 1798, loss: 0.007590371649712324\r\nStep 1799, loss: 0.009107510559260845\r\nStep 1800, loss: 0.008384736254811287\r\nStep 1801, loss: 0.009379260241985321\r\nStep 1802, loss: 0.008292770944535732\r\nStep 1803, loss: 0.0074447146616876125\r\nStep 1964, loss: 0.007313370238989592\r\nStep 1965, loss: 0.006166697479784489\r\nStep 1804, loss: 0.0068242489360272884\r\nStep 1805, loss: 0.006491672247648239\r\nStep 1806, loss: 0.007680016104131937\r\nStep 1807, loss: 0.007407679222524166\r\nStep 1808, loss: 0.0068879080936312675\r\nStep 1809, loss: 0.00631563039496541\r\nStep 1810, loss: 0.007973327301442623\r\nStep 1811, loss: 0.0069587049074471\r\nStep 1812, loss: 0.007649719249457121\r\nStep 1813, loss: 0.007414981722831726\r\nStep 1814, loss: 0.00741694588214159\r\nStep 1815, loss: 0.007430074270814657\r\nStep 1816, loss: 0.010052385739982128\r\nStep 1817, loss: 0.008906478993594646\r\nStep 1818, loss: 0.008217480033636093\r\nStep 1819, loss: 0.00826902687549591\r\nStep 1820, loss: 0.008634312078356743\r\nStep 1821, loss: 0.00724594434723258\r\nStep 1822, loss: 0.009606032632291317\r\nStep 1823, loss: 0.006943860556930304\r\nStep 1824, loss: 0.008477195166051388\r\nStep 1825, loss: 0.008026336319744587\r\nStep 1826, loss: 0.007603360805660486\r\nStep 1827, loss: 0.007352927699685097\r\nStep 1828, loss: 0.008573167957365513\r\nStep 1829, loss: 0.007389913313090801\r\nStep 1830, loss: 0.00866385456174612\r\nStep 1966, loss: 0.007302760612219572\r\nStep 1831, loss: 0.007425487972795963\r\nStep 1832, loss: 0.008493616245687008\r\nStep 1833, loss: 0.008756037801504135\r\nStep 1834, loss: 0.007766157388687134\r\nStep 1835, loss: 0.006558957975357771\r\nStep 1836, loss: 0.007619110401719809\r\nStep 1837, loss: 0.007933076471090317\r\nStep 1838, loss: 0.00734346779063344\r\nStep 1839, loss: 0.006797607056796551\r\nStep 1840, loss: 0.007456270512193441\r\nStep 1841, loss: 0.007458324544131756\r\nStep 1842, loss: 0.008603651076555252\r\nStep 1843, loss: 0.008413057774305344\r\nStep 1844, loss: 0.008658475242555141\r\nStep 1845, loss: 0.008845366537570953\r\nStep 1846, loss: 0.008642880246043205\r\nStep 1847, loss: 0.007546432316303253\r\nStep 1848, loss: 0.008352362550795078\r\nStep 1849, loss: 0.0066724251955747604\r\nStep 1850, loss: 0.006749963387846947\r\nStep 1851, loss: 0.008008335717022419\r\nStep 1852, loss: 0.006626685615628958\r\nStep 1853, loss: 0.008494824171066284\r\nStep 
1854, loss: 0.0074630845338106155\r\nStep 1855, loss: 0.007691007107496262\r\nStep 1856, loss: 0.005994731094688177\r\nStep 1857, loss: 0.008315281011164188\r\nStep 1858, loss: 0.006639814004302025\r\nStep 1966, loss: 0.007302760612219572\r\nStep 1966, loss: 0.007302760612219572\r\nStep 1859, loss: 0.007950042374432087\r\nStep 1860, loss: 0.007355806417763233\r\nStep 1861, loss: 0.007078413851559162\r\nStep 1862, loss: 0.008967281319200993\r\nStep 1863, loss: 0.007560933008790016\r\nStep 1864, loss: 0.006890495773404837\r\nStep 1865, loss: 0.007506961468607187\r\nStep 1866, loss: 0.008352764882147312\r\nStep 1867, loss: 0.007868250831961632\r\nStep 1868, loss: 0.008403055369853973\r\nStep 1869, loss: 0.008983764797449112\r\nStep 1870, loss: 0.00930242519825697\r\nStep 1871, loss: 0.008386524394154549\r\nStep 1872, loss: 0.00835971999913454\r\nStep 1873, loss: 0.00684413593262434\r\nStep 1874, loss: 0.0075506907887756824\r\nStep 1875, loss: 0.006223222240805626\r\nStep 1876, loss: 0.007349558174610138\r\nStep 1877, loss: 0.007400050293654203\r\nStep 1878, loss: 0.005997596774250269\r\nStep 1879, loss: 0.008886735886335373\r\nStep 1880, loss: 0.008929897099733353\r\nStep 1881, loss: 0.007534346543252468\r\nStep 1882, loss: 0.007405397482216358\r\nStep 1883, loss: 0.008107206784188747\r\nStep 1884, loss: 0.007917147129774094\r\nStep 1885, loss: 0.007139333989471197\r\nStep 1886, loss: 0.007105696480721235\r\nStep 1887, loss: 0.007592954207211733\r\nStep 1888, loss: 0.009718130342662334\r\nStep 1889, loss: 0.007253255695104599\r\nStep 1890, loss: 0.007049209903925657\r\nStep 1891, loss: 0.008213263005018234\r\nStep 1892, loss: 0.009460396133363247\r\nStep 1893, loss: 0.0073393480852246284\r\nStep 1894, loss: 0.00922479759901762\r\nStep 1895, loss: 0.009373532608151436\r\nStep 1896, loss: 0.006594881881028414\r\nStep 1897, loss: 0.006855552550405264\r\nStep 1898, loss: 0.007560144644230604\r\nStep 1899, loss: 0.007514113560318947\r\nStep 1900, loss: 0.008570155128836632\r\nStep 1901, loss: 0.008249602280557156\r\nStep 1902, loss: 0.007385370787233114\r\nStep 1903, loss: 0.008846917189657688\r\nStep 1904, loss: 0.006955833174288273\r\nStep 1905, loss: 0.007100945338606834\r\nStep 1906, loss: 0.007264026906341314\r\nStep 1907, loss: 0.008413623087108135\r\nStep 1908, loss: 0.007373266853392124\r\nStep 1909, loss: 0.006482332944869995\r\nStep 1910, loss: 0.008299317210912704\r\nStep 1911, loss: 0.00674647418782115\r\nStep 1912, loss: 0.008334393613040447\r\nStep 1913, loss: 0.00773186469450593\r\nStep 1914, loss: 0.008393315598368645\r\nStep 1915, loss: 0.007578670978546143\r\nStep 1916, loss: 0.007013033144176006\r\nStep 1917, loss: 0.008415376767516136\r\nStep 1918, loss: 0.008439400233328342\r\nStep 1919, loss: 0.00830706488341093\r\nStep 1920, loss: 0.0058112675324082375\r\nStep 1921, loss: 0.007347228471189737\r\nStep 1922, loss: 0.0078077660873532295\r\nStep 1923, loss: 0.007486520800739527\r\nStep 1924, loss: 0.008071406744420528\r\nStep 1925, loss: 0.008356905542314053\r\nStep 1926, loss: 0.008094314485788345\r\nStep 1927, loss: 0.0075823478400707245\r\nStep 1928, loss: 0.0072280946187675\r\nStep 1929, loss: 0.006720804609358311\r\nStep 1930, loss: 0.00876590982079506\r\nStep 1931, loss: 0.009002909064292908\r\nStep 1932, loss: 0.008650872856378555\r\nStep 1933, loss: 0.008123574778437614\r\nStep 1934, loss: 0.008499816991388798\r\nStep 1935, loss: 0.00849961768835783\r\nStep 1936, loss: 0.007840009406208992\r\nStep 1937, loss: 0.010489179752767086\r\nStep 1938, loss: 
0.007857481949031353\r\nStep 1939, loss: 0.008718672208487988\r\nStep 1940, loss: 0.007200952153652906\r\nStep 1941, loss: 0.007538785692304373\r\nStep 1942, loss: 0.006881448905915022\r\nStep 1943, loss: 0.00803673267364502\r\nStep 1944, loss: 0.006382018327713013\r\nStep 1945, loss: 0.007706017699092627\r\nStep 1946, loss: 0.008537651039659977\r\nStep 1947, loss: 0.007243881933391094\r\nStep 1948, loss: 0.007188852410763502\r\nStep 1949, loss: 0.0070786611177027225\r\nStep 1950, loss: 0.006413279101252556\r\nStep 1951, loss: 0.00776925403624773\r\nStep 1952, loss: 0.00772223761305213\r\nStep 1953, loss: 0.008617812767624855\r\nStep 1954, loss: 0.007393602281808853\r\nStep 1955, loss: 0.009221965447068214\r\nStep 1956, loss: 0.007050357758998871\r\nStep 1957, loss: 0.007142876274883747\r\nStep 1958, loss: 0.007543647196143866\r\nStep 1959, loss: 0.008612647652626038\r\nStep 1960, loss: 0.008025288581848145\r\nStep 1961, loss: 0.0069267600774765015\r\nStep 1962, loss: 0.006813779007643461\r\nStep 1963, loss: 0.0068598221987485886\r\nStep 1964, loss: 0.007313370238989592\r\nStep 1965, loss: 0.006166697479784489\r\nStep 1966, loss: 0.007302760612219572\r\nStep 1752, loss: 0.007587113883346319\r\nStep 1753, loss: 0.008723670616745949\r\nStep 1754, loss: 0.009975233115255833\r\nStep 1755, loss: 0.007778756320476532\r\nStep 1756, loss: 0.007096508052200079\r\nStep 1757, loss: 0.007639630697667599\r\nStep 1758, loss: 0.007171492092311382\r\nStep 1759, loss: 0.007648677099496126\r\nStep 1760, loss: 0.007515218574553728\r\nStep 1761, loss: 0.007691469043493271\r\nStep 1762, loss: 0.007892817258834839\r\nStep 1763, loss: 0.008219906128942966\r\nStep 1764, loss: 0.008599373511970043\r\nStep 1765, loss: 0.0069121867418289185\r\nStep 1766, loss: 0.008407720364630222\r\nStep 1767, loss: 0.008464629761874676\r\nStep 1768, loss: 0.00861058197915554\r\nStep 1769, loss: 0.00802692398428917\r\nStep 1770, loss: 0.00904843769967556\r\nStep 1771, loss: 0.007677456829696894\r\nStep 1772, loss: 0.008123278617858887\r\nStep 1773, loss: 0.007608410436660051\r\nStep 1774, loss: 0.008423535153269768\r\nStep 1775, loss: 0.007830347865819931\r\nStep 1776, loss: 0.008523247204720974\r\nStep 1777, loss: 0.00800322275608778\r\nStep 1778, loss: 0.0074024987407028675\r\nStep 1779, loss: 0.007988659664988518\r\nStep 1780, loss: 0.008393983356654644\r\nStep 1781, loss: 0.00781427975744009\r\nStep 1782, loss: 0.007399311289191246\r\nStep 1783, loss: 0.007245731074362993\r\nStep 1784, loss: 0.006771579850465059\r\nStep 1785, loss: 0.007609602529555559\r\nStep 1786, loss: 0.008843266405165195\r\nStep 1787, loss: 0.008325469680130482\r\nStep 1788, loss: 0.008557874709367752\r\nStep 1789, loss: 0.008673692122101784\r\nStep 1790, loss: 0.008132481016218662\r\nStep 1791, loss: 0.007657380308955908\r\nStep 1792, loss: 0.008321262896060944\r\nStep 1793, loss: 0.006629099138081074\r\nStep 1794, loss: 0.0077498299069702625\r\nStep 1795, loss: 0.010428356938064098\r\nStep 1796, loss: 0.008140066638588905\r\nStep 1797, loss: 0.008285797201097012\r\nStep 1798, loss: 0.007590371649712324\r\nStep 1799, loss: 0.009107510559260845\r\nStep 1800, loss: 0.008384736254811287\r\nStep 1801, loss: 0.009379260241985321\r\nStep 1802, loss: 0.008292770944535732\r\nStep 1803, loss: 0.0074447146616876125\r\nStep 1804, loss: 0.0068242489360272884\r\nStep 1805, loss: 0.006491672247648239\r\nStep 1806, loss: 0.007680016104131937\r\nStep 1807, loss: 0.007407679222524166\r\nStep 1808, loss: 0.0068879080936312675\r\nStep 1809, loss: 0.00631563039496541\r\nStep 
1810, loss: 0.007973327301442623\r\nStep 1811, loss: 0.0069587049074471\r\nStep 1812, loss: 0.007649719249457121\r\nStep 1813, loss: 0.007414981722831726\r\nStep 1814, loss: 0.00741694588214159\r\nStep 1815, loss: 0.007430074270814657\r\nStep 1816, loss: 0.010052385739982128\r\nStep 1817, loss: 0.008906478993594646\r\nStep 1818, loss: 0.008217480033636093\r\nStep 1819, loss: 0.00826902687549591\r\nStep 1820, loss: 0.008634312078356743\r\nStep 1821, loss: 0.00724594434723258\r\nStep 1822, loss: 0.009606032632291317\r\nStep 1823, loss: 0.006943860556930304\r\nStep 1824, loss: 0.008477195166051388\r\nStep 1825, loss: 0.008026336319744587\r\nStep 1826, loss: 0.007603360805660486\r\nStep 1827, loss: 0.007352927699685097\r\nStep 1828, loss: 0.008573167957365513\r\nStep 1829, loss: 0.007389913313090801\r\nStep 1830, loss: 0.00866385456174612\r\nStep 1831, loss: 0.007425487972795963\r\nStep 1832, loss: 0.008493616245687008\r\nStep 1833, loss: 0.008756037801504135\r\nStep 1834, loss: 0.007766157388687134\r\nStep 1835, loss: 0.006558957975357771\r\nStep 1836, loss: 0.007619110401719809\r\nStep 1837, loss: 0.007933076471090317\r\nStep 1838, loss: 0.00734346779063344\r\nStep 1839, loss: 0.006797607056796551\r\nStep 1840, loss: 0.007456270512193441\r\nStep 1841, loss: 0.007458324544131756\r\nStep 1842, loss: 0.008603651076555252\r\nStep 1843, loss: 0.008413057774305344\r\nStep 1844, loss: 0.008658475242555141\r\nStep 1845, loss: 0.008845366537570953\r\nStep 1846, loss: 0.008642880246043205\r\nStep 1847, loss: 0.007546432316303253\r\nStep 1848, loss: 0.008352362550795078\r\nStep 1849, loss: 0.0066724251955747604\r\nStep 1850, loss: 0.006749963387846947\r\nStep 1851, loss: 0.008008335717022419\r\nStep 1852, loss: 0.006626685615628958\r\nStep 1853, loss: 0.008494824171066284\r\nStep 1854, loss: 0.0074630845338106155\r\nStep 1855, loss: 0.007691007107496262\r\nStep 1856, loss: 0.005994731094688177\r\nStep 1857, loss: 0.008315281011164188\r\nStep 1858, loss: 0.006639814004302025\r\nStep 1859, loss: 0.007950042374432087\r\nStep 1860, loss: 0.007355806417763233\r\nStep 1861, loss: 0.007078413851559162\r\nStep 1862, loss: 0.008967281319200993\r\nStep 1863, loss: 0.007560933008790016\r\nStep 1864, loss: 0.006890495773404837\r\nStep 1865, loss: 0.007506961468607187\r\nStep 1866, loss: 0.008352764882147312\r\nStep 1867, loss: 0.007868250831961632\r\nStep 1868, loss: 0.008403055369853973\r\nStep 1869, loss: 0.008983764797449112\r\nStep 1870, loss: 0.00930242519825697\r\nStep 1871, loss: 0.008386524394154549\r\nStep 1872, loss: 0.00835971999913454\r\nStep 1873, loss: 0.00684413593262434\r\nStep 1874, loss: 0.0075506907887756824\r\nStep 1875, loss: 0.006223222240805626\r\nStep 1876, loss: 0.007349558174610138\r\nStep 1877, loss: 0.007400050293654203\r\nStep 1878, loss: 0.005997596774250269\r\nStep 1879, loss: 0.008886735886335373\r\nStep 1880, loss: 0.008929897099733353\r\nStep 1881, loss: 0.007534346543252468\r\nStep 1882, loss: 0.007405397482216358\r\nStep 1883, loss: 0.008107206784188747\r\nStep 1884, loss: 0.007917147129774094\r\nStep 1885, loss: 0.007139333989471197\r\nStep 1886, loss: 0.007105696480721235\r\nStep 1887, loss: 0.007592954207211733\r\nStep 1888, loss: 0.009718130342662334\r\nStep 1889, loss: 0.007253255695104599\r\nStep 1890, loss: 0.007049209903925657\r\nStep 1891, loss: 0.008213263005018234\r\nStep 1892, loss: 0.009460396133363247\r\nStep 1893, loss: 0.0073393480852246284\r\nStep 1894, loss: 0.00922479759901762\r\nStep 1895, loss: 0.009373532608151436\r\nStep 1896, loss: 
0.006594881881028414\r\nStep 1897, loss: 0.006855552550405264\r\nStep 1898, loss: 0.007560144644230604\r\nStep 1899, loss: 0.007514113560318947\r\nStep 1900, loss: 0.008570155128836632\r\nStep 1901, loss: 0.008249602280557156\r\nStep 1902, loss: 0.007385370787233114\r\nStep 1903, loss: 0.008846917189657688\r\nStep 1904, loss: 0.006955833174288273\r\nStep 1905, loss: 0.007100945338606834\r\nStep 1906, loss: 0.007264026906341314\r\nStep 1907, loss: 0.008413623087108135\r\nStep 1908, loss: 0.007373266853392124\r\nStep 1909, loss: 0.006482332944869995\r\nStep 1910, loss: 0.008299317210912704\r\nStep 1911, loss: 0.00674647418782115\r\nStep 1912, loss: 0.008334393613040447\r\nStep 1913, loss: 0.00773186469450593\r\nStep 1914, loss: 0.008393315598368645\r\nStep 1915, loss: 0.007578670978546143\r\nStep 1916, loss: 0.007013033144176006\r\nStep 1917, loss: 0.008415376767516136\r\nStep 1918, loss: 0.008439400233328342\r\nStep 1919, loss: 0.00830706488341093\r\nStep 1920, loss: 0.0058112675324082375\r\nStep 1921, loss: 0.007347228471189737\r\nStep 1922, loss: 0.0078077660873532295\r\nStep 1923, loss: 0.007486520800739527\r\nStep 1924, loss: 0.008071406744420528\r\nStep 1925, loss: 0.008356905542314053\r\nStep 1926, loss: 0.008094314485788345\r\nStep 1927, loss: 0.0075823478400707245\r\nStep 1928, loss: 0.0072280946187675\r\nStep 1929, loss: 0.006720804609358311\r\nStep 1930, loss: 0.00876590982079506\r\nStep 1931, loss: 0.009002909064292908\r\nStep 1932, loss: 0.008650872856378555\r\nStep 1933, loss: 0.008123574778437614\r\nStep 1934, loss: 0.008499816991388798\r\nStep 1935, loss: 0.00849961768835783\r\nStep 1936, loss: 0.007840009406208992\r\nStep 1937, loss: 0.010489179752767086\r\nStep 1938, loss: 0.007857481949031353\r\nStep 1939, loss: 0.008718672208487988\r\nStep 1940, loss: 0.007200952153652906\r\nStep 1941, loss: 0.007538785692304373\r\nStep 1942, loss: 0.006881448905915022\r\nStep 1943, loss: 0.00803673267364502\r\nStep 1944, loss: 0.006382018327713013\r\nStep 1945, loss: 0.007706017699092627\r\nStep 1946, loss: 0.008537651039659977\r\nStep 1947, loss: 0.007243881933391094\r\nStep 1948, loss: 0.007188852410763502\r\nStep 1949, loss: 0.0070786611177027225\r\nStep 1950, loss: 0.006413279101252556\r\nStep 1951, loss: 0.00776925403624773\r\nStep 1952, loss: 0.00772223761305213\r\nStep 1953, loss: 0.008617812767624855\r\nStep 1954, loss: 0.007393602281808853\r\nStep 1955, loss: 0.009221965447068214\r\nStep 1956, loss: 0.007050357758998871\r\nStep 1957, loss: 0.007142876274883747\r\nStep 1958, loss: 0.007543647196143866\r\nStep 1959, loss: 0.008612647652626038\r\nStep 1960, loss: 0.008025288581848145\r\nStep 1961, loss: 0.0069267600774765015\r\nStep 1962, loss: 0.006813779007643461\r\nStep 1963, loss: 0.0068598221987485886\r\nStep 1964, loss: 0.007313370238989592\r\nStep 1965, loss: 0.006166697479784489\r\nStep 1966, loss: 0.007302760612219572\r\nStep 1752, loss: 0.007587113883346319\r\nStep 1753, loss: 0.008723670616745949\r\nStep 1754, loss: 0.009975233115255833\r\nStep 1755, loss: 0.007778756320476532\r\nStep 1756, loss: 0.007096508052200079\r\nStep 1757, loss: 0.007639630697667599\r\nStep 1758, loss: 0.007171492092311382\r\nStep 1759, loss: 0.007648677099496126\r\nStep 1760, loss: 0.007515218574553728\r\nStep 1761, loss: 0.007691469043493271\r\nStep 1762, loss: 0.007892817258834839\r\nStep 1763, loss: 0.008219906128942966\r\nStep 1764, loss: 0.008599373511970043\r\nStep 1765, loss: 0.0069121867418289185\r\nStep 1766, loss: 0.008407720364630222\r\nStep 1767, loss: 0.008464629761874676\r\nStep 
1768, loss: 0.00861058197915554\r\n",,terminal_output
+1306,2695946,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",0,0,"",plaintext,tab
+1307,2695948,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",84,0,"",plaintext,selection_mouse
+1309,2696968,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",83,1,"",plaintext,content
+1310,2697079,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",82,1,"",plaintext,content
+1311,2697508,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",82,0,"4",plaintext,content
+1312,2697509,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",83,0,"",plaintext,selection_keyboard
+1313,2697570,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",83,0,"8",plaintext,content
+1314,2697571,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",84,0,"",plaintext,selection_keyboard
+1316,2697964,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",83,0,"",plaintext,selection_command
+1317,2698753,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",1169,0,"",plaintext,selection_mouse
+1318,2698776,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",1168,0,"",plaintext,selection_command
+1320,2699489,"TERMINAL",0,0,"watch",,terminal_focus
+1329,2708054,"TERMINAL",0,0,"srun",,terminal_focus
+1331,2709106,"TERMINAL",0,0,"s",,terminal_output
+1333,2709503,"TERMINAL",0,0,"[?25ly[?25h",,terminal_output
+1334,2709565,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output
+1335,2709769,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output
+1337,2710606,"TERMINAL",0,0,"[?25l-[?25h",,terminal_output
+1338,2710848,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output
+1339,2710910,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output
+1340,2710972,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output
+1341,2711364,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output
+1343,2711532,"TERMINAL",0,0,"[?25le[?25h",,terminal_output
+1344,2711589,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output
+1346,2712542,"TERMINAL",0,0,"\r\n[?2004l\rsending incremental file list\r\n",,terminal_output
+1349,2714649,"TERMINAL",0,0,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch\r\n",,terminal_output
+1350,2714792,"TERMINAL",0,0,"\r\nsent 13,979 bytes received 98 bytes 4,022.00 bytes/sec\r\ntotal size is 85,286,172 speedup is 6,058.55\r\n]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir[?2004h[tum_cte0515@hkn0733 logs_mihir]$ ",,terminal_output
+1351,2715472,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output
+1352,2715535,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output
+1354,2715683,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output
+1355,2715821,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output
+1356,2715901,"TERMINAL",0,0,"[?25le[?25h[?25lr[?25h",,terminal_output
+1357,2716057,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0733:~/Projects/jafar_jobs[?2004h[tum_cte0515@hkn0733 jafar_jobs]$ ",,terminal_output
+1359,2717280,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output
+1360,2717344,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output
+1361,2717457,"TERMINAL",0,0,"[?25la[?25h",,terminal_output
+1362,2717655,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output
+1364,2717834,"TERMINAL",0,0,"[?25lc[?25h[?25lh[?25h",,terminal_output
+1365,2717983,"TERMINAL",0,0,"[?25l [?25h",,terminal_output
+1368,2720583,"TERMINAL",0,0,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",,terminal_output
+1370,2720869,"TERMINAL",0,0,"\rscripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch\r\n[?2004l\rSubmitted batch job 3317115\r\n]0;tum_cte0515@hkn0733:~/Projects/jafar_jobs[?2004h[tum_cte0515@hkn0733 jafar_jobs]$ ",,terminal_output
+1374,2724781,"TERMINAL",0,0,"watch",,terminal_focus
+1389,2739144,"TERMINAL",0,0,"srun",,terminal_focus
+1390,2739626,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output
+1392,2739688,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output
+1393,2739911,"TERMINAL",0,0,"[?25la[?25h",,terminal_output
+1394,2740018,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output
+1395,2740081,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output
+1396,2740195,"TERMINAL",0,0,"[?25le[?25h",,terminal_output
+1397,2740257,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output
+1398,2740382,"TERMINAL",0,0,"[?25l [?25h",,terminal_output
+1400,2741022,"TERMINAL",0,0,"3317098",,terminal_output
+1409,2749203,"TERMINAL",0,0,"3317098\r\n[?2004l\r]0;tum_cte0515@hkn0733:~/Projects/jafar_jobs[?2004h[tum_cte0515@hkn0733 jafar_jobs]$ ",,terminal_output
+1436,2776668,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=4\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH 
--output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_80M\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,tab
+1442,2782734,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",0,0,"",shellscript,tab
+1445,2784858,"TERMINAL",0,0,"srun",,terminal_focus
+1452,2790113,"TERMINAL",0,0,"Step 1967, loss: 0.007669681217521429\r\nStep 1968, loss: 0.007405542302876711\r\nStep 1969, loss: 0.008523445576429367\r\nStep 1970, loss: 0.006585740949958563\r\nStep 1971, loss: 0.007967020384967327\r\nStep 1972, loss: 0.007430871948599815\r\nStep 1973, loss: 0.007974990643560886\r\nStep 1974, loss: 0.006640189327299595\r\nStep 1975, loss: 0.006986657623201609\r\nStep 1976, loss: 0.006887562572956085\r\nStep 1977, loss: 0.006880535744130611\r\nStep 1978, loss: 0.00864893477410078\r\nStep 1979, loss: 0.007309079170227051\r\nStep 1980, loss: 0.006461316253989935\r\nStep 1981, loss: 0.007693135645240545\r\nStep 1982, loss: 0.008472313173115253\r\nStep 1983, loss: 0.008727866224944592\r\nStep 1984, loss: 0.008116434328258038\r\nStep 1985, loss: 0.008145858533680439\r\nStep 1986, loss: 0.009711185470223427\r\nStep 1987, loss: 0.008214308880269527\r\nStep 1988, loss: 0.00827767327427864\r\nStep 1989, loss: 0.006834420375525951\r\nStep 1990, loss: 0.008719997480511665\r\nStep 1991, loss: 0.007744617760181427\r\nStep 1992, loss: 0.006096074357628822\r\nStep 1993, loss: 0.0058983927592635155\r\nStep 1994, loss: 0.009106900542974472\r\nStep 1995, loss: 0.007174712140113115\r\nStep 1996, loss: 0.008139751851558685\r\nStep 1997, loss: 0.00785200297832489\r\nStep 1998, loss: 0.00958508811891079\r\nStep 1999, loss: 0.00785050354897976\r\nStep 2000, loss: 0.007317959330976009\r\nStep 2001, loss: 
0.007080672308802605\r\nStep 2002, loss: 0.007991728372871876\r\nStep 2003, loss: 0.007821274921298027\r\nStep 2004, loss: 0.008489646017551422\r\nStep 2005, loss: 0.0075281537137925625\r\nStep 2006, loss: 0.006736667361110449\r\nStep 2007, loss: 0.007574349641799927\r\nStep 2008, loss: 0.007746479008346796\r\nStep 2009, loss: 0.007272219751030207\r\nStep 2010, loss: 0.007452439051121473\r\nStep 2011, loss: 0.007787893991917372\r\nStep 2012, loss: 0.007093604654073715\r\nStep 2013, loss: 0.007687790784984827\r\nStep 2014, loss: 0.007467956282198429\r\nStep 2015, loss: 0.007624607998877764\r\nStep 2016, loss: 0.007285227533429861\r\nStep 2017, loss: 0.007332447450608015\r\nStep 2018, loss: 0.006121398881077766\r\nStep 2019, loss: 0.006808311678469181\r\nStep 2020, loss: 0.006265093106776476\r\nStep 2021, loss: 0.007726202253252268\r\nStep 2022, loss: 0.006667358800768852\r\nStep 2023, loss: 0.008020839653909206\r\nStep 2024, loss: 0.008737929165363312\r\nStep 2025, loss: 0.00960688479244709\r\nStep 2026, loss: 0.010799295268952847\r\nStep 2027, loss: 0.007353636436164379\r\nStep 2028, loss: 0.007275961339473724\r\nStep 2029, loss: 0.008034869097173214\r\nStep 2030, loss: 0.006865362636744976\r\nStep 2031, loss: 0.007538987323641777\r\nStep 2032, loss: 0.008919459767639637\r\nStep 2033, loss: 0.0060740369372069836\r\nStep 2034, loss: 0.0073450044728815556\r\nStep 2035, loss: 0.008335300721228123\r\nStep 2036, loss: 0.006473970133811235\r\nStep 2037, loss: 0.006627377588301897\r\nStep 2038, loss: 0.006782549899071455\r\nStep 2039, loss: 0.007833597250282764\r\nStep 2040, loss: 0.007012289483100176\r\nStep 2041, loss: 0.008080333471298218\r\nStep 2042, loss: 0.007360116112977266\r\nStep 2043, loss: 0.00804917886853218\r\nStep 2044, loss: 0.008462784811854362\r\nStep 2045, loss: 0.006939365062862635\r\nStep 2046, loss: 0.008770853281021118\r\nStep 2047, loss: 0.008424682542681694\r\nStep 2048, loss: 0.008898302912712097\r\nStep 2049, loss: 0.008553815074265003\r\nStep 2050, loss: 0.009473413228988647\r\nStep 2051, loss: 0.009936349466443062\r\nStep 2052, loss: 0.00961415283381939\r\nStep 2053, loss: 0.007590943947434425\r\nStep 2054, loss: 0.007170454133301973\r\nStep 2055, loss: 0.00796786230057478\r\nStep 2056, loss: 0.007342166732996702\r\nStep 2057, loss: 0.008216858841478825\r\nStep 2058, loss: 0.008538139052689075\r\nStep 2059, loss: 0.007265908177942038\r\nStep 2060, loss: 0.006920312996953726\r\nStep 2061, loss: 0.0074624670669436455\r\nStep 2062, loss: 0.007335641421377659\r\nStep 2063, loss: 0.006655374076217413\r\nStep 2064, loss: 0.0069557367824018\r\nStep 2065, loss: 0.007986187934875488\r\nStep 2066, loss: 0.008852875791490078\r\nStep 2067, loss: 0.007818068377673626\r\nStep 2068, loss: 0.00853408221155405\r\nStep 2069, loss: 0.008041526190936565\r\nStep 2070, loss: 0.007517475634813309\r\nStep 2071, loss: 0.007183353416621685\r\nStep 2072, loss: 0.007504364009946585\r\nStep 2073, loss: 0.008248522877693176\r\nStep 2074, loss: 0.009289816953241825\r\nStep 2075, loss: 0.008523017168045044\r\nStep 2076, loss: 0.009130793623626232\r\nStep 2077, loss: 0.009776411578059196\r\nStep 2078, loss: 0.007912610657513142\r\nStep 2079, loss: 0.008615592494606972\r\nStep 2080, loss: 0.007488418370485306\r\nStep 2081, loss: 0.008683528751134872\r\nStep 2082, loss: 0.005960147362202406\r\nStep 2083, loss: 0.008028778247535229\r\nStep 2084, loss: 0.006622010376304388\r\nStep 2085, loss: 0.006667743436992168\r\nStep 2086, loss: 0.007373551372438669\r\nStep 2087, loss: 0.007238994352519512\r\nStep 
2088, loss: 0.007485955487936735\r\nStep 2089, loss: 0.006716012954711914\r\nStep 2090, loss: 0.006583035923540592\r\nStep 2091, loss: 0.008305313065648079\r\nStep 2092, loss: 0.006870834156870842\r\nStep 2093, loss: 0.007218984421342611\r\nStep 2094, loss: 0.007324583828449249\r\nStep 2095, loss: 0.007857448421418667\r\nStep 2096, loss: 0.0076767695136368275\r\nStep 2097, loss: 0.008625109679996967\r\nStep 2098, loss: 0.007954219356179237\r\nStep 2099, loss: 0.008406437002122402\r\nStep 2100, loss: 0.0064675272442400455\r\nStep 2101, loss: 0.007884005084633827\r\nStep 2102, loss: 0.006588571704924107\r\nStep 2103, loss: 0.009607868269085884\r\nStep 2104, loss: 0.00802893191576004\r\nStep 2105, loss: 0.008265656419098377\r\nStep 2106, loss: 0.00791378878057003\r\nStep 2107, loss: 0.0068814679980278015\r\nStep 2108, loss: 0.006877466104924679\r\nStep 2109, loss: 0.006834062747657299\r\nStep 2110, loss: 0.00647063460201025\r\nStep 2111, loss: 0.007915258407592773\r\nStep 2112, loss: 0.005920158699154854\r\nStep 2113, loss: 0.007688958663493395\r\nStep 2114, loss: 0.0072950636968016624\r\nStep 2115, loss: 0.008072075434029102\r\nStep 2116, loss: 0.007164862006902695\r\nStep 2117, loss: 0.007854027673602104\r\nStep 2118, loss: 0.00925062783062458\r\nStep 2119, loss: 0.006641793996095657\r\nStep 2120, loss: 0.007246651686728001\r\nStep 2121, loss: 0.009015616960823536\r\nStep 2122, loss: 0.009036382660269737\r\nStep 2123, loss: 0.007517040707170963\r\nStep 2124, loss: 0.00956637505441904\r\nStep 2125, loss: 0.00796852819621563\r\nStep 2126, loss: 0.00775392260402441\r\nStep 2127, loss: 0.008484766818583012\r\nStep 2128, loss: 0.007591076660901308\r\nStep 2129, loss: 0.007292400114238262\r\nStep 2130, loss: 0.008852246217429638\r\nStep 2131, loss: 0.007560945115983486\r\nStep 2132, loss: 0.007763219065964222\r\nStep 2133, loss: 0.0072074104100465775\r\nStep 2134, loss: 0.008280422538518906\r\nStep 2135, loss: 0.007888955064117908\r\nStep 2136, loss: 0.008342885412275791\r\nStep 2137, loss: 0.005806874018162489\r\nStep 2138, loss: 0.008248311467468739\r\nStep 2139, loss: 0.006253413390368223\r\nStep 2140, loss: 0.007602163590490818\r\nStep 2141, loss: 0.007624937687069178\r\nStep 2142, loss: 0.0073804245330393314\r\nStep 2143, loss: 0.006457494106143713\r\nStep 2144, loss: 0.010475477203726768\r\nStep 2145, loss: 0.008034901693463326\r\nStep 2146, loss: 0.007801996544003487\r\nStep 2147, loss: 0.00928365159779787\r\nStep 2148, loss: 0.007460521999746561\r\nStep 2149, loss: 0.009220042265951633\r\nStep 2150, loss: 0.008657963946461678\r\nStep 2151, loss: 0.00888269953429699\r\nStep 2152, loss: 0.007898455485701561\r\nStep 2153, loss: 0.006836357526481152\r\nStep 2154, loss: 0.009004993364214897\r\nStep 2155, loss: 0.008021147921681404\r\nStep 2156, loss: 0.006917229853570461\r\nStep 2157, loss: 0.007290771696716547\r\nStep 2158, loss: 0.007864288985729218\r\nStep 2159, loss: 0.008170363493263721\r\nStep 2160, loss: 0.007793820928782225\r\nStep 2161, loss: 0.007919901050627232\r\nStep 2162, loss: 0.0073922560550272465\r\nStep 2163, loss: 0.006999340374022722\r\nStep 2164, loss: 0.008284047245979309\r\nStep 2165, loss: 0.006230353377759457\r\nStep 2166, loss: 0.007382285315543413\r\nStep 2167, loss: 0.007332618813961744\r\nStep 2168, loss: 0.005218885838985443\r\nStep 2169, loss: 0.007856744341552258\r\nStep 2170, loss: 0.008586950600147247\r\nStep 2171, loss: 0.0070792874321341515\r\nStep 2172, loss: 0.007221385836601257\r\nStep 2173, loss: 0.007499690167605877\r\nStep 2174, loss: 
0.008019034750759602\r\nStep 2175, loss: 0.007694543339312077\r\nStep 2176, loss: 0.009478623978793621\r\nStep 2177, loss: 0.008031236939132214\r\nStep 2178, loss: 0.006757157389074564\r\nStep 2179, loss: 0.007930068299174309\r\nStep 2180, loss: 0.009267700836062431\r\nStep 2181, loss: 0.006485333200544119\r\nStep 2097, loss: 
0.008625109679996967\r\nStep 2098, loss: 0.007954219356179237\r\nStep 2099, loss: 0.008406437002122402\r\nStep 2179, loss: 0.007930068299174309\r\nStep 2180, loss: 0.009267700836062431\r\nStep 2074, loss: 0.009289816953241825\r\nStep 2075, loss: 0.008523017168045044\r\nStep 2076, loss: 0.009130793623626232\r\nStep 2077, loss: 0.009776411578059196\r\nStep 2078, loss: 0.007912610657513142\r\nStep 2079, loss: 0.008615592494606972\r\nStep 2080, loss: 0.007488418370485306\r\nStep 2081, loss: 0.008683528751134872\r\nStep 2082, loss: 0.005960147362202406\r\nStep 2083, loss: 0.008028778247535229\r\nStep 2084, loss: 0.006622010376304388\r\nStep 2085, loss: 0.006667743436992168\r\nStep 2086, loss: 0.007373551372438669\r\nStep 2087, loss: 0.007238994352519512\r\nStep 2088, loss: 0.007485955487936735\r\nStep 2089, loss: 0.006716012954711914\r\nStep 2090, loss: 0.006583035923540592\r\nStep 2091, loss: 0.008305313065648079\r\nStep 2092, loss: 0.006870834156870842\r\nStep 2093, loss: 0.007218984421342611\r\nStep 2094, loss: 0.007324583828449249\r\nStep 2095, loss: 0.007857448421418667\r\nStep 2096, loss: 0.0076767695136368275\r\nStep 2097, loss: 0.008625109679996967\r\nStep 2098, loss: 0.007954219356179237\r\nStep 2099, loss: 0.008406437002122402\r\nStep 2100, loss: 0.0064675272442400455\r\nStep 2101, loss: 0.007884005084633827\r\nStep 2102, loss: 0.006588571704924107\r\nStep 2103, loss: 0.009607868269085884\r\nStep 2104, loss: 0.00802893191576004\r\nStep 2105, loss: 0.008265656419098377\r\nStep 2106, loss: 0.00791378878057003\r\nStep 2107, loss: 0.0068814679980278015\r\nStep 2108, loss: 0.006877466104924679\r\nStep 2109, loss: 0.006834062747657299\r\nStep 2110, loss: 0.00647063460201025\r\nStep 2111, loss: 0.007915258407592773\r\nStep 2112, loss: 0.005920158699154854\r\nStep 2113, loss: 0.007688958663493395\r\nStep 2114, loss: 0.0072950636968016624\r\nStep 2115, loss: 0.008072075434029102\r\nStep 2116, loss: 0.007164862006902695\r\nStep 2117, loss: 0.007854027673602104\r\nStep 2118, loss: 0.00925062783062458\r\nStep 2119, loss: 0.006641793996095657\r\nStep 2120, loss: 0.007246651686728001\r\nStep 2121, loss: 0.009015616960823536\r\nStep 2122, loss: 0.009036382660269737\r\nStep 2123, loss: 0.007517040707170963\r\nStep 2124, loss: 0.00956637505441904\r\nStep 2125, loss: 0.00796852819621563\r\nStep 2126, loss: 0.00775392260402441\r\nStep 1967, loss: 0.007669681217521429\r\nStep 1968, loss: 0.007405542302876711\r\nStep 1969, loss: 0.008523445576429367\r\nStep 1970, loss: 0.006585740949958563\r\nStep 1971, loss: 0.007967020384967327\r\nStep 1972, loss: 0.007430871948599815\r\nStep 1973, loss: 0.007974990643560886\r\nStep 1974, loss: 0.006640189327299595\r\nStep 1975, loss: 0.006986657623201609\r\nStep 1976, loss: 0.006887562572956085\r\nStep 1977, loss: 0.006880535744130611\r\nStep 1978, loss: 0.00864893477410078\r\nStep 1979, loss: 0.007309079170227051\r\nStep 1980, loss: 0.006461316253989935\r\nStep 1981, loss: 0.007693135645240545\r\nStep 1982, loss: 0.008472313173115253\r\nStep 1983, loss: 0.008727866224944592\r\nStep 1984, loss: 0.008116434328258038\r\nStep 1985, loss: 0.008145858533680439\r\nStep 1986, loss: 0.009711185470223427\r\nStep 1987, loss: 0.008214308880269527\r\nStep 1988, loss: 0.00827767327427864\r\nStep 1989, loss: 0.006834420375525951\r\nStep 1990, loss: 0.008719997480511665\r\nStep 1991, loss: 0.007744617760181427\r\nStep 1992, loss: 0.006096074357628822\r\nStep 2100, loss: 0.0064675272442400455\r\nStep 2101, loss: 0.007884005084633827\r\nStep 2102, loss: 0.006588571704924107\r\nStep 
2103, loss: 0.009607868269085884\r\nStep 2104, loss: 0.00802893191576004\r\nStep 2105, loss: 0.008265656419098377\r\nStep 2106, loss: 0.00791378878057003\r\nStep 2107, loss: 0.0068814679980278015\r\nStep 2108, loss: 0.006877466104924679\r\nStep 2109, loss: 0.006834062747657299\r\nStep 2110, loss: 0.00647063460201025\r\nStep 2111, loss: 0.007915258407592773\r\nStep 2112, loss: 0.005920158699154854\r\nStep 2113, loss: 0.007688958663493395\r\nStep 2114, loss: 0.0072950636968016624\r\nStep 2115, loss: 0.008072075434029102\r\nStep 2116, loss: 0.007164862006902695\r\nStep 2117, loss: 0.007854027673602104\r\nStep 2118, loss: 0.00925062783062458\r\nStep 2119, loss: 0.006641793996095657\r\nStep 2120, loss: 0.007246651686728001\r\nStep 2121, loss: 0.009015616960823536\r\nStep 2122, loss: 0.009036382660269737\r\nStep 2123, loss: 0.007517040707170963\r\nStep 2124, loss: 0.00956637505441904\r\nStep 2125, loss: 0.00796852819621563\r\nStep 2126, loss: 0.00775392260402441\r\nStep 2127, loss: 0.008484766818583012\r\nStep 2128, loss: 0.007591076660901308\r\nStep 2129, loss: 0.007292400114238262\r\nStep 2130, loss: 0.008852246217429638\r\nStep 2131, loss: 0.007560945115983486\r\nStep 2132, loss: 0.007763219065964222\r\nStep 2133, loss: 0.0072074104100465775\r\nStep 2134, loss: 0.008280422538518906\r\nStep 2135, loss: 0.007888955064117908\r\nStep 2136, loss: 0.008342885412275791\r\nStep 2137, loss: 0.005806874018162489\r\nStep 2138, loss: 0.008248311467468739\r\nStep 2139, loss: 0.006253413390368223\r\nStep 2140, loss: 0.007602163590490818\r\nStep 2141, loss: 0.007624937687069178\r\nStep 2142, loss: 0.0073804245330393314\r\nStep 2143, loss: 0.006457494106143713\r\nStep 2144, loss: 0.010475477203726768\r\nStep 2145, loss: 0.008034901693463326\r\nStep 2146, loss: 0.007801996544003487\r\nStep 2147, loss: 0.00928365159779787\r\nStep 2148, loss: 0.007460521999746561\r\nStep 2149, loss: 0.009220042265951633\r\nStep 2150, loss: 0.008657963946461678\r\nStep 2151, loss: 0.00888269953429699\r\nStep 2152, loss: 0.007898455485701561\r\nStep 1993, loss: 0.0058983927592635155\r\nStep 1994, loss: 0.009106900542974472\r\nStep 1995, loss: 0.007174712140113115\r\nStep 1996, loss: 0.008139751851558685\r\nStep 1997, loss: 0.00785200297832489\r\nStep 1998, loss: 0.00958508811891079\r\nStep 1999, loss: 0.00785050354897976\r\nStep 2000, loss: 0.007317959330976009\r\nStep 2001, loss: 0.007080672308802605\r\nStep 2002, loss: 0.007991728372871876\r\nStep 2003, loss: 0.007821274921298027\r\nStep 2004, loss: 0.008489646017551422\r\nStep 2005, loss: 0.0075281537137925625\r\nStep 2006, loss: 0.006736667361110449\r\nStep 2007, loss: 0.007574349641799927\r\nStep 2008, loss: 0.007746479008346796\r\nStep 2009, loss: 0.007272219751030207\r\nStep 2010, loss: 0.007452439051121473\r\nStep 2011, loss: 0.007787893991917372\r\nStep 2012, loss: 0.007093604654073715\r\nStep 2013, loss: 0.007687790784984827\r\nStep 2014, loss: 0.007467956282198429\r\nStep 2015, loss: 0.007624607998877764\r\nStep 2016, loss: 0.007285227533429861\r\nStep 2017, loss: 0.007332447450608015\r\nStep 2018, loss: 0.006121398881077766\r\nStep 2127, loss: 0.008484766818583012\r\nStep 2128, loss: 0.007591076660901308\r\nStep 2129, loss: 0.007292400114238262\r\nStep 2130, loss: 0.008852246217429638\r\nStep 2131, loss: 0.007560945115983486\r\nStep 2132, loss: 0.007763219065964222\r\nStep 2133, loss: 0.0072074104100465775\r\nStep 2134, loss: 0.008280422538518906\r\nStep 2135, loss: 0.007888955064117908\r\nStep 2136, loss: 0.008342885412275791\r\nStep 2137, loss: 
0.005806874018162489\r\nStep 2138, loss: 0.008248311467468739\r\nStep 2139, loss: 0.006253413390368223\r\nStep 2140, loss: 0.007602163590490818\r\nStep 2141, loss: 0.007624937687069178\r\nStep 2142, loss: 0.0073804245330393314\r\nStep 2143, loss: 0.006457494106143713\r\nStep 2144, loss: 0.010475477203726768\r\nStep 2145, loss: 0.008034901693463326\r\nStep 2146, loss: 0.007801996544003487\r\nStep 2147, loss: 0.00928365159779787\r\nStep 2148, loss: 0.007460521999746561\r\nStep 2149, loss: 0.009220042265951633\r\nStep 2150, loss: 0.008657963946461678\r\nStep 2151, loss: 0.00888269953429699\r\nStep 2152, loss: 0.007898455485701561\r\nStep 2153, loss: 0.006836357526481152\r\nStep 2154, loss: 0.009004993364214897\r\nStep 2155, loss: 0.008021147921681404\r\nStep 2156, loss: 0.006917229853570461\r\nStep 2157, loss: 0.007290771696716547\r\nStep 2158, loss: 0.007864288985729218\r\nStep 2159, loss: 0.008170363493263721\r\nStep 2160, loss: 0.007793820928782225\r\nStep 2161, loss: 0.007919901050627232\r\nStep 2162, loss: 0.0073922560550272465\r\nStep 2163, loss: 0.006999340374022722\r\nStep 2164, loss: 0.008284047245979309\r\nStep 2165, loss: 0.006230353377759457\r\nStep 2166, loss: 0.007382285315543413\r\nStep 2167, loss: 0.007332618813961744\r\nStep 2168, loss: 0.005218885838985443\r\nStep 2169, loss: 0.007856744341552258\r\nStep 2170, loss: 0.008586950600147247\r\nStep 2171, loss: 0.0070792874321341515\r\nStep 2172, loss: 0.007221385836601257\r\nStep 2173, loss: 0.007499690167605877\r\nStep 2174, loss: 0.008019034750759602\r\nStep 2175, loss: 0.007694543339312077\r\nStep 2176, loss: 0.009478623978793621\r\nStep 2177, loss: 0.008031236939132214\r\nStep 2178, loss: 0.006757157389074564\r\nStep 2019, loss: 0.006808311678469181\r\nStep 2020, loss: 0.006265093106776476\r\nStep 2021, loss: 0.007726202253252268\r\nStep 2022, loss: 0.006667358800768852\r\nStep 2023, loss: 0.008020839653909206\r\nStep 2024, loss: 0.008737929165363312\r\nStep 2025, loss: 0.00960688479244709\r\nStep 2026, loss: 0.010799295268952847\r\nStep 2027, loss: 0.007353636436164379\r\nStep 2028, loss: 0.007275961339473724\r\nStep 2029, loss: 0.008034869097173214\r\nStep 2030, loss: 0.006865362636744976\r\nStep 2031, loss: 0.007538987323641777\r\nStep 2032, loss: 0.008919459767639637\r\nStep 2033, loss: 0.0060740369372069836\r\nStep 2034, loss: 0.0073450044728815556\r\nStep 2035, loss: 0.008335300721228123\r\nStep 2036, loss: 0.006473970133811235\r\nStep 2037, loss: 0.006627377588301897\r\nStep 2038, loss: 0.006782549899071455\r\nStep 2039, loss: 0.007833597250282764\r\nStep 2040, loss: 0.007012289483100176\r\nStep 2041, loss: 0.008080333471298218\r\nStep 2042, loss: 0.007360116112977266\r\nStep 2043, loss: 0.00804917886853218\r\nStep 2044, loss: 0.008462784811854362\r\nStep 2153, loss: 0.006836357526481152\r\nStep 2154, loss: 0.009004993364214897\r\nStep 2155, loss: 0.008021147921681404\r\nStep 2156, loss: 0.006917229853570461\r\nStep 2157, loss: 0.007290771696716547\r\nStep 2158, loss: 0.007864288985729218\r\nStep 2159, loss: 0.008170363493263721\r\nStep 2160, loss: 0.007793820928782225\r\nStep 2161, loss: 0.007919901050627232\r\nStep 2162, loss: 0.0073922560550272465\r\nStep 2163, loss: 0.006999340374022722\r\nStep 2164, loss: 0.008284047245979309\r\nStep 2165, loss: 0.006230353377759457\r\nStep 2166, loss: 0.007382285315543413\r\nStep 2167, loss: 0.007332618813961744\r\nStep 2168, loss: 0.005218885838985443\r\nStep 2169, loss: 0.007856744341552258\r\nStep 2170, loss: 0.008586950600147247\r\nStep 2171, loss: 
0.0070792874321341515\r\nStep 2172, loss: 0.007221385836601257\r\nStep 2173, loss: 0.007499690167605877\r\nStep 2174, loss: 0.008019034750759602\r\nStep 2175, loss: 0.007694543339312077\r\nStep 2176, loss: 0.009478623978793621\r\nStep 2177, loss: 0.008031236939132214\r\nStep 2178, loss: 0.006757157389074564\r\nStep 2179, loss: 0.007930068299174309\r\nStep 2180, loss: 0.009267700836062431\r\nStep 2045, loss: 0.006939365062862635\r\nStep 2046, loss: 0.008770853281021118\r\nStep 2047, loss: 0.008424682542681694\r\nStep 2048, loss: 0.008898302912712097\r\nStep 2049, loss: 0.008553815074265003\r\nStep 2050, loss: 0.009473413228988647\r\nStep 2051, loss: 0.009936349466443062\r\nStep 2052, loss: 0.00961415283381939\r\nStep 2053, loss: 0.007590943947434425\r\nStep 2054, loss: 0.007170454133301973\r\nStep 2055, loss: 0.00796786230057478\r\nStep 2056, loss: 0.007342166732996702\r\nStep 2057, loss: 0.008216858841478825\r\nStep 2058, loss: 0.008538139052689075\r\nStep 2059, loss: 0.007265908177942038\r\nStep 2060, loss: 0.006920312996953726\r\nStep 2061, loss: 0.0074624670669436455\r\nStep 2062, loss: 0.007335641421377659\r\nStep 2063, loss: 0.006655374076217413\r\nStep 2064, loss: 0.0069557367824018\r\nStep 2065, loss: 0.007986187934875488\r\nStep 2066, loss: 0.008852875791490078\r\nStep 2067, loss: 0.007818068377673626\r\nStep 2068, loss: 0.00853408221155405\r\nStep 2069, loss: 0.008041526190936565\r\nStep 2070, loss: 0.007517475634813309\r\nStep 2071, loss: 0.007183353416621685\r\nStep 2179, loss: 0.007930068299174309\r\nStep 2180, loss: 0.009267700836062431\r\nStep 2074, loss: 0.009289816953241825\r\nStep 2075, loss: 0.008523017168045044\r\nStep 2076, loss: 0.009130793623626232\r\nStep 2077, loss: 0.009776411578059196\r\nStep 2078, loss: 0.007912610657513142\r\nStep 2079, loss: 0.008615592494606972\r\nStep 2080, loss: 0.007488418370485306\r\nStep 2081, loss: 0.008683528751134872\r\nStep 2082, loss: 0.005960147362202406\r\nStep 2083, loss: 0.008028778247535229\r\nStep 2084, loss: 0.006622010376304388\r\nStep 2085, loss: 0.006667743436992168\r\nStep 2086, loss: 0.007373551372438669\r\nStep 2087, loss: 0.007238994352519512\r\nStep 2088, loss: 0.007485955487936735\r\nStep 2089, loss: 0.006716012954711914\r\nStep 2090, loss: 0.006583035923540592\r\nStep 2091, loss: 0.008305313065648079\r\nStep 2092, loss: 0.006870834156870842\r\nStep 2093, loss: 0.007218984421342611\r\nStep 2094, loss: 0.007324583828449249\r\nStep 2095, loss: 0.007857448421418667\r\nStep 2096, loss: 0.0076767695136368275\r\nStep 2097, loss: 0.008625109679996967\r\nStep 2098, loss: 0.007954219356179237\r\nStep 2099, loss: 0.008406437002122402\r\nStep 2072, loss: 0.007504364009946585\r\nStep 2073, loss: 0.008248522877693176\r\nStep 1967, loss: 0.007669681217521429\r\nStep 1968, loss: 0.007405542302876711\r\nStep 1969, loss: 0.008523445576429367\r\nStep 1970, loss: 0.006585740949958563\r\nStep 1971, loss: 0.007967020384967327\r\nStep 1972, loss: 0.007430871948599815\r\nStep 1973, loss: 0.007974990643560886\r\nStep 1974, loss: 0.006640189327299595\r\nStep 1975, loss: 0.006986657623201609\r\nStep 1976, loss: 0.006887562572956085\r\nStep 1977, loss: 0.006880535744130611\r\nStep 1978, loss: 0.00864893477410078\r\nStep 1979, loss: 0.007309079170227051\r\nStep 1980, loss: 0.006461316253989935\r\nStep 1981, loss: 0.007693135645240545\r\nStep 1982, loss: 0.008472313173115253\r\nStep 1983, loss: 0.008727866224944592\r\nStep 1984, loss: 0.008116434328258038\r\nStep 1985, loss: 0.008145858533680439\r\nStep 1986, loss: 0.009711185470223427\r\nStep 
1987, loss: 0.008214308880269527\r\nStep 1988, loss: 0.00827767327427864\r\nStep 1989, loss: 0.006834420375525951\r\nStep 1990, loss: 0.008719997480511665\r\nStep 1991, loss: 0.007744617760181427\r\nStep 1992, loss: 0.006096074357628822\r\nStep 2100, loss: 0.0064675272442400455\r\nStep 2101, loss: 0.007884005084633827\r\nStep 2102, loss: 0.006588571704924107\r\nStep 2103, loss: 0.009607868269085884\r\nStep 2104, loss: 0.00802893191576004\r\nStep 2105, loss: 0.008265656419098377\r\nStep 2106, loss: 0.00791378878057003\r\nStep 2107, loss: 0.0068814679980278015\r\nStep 2108, loss: 0.006877466104924679\r\nStep 2109, loss: 0.006834062747657299\r\nStep 2110, loss: 0.00647063460201025\r\nStep 2111, loss: 0.007915258407592773\r\nStep 2112, loss: 0.005920158699154854\r\nStep 2113, loss: 0.007688958663493395\r\nStep 2114, loss: 0.0072950636968016624\r\nStep 2115, loss: 0.008072075434029102\r\nStep 2116, loss: 0.007164862006902695\r\nStep 2117, loss: 0.007854027673602104\r\nStep 2118, loss: 0.00925062783062458\r\nStep 2119, loss: 0.006641793996095657\r\nStep 2120, loss: 0.007246651686728001\r\nStep 2121, loss: 0.009015616960823536\r\nStep 2122, loss: 0.009036382660269737\r\nStep 2123, loss: 0.007517040707170963\r\nStep 2124, loss: 0.00956637505441904\r\nStep 2125, loss: 0.00796852819621563\r\nStep 2126, loss: 0.00775392260402441\r\nStep 2181, loss: 0.006485333200544119\r\nStep 1993, loss: 0.0058983927592635155\r\nStep 1994, loss: 0.009106900542974472\r\nStep 1995, loss: 0.007174712140113115\r\nStep 1996, loss: 0.008139751851558685\r\nStep 1997, loss: 0.00785200297832489\r\nStep 1998, loss: 0.00958508811891079\r\nStep 1999, loss: 0.00785050354897976\r\nStep 2000, loss: 0.007317959330976009\r\nStep 2001, loss: 0.007080672308802605\r\nStep 2002, loss: 0.007991728372871876\r\nStep 2003, loss: 0.007821274921298027\r\nStep 2004, loss: 0.008489646017551422\r\nStep 2005, loss: 0.0075281537137925625\r\nStep 2006, loss: 0.006736667361110449\r\nStep 2007, loss: 0.007574349641799927\r\nStep 2008, loss: 0.007746479008346796\r\nStep 2009, loss: 0.007272219751030207\r\nStep 2010, loss: 0.007452439051121473\r\nStep 2011, loss: 0.007787893991917372\r\nStep 2012, loss: 0.007093604654073715\r\nStep 2013, loss: 0.007687790784984827\r\nStep 2014, loss: 0.007467956282198429\r\nStep 2015, loss: 0.007624607998877764\r\nStep 2016, loss: 0.007285227533429861\r\nStep 2017, loss: 0.007332447450608015\r\nStep 2018, loss: 0.006121398881077766\r\nStep 2127, loss: 0.008484766818583012\r\nStep 2128, loss: 0.007591076660901308\r\nStep 2129, loss: 0.007292400114238262\r\nStep 2130, loss: 0.008852246217429638\r\nStep 2131, loss: 0.007560945115983486\r\nStep 2132, loss: 0.007763219065964222\r\nStep 2133, loss: 0.0072074104100465775\r\nStep 2134, loss: 0.008280422538518906\r\nStep 2135, loss: 0.007888955064117908\r\nStep 2136, loss: 0.008342885412275791\r\nStep 2137, loss: 0.005806874018162489\r\nStep 2138, loss: 0.008248311467468739\r\nStep 2139, loss: 0.006253413390368223\r\nStep 2140, loss: 0.007602163590490818\r\nStep 2141, loss: 0.007624937687069178\r\nStep 2142, loss: 0.0073804245330393314\r\nStep 2143, loss: 0.006457494106143713\r\nStep 2144, loss: 0.010475477203726768\r\nStep 2145, loss: 0.008034901693463326\r\nStep 2146, loss: 0.007801996544003487\r\nStep 2147, loss: 0.00928365159779787\r\nStep 2148, loss: 0.007460521999746561\r\nStep 2149, loss: 0.009220042265951633\r\nStep 2150, loss: 0.008657963946461678\r\nStep 2151, loss: 0.00888269953429699\r\nStep 2152, loss: 0.007898455485701561\r\nStep 2074, loss: 
0.009289816953241825\r\nStep 2075, loss: 0.008523017168045044\r\nStep 2076, loss: 0.009130793623626232\r\nStep 2077, loss: 0.009776411578059196\r\nStep 2078, loss: 0.007912610657513142\r\nStep 2079, loss: 0.008615592494606972\r\nStep 2080, loss: 0.007488418370485306\r\nStep 2081, loss: 0.008683528751134872\r\nStep 2082, loss: 0.005960147362202406\r\nStep 2083, loss: 0.008028778247535229\r\nStep 2084, loss: 0.006622010376304388\r\nStep 2085, loss: 0.006667743436992168\r\nStep 2086, loss: 0.007373551372438669\r\nStep 2087, loss: 0.007238994352519512\r\nStep 2088, loss: 0.007485955487936735\r\nStep 2089, loss: 0.006716012954711914\r\nStep 2090, loss: 0.006583035923540592\r\nStep 2091, loss: 0.008305313065648079\r\nStep 2092, loss: 0.006870834156870842\r\nStep 2093, loss: 0.007218984421342611\r\nStep 2094, loss: 0.007324583828449249\r\nStep 2095, loss: 0.007857448421418667\r\nStep 2096, loss: 0.0076767695136368275\r\nStep 2097, loss: 0.008625109679996967\r\nStep 2098, loss: 0.007954219356179237\r\nStep 2099, loss: 0.008406437002122402\r\nStep 2019, loss: 0.006808311678469181\r\nStep 2020, loss: 0.006265093106776476\r\nStep 2021, loss: 0.007726202253252268\r\nStep 2022, loss: 0.006667358800768852\r\nStep 2023, loss: 0.008020839653909206\r\nStep 2024, loss: 0.008737929165363312\r\nStep 2025, loss: 0.00960688479244709\r\nStep 2026, loss: 0.010799295268952847\r\nStep 2027, loss: 0.007353636436164379\r\nStep 2028, loss: 0.007275961339473724\r\nStep 2029, loss: 0.008034869097173214\r\nStep 2030, loss: 0.006865362636744976\r\nStep 2031, loss: 0.007538987323641777\r\nStep 2032, loss: 0.008919459767639637\r\nStep 2033, loss: 0.0060740369372069836\r\nStep 2034, loss: 0.0073450044728815556\r\nStep 2035, loss: 0.008335300721228123\r\nStep 2036, loss: 0.006473970133811235\r\nStep 2037, loss: 0.006627377588301897\r\nStep 2038, loss: 0.006782549899071455\r\nStep 2039, loss: 0.007833597250282764\r\nStep 2040, loss: 0.007012289483100176\r\nStep 2041, loss: 0.008080333471298218\r\nStep 2042, loss: 0.007360116112977266\r\nStep 2043, loss: 0.00804917886853218\r\nStep 2044, loss: 0.008462784811854362\r\nStep 2153, loss: 0.006836357526481152\r\nStep 2154, loss: 0.009004993364214897\r\nStep 2155, loss: 0.008021147921681404\r\nStep 2156, loss: 0.006917229853570461\r\nStep 2157, loss: 0.007290771696716547\r\nStep 2158, loss: 0.007864288985729218\r\nStep 2159, loss: 0.008170363493263721\r\nStep 2160, loss: 0.007793820928782225\r\nStep 2161, loss: 0.007919901050627232\r\nStep 2162, loss: 0.0073922560550272465\r\nStep 2163, loss: 0.006999340374022722\r\nStep 2164, loss: 0.008284047245979309\r\nStep 2165, loss: 0.006230353377759457\r\nStep 2166, loss: 0.007382285315543413\r\nStep 2167, loss: 0.007332618813961744\r\nStep 2168, loss: 0.005218885838985443\r\nStep 2169, loss: 0.007856744341552258\r\nStep 2170, loss: 0.008586950600147247\r\nStep 2171, loss: 0.0070792874321341515\r\nStep 2172, loss: 0.007221385836601257\r\nStep 2173, loss: 0.007499690167605877\r\nStep 2174, loss: 0.008019034750759602\r\nStep 2175, loss: 0.007694543339312077\r\nStep 2176, loss: 0.009478623978793621\r\nStep 2177, loss: 0.008031236939132214\r\nStep 2178, loss: 0.006757157389074564\r\nStep 2100, loss: 0.0064675272442400455\r\nStep 2101, loss: 0.007884005084633827\r\nStep 2102, loss: 0.006588571704924107\r\nStep 2103, loss: 0.009607868269085884\r\nStep 2104, loss: 0.00802893191576004\r\nStep 2105, loss: 0.008265656419098377\r\nStep 2106, loss: 0.00791378878057003\r\nStep 2107, loss: 0.0068814679980278015\r\nStep 2108, loss: 
0.006877466104924679\r\nStep 2109, loss: 0.006834062747657299\r\nStep 2110, loss: 0.00647063460201025\r\nStep 2111, loss: 0.007915258407592773\r\nStep 2112, loss: 0.005920158699154854\r\nStep 2113, loss: 0.007688958663493395\r\nStep 2114, loss: 0.0072950636968016624\r\nStep 2115, loss: 0.008072075434029102\r\nStep 2116, loss: 0.007164862006902695\r\nStep 2117, loss: 0.007854027673602104\r\nStep 2118, loss: 0.00925062783062458\r\nStep 2119, loss: 0.006641793996095657\r\nStep 2120, loss: 0.007246651686728001\r\nStep 2121, loss: 0.009015616960823536\r\nStep 2122, loss: 0.009036382660269737\r\nStep 2123, loss: 0.007517040707170963\r\nStep 2124, loss: 0.00956637505441904\r\nStep 2125, loss: 0.00796852819621563\r\nStep 2126, loss: 0.00775392260402441\r\nStep 2045, loss: 0.006939365062862635\r\nStep 2046, loss: 0.008770853281021118\r\nStep 2047, loss: 0.008424682542681694\r\nStep 2048, loss: 0.008898302912712097\r\nStep 2049, loss: 0.008553815074265003\r\nStep 2050, loss: 0.009473413228988647\r\nStep 2051, loss: 0.009936349466443062\r\nStep 2052, loss: 0.00961415283381939\r\nStep 2053, loss: 0.007590943947434425\r\nStep 2054, loss: 0.007170454133301973\r\nStep 2055, loss: 0.00796786230057478\r\nStep 2056, loss: 0.007342166732996702\r\nStep 2057, loss: 0.008216858841478825\r\nStep 2058, loss: 0.008538139052689075\r\nStep 2059, loss: 0.007265908177942038\r\nStep 2060, loss: 0.006920312996953726\r\nStep 2061, loss: 0.0074624670669436455\r\nStep 2062, loss: 0.007335641421377659\r\nStep 2063, loss: 0.006655374076217413\r\nStep 2064, loss: 0.0069557367824018\r\nStep 2065, loss: 0.007986187934875488\r\nStep 2066, loss: 0.008852875791490078\r\nStep 2067, loss: 0.007818068377673626\r\nStep 2068, loss: 0.00853408221155405\r\nStep 2069, loss: 0.008041526190936565\r\nStep 2070, loss: 0.007517475634813309\r\nStep 2071, loss: 0.007183353416621685\r\nStep 2179, loss: 0.007930068299174309\r\nStep 2180, loss: 0.009267700836062431\r\nStep 2127, loss: 0.008484766818583012\r\nStep 2128, loss: 0.007591076660901308\r\nStep 2129, loss: 0.007292400114238262\r\nStep 2130, loss: 0.008852246217429638\r\nStep 2131, loss: 0.007560945115983486\r\nStep 2132, loss: 0.007763219065964222\r\nStep 2133, loss: 0.0072074104100465775\r\nStep 2134, loss: 0.008280422538518906\r\nStep 2135, loss: 0.007888955064117908\r\nStep 2136, loss: 0.008342885412275791\r\nStep 2137, loss: 0.005806874018162489\r\nStep 2138, loss: 0.008248311467468739\r\nStep 2139, loss: 0.006253413390368223\r\nStep 2140, loss: 0.007602163590490818\r\nStep 2141, loss: 0.007624937687069178\r\nStep 2142, loss: 0.0073804245330393314\r\nStep 2143, loss: 0.006457494106143713\r\nStep 2144, loss: 0.010475477203726768\r\nStep 2145, loss: 0.008034901693463326\r\nStep 2146, loss: 0.007801996544003487\r\nStep 2147, loss: 0.00928365159779787\r\nStep 2148, loss: 0.007460521999746561\r\nStep 2149, loss: 0.009220042265951633\r\nStep 2150, loss: 0.008657963946461678\r\nStep 2151, loss: 0.00888269953429699\r\nStep 2152, loss: 0.007898455485701561\r\nStep 1967, loss: 0.007669681217521429\r\nStep 1968, loss: 0.007405542302876711\r\nStep 1969, loss: 0.008523445576429367\r\nStep 1970, loss: 0.006585740949958563\r\nStep 1971, loss: 0.007967020384967327\r\nStep 1972, loss: 0.007430871948599815\r\nStep 1973, loss: 0.007974990643560886\r\nStep 1974, loss: 0.006640189327299595\r\nStep 1975, loss: 0.006986657623201609\r\nStep 1976, loss: 0.006887562572956085\r\nStep 1977, loss: 0.006880535744130611\r\nStep 1978, loss: 0.00864893477410078\r\nStep 1979, loss: 0.007309079170227051\r\nStep 1980, 
loss: 0.006461316253989935\r\nStep 1981, loss: 0.007693135645240545\r\nStep 1982, loss: 0.008472313173115253\r\nStep 1983, loss: 0.008727866224944592\r\nStep 1984, loss: 0.008116434328258038\r\nStep 1985, loss: 0.008145858533680439\r\nStep 1986, loss: 0.009711185470223427\r\nStep 1987, loss: 0.008214308880269527\r\nStep 1988, loss: 0.00827767327427864\r\nStep 1989, loss: 0.006834420375525951\r\nStep 1990, loss: 0.008719997480511665\r\nStep 1991, loss: 0.007744617760181427\r\nStep 1992, loss: 0.006096074357628822\r\nStep 2072, loss: 0.007504364009946585\r\nStep 2073, loss: 0.008248522877693176\r\nStep 2181, loss: 0.006485333200544119\r\nStep 2153, loss: 0.006836357526481152\r\nStep 2154, loss: 0.009004993364214897\r\nStep 2155, loss: 0.008021147921681404\r\nStep 2156, loss: 0.006917229853570461\r\nStep 2157, loss: 0.007290771696716547\r\nStep 2158, loss: 0.007864288985729218\r\nStep 2159, loss: 0.008170363493263721\r\nStep 2160, loss: 0.007793820928782225\r\nStep 2161, loss: 0.007919901050627232\r\nStep 2162, loss: 0.0073922560550272465\r\nStep 2163, loss: 0.006999340374022722\r\nStep 2164, loss: 0.008284047245979309\r\nStep 2165, loss: 0.006230353377759457\r\nStep 2166, loss: 0.007382285315543413\r\nStep 2167, loss: 0.007332618813961744\r\nStep 2168, loss: 0.005218885838985443\r\nStep 2169, loss: 0.007856744341552258\r\nStep 2170, loss: 0.008586950600147247\r\nStep 2171, loss: 0.0070792874321341515\r\nStep 2172, loss: 0.007221385836601257\r\nStep 2173, loss: 0.007499690167605877\r\nStep 2174, loss: 0.008019034750759602\r\nStep 2175, loss: 0.007694543339312077\r\nStep 2176, loss: 0.009478623978793621\r\nStep 2177, loss: 0.008031236939132214\r\nStep 2178, loss: 0.006757157389074564\r\nStep 2181, loss: 0.006485333200544119\r\nStep 1967, loss: 0.007669681217521429\r\nStep 1968, loss: 0.007405542302876711\r\nStep 1969, loss: 0.008523445576429367\r\nStep 1970, loss: 0.006585740949958563\r\nStep 1971, loss: 0.007967020384967327\r\nStep 1972, loss: 0.007430871948599815\r\nStep 1973, loss: 0.007974990643560886\r\nStep 1974, loss: 0.006640189327299595\r\nStep 1975, loss: 0.006986657623201609\r\nStep 1976, loss: 0.006887562572956085\r\nStep 1977, loss: 0.006880535744130611\r\nStep 1978, loss: 0.00864893477410078\r\nStep 1979, loss: 0.007309079170227051\r\nStep 1980, loss: 0.006461316253989935\r\nStep 1981, loss: 0.007693135645240545\r\nStep 1982, loss: 0.008472313173115253\r\nStep 1983, loss: 0.008727866224944592\r\nStep 1984, loss: 0.008116434328258038\r\nStep 1985, loss: 0.008145858533680439\r\nStep 1986, loss: 0.009711185470223427\r\nStep 1987, loss: 0.008214308880269527\r\nStep 1988, loss: 0.00827767327427864\r\nStep 1989, loss: 0.006834420375525951\r\nStep 1990, loss: 0.008719997480511665\r\nStep 1991, loss: 0.007744617760181427\r\nStep 1992, loss: 0.006096074357628822\r\nStep 2179, loss: 0.007930068299174309\r\nStep 2180, loss: 0.009267700836062431\r\nStep 1993, loss: 0.0058983927592635155\r\nStep 1994, loss: 0.009106900542974472\r\nStep 1995, loss: 0.007174712140113115\r\nStep 1996, loss: 0.008139751851558685\r\nStep 1997, loss: 0.00785200297832489\r\nStep 1998, loss: 0.00958508811891079\r\nStep 1999, loss: 0.00785050354897976\r\nStep 2000, loss: 0.007317959330976009\r\nStep 2001, loss: 0.007080672308802605\r\nStep 2002, loss: 0.007991728372871876\r\nStep 2003, loss: 0.007821274921298027\r\nStep 2004, loss: 0.008489646017551422\r\nStep 2005, loss: 0.0075281537137925625\r\nStep 2006, loss: 0.006736667361110449\r\nStep 2007, loss: 0.007574349641799927\r\nStep 2008, loss: 
0.007746479008346796\r\nStep 2009, loss: 0.007272219751030207\r\nStep 2010, loss: 0.007452439051121473\r\nStep 2011, loss: 0.007787893991917372\r\nStep 2012, loss: 0.007093604654073715\r\nStep 2013, loss: 0.007687790784984827\r\nStep 2014, loss: 0.007467956282198429\r\nStep 2015, loss: 0.007624607998877764\r\nStep 2016, loss: 0.007285227533429861\r\nStep 2017, loss: 0.007332447450608015\r\nStep 2018, loss: 0.006121398881077766\r\nStep 2181, loss: 0.006485333200544119\r\nStep 1993, loss: 0.0058983927592635155\r\nStep 1994, loss: 0.009106900542974472\r\nStep 1995, loss: 0.007174712140113115\r\nStep 1996, loss: 0.008139751851558685\r\nStep 1997, loss: 0.00785200297832489\r\nStep 1998, loss: 0.00958508811891079\r\nStep 1999, loss: 0.00785050354897976\r\nStep 2000, loss: 0.007317959330976009\r\nStep 2001, loss: 0.007080672308802605\r\nStep 2002, loss: 0.007991728372871876\r\nStep 2003, loss: 0.007821274921298027\r\nStep 2004, loss: 0.008489646017551422\r\nStep 2005, loss: 0.0075281537137925625\r\nStep 2006, loss: 0.006736667361110449\r\nStep 2007, loss: 0.007574349641799927\r\nStep 2008, loss: 0.007746479008346796\r\nStep 2009, loss: 0.007272219751030207\r\nStep 2010, loss: 0.007452439051121473\r\nStep 2011, loss: 0.007787893991917372\r\nStep 2012, loss: 0.007093604654073715\r\nStep 2013, loss: 0.007687790784984827\r\nStep 2014, loss: 0.007467956282198429\r\nStep 2015, loss: 0.007624607998877764\r\nStep 2016, loss: 0.007285227533429861\r\nStep 2017, loss: 0.007332447450608015\r\nStep 2018, loss: 0.006121398881077766\r\nStep 2181, loss: 0.006485333200544119\r\nStep 2074, loss: 0.009289816953241825\r\nStep 2075, loss: 0.008523017168045044\r\nStep 2076, loss: 0.009130793623626232\r\nStep 2077, loss: 0.009776411578059196\r\nStep 2078, loss: 0.007912610657513142\r\nStep 2079, loss: 0.008615592494606972\r\nStep 2080, loss: 0.007488418370485306\r\nStep 2081, loss: 0.008683528751134872\r\nStep 2082, loss: 0.005960147362202406\r\nStep 2083, loss: 0.008028778247535229\r\nStep 2084, loss: 0.006622010376304388\r\nStep 2085, loss: 0.006667743436992168\r\nStep 2086, loss: 0.007373551372438669\r\nStep 2087, loss: 0.007238994352519512\r\nStep 2088, loss: 0.007485955487936735\r\nStep 2089, loss: 0.006716012954711914\r\nStep 2090, loss: 0.006583035923540592\r\nStep 2091, loss: 0.008305313065648079\r\nStep 2092, loss: 0.006870834156870842\r\nStep 2093, loss: 0.007218984421342611\r\nStep 2094, loss: 0.007324583828449249\r\nStep 2095, loss: 0.007857448421418667\r\nStep 2096, loss: 0.0076767695136368275\r\nStep 2097, loss: 0.008625109679996967\r\nStep 2098, loss: 0.007954219356179237\r\nStep 2099, loss: 0.008406437002122402\r\nStep 2019, loss: 0.006808311678469181\r\nStep 2020, loss: 0.006265093106776476\r\nStep 2021, loss: 0.007726202253252268\r\nStep 2022, loss: 0.006667358800768852\r\nStep 2023, loss: 0.008020839653909206\r\nStep 2024, loss: 0.008737929165363312\r\nStep 2025, loss: 0.00960688479244709\r\nStep 2026, loss: 0.010799295268952847\r\nStep 2027, loss: 0.007353636436164379\r\nStep 2028, loss: 0.007275961339473724\r\nStep 2029, loss: 0.008034869097173214\r\nStep 2030, loss: 0.006865362636744976\r\nStep 2031, loss: 0.007538987323641777\r\nStep 2032, loss: 0.008919459767639637\r\nStep 2033, loss: 0.0060740369372069836\r\nStep 2034, loss: 0.0073450044728815556\r\nStep 2035, loss: 0.008335300721228123\r\nStep 2036, loss: 0.006473970133811235\r\nStep 2037, loss: 0.006627377588301897\r\nStep 2038, loss: 0.006782549899071455\r\nStep 2039, loss: 0.007833597250282764\r\nStep 2040, loss: 
0.007012289483100176\r\nStep 2041, loss: 0.008080333471298218\r\nStep 2042, loss: 0.007360116112977266\r\nStep 2043, loss: 0.00804917886853218\r\nStep 2044, loss: 0.008462784811854362\r\nStep 2100, loss: 0.0064675272442400455\r\nStep 2101, loss: 0.007884005084633827\r\nStep 2102, loss: 0.006588571704924107\r\nStep 2103, loss: 0.009607868269085884\r\nStep 2104, loss: 0.00802893191576004\r\nStep 2105, loss: 0.008265656419098377\r\nStep 2106, loss: 0.00791378878057003\r\nStep 2107, loss: 0.0068814679980278015\r\nStep 2108, loss: 0.006877466104924679\r\nStep 2109, loss: 0.006834062747657299\r\nStep 2110, loss: 0.00647063460201025\r\nStep 2111, loss: 0.007915258407592773\r\nStep 2112, loss: 0.005920158699154854\r\nStep 2113, loss: 0.007688958663493395\r\nStep 2114, loss: 0.0072950636968016624\r\nStep 2115, loss: 0.008072075434029102\r\nStep 2116, loss: 0.007164862006902695\r\nStep 2117, loss: 0.007854027673602104\r\nStep 2118, loss: 0.00925062783062458\r\nStep 2119, loss: 0.006641793996095657\r\nStep 2120, loss: 0.007246651686728001\r\nStep 2121, loss: 0.009015616960823536\r\nStep 2122, loss: 0.009036382660269737\r\nStep 2123, loss: 0.007517040707170963\r\nStep 2124, loss: 0.00956637505441904\r\nStep 2125, loss: 0.00796852819621563\r\nStep 2126, loss: 0.00775392260402441\r\nStep 2045, loss: 0.006939365062862635\r\nStep 2046, loss: 0.008770853281021118\r\nStep 2047, loss: 0.008424682542681694\r\nStep 2048, loss: 0.008898302912712097\r\nStep 2049, loss: 0.008553815074265003\r\nStep 2050, loss: 0.009473413228988647\r\nStep 2051, loss: 0.009936349466443062\r\nStep 2052, loss: 0.00961415283381939\r\nStep 2053, loss: 0.007590943947434425\r\nStep 2054, loss: 0.007170454133301973\r\nStep 2055, loss: 0.00796786230057478\r\nStep 2056, loss: 0.007342166732996702\r\nStep 2057, loss: 0.008216858841478825\r\nStep 2058, loss: 0.008538139052689075\r\nStep 2059, loss: 0.007265908177942038\r\nStep 2060, loss: 0.006920312996953726\r\nStep 2061, loss: 0.0074624670669436455\r\nStep 2062, loss: 0.007335641421377659\r\nStep 2063, loss: 0.006655374076217413\r\nStep 2064, loss: 0.0069557367824018\r\nStep 2065, loss: 0.007986187934875488\r\nStep 2066, loss: 0.008852875791490078\r\nStep 2067, loss: 0.007818068377673626\r\nStep 2068, loss: 0.00853408221155405\r\nStep 2069, loss: 0.008041526190936565\r\nStep 2070, loss: 0.007517475634813309\r\nStep 2071, loss: 0.007183353416621685\r\nStep 2127, loss: 0.008484766818583012\r\nStep 2128, loss: 0.007591076660901308\r\nStep 2129, loss: 0.007292400114238262\r\nStep 2130, loss: 0.008852246217429638\r\nStep 2131, loss: 0.007560945115983486\r\nStep 2132, loss: 0.007763219065964222\r\nStep 2133, loss: 0.0072074104100465775\r\nStep 2134, loss: 0.008280422538518906\r\nStep 2135, loss: 0.007888955064117908\r\nStep 2136, loss: 0.008342885412275791\r\nStep 2137, loss: 0.005806874018162489\r\nStep 2138, loss: 0.008248311467468739\r\nStep 2139, loss: 0.006253413390368223\r\nStep 2140, loss: 0.007602163590490818\r\nStep 2141, loss: 0.007624937687069178\r\nStep 2142, loss: 0.0073804245330393314\r\nStep 2143, loss: 0.006457494106143713\r\nStep 2144, loss: 0.010475477203726768\r\nStep 2145, loss: 0.008034901693463326\r\nStep 2146, loss: 0.007801996544003487\r\nStep 2147, loss: 0.00928365159779787\r\nStep 2148, loss: 0.007460521999746561\r\nStep 2149, loss: 0.009220042265951633\r\nStep 2150, loss: 0.008657963946461678\r\nStep 2151, loss: 0.00888269953429699\r\nStep 2152, loss: 0.007898455485701561\r\nStep 2072, loss: 0.007504364009946585\r\nStep 2073, loss: 0.008248522877693176\r\nStep 2153, 
loss: 0.006836357526481152\r\nStep 2154, loss: 0.009004993364214897\r\nStep 2155, loss: 0.008021147921681404\r\nStep 2156, loss: 0.006917229853570461\r\nStep 2157, loss: 0.007290771696716547\r\nStep 2158, loss: 0.007864288985729218\r\nStep 2159, loss: 0.008170363493263721\r\nStep 2160, loss: 0.007793820928782225\r\nStep 2161, loss: 0.007919901050627232\r\nStep 2162, loss: 0.0073922560550272465\r\nStep 2163, loss: 0.006999340374022722\r\nStep 2164, loss: 0.008284047245979309\r\nStep 2165, loss: 0.006230353377759457\r\nStep 2166, loss: 0.007382285315543413\r\nStep 2167, loss: 0.007332618813961744\r\nStep 2168, loss: 0.005218885838985443\r\nStep 2169, loss: 0.007856744341552258\r\nStep 2170, loss: 0.008586950600147247\r\nStep 2171, loss: 0.0070792874321341515\r\nStep 2172, loss: 0.007221385836601257\r\nStep 2173, loss: 0.007499690167605877\r\nStep 2174, loss: 0.008019034750759602\r\nStep 2175, loss: 0.007694543339312077\r\nStep 2176, loss: 0.009478623978793621\r\nStep 2177, loss: 0.008031236939132214\r\nStep 2178, loss: 0.006757157389074564\r\nStep 2181, loss: 0.006485333200544119\r\nStep 2179, loss: 0.007930068299174309\r\nStep 2180, loss: 0.009267700836062431\r\nStep 2074, loss: 0.009289816953241825\r\nStep 2075, loss: 0.008523017168045044\r\nStep 2076, loss: 0.009130793623626232\r\nStep 2077, loss: 0.009776411578059196\r\nStep 2078, loss: 0.007912610657513142\r\nStep 2079, loss: 0.008615592494606972\r\nStep 2080, loss: 0.007488418370485306\r\nStep 2081, loss: 0.008683528751134872\r\nStep 2082, loss: 0.005960147362202406\r\nStep 2083, loss: 0.008028778247535229\r\nStep 2084, loss: 0.006622010376304388\r\nStep 2085, loss: 0.006667743436992168\r\nStep 2086, loss: 0.007373551372438669\r\nStep 2087, loss: 0.007238994352519512\r\nStep 2088, loss: 0.007485955487936735\r\nStep 2089, loss: 0.006716012954711914\r\nStep 2090, loss: 0.006583035923540592\r\nStep 2091, loss: 0.008305313065648079\r\nStep 2092, loss: 0.006870834156870842\r\nStep 2093, loss: 0.007218984421342611\r\nStep 2094, loss: 0.007324583828449249\r\nStep 2095, loss: 0.007857448421418667\r\nStep 2096, loss: 0.0076767695136368275\r\nStep 2097, loss: 0.008625109679996967\r\nStep 2098, loss: 0.007954219356179237\r\nStep 2099, loss: 0.008406437002122402\r\nStep 1967, loss: 0.007669681217521429\r\nStep 1968, loss: 0.007405542302876711\r\nStep 1969, loss: 0.008523445576429367\r\nStep 1970, loss: 0.006585740949958563\r\nStep 1971, loss: 0.007967020384967327\r\nStep 1972, loss: 0.007430871948599815\r\nStep 1973, loss: 0.007974990643560886\r\nStep 1974, loss: 0.006640189327299595\r\nStep 1975, loss: 0.006986657623201609\r\nStep 1976, loss: 0.006887562572956085\r\nStep 1977, loss: 0.006880535744130611\r\nStep 1978, loss: 0.00864893477410078\r\nStep 1979, loss: 0.007309079170227051\r\nStep 1980, loss: 0.006461316253989935\r\nStep 1981, loss: 0.007693135645240545\r\nStep 1982, loss: 0.008472313173115253\r\nStep 1983, loss: 0.008727866224944592\r\nStep 1984, loss: 0.008116434328258038\r\nStep 1985, loss: 0.008145858533680439\r\nStep 1986, loss: 0.009711185470223427\r\nStep 1987, loss: 0.008214308880269527\r\nStep 1988, loss: 0.00827767327427864\r\nStep 1989, loss: 0.006834420375525951\r\nStep 1990, loss: 0.008719997480511665\r\nStep 1991, loss: 0.007744617760181427\r\nStep 1992, loss: 0.006096074357628822\r\nStep 2100, loss: 0.0064675272442400455\r\nStep 2101, loss: 0.007884005084633827\r\nStep 2102, loss: 0.006588571704924107\r\nStep 2103, loss: 0.009607868269085884\r\nStep 2104, loss: 0.00802893191576004\r\nStep 2105, loss: 
0.008265656419098377\r\nStep 2106, loss: 0.00791378878057003\r\nStep 2107, loss: 0.0068814679980278015\r\nStep 2108, loss: 0.006877466104924679\r\nStep 2109, loss: 0.006834062747657299\r\nStep 2110, loss: 0.00647063460201025\r\nStep 2111, loss: 0.007915258407592773\r\nStep 2112, loss: 0.005920158699154854\r\nStep 2113, loss: 0.007688958663493395\r\nStep 2114, loss: 0.0072950636968016624\r\nStep 2115, loss: 0.008072075434029102\r\nStep 2116, loss: 0.007164862006902695\r\nStep 2117, loss: 0.007854027673602104\r\nStep 2118, loss: 0.00925062783062458\r\nStep 2119, loss: 0.006641793996095657\r\nStep 2120, loss: 0.007246651686728001\r\nStep 2121, loss: 0.009015616960823536\r\nStep 2122, loss: 0.009036382660269737\r\nStep 2123, loss: 0.007517040707170963\r\nStep 2124, loss: 0.00956637505441904\r\nStep 2125, loss: 0.00796852819621563\r\nStep 2126, loss: 0.00775392260402441\r\nStep 1993, loss: 0.0058983927592635155\r\nStep 1994, loss: 0.009106900542974472\r\nStep 1995, loss: 0.007174712140113115\r\nStep 1996, loss: 0.008139751851558685\r\nStep 1997, loss: 0.00785200297832489\r\nStep 1998, loss: 0.00958508811891079\r\nStep 1999, loss: 0.00785050354897976\r\nStep 2000, loss: 0.007317959330976009\r\nStep 2001, loss: 0.007080672308802605\r\nStep 2002, loss: 0.007991728372871876\r\nStep 2003, loss: 0.007821274921298027\r\nStep 2004, loss: 0.008489646017551422\r\nStep 2005, loss: 0.0075281537137925625\r\nStep 2006, loss: 0.006736667361110449\r\nStep 2007, loss: 0.007574349641799927\r\nStep 2008, loss: 0.007746479008346796\r\nStep 2009, loss: 0.007272219751030207\r\nStep 2010, loss: 0.007452439051121473\r\nStep 2011, loss: 0.007787893991917372\r\nStep 2012, loss: 0.007093604654073715\r\nStep 2013, loss: 0.007687790784984827\r\nStep 2014, loss: 0.007467956282198429\r\nStep 2015, loss: 0.007624607998877764\r\nStep 2016, loss: 0.007285227533429861\r\nStep 2017, loss: 0.007332447450608015\r\nStep 2018, loss: 0.006121398881077766\r\nStep 2127, loss: 0.008484766818583012\r\nStep 2128, loss: 0.007591076660901308\r\nStep 2129, loss: 0.007292400114238262\r\nStep 2130, loss: 0.008852246217429638\r\nStep 2131, loss: 0.007560945115983486\r\nStep 2132, loss: 0.007763219065964222\r\nStep 2133, loss: 0.0072074104100465775\r\nStep 2134, loss: 0.008280422538518906\r\nStep 2135, loss: 0.007888955064117908\r\nStep 2136, loss: 0.008342885412275791\r\nStep 2137, loss: 0.005806874018162489\r\nStep 2138, loss: 0.008248311467468739\r\nStep 2139, loss: 0.006253413390368223\r\nStep 2140, loss: 0.007602163590490818\r\nStep 2141, loss: 0.007624937687069178\r\nStep 2142, loss: 0.0073804245330393314\r\nStep 2143, loss: 0.006457494106143713\r\nStep 2144, loss: 0.010475477203726768\r\nStep 2145, loss: 0.008034901693463326\r\nStep 2146, loss: 0.007801996544003487\r\nStep 2147, loss: 0.00928365159779787\r\nStep 2148, loss: 0.007460521999746561\r\nStep 2149, loss: 0.009220042265951633\r\nStep 2150, loss: 0.008657963946461678\r\nStep 2151, loss: 0.00888269953429699\r\nStep 2152, loss: 0.007898455485701561\r\nStep 2019, loss: 0.006808311678469181\r\nStep 2020, loss: 0.006265093106776476\r\nStep 2021, loss: 0.007726202253252268\r\nStep 2022, loss: 0.006667358800768852\r\nStep 2023, loss: 0.008020839653909206\r\nStep 2024, loss: 0.008737929165363312\r\nStep 2025, loss: 0.00960688479244709\r\nStep 2026, loss: 0.010799295268952847\r\nStep 2027, loss: 0.007353636436164379\r\nStep 2028, loss: 0.007275961339473724\r\nStep 2029, loss: 0.008034869097173214\r\nStep 2030, loss: 0.006865362636744976\r\nStep 2031, loss: 0.007538987323641777\r\nStep 2032, 
loss: 0.008919459767639637\r\nStep 2033, loss: 0.0060740369372069836\r\nStep 2034, loss: 0.0073450044728815556\r\nStep 2035, loss: 0.008335300721228123\r\nStep 2036, loss: 0.006473970133811235\r\nStep 2037, loss: 0.006627377588301897\r\nStep 2038, loss: 0.006782549899071455\r\nStep 2039, loss: 0.007833597250282764\r\nStep 2040, loss: 0.007012289483100176\r\nStep 2041, loss: 0.008080333471298218\r\nStep 2042, loss: 0.007360116112977266\r\nStep 2043, loss: 0.00804917886853218\r\nStep 2044, loss: 0.008462784811854362\r\nStep 2153, loss: 0.006836357526481152\r\nStep 2154, loss: 0.009004993364214897\r\nStep 2155, loss: 0.008021147921681404\r\nStep 2156, loss: 0.006917229853570461\r\nStep 2157, loss: 0.007290771696716547\r\nStep 2158, loss: 0.007864288985729218\r\nStep 2159, loss: 0.008170363493263721\r\nStep 2160, loss: 0.007793820928782225\r\nStep 2161, loss: 0.007919901050627232\r\nStep 2162, loss: 0.0073922560550272465\r\nStep 2163, loss: 0.006999340374022722\r\nStep 2164, loss: 0.008284047245979309\r\nStep 2165, loss: 0.006230353377759457\r\nStep 2166, loss: 0.007382285315543413\r\nStep 2167, loss: 0.007332618813961744\r\nStep 2168, loss: 0.005218885838985443\r\nStep 2169, loss: 0.007856744341552258\r\nStep 2170, loss: 0.008586950600147247\r\nStep 2171, loss: 0.0070792874321341515\r\nStep 2172, loss: 0.007221385836601257\r\nStep 2173, loss: 0.007499690167605877\r\nStep 2174, loss: 0.008019034750759602\r\nStep 2175, loss: 0.007694543339312077\r\nStep 2176, loss: 0.009478623978793621\r\nStep 2177, loss: 0.008031236939132214\r\nStep 2178, loss: 0.006757157389074564\r\nStep 2045, loss: 0.006939365062862635\r\nStep 2046, loss: 0.008770853281021118\r\nStep 2047, loss: 0.008424682542681694\r\nStep 2048, loss: 0.008898302912712097\r\nStep 2049, loss: 0.008553815074265003\r\nStep 2050, loss: 0.009473413228988647\r\nStep 2051, loss: 0.009936349466443062\r\nStep 2052, loss: 0.00961415283381939\r\nStep 2053, loss: 0.007590943947434425\r\nStep 2054, loss: 0.007170454133301973\r\nStep 2055, loss: 0.00796786230057478\r\nStep 2056, loss: 0.007342166732996702\r\nStep 2057, loss: 0.008216858841478825\r\nStep 2058, loss: 0.008538139052689075\r\nStep 2059, loss: 0.007265908177942038\r\nStep 2060, loss: 0.006920312996953726\r\nStep 2061, loss: 0.0074624670669436455\r\nStep 2062, loss: 0.007335641421377659\r\nStep 2063, loss: 0.006655374076217413\r\nStep 2064, loss: 0.0069557367824018\r\nStep 2065, loss: 0.007986187934875488\r\nStep 2066, loss: 0.008852875791490078\r\nStep 2067, loss: 0.007818068377673626\r\nStep 2068, loss: 0.00853408221155405\r\nStep 2069, loss: 0.008041526190936565\r\nStep 2070, loss: 0.007517475634813309\r\nStep 2071, loss: 0.007183353416621685\r\nStep 2179, loss: 0.007930068299174309\r\nStep 2180, loss: 0.009267700836062431\r\nStep 2072, loss: 0.007504364009946585\r\nStep 2073, loss: 0.008248522877693176\r\nStep 2181, loss: 0.006485333200544119\r\nStep 2181, loss: 0.006485333200544119\r\nStep 2074, loss: 0.009289816953241825\r\nStep 2075, loss: 0.008523017168045044\r\nStep 2076, loss: 0.009130793623626232\r\nStep 2077, loss: 0.009776411578059196\r\nStep 2078, loss: 0.007912610657513142\r\nStep 2079, loss: 0.008615592494606972\r\nStep 2080, loss: 0.007488418370485306\r\nStep 2081, loss: 0.008683528751134872\r\nStep 2082, loss: 0.005960147362202406\r\nStep 2083, loss: 0.008028778247535229\r\nStep 2084, loss: 0.006622010376304388\r\nStep 2085, loss: 0.006667743436992168\r\nStep 2086, loss: 0.007373551372438669\r\nStep 2087, loss: 0.007238994352519512\r\nStep 2088, loss: 
0.007485955487936735\r\n[terminal scrollback residue: overlapping replays of the training-loss log for Steps 1967-2181 (losses ~0.0052-0.0108); duplicate passes collapsed, final pass retained]\r\nStep 
2148, loss: 0.007460521999746561\r\nStep 2149, loss: 0.009220042265951633\r\nStep 2150, loss: 0.008657963946461678\r\nStep 2151, loss: 0.00888269953429699\r\nStep 2152, loss: 0.007898455485701561\r\nStep 2153, loss: 0.006836357526481152\r\nStep 2154, loss: 0.009004993364214897\r\nStep 2155, loss: 0.008021147921681404\r\nStep 2156, loss: 0.006917229853570461\r\nStep 2157, loss: 0.007290771696716547\r\nStep 2158, loss: 0.007864288985729218\r\nStep 2159, loss: 0.008170363493263721\r\nStep 2160, loss: 0.007793820928782225\r\nStep 2161, loss: 0.007919901050627232\r\nStep 2162, loss: 0.0073922560550272465\r\nStep 2163, loss: 0.006999340374022722\r\nStep 2164, loss: 0.008284047245979309\r\nStep 2165, loss: 0.006230353377759457\r\nStep 2166, loss: 0.007382285315543413\r\nStep 2167, loss: 0.007332618813961744\r\nStep 2168, loss: 0.005218885838985443\r\nStep 2169, loss: 0.007856744341552258\r\nStep 2170, loss: 0.008586950600147247\r\nStep 2171, loss: 0.0070792874321341515\r\nStep 2172, loss: 0.007221385836601257\r\nStep 2173, loss: 0.007499690167605877\r\nStep 2174, loss: 0.008019034750759602\r\nStep 2175, loss: 0.007694543339312077\r\nStep 2176, loss: 0.009478623978793621\r\nStep 2177, loss: 0.008031236939132214\r\nStep 2178, loss: 0.006757157389074564\r\nStep 2179, loss: 0.007930068299174309\r\nStep 2180, loss: 0.009267700836062431\r\nStep 2181, loss: 0.006485333200544119\r\n",,terminal_output +1453,2791072,"TERMINAL",0,0,"645041",,terminal_output +1454,2792138,"TERMINAL",0,0,"75152",,terminal_output +1455,2793119,"TERMINAL",0,0,"86263",,terminal_output +1456,2794245,"TERMINAL",0,0,"97374",,terminal_output +1457,2795229,"TERMINAL",0,0,"\r106022to R 7:15:28507,05207115la 0:54407,04136924interact45:28\t 1 hkn0733355\t 4 hkn[0625-0628]",,terminal_output +1458,2796294,"TERMINAL",0,0,"19596",,terminal_output +1459,2797317,"TERMINAL",0,0,"2306307",,terminal_output +1460,2798340,"TERMINAL",0,0,"31718",,terminal_output +1461,2799413,"TERMINAL",0,0,"42829",,terminal_output +1462,2800429,"TERMINAL",0,0,"53936:00",,terminal_output +1463,2801480,"TERMINAL",0,0,"641:0041",,terminal_output +1464,2802540,"TERMINAL",0,0,"75152",,terminal_output +1465,2803553,"TERMINAL",0,0,"86263",,terminal_output +1466,2804603,"TERMINAL",0,0,"97374",,terminal_output +1467,2805652,"TERMINAL",0,0,"208485",,terminal_output +1468,2806701,"TERMINAL",0,0,"19596",,terminal_output +1469,2807759,"TERMINAL",0,0,"2417418",,terminal_output +1470,2808890,"TERMINAL",0,0,"42829",,terminal_output +1471,2809914,"TERMINAL",0,0,"539310",,terminal_output +1472,2810937,"TERMINAL",0,0,"641041",,terminal_output +1473,2811959,"TERMINAL",0,0,"75152",,terminal_output +1474,2813088,"TERMINAL",0,0,"86263",,terminal_output +1475,2814109,"TERMINAL",0,0,"97374",,terminal_output +1476,2815090,"TERMINAL",0,0,"308485",,terminal_output +1477,2816157,"TERMINAL",0,0,"19596",,terminal_output +1478,2817183,"TERMINAL",0,0,"2506507",,terminal_output +1479,2818320,"TERMINAL",0,0,"31718",,terminal_output +1480,2819267,"TERMINAL",0,0,"42829",,terminal_output +1481,2820330,"TERMINAL",0,0,"539320",,terminal_output +1482,2821390,"TERMINAL",0,0,"642041",,terminal_output +1483,2822621,"TERMINAL",0,0,"75152",,terminal_output +1484,2823457,"TERMINAL",0,0,"86263",,terminal_output +1485,2824556,"TERMINAL",0,0,"97374",,terminal_output +1486,2825533,"TERMINAL",0,0,"408485",,terminal_output +1487,2826576,"TERMINAL",0,0,"19596",,terminal_output +1488,2827735,"TERMINAL",0,0,"26:0066:007",,terminal_output +1489,2828756,"TERMINAL",0,0,"31718",,terminal_output 
+1490,2829694,"TERMINAL",0,0,"42829",,terminal_output +1491,2830801,"TERMINAL",0,0,"5430431",,terminal_output +1492,2831774,"TERMINAL",0,0,"75152",,terminal_output +1493,2832854,"TERMINAL",0,0,"86263",,terminal_output +1494,2833978,"TERMINAL",0,0,"97374",,terminal_output +1495,2834999,"TERMINAL",0,0,"508485",,terminal_output +1496,2836024,"TERMINAL",0,0,"19596",,terminal_output +1497,2837000,"TERMINAL",0,0,"2106107",,terminal_output +1498,2838072,"TERMINAL",0,0,"31718",,terminal_output +1499,2839098,"TERMINAL",0,0,"42829",,terminal_output +1500,2840210,"TERMINAL",0,0,"539340",,terminal_output +1501,2841219,"TERMINAL",0,0,"644041",,terminal_output +1502,2842271,"TERMINAL",0,0,"75152",,terminal_output +1503,2843308,"TERMINAL",0,0,"86263",,terminal_output +1504,2844345,"TERMINAL",0,0,"97374",,terminal_output +1505,2845399,"TERMINAL",0,0,"2:008485",,terminal_output +1506,2846477,"TERMINAL",0,0,"19596",,terminal_output +1507,2847494,"TERMINAL",0,0,"2206207",,terminal_output +1508,2848531,"TERMINAL",0,0,"31718",,terminal_output +1509,2849588,"TERMINAL",0,0,"42829",,terminal_output +1510,2850632,"TERMINAL",0,0,"539350",,terminal_output +1511,2851794,"TERMINAL",0,0,"645041",,terminal_output +1512,2852725,"TERMINAL",0,0,"76263",,terminal_output +1513,2853781,"TERMINAL",0,0,"97374",,terminal_output +1514,2854865,"TERMINAL",0,0,"108485",,terminal_output +1515,2855883,"TERMINAL",0,0,"19596",,terminal_output +1516,2857020,"TERMINAL",0,0,"2306307",,terminal_output +1517,2857984,"TERMINAL",0,0,"31718",,terminal_output +1518,2859031,"TERMINAL",0,0,"42829",,terminal_output +1519,2860081,"TERMINAL",0,0,"53937:00",,terminal_output +1520,2863774,"TERMINAL",0,0,"672:0374",,terminal_output +1521,2864824,"TERMINAL",0,0,"208485",,terminal_output +1522,2865872,"TERMINAL",0,0,"19596",,terminal_output +1523,2866910,"TERMINAL",0,0,"2406407",,terminal_output +1524,2867966,"TERMINAL",0,0,"31718",,terminal_output +1525,2869098,"TERMINAL",0,0,"42829",,terminal_output +1526,2870075,"TERMINAL",0,0,"539310",,terminal_output +1527,2871125,"TERMINAL",0,0,"641041",,terminal_output +1528,2872272,"TERMINAL",0,0,"75152",,terminal_output +1529,2873299,"TERMINAL",0,0,"86263",,terminal_output +1530,2874249,"TERMINAL",0,0,"97374",,terminal_output +1531,2875318,"TERMINAL",0,0,"308485",,terminal_output +1532,2876352,"TERMINAL",0,0,"19596",,terminal_output +1533,2877408,"TERMINAL",0,0,"2506507",,terminal_output +1534,2878468,"TERMINAL",0,0,"31718",,terminal_output +1535,2879547,"TERMINAL",0,0,"42829",,terminal_output +1536,2880577,"TERMINAL",0,0,"539320",,terminal_output +1537,2881638,"TERMINAL",0,0,"642041",,terminal_output +1538,2882720,"TERMINAL",0,0,"75152",,terminal_output +1539,2883727,"TERMINAL",0,0,"87374",,terminal_output +1540,2884778,"TERMINAL",0,0,"408485",,terminal_output +1541,2885829,"TERMINAL",0,0,"19596",,terminal_output +1542,2886917,"TERMINAL",0,0,"27:0067:007",,terminal_output +1543,2888045,"TERMINAL",0,0,"31718",,terminal_output +1544,2889065,"TERMINAL",0,0,"42829",,terminal_output +1545,2890052,"TERMINAL",0,0,"539330",,terminal_output +1546,2891092,"TERMINAL",0,0,"643041",,terminal_output +1547,2892150,"TERMINAL",0,0,"75152",,terminal_output +1548,2893193,"TERMINAL",0,0,"86263",,terminal_output +1549,2894243,"TERMINAL",0,0,"97374",,terminal_output +1550,2895316,"TERMINAL",0,0,"508485",,terminal_output +1551,2896106,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",0,0,"",shellscript,tab +1552,2896420,"TERMINAL",0,0,"19596",,terminal_output 
+1553,2897467,"TERMINAL",0,0,"2106107",,terminal_output +1554,2898499,"TERMINAL",0,0,"31718",,terminal_output +1555,2899532,"TERMINAL",0,0,"42829",,terminal_output +1556,2900426,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",0,0,"",shellscript,tab +1557,2900427,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",942,0,"",shellscript,selection_mouse +1558,2900481,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",941,0,"",shellscript,selection_command +1559,2900548,"TERMINAL",0,0,"539340",,terminal_output +1560,2901388,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1167,0,"",shellscript,selection_mouse +1561,2901390,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1166,0,"",shellscript,selection_command +1562,2901577,"TERMINAL",0,0,"644041",,terminal_output +1563,2902611,"TERMINAL",0,0,"75152",,terminal_output +1564,2903613,"scripts_horeka/modelsize_scaling/lam/tester.sh",0,0,"",shellscript,tab +1565,2903763,"TERMINAL",0,0,"86263",,terminal_output +1566,2904735,"TERMINAL",0,0,"97374",,terminal_output +1567,2905413,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",0,0,"",shellscript,tab +1568,2905755,"TERMINAL",0,0,"3:009596",,terminal_output +1569,2906884,"TERMINAL",0,0,"2206207",,terminal_output +1570,2907910,"TERMINAL",0,0,"31718",,terminal_output +1571,2908898,"TERMINAL",0,0,"42829",,terminal_output +1572,2909530,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",457,0,"",shellscript,selection_mouse +1573,2909951,"TERMINAL",0,0,"539350",,terminal_output +1574,2910163,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1068,0,"",shellscript,selection_mouse +1575,2910174,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1067,0,"",shellscript,selection_command +1576,2910678,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1297,0,"",shellscript,selection_mouse +1577,2910680,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1296,0,"",shellscript,selection_command +1578,2910998,"TERMINAL",0,0,"645041",,terminal_output +1579,2911296,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1407,0,"",shellscript,selection_mouse +1580,2911309,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1406,0,"",shellscript,selection_command +1581,2912106,"TERMINAL",0,0,"75152",,terminal_output +1582,2913096,"TERMINAL",0,0,"86263",,terminal_output +1583,2914156,"TERMINAL",0,0,"97374",,terminal_output +1584,2915209,"TERMINAL",0,0,"108485",,terminal_output +1585,2916307,"TERMINAL",0,0,"19596",,terminal_output +1586,2917280,"TERMINAL",0,0,"2306307",,terminal_output +1587,2918438,"TERMINAL",0,0,"31718",,terminal_output +1588,2919480,"TERMINAL",0,0,"42829",,terminal_output +1589,2920527,"TERMINAL",0,0,"53938:00",,terminal_output +1590,2920762,"TERMINAL",0,0,"Step 2182, loss: 0.007047292776405811\r\nStep 2183, loss: 0.005946440622210503\r\nStep 2184, loss: 0.007753191515803337\r\nStep 2185, loss: 0.007971913553774357\r\nStep 2186, loss: 0.006793928798288107\r\nStep 2187, loss: 0.007563348393887281\r\nStep 2188, loss: 0.007450959179550409\r\nStep 2189, loss: 0.007365739438682795\r\nStep 2190, loss: 0.007971013896167278\r\nStep 2191, loss: 0.006419710349291563\r\nStep 2192, loss: 0.007855601608753204\r\nStep 2193, loss: 0.006554008927196264\r\nStep 2194, loss: 0.007919912226498127\r\nStep 2195, loss: 0.009246248751878738\r\nStep 2196, loss: 
0.0077072675339877605\r\n[terminal scrollback residue: training-loss log for Steps 2197-2322 (losses ~0.0054-0.0110), with Steps 2182-2288 and 2182-2207 replayed by terminal redraws; duplicates collapsed]\r\nStep 2321, loss: 0.008087538182735443\r\nStep 2322, loss: 0.006975209806114435\r\nStep 
2323, loss: 0.007179504260420799\r\nStep 2324, loss: 0.007759228814393282\r\nStep 2325, loss: 0.006794561631977558\r\nStep 2326, loss: 0.006587014067918062\r\nStep 2327, loss: 0.0058727082796394825\r\nStep 2328, loss: 0.005745173431932926\r\nStep 2329, loss: 0.005147646646946669\r\nStep 2330, loss: 0.00745925260707736\r\nStep 2331, loss: 0.006531362421810627\r\nStep 2332, loss: 0.007551598362624645\r\nStep 2333, loss: 0.007515294477343559\r\nStep 2334, loss: 0.006597206927835941\r\nStep 2335, loss: 0.006945605855435133\r\nStep 2336, loss: 0.005773642100393772\r\nStep 2337, loss: 0.007051005959510803\r\nStep 2338, loss: 0.006912588141858578\r\nStep 2339, loss: 0.006388429086655378\r\nStep 2340, loss: 0.006413211580365896\r\nStep 2341, loss: 0.010007134638726711\r\nStep 2342, loss: 0.008879064582288265\r\nStep 2343, loss: 0.007293196395039558\r\nStep 2344, loss: 0.007867034524679184\r\nStep 2345, loss: 0.007829985581338406\r\nStep 2346, loss: 0.007642874028533697\r\nStep 2347, loss: 0.007404367905110121\r\nStep 2348, loss: 0.00787848886102438\r\nStep 2349, loss: 0.007604194339364767\r\nStep 2350, loss: 0.006056034006178379\r\nStep 2351, loss: 0.009245391003787518\r\nStep 2352, loss: 0.007415136322379112\r\nStep 2353, loss: 0.007526281755417585\r\nStep 2354, loss: 0.005462340544909239\r\nStep 2355, loss: 0.006441247649490833\r\nStep 2356, loss: 0.007708383724093437\r\nStep 2357, loss: 0.008638114668428898\r\nStep 2358, loss: 0.007165662478655577\r\nStep 2359, loss: 0.008650296367704868\r\nStep 2360, loss: 0.007847641594707966\r\nStep 2361, loss: 0.006319018546491861\r\nStep 2362, loss: 0.006305510178208351\r\nStep 2363, loss: 0.008855611085891724\r\nStep 2364, loss: 0.007100534625351429\r\nStep 2365, loss: 0.008646843954920769\r\nStep 2366, loss: 0.008140524849295616\r\nStep 2367, loss: 0.006965464912354946\r\nStep 2368, loss: 0.0073202187195420265\r\nStep 2369, loss: 0.008150244131684303\r\nStep 2370, loss: 0.007980136200785637\r\nStep 2371, loss: 0.007754015736281872\r\nStep 2372, loss: 0.00854856614023447\r\nStep 2373, loss: 0.007721675559878349\r\nStep 2374, loss: 0.007285380270332098\r\nStep 2375, loss: 0.007755438331514597\r\nStep 2376, loss: 0.006344699300825596\r\nStep 2377, loss: 0.006605956237763166\r\nStep 2378, loss: 0.005707500036805868\r\nStep 2379, loss: 0.007001930847764015\r\nStep 2380, loss: 0.0059698657132685184\r\nStep 2381, loss: 0.0074415965937078\r\nStep 2382, loss: 0.006967493798583746\r\nStep 2383, loss: 0.008568624034523964\r\nStep 2384, loss: 0.006496841553598642\r\nStep 2385, loss: 0.006507178768515587\r\nStep 2386, loss: 0.008202851749956608\r\nStep 2387, loss: 0.0077567268162965775\r\nStep 2388, loss: 0.010153812356293201\r\nStep 2389, loss: 0.010740460827946663\r\nStep 2390, loss: 0.007692711893469095\r\nStep 2391, loss: 0.010689156129956245\r\nStep 2392, loss: 0.0076215327717363834\r\nStep 2393, loss: 0.006426332518458366\r\nStep 2394, loss: 0.007592188194394112\r\nStep 2395, loss: 0.008529632352292538\r\nStep 2289, loss: 0.008151629008352757\r\nStep 2290, loss: 0.007590850815176964\r\nStep 2291, loss: 0.006018661893904209\r\nStep 2292, loss: 0.006589282304048538\r\nStep 2293, loss: 0.007021039258688688\r\nStep 2294, loss: 0.008005218580365181\r\nStep 2295, loss: 0.007952924817800522\r\nStep 2296, loss: 0.008568134158849716\r\nStep 2297, loss: 0.0067298514768481255\r\nStep 2298, loss: 0.0075162495486438274\r\nStep 2299, loss: 0.006727202795445919\r\nStep 2300, loss: 0.006799233146011829\r\nStep 2301, loss: 0.00684049166738987\r\nStep 2302, loss: 
0.007529537659138441\r\nStep 2303, loss: 0.006426770705729723\r\nStep 2304, loss: 0.008691138587892056\r\nStep 2305, loss: 0.006516975816339254\r\nStep 2306, loss: 0.0053546535782516\r\nStep 2307, loss: 0.007579582743346691\r\nStep 2308, loss: 0.007574320770800114\r\nStep 2309, loss: 0.007034412119537592\r\nStep 2310, loss: 0.006250488106161356\r\nStep 2311, loss: 0.008510340936481953\r\nStep 2312, loss: 0.00894845649600029\r\nStep 2313, loss: 0.008291991427540779\r\nStep 2314, loss: 0.009398137219250202\r\nStep 2315, loss: 0.008755427785217762\r\nStep 2316, loss: 0.007275292184203863\r\nStep 2317, loss: 0.007888562977313995\r\nStep 2318, loss: 0.0072205038741230965\r\nStep 2319, loss: 0.009770158678293228\r\nStep 2320, loss: 0.007986108772456646\r\nStep 2321, loss: 0.008087538182735443\r\nStep 2322, loss: 0.006975209806114435\r\nStep 2323, loss: 0.007179504260420799\r\nStep 2324, loss: 0.007759228814393282\r\nStep 2325, loss: 0.006794561631977558\r\nStep 2326, loss: 0.006587014067918062\r\nStep 2327, loss: 0.0058727082796394825\r\nStep 2328, loss: 0.005745173431932926\r\nStep 2329, loss: 0.005147646646946669\r\nStep 2330, loss: 0.00745925260707736\r\nStep 2331, loss: 0.006531362421810627\r\nStep 2332, loss: 0.007551598362624645\r\nStep 2333, loss: 0.007515294477343559\r\nStep 2334, loss: 0.006597206927835941\r\nStep 2335, loss: 0.006945605855435133\r\nStep 2336, loss: 0.005773642100393772\r\nStep 2337, loss: 0.007051005959510803\r\nStep 2338, loss: 0.006912588141858578\r\nStep 2339, loss: 0.006388429086655378\r\nStep 2340, loss: 0.006413211580365896\r\nStep 2341, loss: 0.010007134638726711\r\nStep 2342, loss: 0.008879064582288265\r\nStep 2343, loss: 0.007293196395039558\r\nStep 2344, loss: 0.007867034524679184\r\nStep 2345, loss: 0.007829985581338406\r\nStep 2346, loss: 0.007642874028533697\r\nStep 2347, loss: 0.007404367905110121\r\nStep 2348, loss: 0.00787848886102438\r\nStep 2349, loss: 0.007604194339364767\r\nStep 2350, loss: 0.006056034006178379\r\nStep 2351, loss: 0.009245391003787518\r\nStep 2352, loss: 0.007415136322379112\r\nStep 2353, loss: 0.007526281755417585\r\nStep 2354, loss: 0.005462340544909239\r\nStep 2355, loss: 0.006441247649490833\r\nStep 2356, loss: 0.007708383724093437\r\nStep 2357, loss: 0.008638114668428898\r\nStep 2358, loss: 0.007165662478655577\r\nStep 2359, loss: 0.008650296367704868\r\nStep 2360, loss: 0.007847641594707966\r\nStep 2361, loss: 0.006319018546491861\r\nStep 2362, loss: 0.006305510178208351\r\nStep 2363, loss: 0.008855611085891724\r\nStep 2364, loss: 0.007100534625351429\r\nStep 2365, loss: 0.008646843954920769\r\nStep 2366, loss: 0.008140524849295616\r\nStep 2367, loss: 0.006965464912354946\r\nStep 2368, loss: 0.0073202187195420265\r\nStep 2369, loss: 0.008150244131684303\r\nStep 2370, loss: 0.007980136200785637\r\nStep 2371, loss: 0.007754015736281872\r\nStep 2372, loss: 0.00854856614023447\r\nStep 2373, loss: 0.007721675559878349\r\nStep 2374, loss: 0.007285380270332098\r\nStep 2375, loss: 0.007755438331514597\r\nStep 2376, loss: 0.006344699300825596\r\nStep 2377, loss: 0.006605956237763166\r\nStep 2378, loss: 0.005707500036805868\r\nStep 2379, loss: 0.007001930847764015\r\nStep 2380, loss: 0.0059698657132685184\r\nStep 2381, loss: 0.0074415965937078\r\nStep 2382, loss: 0.006967493798583746\r\nStep 2383, loss: 0.008568624034523964\r\nStep 2384, loss: 0.006496841553598642\r\nStep 2385, loss: 0.006507178768515587\r\nStep 2386, loss: 0.008202851749956608\r\nStep 2387, loss: 0.0077567268162965775\r\nStep 2388, loss: 0.010153812356293201\r\nStep 
2389, loss: 0.010740460827946663\r\nStep 2390, loss: 0.007692711893469095\r\nStep 2391, loss: 0.010689156129956245\r\nStep 2392, loss: 0.0076215327717363834\r\nStep 2393, loss: 0.006426332518458366\r\nStep 2394, loss: 0.007592188194394112\r\nStep 2395, loss: 0.008529632352292538\r\nStep 2182, loss: 0.007047292776405811\r\nStep 2183, loss: 0.005946440622210503\r\nStep 2184, loss: 0.007753191515803337\r\nStep 2185, loss: 0.007971913553774357\r\nStep 2186, loss: 0.006793928798288107\r\nStep 2187, loss: 0.007563348393887281\r\nStep 2188, loss: 0.007450959179550409\r\nStep 2189, loss: 0.007365739438682795\r\nStep 2190, loss: 0.007971013896167278\r\nStep 2191, loss: 0.006419710349291563\r\nStep 2192, loss: 0.007855601608753204\r\nStep 2193, loss: 0.006554008927196264\r\nStep 2194, loss: 0.007919912226498127\r\nStep 2195, loss: 0.009246248751878738\r\nStep 2196, loss: 0.0077072675339877605\r\nStep 2197, loss: 0.009428897872567177\r\nStep 2198, loss: 0.008707528933882713\r\nStep 2199, loss: 0.008782273158431053\r\nStep 2200, loss: 0.011009319685399532\r\nStep 2201, loss: 0.008429359644651413\r\nStep 2202, loss: 0.007610331755131483\r\nStep 2203, loss: 0.007773495279252529\r\nStep 2204, loss: 0.009314494207501411\r\nStep 2205, loss: 0.007566919084638357\r\nStep 2206, loss: 0.00774649903178215\r\nStep 2207, loss: 0.006603968795388937\r\nStep 2182, loss: 0.007047292776405811\r\nStep 2183, loss: 0.005946440622210503\r\nStep 2184, loss: 0.007753191515803337\r\nStep 2185, loss: 0.007971913553774357\r\nStep 2186, loss: 0.006793928798288107\r\nStep 2187, loss: 0.007563348393887281\r\nStep 2188, loss: 0.007450959179550409\r\nStep 2189, loss: 0.007365739438682795\r\nStep 2190, loss: 0.007971013896167278\r\nStep 2191, loss: 0.006419710349291563\r\nStep 2192, loss: 0.007855601608753204\r\nStep 2193, loss: 0.006554008927196264\r\nStep 2194, loss: 0.007919912226498127\r\nStep 2195, loss: 0.009246248751878738\r\nStep 2196, loss: 0.0077072675339877605\r\nStep 2197, loss: 0.009428897872567177\r\nStep 2198, loss: 0.008707528933882713\r\nStep 2199, loss: 0.008782273158431053\r\nStep 2200, loss: 0.011009319685399532\r\nStep 2201, loss: 0.008429359644651413\r\nStep 2202, loss: 0.007610331755131483\r\nStep 2203, loss: 0.007773495279252529\r\nStep 2204, loss: 0.009314494207501411\r\nStep 2205, loss: 0.007566919084638357\r\nStep 2206, loss: 0.00774649903178215\r\nStep 2207, loss: 0.006603968795388937\r\nStep 2208, loss: 0.006329503376036882\r\nStep 2209, loss: 0.007151519879698753\r\nStep 2210, loss: 0.0077710580080747604\r\nStep 2211, loss: 0.006214924156665802\r\nStep 2212, loss: 0.006694791838526726\r\nStep 2213, loss: 0.0073739453218877316\r\nStep 2214, loss: 0.005623674485832453\r\nStep 2215, loss: 0.0078982999548316\r\nStep 2216, loss: 0.007187147159129381\r\nStep 2217, loss: 0.007525425869971514\r\nStep 2218, loss: 0.006596721243113279\r\nStep 2219, loss: 0.007706223987042904\r\nStep 2220, loss: 0.007000356912612915\r\nStep 2221, loss: 0.008878003805875778\r\nStep 2222, loss: 0.006376492790877819\r\nStep 2223, loss: 0.008050079457461834\r\nStep 2224, loss: 0.007886356674134731\r\nStep 2225, loss: 0.0069083841517567635\r\nStep 2226, loss: 0.006824408657848835\r\nStep 2227, loss: 0.006793968845158815\r\nStep 2228, loss: 0.007624398451298475\r\nStep 2229, loss: 0.0081605464220047\r\nStep 2230, loss: 0.006897060200572014\r\nStep 2231, loss: 0.007584972772747278\r\nStep 2232, loss: 0.0069396113976836205\r\nStep 2233, loss: 0.007637462578713894\r\nStep 2208, loss: 0.006329503376036882\r\nStep 2209, loss: 
0.007151519879698753\r\nStep 2210, loss: 0.0077710580080747604\r\nStep 2211, loss: 0.006214924156665802\r\nStep 2212, loss: 0.006694791838526726\r\nStep 2213, loss: 0.0073739453218877316\r\nStep 2214, loss: 0.005623674485832453\r\nStep 2215, loss: 0.0078982999548316\r\nStep 2216, loss: 0.007187147159129381\r\nStep 2217, loss: 0.007525425869971514\r\nStep 2218, loss: 0.006596721243113279\r\nStep 2219, loss: 0.007706223987042904\r\nStep 2220, loss: 0.007000356912612915\r\nStep 2221, loss: 0.008878003805875778\r\nStep 2222, loss: 0.006376492790877819\r\nStep 2223, loss: 0.008050079457461834\r\nStep 2224, loss: 0.007886356674134731\r\nStep 2225, loss: 0.0069083841517567635\r\nStep 2226, loss: 0.006824408657848835\r\nStep 2227, loss: 0.006793968845158815\r\nStep 2228, loss: 0.007624398451298475\r\nStep 2229, loss: 0.0081605464220047\r\nStep 2230, loss: 0.006897060200572014\r\nStep 2231, loss: 0.007584972772747278\r\nStep 2232, loss: 0.0069396113976836205\r\nStep 2233, loss: 0.007637462578713894\r\nStep 2234, loss: 0.007333801127970219\r\nStep 2235, loss: 0.007183652836829424\r\nStep 2236, loss: 0.008483086712658405\r\nStep 2237, loss: 0.007403460331261158\r\nStep 2238, loss: 0.007542226929217577\r\nStep 2239, loss: 0.006474603898823261\r\nStep 2240, loss: 0.006936528254300356\r\nStep 2241, loss: 0.009447640739381313\r\nStep 2242, loss: 0.007099971640855074\r\nStep 2243, loss: 0.00863928534090519\r\nStep 2244, loss: 0.01093036774545908\r\nStep 2245, loss: 0.008053792640566826\r\nStep 2246, loss: 0.008137132041156292\r\nStep 2247, loss: 0.007317351642996073\r\nStep 2248, loss: 0.00731588713824749\r\nStep 2249, loss: 0.006467532832175493\r\nStep 2250, loss: 0.007371681742370129\r\nStep 2251, loss: 0.006458795629441738\r\nStep 2252, loss: 0.007805053610354662\r\nStep 2253, loss: 0.007725644391030073\r\nStep 2254, loss: 0.007198499981313944\r\nStep 2255, loss: 0.00799979642033577\r\nStep 2256, loss: 0.007058672606945038\r\nStep 2257, loss: 0.0069746156223118305\r\nStep 2258, loss: 0.00751868961378932\r\nStep 2259, loss: 0.007431623060256243\r\nStep 2260, loss: 0.0062018707394599915\r\nStep 2234, loss: 0.007333801127970219\r\nStep 2235, loss: 0.007183652836829424\r\nStep 2236, loss: 0.008483086712658405\r\nStep 2237, loss: 0.007403460331261158\r\nStep 2238, loss: 0.007542226929217577\r\nStep 2239, loss: 0.006474603898823261\r\nStep 2240, loss: 0.006936528254300356\r\nStep 2241, loss: 0.009447640739381313\r\nStep 2242, loss: 0.007099971640855074\r\nStep 2243, loss: 0.00863928534090519\r\nStep 2244, loss: 0.01093036774545908\r\nStep 2245, loss: 0.008053792640566826\r\nStep 2246, loss: 0.008137132041156292\r\nStep 2247, loss: 0.007317351642996073\r\nStep 2248, loss: 0.00731588713824749\r\nStep 2249, loss: 0.006467532832175493\r\nStep 2250, loss: 0.007371681742370129\r\nStep 2251, loss: 0.006458795629441738\r\nStep 2252, loss: 0.007805053610354662\r\nStep 2253, loss: 0.007725644391030073\r\nStep 2254, loss: 0.007198499981313944\r\nStep 2255, loss: 0.00799979642033577\r\nStep 2256, loss: 0.007058672606945038\r\nStep 2257, loss: 0.0069746156223118305\r\nStep 2258, loss: 0.00751868961378932\r\nStep 2259, loss: 0.007431623060256243\r\nStep 2260, loss: 0.0062018707394599915\r\nStep 2182, loss: 0.007047292776405811\r\nStep 2183, loss: 0.005946440622210503\r\nStep 2184, loss: 0.007753191515803337\r\nStep 2185, loss: 0.007971913553774357\r\nStep 2186, loss: 0.006793928798288107\r\nStep 2187, loss: 0.007563348393887281\r\nStep 2188, loss: 0.007450959179550409\r\nStep 2189, loss: 0.007365739438682795\r\nStep 2190, 
loss: 0.007971013896167278\r\nStep 2191, loss: 0.006419710349291563\r\nStep 2192, loss: 0.007855601608753204\r\nStep 2193, loss: 0.006554008927196264\r\nStep 2194, loss: 0.007919912226498127\r\nStep 2195, loss: 0.009246248751878738\r\nStep 2196, loss: 0.0077072675339877605\r\nStep 2197, loss: 0.009428897872567177\r\nStep 2198, loss: 0.008707528933882713\r\nStep 2199, loss: 0.008782273158431053\r\nStep 2200, loss: 0.011009319685399532\r\nStep 2201, loss: 0.008429359644651413\r\nStep 2202, loss: 0.007610331755131483\r\nStep 2203, loss: 0.007773495279252529\r\nStep 2204, loss: 0.009314494207501411\r\nStep 2205, loss: 0.007566919084638357\r\nStep 2206, loss: 0.00774649903178215\r\nStep 2207, loss: 0.006603968795388937\r\nStep 2261, loss: 0.008419814519584179\r\nStep 2262, loss: 0.006146540865302086\r\nStep 2263, loss: 0.007318414747714996\r\nStep 2264, loss: 0.009463437832891941\r\nStep 2265, loss: 0.006919173989444971\r\nStep 2266, loss: 0.009040049277245998\r\nStep 2267, loss: 0.007615735754370689\r\nStep 2268, loss: 0.010363750159740448\r\nStep 2269, loss: 0.008556756190955639\r\nStep 2270, loss: 0.009120402857661247\r\nStep 2271, loss: 0.008105048909783363\r\nStep 2272, loss: 0.007221136707812548\r\nStep 2273, loss: 0.008906340226531029\r\nStep 2274, loss: 0.00774586945772171\r\nStep 2275, loss: 0.007926639169454575\r\nStep 2276, loss: 0.007177161052823067\r\nStep 2277, loss: 0.007441888563334942\r\nStep 2278, loss: 0.006979580502957106\r\nStep 2279, loss: 0.0070192525163292885\r\nStep 2280, loss: 0.0061452980153262615\r\nStep 2281, loss: 0.007703986018896103\r\nStep 2282, loss: 0.006331630516797304\r\nStep 2283, loss: 0.007644594646990299\r\nStep 2284, loss: 0.008074313402175903\r\nStep 2285, loss: 0.00854454468935728\r\nStep 2286, loss: 0.007954484783113003\r\nStep 2208, loss: 0.006329503376036882\r\nStep 2209, loss: 0.007151519879698753\r\nStep 2210, loss: 0.0077710580080747604\r\nStep 2211, loss: 0.006214924156665802\r\nStep 2212, loss: 0.006694791838526726\r\nStep 2213, loss: 0.0073739453218877316\r\nStep 2214, loss: 0.005623674485832453\r\nStep 2215, loss: 0.0078982999548316\r\nStep 2216, loss: 0.007187147159129381\r\nStep 2217, loss: 0.007525425869971514\r\nStep 2218, loss: 0.006596721243113279\r\nStep 2219, loss: 0.007706223987042904\r\nStep 2220, loss: 0.007000356912612915\r\nStep 2221, loss: 0.008878003805875778\r\nStep 2222, loss: 0.006376492790877819\r\nStep 2223, loss: 0.008050079457461834\r\nStep 2224, loss: 0.007886356674134731\r\nStep 2225, loss: 0.0069083841517567635\r\nStep 2226, loss: 0.006824408657848835\r\nStep 2227, loss: 0.006793968845158815\r\nStep 2228, loss: 0.007624398451298475\r\nStep 2229, loss: 0.0081605464220047\r\nStep 2230, loss: 0.006897060200572014\r\nStep 2231, loss: 0.007584972772747278\r\nStep 2232, loss: 0.0069396113976836205\r\nStep 2233, loss: 0.007637462578713894\r\nStep 2261, loss: 0.008419814519584179\r\nStep 2262, loss: 0.006146540865302086\r\nStep 2263, loss: 0.007318414747714996\r\nStep 2264, loss: 0.009463437832891941\r\nStep 2265, loss: 0.006919173989444971\r\nStep 2266, loss: 0.009040049277245998\r\nStep 2267, loss: 0.007615735754370689\r\nStep 2268, loss: 0.010363750159740448\r\nStep 2269, loss: 0.008556756190955639\r\nStep 2270, loss: 0.009120402857661247\r\nStep 2271, loss: 0.008105048909783363\r\nStep 2272, loss: 0.007221136707812548\r\nStep 2273, loss: 0.008906340226531029\r\nStep 2274, loss: 0.00774586945772171\r\nStep 2275, loss: 0.007926639169454575\r\nStep 2276, loss: 0.007177161052823067\r\nStep 2277, loss: 
0.007441888563334942\r\nStep 2278, loss: 0.006979580502957106\r\nStep 2279, loss: 0.0070192525163292885\r\nStep 2280, loss: 0.0061452980153262615\r\nStep 2281, loss: 0.007703986018896103\r\nStep 2282, loss: 0.006331630516797304\r\nStep 2283, loss: 0.007644594646990299\r\nStep 2284, loss: 0.008074313402175903\r\nStep 2285, loss: 0.00854454468935728\r\nStep 2286, loss: 0.007954484783113003\r\nStep 2208, loss: 0.006329503376036882\r\nStep 2209, loss: 0.007151519879698753\r\nStep 2210, loss: 0.0077710580080747604\r\nStep 2211, loss: 0.006214924156665802\r\nStep 2212, loss: 0.006694791838526726\r\nStep 2213, loss: 0.0073739453218877316\r\nStep 2214, loss: 0.005623674485832453\r\nStep 2215, loss: 0.0078982999548316\r\nStep 2216, loss: 0.007187147159129381\r\nStep 2217, loss: 0.007525425869971514\r\nStep 2218, loss: 0.006596721243113279\r\nStep 2219, loss: 0.007706223987042904\r\nStep 2220, loss: 0.007000356912612915\r\nStep 2221, loss: 0.008878003805875778\r\nStep 2222, loss: 0.006376492790877819\r\nStep 2223, loss: 0.008050079457461834\r\nStep 2224, loss: 0.007886356674134731\r\nStep 2225, loss: 0.0069083841517567635\r\nStep 2226, loss: 0.006824408657848835\r\nStep 2227, loss: 0.006793968845158815\r\nStep 2228, loss: 0.007624398451298475\r\nStep 2229, loss: 0.0081605464220047\r\nStep 2230, loss: 0.006897060200572014\r\nStep 2231, loss: 0.007584972772747278\r\nStep 2232, loss: 0.0069396113976836205\r\nStep 2233, loss: 0.007637462578713894\r\nStep 2287, loss: 0.007020978722721338\r\nStep 2288, loss: 0.00875190831720829\r\nStep 2234, loss: 0.007333801127970219\r\nStep 2235, loss: 0.007183652836829424\r\nStep 2236, loss: 0.008483086712658405\r\nStep 2237, loss: 0.007403460331261158\r\nStep 2238, loss: 0.007542226929217577\r\nStep 2239, loss: 0.006474603898823261\r\nStep 2240, loss: 0.006936528254300356\r\nStep 2241, loss: 0.009447640739381313\r\nStep 2242, loss: 0.007099971640855074\r\nStep 2243, loss: 0.00863928534090519\r\nStep 2244, loss: 0.01093036774545908\r\nStep 2245, loss: 0.008053792640566826\r\nStep 2246, loss: 0.008137132041156292\r\nStep 2247, loss: 0.007317351642996073\r\nStep 2248, loss: 0.00731588713824749\r\nStep 2249, loss: 0.006467532832175493\r\nStep 2250, loss: 0.007371681742370129\r\nStep 2251, loss: 0.006458795629441738\r\nStep 2252, loss: 0.007805053610354662\r\nStep 2253, loss: 0.007725644391030073\r\nStep 2254, loss: 0.007198499981313944\r\nStep 2255, loss: 0.00799979642033577\r\nStep 2256, loss: 0.007058672606945038\r\nStep 2257, loss: 0.0069746156223118305\r\nStep 2258, loss: 0.00751868961378932\r\nStep 2259, loss: 0.007431623060256243\r\nStep 2260, loss: 0.0062018707394599915\r\nStep 2287, loss: 0.007020978722721338\r\nStep 2288, loss: 0.00875190831720829\r\nStep 2234, loss: 0.007333801127970219\r\nStep 2235, loss: 0.007183652836829424\r\nStep 2236, loss: 0.008483086712658405\r\nStep 2237, loss: 0.007403460331261158\r\nStep 2238, loss: 0.007542226929217577\r\nStep 2239, loss: 0.006474603898823261\r\nStep 2240, loss: 0.006936528254300356\r\nStep 2241, loss: 0.009447640739381313\r\nStep 2242, loss: 0.007099971640855074\r\nStep 2243, loss: 0.00863928534090519\r\nStep 2244, loss: 0.01093036774545908\r\nStep 2245, loss: 0.008053792640566826\r\nStep 2246, loss: 0.008137132041156292\r\nStep 2247, loss: 0.007317351642996073\r\nStep 2248, loss: 0.00731588713824749\r\nStep 2249, loss: 0.006467532832175493\r\nStep 2250, loss: 0.007371681742370129\r\nStep 2251, loss: 0.006458795629441738\r\nStep 2252, loss: 0.007805053610354662\r\nStep 2253, loss: 0.007725644391030073\r\nStep 2254, 
loss: 0.007198499981313944\r\nStep 2255, loss: 0.00799979642033577\r\nStep 2256, loss: 0.007058672606945038\r\nStep 2257, loss: 0.0069746156223118305\r\nStep 2258, loss: 0.00751868961378932\r\nStep 2259, loss: 0.007431623060256243\r\nStep 2260, loss: 0.0062018707394599915\r\nStep 2289, loss: 0.008151629008352757\r\nStep 2290, loss: 0.007590850815176964\r\nStep 2291, loss: 0.006018661893904209\r\nStep 2292, loss: 0.006589282304048538\r\nStep 2293, loss: 0.007021039258688688\r\nStep 2294, loss: 0.008005218580365181\r\nStep 2295, loss: 0.007952924817800522\r\nStep 2296, loss: 0.008568134158849716\r\nStep 2297, loss: 0.0067298514768481255\r\nStep 2298, loss: 0.0075162495486438274\r\nStep 2299, loss: 0.006727202795445919\r\nStep 2300, loss: 0.006799233146011829\r\nStep 2301, loss: 0.00684049166738987\r\nStep 2302, loss: 0.007529537659138441\r\nStep 2303, loss: 0.006426770705729723\r\nStep 2304, loss: 0.008691138587892056\r\nStep 2305, loss: 0.006516975816339254\r\nStep 2306, loss: 0.0053546535782516\r\nStep 2307, loss: 0.007579582743346691\r\nStep 2308, loss: 0.007574320770800114\r\nStep 2309, loss: 0.007034412119537592\r\nStep 2310, loss: 0.006250488106161356\r\nStep 2311, loss: 0.008510340936481953\r\nStep 2312, loss: 0.00894845649600029\r\nStep 2313, loss: 0.008291991427540779\r\nStep 2314, loss: 0.009398137219250202\r\nStep 2261, loss: 0.008419814519584179\r\nStep 2262, loss: 0.006146540865302086\r\nStep 2263, loss: 0.007318414747714996\r\nStep 2264, loss: 0.009463437832891941\r\nStep 2265, loss: 0.006919173989444971\r\nStep 2266, loss: 0.009040049277245998\r\nStep 2267, loss: 0.007615735754370689\r\nStep 2268, loss: 0.010363750159740448\r\nStep 2269, loss: 0.008556756190955639\r\nStep 2270, loss: 0.009120402857661247\r\nStep 2271, loss: 0.008105048909783363\r\nStep 2272, loss: 0.007221136707812548\r\nStep 2273, loss: 0.008906340226531029\r\nStep 2274, loss: 0.00774586945772171\r\nStep 2275, loss: 0.007926639169454575\r\nStep 2276, loss: 0.007177161052823067\r\nStep 2277, loss: 0.007441888563334942\r\nStep 2278, loss: 0.006979580502957106\r\nStep 2279, loss: 0.0070192525163292885\r\nStep 2280, loss: 0.0061452980153262615\r\nStep 2281, loss: 0.007703986018896103\r\nStep 2282, loss: 0.006331630516797304\r\nStep 2283, loss: 0.007644594646990299\r\nStep 2284, loss: 0.008074313402175903\r\nStep 2285, loss: 0.00854454468935728\r\nStep 2286, loss: 0.007954484783113003\r\nStep 2396, loss: 0.008082582615315914\r\nStep 2315, loss: 0.008755427785217762\r\nStep 2316, loss: 0.007275292184203863\r\nStep 2317, loss: 0.007888562977313995\r\nStep 2318, loss: 0.0072205038741230965\r\nStep 2319, loss: 0.009770158678293228\r\nStep 2320, loss: 0.007986108772456646\r\nStep 2321, loss: 0.008087538182735443\r\nStep 2322, loss: 0.006975209806114435\r\nStep 2323, loss: 0.007179504260420799\r\nStep 2324, loss: 0.007759228814393282\r\nStep 2325, loss: 0.006794561631977558\r\nStep 2326, loss: 0.006587014067918062\r\nStep 2327, loss: 0.0058727082796394825\r\nStep 2328, loss: 0.005745173431932926\r\nStep 2329, loss: 0.005147646646946669\r\nStep 2330, loss: 0.00745925260707736\r\nStep 2331, loss: 0.006531362421810627\r\nStep 2332, loss: 0.007551598362624645\r\nStep 2333, loss: 0.007515294477343559\r\nStep 2334, loss: 0.006597206927835941\r\nStep 2335, loss: 0.006945605855435133\r\nStep 2336, loss: 0.005773642100393772\r\nStep 2337, loss: 0.007051005959510803\r\nStep 2338, loss: 0.006912588141858578\r\nStep 2339, loss: 0.006388429086655378\r\nStep 2340, loss: 0.006413211580365896\r\nStep 2287, loss: 
0.007020978722721338\r\nStep 2288, loss: 0.00875190831720829\r\nStep 2396, loss: 0.008082582615315914\r\nStep 2341, loss: 0.010007134638726711\r\nStep 2342, loss: 0.008879064582288265\r\nStep 2343, loss: 0.007293196395039558\r\nStep 2344, loss: 0.007867034524679184\r\nStep 2345, loss: 0.007829985581338406\r\nStep 2346, loss: 0.007642874028533697\r\nStep 2347, loss: 0.007404367905110121\r\nStep 2348, loss: 0.00787848886102438\r\nStep 2349, loss: 0.007604194339364767\r\nStep 2350, loss: 0.006056034006178379\r\nStep 2351, loss: 0.009245391003787518\r\nStep 2352, loss: 0.007415136322379112\r\nStep 2353, loss: 0.007526281755417585\r\nStep 2354, loss: 0.005462340544909239\r\nStep 2355, loss: 0.006441247649490833\r\nStep 2356, loss: 0.007708383724093437\r\nStep 2357, loss: 0.008638114668428898\r\nStep 2358, loss: 0.007165662478655577\r\nStep 2359, loss: 0.008650296367704868\r\nStep 2360, loss: 0.007847641594707966\r\nStep 2361, loss: 0.006319018546491861\r\nStep 2362, loss: 0.006305510178208351\r\nStep 2363, loss: 0.008855611085891724\r\nStep 2364, loss: 0.007100534625351429\r\nStep 2365, loss: 0.008646843954920769\r\nStep 2366, loss: 0.008140524849295616\r\nStep 2289, loss: 0.008151629008352757\r\nStep 2290, loss: 0.007590850815176964\r\nStep 2291, loss: 0.006018661893904209\r\nStep 2292, loss: 0.006589282304048538\r\nStep 2293, loss: 0.007021039258688688\r\nStep 2294, loss: 0.008005218580365181\r\nStep 2295, loss: 0.007952924817800522\r\nStep 2296, loss: 0.008568134158849716\r\nStep 2297, loss: 0.0067298514768481255\r\nStep 2298, loss: 0.0075162495486438274\r\nStep 2299, loss: 0.006727202795445919\r\nStep 2300, loss: 0.006799233146011829\r\nStep 2301, loss: 0.00684049166738987\r\nStep 2302, loss: 0.007529537659138441\r\nStep 2303, loss: 0.006426770705729723\r\nStep 2304, loss: 0.008691138587892056\r\nStep 2305, loss: 0.006516975816339254\r\nStep 2306, loss: 0.0053546535782516\r\nStep 2307, loss: 0.007579582743346691\r\nStep 2308, loss: 0.007574320770800114\r\nStep 2309, loss: 0.007034412119537592\r\nStep 2310, loss: 0.006250488106161356\r\nStep 2311, loss: 0.008510340936481953\r\nStep 2312, loss: 0.00894845649600029\r\nStep 2313, loss: 0.008291991427540779\r\nStep 2314, loss: 0.009398137219250202\r\nStep 2289, loss: 0.008151629008352757\r\nStep 2290, loss: 0.007590850815176964\r\nStep 2291, loss: 0.006018661893904209\r\nStep 2292, loss: 0.006589282304048538\r\nStep 2293, loss: 0.007021039258688688\r\nStep 2294, loss: 0.008005218580365181\r\nStep 2295, loss: 0.007952924817800522\r\nStep 2296, loss: 0.008568134158849716\r\nStep 2297, loss: 0.0067298514768481255\r\nStep 2298, loss: 0.0075162495486438274\r\nStep 2299, loss: 0.006727202795445919\r\nStep 2300, loss: 0.006799233146011829\r\nStep 2301, loss: 0.00684049166738987\r\nStep 2302, loss: 0.007529537659138441\r\nStep 2303, loss: 0.006426770705729723\r\nStep 2304, loss: 0.008691138587892056\r\nStep 2305, loss: 0.006516975816339254\r\nStep 2306, loss: 0.0053546535782516\r\nStep 2307, loss: 0.007579582743346691\r\nStep 2308, loss: 0.007574320770800114\r\nStep 2309, loss: 0.007034412119537592\r\nStep 2310, loss: 0.006250488106161356\r\nStep 2311, loss: 0.008510340936481953\r\nStep 2312, loss: 0.00894845649600029\r\nStep 2313, loss: 0.008291991427540779\r\nStep 2314, loss: 0.009398137219250202\r\nStep 2367, loss: 0.006965464912354946\r\nStep 2368, loss: 0.0073202187195420265\r\nStep 2369, loss: 0.008150244131684303\r\nStep 2370, loss: 0.007980136200785637\r\nStep 2371, loss: 0.007754015736281872\r\nStep 2372, loss: 0.00854856614023447\r\nStep 2373, 
loss: 0.007721675559878349\r\nStep 2374, loss: 0.007285380270332098\r\nStep 2375, loss: 0.007755438331514597\r\nStep 2376, loss: 0.006344699300825596\r\nStep 2377, loss: 0.006605956237763166\r\nStep 2378, loss: 0.005707500036805868\r\nStep 2379, loss: 0.007001930847764015\r\nStep 2380, loss: 0.0059698657132685184\r\nStep 2381, loss: 0.0074415965937078\r\nStep 2382, loss: 0.006967493798583746\r\nStep 2383, loss: 0.008568624034523964\r\nStep 2384, loss: 0.006496841553598642\r\nStep 2385, loss: 0.006507178768515587\r\nStep 2386, loss: 0.008202851749956608\r\nStep 2387, loss: 0.0077567268162965775\r\nStep 2388, loss: 0.010153812356293201\r\nStep 2389, loss: 0.010740460827946663\r\nStep 2390, loss: 0.007692711893469095\r\nStep 2391, loss: 0.010689156129956245\r\nStep 2392, loss: 0.0076215327717363834\r\nStep 2315, loss: 0.008755427785217762\r\nStep 2316, loss: 0.007275292184203863\r\nStep 2317, loss: 0.007888562977313995\r\nStep 2318, loss: 0.0072205038741230965\r\nStep 2319, loss: 0.009770158678293228\r\nStep 2320, loss: 0.007986108772456646\r\nStep 2321, loss: 0.008087538182735443\r\nStep 2322, loss: 0.006975209806114435\r\nStep 2323, loss: 0.007179504260420799\r\nStep 2324, loss: 0.007759228814393282\r\nStep 2325, loss: 0.006794561631977558\r\nStep 2326, loss: 0.006587014067918062\r\nStep 2327, loss: 0.0058727082796394825\r\nStep 2328, loss: 0.005745173431932926\r\nStep 2329, loss: 0.005147646646946669\r\nStep 2330, loss: 0.00745925260707736\r\nStep 2331, loss: 0.006531362421810627\r\nStep 2332, loss: 0.007551598362624645\r\nStep 2333, loss: 0.007515294477343559\r\nStep 2334, loss: 0.006597206927835941\r\nStep 2335, loss: 0.006945605855435133\r\nStep 2336, loss: 0.005773642100393772\r\nStep 2337, loss: 0.007051005959510803\r\nStep 2338, loss: 0.006912588141858578\r\nStep 2339, loss: 0.006388429086655378\r\nStep 2340, loss: 0.006413211580365896\r\nStep 2315, loss: 0.008755427785217762\r\nStep 2316, loss: 0.007275292184203863\r\nStep 2317, loss: 0.007888562977313995\r\nStep 2318, loss: 0.0072205038741230965\r\nStep 2319, loss: 0.009770158678293228\r\nStep 2320, loss: 0.007986108772456646\r\nStep 2321, loss: 0.008087538182735443\r\nStep 2322, loss: 0.006975209806114435\r\nStep 2323, loss: 0.007179504260420799\r\nStep 2324, loss: 0.007759228814393282\r\nStep 2325, loss: 0.006794561631977558\r\nStep 2326, loss: 0.006587014067918062\r\nStep 2327, loss: 0.0058727082796394825\r\nStep 2328, loss: 0.005745173431932926\r\nStep 2329, loss: 0.005147646646946669\r\nStep 2330, loss: 0.00745925260707736\r\nStep 2331, loss: 0.006531362421810627\r\nStep 2332, loss: 0.007551598362624645\r\nStep 2333, loss: 0.007515294477343559\r\nStep 2334, loss: 0.006597206927835941\r\nStep 2335, loss: 0.006945605855435133\r\nStep 2336, loss: 0.005773642100393772\r\nStep 2337, loss: 0.007051005959510803\r\nStep 2338, loss: 0.006912588141858578\r\nStep 2339, loss: 0.006388429086655378\r\nStep 2340, loss: 0.006413211580365896\r\nStep 2393, loss: 0.006426332518458366\r\nStep 2394, loss: 0.007592188194394112\r\nStep 2395, loss: 0.008529632352292538\r\nStep 2341, loss: 0.010007134638726711\r\nStep 2342, loss: 0.008879064582288265\r\nStep 2343, loss: 0.007293196395039558\r\nStep 2344, loss: 0.007867034524679184\r\nStep 2345, loss: 0.007829985581338406\r\nStep 2346, loss: 0.007642874028533697\r\nStep 2347, loss: 0.007404367905110121\r\nStep 2348, loss: 0.00787848886102438\r\nStep 2349, loss: 0.007604194339364767\r\nStep 2350, loss: 0.006056034006178379\r\nStep 2351, loss: 0.009245391003787518\r\nStep 2352, loss: 
0.007415136322379112\r\nStep 2353, loss: 0.007526281755417585\r\nStep 2354, loss: 0.005462340544909239\r\nStep 2355, loss: 0.006441247649490833\r\nStep 2356, loss: 0.007708383724093437\r\nStep 2357, loss: 0.008638114668428898\r\nStep 2358, loss: 0.007165662478655577\r\nStep 2359, loss: 0.008650296367704868\r\nStep 2360, loss: 0.007847641594707966\r\nStep 2361, loss: 0.006319018546491861\r\nStep 2362, loss: 0.006305510178208351\r\nStep 2363, loss: 0.008855611085891724\r\nStep 2364, loss: 0.007100534625351429\r\nStep 2365, loss: 0.008646843954920769\r\nStep 2366, loss: 0.008140524849295616\r\nStep 2341, loss: 0.010007134638726711\r\nStep 2342, loss: 0.008879064582288265\r\nStep 2343, loss: 0.007293196395039558\r\nStep 2344, loss: 0.007867034524679184\r\nStep 2345, loss: 0.007829985581338406\r\nStep 2346, loss: 0.007642874028533697\r\nStep 2347, loss: 0.007404367905110121\r\nStep 2348, loss: 0.00787848886102438\r\nStep 2349, loss: 0.007604194339364767\r\nStep 2350, loss: 0.006056034006178379\r\nStep 2351, loss: 0.009245391003787518\r\nStep 2352, loss: 0.007415136322379112\r\nStep 2353, loss: 0.007526281755417585\r\nStep 2354, loss: 0.005462340544909239\r\nStep 2355, loss: 0.006441247649490833\r\nStep 2356, loss: 0.007708383724093437\r\nStep 2357, loss: 0.008638114668428898\r\nStep 2358, loss: 0.007165662478655577\r\nStep 2359, loss: 0.008650296367704868\r\nStep 2360, loss: 0.007847641594707966\r\nStep 2361, loss: 0.006319018546491861\r\nStep 2362, loss: 0.006305510178208351\r\nStep 2363, loss: 0.008855611085891724\r\nStep 2364, loss: 0.007100534625351429\r\nStep 2365, loss: 0.008646843954920769\r\nStep 2366, loss: 0.008140524849295616\r\nStep 2182, loss: 0.007047292776405811\r\nStep 2183, loss: 0.005946440622210503\r\nStep 2184, loss: 0.007753191515803337\r\nStep 2185, loss: 0.007971913553774357\r\nStep 2186, loss: 0.006793928798288107\r\nStep 2187, loss: 0.007563348393887281\r\nStep 2188, loss: 0.007450959179550409\r\nStep 2189, loss: 0.007365739438682795\r\nStep 2190, loss: 0.007971013896167278\r\nStep 2191, loss: 0.006419710349291563\r\nStep 2192, loss: 0.007855601608753204\r\nStep 2193, loss: 0.006554008927196264\r\nStep 2194, loss: 0.007919912226498127\r\nStep 2195, loss: 0.009246248751878738\r\nStep 2196, loss: 0.0077072675339877605\r\nStep 2197, loss: 0.009428897872567177\r\nStep 2198, loss: 0.008707528933882713\r\nStep 2199, loss: 0.008782273158431053\r\nStep 2200, loss: 0.011009319685399532\r\nStep 2201, loss: 0.008429359644651413\r\nStep 2202, loss: 0.007610331755131483\r\nStep 2203, loss: 0.007773495279252529\r\nStep 2204, loss: 0.009314494207501411\r\nStep 2205, loss: 0.007566919084638357\r\nStep 2206, loss: 0.00774649903178215\r\nStep 2207, loss: 0.006603968795388937\r\nStep 2367, loss: 0.006965464912354946\r\nStep 2368, loss: 0.0073202187195420265\r\nStep 2369, loss: 0.008150244131684303\r\nStep 2370, loss: 0.007980136200785637\r\nStep 2371, loss: 0.007754015736281872\r\nStep 2372, loss: 0.00854856614023447\r\nStep 2373, loss: 0.007721675559878349\r\nStep 2374, loss: 0.007285380270332098\r\nStep 2375, loss: 0.007755438331514597\r\nStep 2376, loss: 0.006344699300825596\r\nStep 2377, loss: 0.006605956237763166\r\nStep 2378, loss: 0.005707500036805868\r\nStep 2379, loss: 0.007001930847764015\r\nStep 2380, loss: 0.0059698657132685184\r\nStep 2381, loss: 0.0074415965937078\r\nStep 2382, loss: 0.006967493798583746\r\nStep 2383, loss: 0.008568624034523964\r\nStep 2384, loss: 0.006496841553598642\r\nStep 2385, loss: 0.006507178768515587\r\nStep 2386, loss: 0.008202851749956608\r\nStep 
2387, loss: 0.0077567268162965775\r\nStep 2388, loss: 0.010153812356293201\r\nStep 2389, loss: 0.010740460827946663\r\nStep 2390, loss: 0.007692711893469095\r\nStep 2391, loss: 0.010689156129956245\r\nStep 2392, loss: 0.0076215327717363834\r\nStep 2367, loss: 0.006965464912354946\r\nStep 2368, loss: 0.0073202187195420265\r\nStep 2369, loss: 0.008150244131684303\r\nStep 2370, loss: 0.007980136200785637\r\nStep 2371, loss: 0.007754015736281872\r\nStep 2372, loss: 0.00854856614023447\r\nStep 2373, loss: 0.007721675559878349\r\nStep 2374, loss: 0.007285380270332098\r\nStep 2375, loss: 0.007755438331514597\r\nStep 2376, loss: 0.006344699300825596\r\nStep 2377, loss: 0.006605956237763166\r\nStep 2378, loss: 0.005707500036805868\r\nStep 2379, loss: 0.007001930847764015\r\nStep 2380, loss: 0.0059698657132685184\r\nStep 2381, loss: 0.0074415965937078\r\nStep 2382, loss: 0.006967493798583746\r\nStep 2383, loss: 0.008568624034523964\r\nStep 2384, loss: 0.006496841553598642\r\nStep 2385, loss: 0.006507178768515587\r\nStep 2386, loss: 0.008202851749956608\r\nStep 2387, loss: 0.0077567268162965775\r\nStep 2388, loss: 0.010153812356293201\r\nStep 2389, loss: 0.010740460827946663\r\nStep 2390, loss: 0.007692711893469095\r\nStep 2391, loss: 0.010689156129956245\r\nStep 2392, loss: 0.0076215327717363834\r\nStep 2208, loss: 0.006329503376036882\r\nStep 2209, loss: 0.007151519879698753\r\nStep 2210, loss: 0.0077710580080747604\r\nStep 2211, loss: 0.006214924156665802\r\nStep 2212, loss: 0.006694791838526726\r\nStep 2213, loss: 0.0073739453218877316\r\nStep 2214, loss: 0.005623674485832453\r\nStep 2215, loss: 0.0078982999548316\r\nStep 2216, loss: 0.007187147159129381\r\nStep 2217, loss: 0.007525425869971514\r\nStep 2218, loss: 0.006596721243113279\r\nStep 2219, loss: 0.007706223987042904\r\nStep 2220, loss: 0.007000356912612915\r\nStep 2221, loss: 0.008878003805875778\r\nStep 2222, loss: 0.006376492790877819\r\nStep 2223, loss: 0.008050079457461834\r\nStep 2224, loss: 0.007886356674134731\r\nStep 2225, loss: 0.0069083841517567635\r\nStep 2226, loss: 0.006824408657848835\r\nStep 2227, loss: 0.006793968845158815\r\nStep 2228, loss: 0.007624398451298475\r\nStep 2229, loss: 0.0081605464220047\r\nStep 2230, loss: 0.006897060200572014\r\nStep 2231, loss: 0.007584972772747278\r\nStep 2232, loss: 0.0069396113976836205\r\nStep 2233, loss: 0.007637462578713894\r\nStep 2393, loss: 0.006426332518458366\r\nStep 2394, loss: 0.007592188194394112\r\nStep 2395, loss: 0.008529632352292538\r\nStep 2393, loss: 0.006426332518458366\r\nStep 2394, loss: 0.007592188194394112\r\nStep 2395, loss: 0.008529632352292538\r\nStep 2234, loss: 0.007333801127970219\r\nStep 2235, loss: 0.007183652836829424\r\nStep 2236, loss: 0.008483086712658405\r\nStep 2237, loss: 0.007403460331261158\r\nStep 2238, loss: 0.007542226929217577\r\nStep 2239, loss: 0.006474603898823261\r\nStep 2240, loss: 0.006936528254300356\r\nStep 2241, loss: 0.009447640739381313\r\nStep 2242, loss: 0.007099971640855074\r\nStep 2243, loss: 0.00863928534090519\r\nStep 2244, loss: 0.01093036774545908\r\nStep 2245, loss: 0.008053792640566826\r\nStep 2246, loss: 0.008137132041156292\r\nStep 2247, loss: 0.007317351642996073\r\nStep 2248, loss: 0.00731588713824749\r\nStep 2249, loss: 0.006467532832175493\r\nStep 2250, loss: 0.007371681742370129\r\nStep 2251, loss: 0.006458795629441738\r\nStep 2252, loss: 0.007805053610354662\r\nStep 2253, loss: 0.007725644391030073\r\nStep 2254, loss: 0.007198499981313944\r\nStep 2255, loss: 0.00799979642033577\r\nStep 2256, loss: 
0.007058672606945038\r\nStep 2257, loss: 0.0069746156223118305\r\nStep 2258, loss: 0.00751868961378932\r\nStep 2259, loss: 0.007431623060256243\r\nStep 2260, loss: 0.0062018707394599915\r\nStep 2396, loss: 0.008082582615315914\r\nStep 2396, loss: 0.008082582615315914\r\nStep 2261, loss: 0.008419814519584179\r\nStep 2262, loss: 0.006146540865302086\r\nStep 2263, loss: 0.007318414747714996\r\nStep 2264, loss: 0.009463437832891941\r\nStep 2265, loss: 0.006919173989444971\r\nStep 2266, loss: 0.009040049277245998\r\nStep 2267, loss: 0.007615735754370689\r\nStep 2268, loss: 0.010363750159740448\r\nStep 2269, loss: 0.008556756190955639\r\nStep 2270, loss: 0.009120402857661247\r\nStep 2271, loss: 0.008105048909783363\r\nStep 2272, loss: 0.007221136707812548\r\nStep 2273, loss: 0.008906340226531029\r\nStep 2274, loss: 0.00774586945772171\r\nStep 2275, loss: 0.007926639169454575\r\nStep 2276, loss: 0.007177161052823067\r\nStep 2277, loss: 0.007441888563334942\r\nStep 2278, loss: 0.006979580502957106\r\nStep 2279, loss: 0.0070192525163292885\r\nStep 2280, loss: 0.0061452980153262615\r\nStep 2281, loss: 0.007703986018896103\r\nStep 2282, loss: 0.006331630516797304\r\nStep 2283, loss: 0.007644594646990299\r\nStep 2284, loss: 0.008074313402175903\r\nStep 2285, loss: 0.00854454468935728\r\nStep 2286, loss: 0.007954484783113003\r\nStep 2261, loss: 0.008419814519584179\r\nStep 2262, loss: 0.006146540865302086\r\nStep 2263, loss: 0.007318414747714996\r\nStep 2264, loss: 0.009463437832891941\r\nStep 2265, loss: 0.006919173989444971\r\nStep 2266, loss: 0.009040049277245998\r\nStep 2267, loss: 0.007615735754370689\r\nStep 2268, loss: 0.010363750159740448\r\nStep 2269, loss: 0.008556756190955639\r\nStep 2270, loss: 0.009120402857661247\r\nStep 2271, loss: 0.008105048909783363\r\nStep 2272, loss: 0.007221136707812548\r\nStep 2273, loss: 0.008906340226531029\r\nStep 2274, loss: 0.00774586945772171\r\nStep 2275, loss: 0.007926639169454575\r\nStep 2276, loss: 0.007177161052823067\r\nStep 2277, loss: 0.007441888563334942\r\nStep 2278, loss: 0.006979580502957106\r\nStep 2279, loss: 0.0070192525163292885\r\nStep 2280, loss: 0.0061452980153262615\r\nStep 2281, loss: 0.007703986018896103\r\nStep 2282, loss: 0.006331630516797304\r\nStep 2283, loss: 0.007644594646990299\r\nStep 2284, loss: 0.008074313402175903\r\nStep 2285, loss: 0.00854454468935728\r\nStep 2286, loss: 0.007954484783113003\r\nStep 2182, loss: 0.007047292776405811\r\nStep 2183, loss: 0.005946440622210503\r\nStep 2184, loss: 0.007753191515803337\r\nStep 2185, loss: 0.007971913553774357\r\nStep 2186, loss: 0.006793928798288107\r\nStep 2187, loss: 0.007563348393887281\r\nStep 2188, loss: 0.007450959179550409\r\nStep 2189, loss: 0.007365739438682795\r\nStep 2190, loss: 0.007971013896167278\r\nStep 2191, loss: 0.006419710349291563\r\nStep 2192, loss: 0.007855601608753204\r\nStep 2193, loss: 0.006554008927196264\r\nStep 2194, loss: 0.007919912226498127\r\nStep 2195, loss: 0.009246248751878738\r\nStep 2196, loss: 0.0077072675339877605\r\nStep 2197, loss: 0.009428897872567177\r\nStep 2198, loss: 0.008707528933882713\r\nStep 2199, loss: 0.008782273158431053\r\nStep 2200, loss: 0.011009319685399532\r\nStep 2201, loss: 0.008429359644651413\r\nStep 2202, loss: 0.007610331755131483\r\nStep 2203, loss: 0.007773495279252529\r\nStep 2204, loss: 0.009314494207501411\r\nStep 2205, loss: 0.007566919084638357\r\nStep 2206, loss: 0.00774649903178215\r\nStep 2207, loss: 0.006603968795388937\r\nStep 2182, loss: 0.007047292776405811\r\nStep 2183, loss: 
0.005946440622210503\r\nStep 2184, loss: 0.007753191515803337\r\nStep 2185, loss: 0.007971913553774357\r\nStep 2186, loss: 0.006793928798288107\r\nStep 2187, loss: 0.007563348393887281\r\nStep 2188, loss: 0.007450959179550409\r\nStep 2189, loss: 0.007365739438682795\r\nStep 2190, loss: 0.007971013896167278\r\nStep 2191, loss: 0.006419710349291563\r\nStep 2192, loss: 0.007855601608753204\r\nStep 2193, loss: 0.006554008927196264\r\nStep 2194, loss: 0.007919912226498127\r\nStep 2195, loss: 0.009246248751878738\r\nStep 2196, loss: 0.0077072675339877605\r\nStep 2197, loss: 0.009428897872567177\r\nStep 2198, loss: 0.008707528933882713\r\nStep 2199, loss: 0.008782273158431053\r\nStep 2200, loss: 0.011009319685399532\r\nStep 2201, loss: 0.008429359644651413\r\nStep 2202, loss: 0.007610331755131483\r\nStep 2203, loss: 0.007773495279252529\r\nStep 2204, loss: 0.009314494207501411\r\nStep 2205, loss: 0.007566919084638357\r\nStep 2206, loss: 0.00774649903178215\r\nStep 2207, loss: 0.006603968795388937\r\nStep 2287, loss: 0.007020978722721338\r\nStep 2288, loss: 0.00875190831720829\r\nStep 2287, loss: 0.007020978722721338\r\nStep 2288, loss: 0.00875190831720829\r\nStep 2208, loss: 0.006329503376036882\r\nStep 2209, loss: 0.007151519879698753\r\nStep 2210, loss: 0.0077710580080747604\r\nStep 2211, loss: 0.006214924156665802\r\nStep 2212, loss: 0.006694791838526726\r\nStep 2213, loss: 0.0073739453218877316\r\nStep 2214, loss: 0.005623674485832453\r\nStep 2215, loss: 0.0078982999548316\r\nStep 2216, loss: 0.007187147159129381\r\nStep 2217, loss: 0.007525425869971514\r\nStep 2218, loss: 0.006596721243113279\r\nStep 2219, loss: 0.007706223987042904\r\nStep 2220, loss: 0.007000356912612915\r\nStep 2221, loss: 0.008878003805875778\r\nStep 2222, loss: 0.006376492790877819\r\nStep 2223, loss: 0.008050079457461834\r\nStep 2224, loss: 0.007886356674134731\r\nStep 2225, loss: 0.0069083841517567635\r\nStep 2226, loss: 0.006824408657848835\r\nStep 2227, loss: 0.006793968845158815\r\nStep 2228, loss: 0.007624398451298475\r\nStep 2229, loss: 0.0081605464220047\r\nStep 2230, loss: 0.006897060200572014\r\nStep 2231, loss: 0.007584972772747278\r\nStep 2232, loss: 0.0069396113976836205\r\nStep 2233, loss: 0.007637462578713894\r\nStep 2208, loss: 0.006329503376036882\r\nStep 2209, loss: 0.007151519879698753\r\nStep 2210, loss: 0.0077710580080747604\r\nStep 2211, loss: 0.006214924156665802\r\nStep 2212, loss: 0.006694791838526726\r\nStep 2213, loss: 0.0073739453218877316\r\nStep 2214, loss: 0.005623674485832453\r\nStep 2215, loss: 0.0078982999548316\r\nStep 2216, loss: 0.007187147159129381\r\nStep 2217, loss: 0.007525425869971514\r\nStep 2218, loss: 0.006596721243113279\r\nStep 2219, loss: 0.007706223987042904\r\nStep 2220, loss: 0.007000356912612915\r\nStep 2221, loss: 0.008878003805875778\r\nStep 2222, loss: 0.006376492790877819\r\nStep 2223, loss: 0.008050079457461834\r\nStep 2224, loss: 0.007886356674134731\r\nStep 2225, loss: 0.0069083841517567635\r\nStep 2226, loss: 0.006824408657848835\r\nStep 2227, loss: 0.006793968845158815\r\nStep 2228, loss: 0.007624398451298475\r\nStep 2229, loss: 0.0081605464220047\r\nStep 2230, loss: 0.006897060200572014\r\nStep 2231, loss: 0.007584972772747278\r\nStep 2232, loss: 0.0069396113976836205\r\nStep 2233, loss: 0.007637462578713894\r\nStep 2182, loss: 0.007047292776405811\r\nStep 2183, loss: 0.005946440622210503\r\nStep 2184, loss: 0.007753191515803337\r\nStep 2185, loss: 0.007971913553774357\r\nStep 2186, loss: 0.006793928798288107\r\nStep 2187, loss: 0.007563348393887281\r\nStep 
2188, loss: 0.007450959179550409\r\nStep 2189, loss: 0.007365739438682795\r\nStep 2190, loss: 0.007971013896167278\r\nStep 2191, loss: 0.006419710349291563\r\nStep 2192, loss: 0.007855601608753204\r\nStep 2193, loss: 0.006554008927196264\r\nStep 2194, loss: 0.007919912226498127\r\nStep 2195, loss: 0.009246248751878738\r\nStep 2196, loss: 0.0077072675339877605\r\nStep 2197, loss: 0.009428897872567177\r\nStep 2198, loss: 0.008707528933882713\r\nStep 2199, loss: 0.008782273158431053\r\nStep 2200, loss: 0.011009319685399532\r\nStep 2201, loss: 0.008429359644651413\r\nStep 2202, loss: 0.007610331755131483\r\nStep 2203, loss: 0.007773495279252529\r\nStep 2204, loss: 0.009314494207501411\r\nStep 2205, loss: 0.007566919084638357\r\nStep 2206, loss: 0.00774649903178215\r\nStep 2207, loss: 0.006603968795388937\r\nStep 2182, loss: 0.007047292776405811\r\nStep 2183, loss: 0.005946440622210503\r\nStep 2184, loss: 0.007753191515803337\r\nStep 2185, loss: 0.007971913553774357\r\nStep 2186, loss: 0.006793928798288107\r\nStep 2187, loss: 0.007563348393887281\r\nStep 2188, loss: 0.007450959179550409\r\nStep 2189, loss: 0.007365739438682795\r\nStep 2190, loss: 0.007971013896167278\r\nStep 2191, loss: 0.006419710349291563\r\nStep 2192, loss: 0.007855601608753204\r\nStep 2193, loss: 0.006554008927196264\r\nStep 2194, loss: 0.007919912226498127\r\nStep 2195, loss: 0.009246248751878738\r\nStep 2196, loss: 0.0077072675339877605\r\nStep 2197, loss: 0.009428897872567177\r\nStep 2198, loss: 0.008707528933882713\r\nStep 2199, loss: 0.008782273158431053\r\nStep 2200, loss: 0.011009319685399532\r\nStep 2201, loss: 0.008429359644651413\r\nStep 2202, loss: 0.007610331755131483\r\nStep 2203, loss: 0.007773495279252529\r\nStep 2204, loss: 0.009314494207501411\r\nStep 2205, loss: 0.007566919084638357\r\nStep 2206, loss: 0.00774649903178215\r\nStep 2207, loss: 0.006603968795388937\r\nStep 2234, loss: 0.007333801127970219\r\nStep 2235, loss: 0.007183652836829424\r\nStep 2236, loss: 0.008483086712658405\r\nStep 2237, loss: 0.007403460331261158\r\nStep 2238, loss: 0.007542226929217577\r\nStep 2239, loss: 0.006474603898823261\r\nStep 2240, loss: 0.006936528254300356\r\nStep 2241, loss: 0.009447640739381313\r\nStep 2242, loss: 0.007099971640855074\r\nStep 2243, loss: 0.00863928534090519\r\nStep 2244, loss: 0.01093036774545908\r\nStep 2245, loss: 0.008053792640566826\r\nStep 2246, loss: 0.008137132041156292\r\nStep 2247, loss: 0.007317351642996073\r\nStep 2248, loss: 0.00731588713824749\r\nStep 2249, loss: 0.006467532832175493\r\nStep 2250, loss: 0.007371681742370129\r\nStep 2251, loss: 0.006458795629441738\r\nStep 2252, loss: 0.007805053610354662\r\nStep 2253, loss: 0.007725644391030073\r\nStep 2254, loss: 0.007198499981313944\r\nStep 2255, loss: 0.00799979642033577\r\nStep 2256, loss: 0.007058672606945038\r\nStep 2257, loss: 0.0069746156223118305\r\nStep 2258, loss: 0.00751868961378932\r\nStep 2259, loss: 0.007431623060256243\r\nStep 2260, loss: 0.0062018707394599915\r\nStep 2234, loss: 0.007333801127970219\r\nStep 2235, loss: 0.007183652836829424\r\nStep 2236, loss: 0.008483086712658405\r\nStep 2237, loss: 0.007403460331261158\r\nStep 2238, loss: 0.007542226929217577\r\nStep 2239, loss: 0.006474603898823261\r\nStep 2240, loss: 0.006936528254300356\r\nStep 2241, loss: 0.009447640739381313\r\nStep 2242, loss: 0.007099971640855074\r\nStep 2243, loss: 0.00863928534090519\r\nStep 2244, loss: 0.01093036774545908\r\nStep 2245, loss: 0.008053792640566826\r\nStep 2246, loss: 0.008137132041156292\r\nStep 2247, loss: 
0.007317351642996073\r\nStep 2248, loss: 0.00731588713824749\r\nStep 2249, loss: 0.006467532832175493\r\nStep 2250, loss: 0.007371681742370129\r\nStep 2251, loss: 0.006458795629441738\r\nStep 2252, loss: 0.007805053610354662\r\nStep 2253, loss: 0.007725644391030073\r\nStep 2254, loss: 0.007198499981313944\r\nStep 2255, loss: 0.00799979642033577\r\nStep 2256, loss: 0.007058672606945038\r\nStep 2257, loss: 0.0069746156223118305\r\nStep 2258, loss: 0.00751868961378932\r\nStep 2259, loss: 0.007431623060256243\r\nStep 2260, loss: 0.0062018707394599915\r\nStep 2208, loss: 0.006329503376036882\r\nStep 2209, loss: 0.007151519879698753\r\nStep 2210, loss: 0.0077710580080747604\r\nStep 2211, loss: 0.006214924156665802\r\nStep 2212, loss: 0.006694791838526726\r\nStep 2213, loss: 0.0073739453218877316\r\nStep 2214, loss: 0.005623674485832453\r\nStep 2215, loss: 0.0078982999548316\r\nStep 2216, loss: 0.007187147159129381\r\nStep 2217, loss: 0.007525425869971514\r\nStep 2218, loss: 0.006596721243113279\r\nStep 2219, loss: 0.007706223987042904\r\nStep 2220, loss: 0.007000356912612915\r\nStep 2221, loss: 0.008878003805875778\r\nStep 2222, loss: 0.006376492790877819\r\nStep 2223, loss: 0.008050079457461834\r\nStep 2224, loss: 0.007886356674134731\r\nStep 2225, loss: 0.0069083841517567635\r\nStep 2226, loss: 0.006824408657848835\r\nStep 2227, loss: 0.006793968845158815\r\nStep 2228, loss: 0.007624398451298475\r\nStep 2229, loss: 0.0081605464220047\r\nStep 2230, loss: 0.006897060200572014\r\nStep 2231, loss: 0.007584972772747278\r\nStep 2232, loss: 0.0069396113976836205\r\nStep 2233, loss: 0.007637462578713894\r\nStep 2208, loss: 0.006329503376036882\r\nStep 2209, loss: 0.007151519879698753\r\nStep 2210, loss: 0.0077710580080747604\r\nStep 2211, loss: 0.006214924156665802\r\nStep 2212, loss: 0.006694791838526726\r\nStep 2213, loss: 0.0073739453218877316\r\nStep 2214, loss: 0.005623674485832453\r\nStep 2215, loss: 0.0078982999548316\r\nStep 2216, loss: 0.007187147159129381\r\nStep 2217, loss: 0.007525425869971514\r\nStep 2218, loss: 0.006596721243113279\r\nStep 2219, loss: 0.007706223987042904\r\nStep 2220, loss: 0.007000356912612915\r\nStep 2221, loss: 0.008878003805875778\r\nStep 2222, loss: 0.006376492790877819\r\nStep 2223, loss: 0.008050079457461834\r\nStep 2224, loss: 0.007886356674134731\r\nStep 2225, loss: 0.0069083841517567635\r\nStep 2226, loss: 0.006824408657848835\r\nStep 2227, loss: 0.006793968845158815\r\nStep 2228, loss: 0.007624398451298475\r\nStep 2229, loss: 0.0081605464220047\r\nStep 2230, loss: 0.006897060200572014\r\nStep 2231, loss: 0.007584972772747278\r\nStep 2232, loss: 0.0069396113976836205\r\nStep 2233, loss: 0.007637462578713894\r\nStep 2261, loss: 0.008419814519584179\r\nStep 2262, loss: 0.006146540865302086\r\nStep 2263, loss: 0.007318414747714996\r\nStep 2264, loss: 0.009463437832891941\r\nStep 2265, loss: 0.006919173989444971\r\nStep 2266, loss: 0.009040049277245998\r\nStep 2267, loss: 0.007615735754370689\r\nStep 2268, loss: 0.010363750159740448\r\nStep 2269, loss: 0.008556756190955639\r\nStep 2270, loss: 0.009120402857661247\r\nStep 2271, loss: 0.008105048909783363\r\nStep 2272, loss: 0.007221136707812548\r\nStep 2273, loss: 0.008906340226531029\r\nStep 2274, loss: 0.00774586945772171\r\nStep 2275, loss: 0.007926639169454575\r\nStep 2276, loss: 0.007177161052823067\r\nStep 2277, loss: 0.007441888563334942\r\nStep 2278, loss: 0.006979580502957106\r\nStep 2279, loss: 0.0070192525163292885\r\nStep 2280, loss: 0.0061452980153262615\r\nStep 2281, loss: 0.007703986018896103\r\nStep 
2282, loss: 0.006331630516797304\r\nStep 2283, loss: 0.007644594646990299\r\nStep 2284, loss: 0.008074313402175903\r\nStep 2285, loss: 0.00854454468935728\r\nStep 2286, loss: 0.007954484783113003\r\nStep 2261, loss: 0.008419814519584179\r\nStep 2262, loss: 0.006146540865302086\r\nStep 2263, loss: 0.007318414747714996\r\nStep 2264, loss: 0.009463437832891941\r\nStep 2265, loss: 0.006919173989444971\r\nStep 2266, loss: 0.009040049277245998\r\nStep 2267, loss: 0.007615735754370689\r\nStep 2268, loss: 0.010363750159740448\r\nStep 2269, loss: 0.008556756190955639\r\nStep 2270, loss: 0.009120402857661247\r\nStep 2271, loss: 0.008105048909783363\r\nStep 2272, loss: 0.007221136707812548\r\nStep 2273, loss: 0.008906340226531029\r\nStep 2274, loss: 0.00774586945772171\r\nStep 2275, loss: 0.007926639169454575\r\nStep 2276, loss: 0.007177161052823067\r\nStep 2277, loss: 0.007441888563334942\r\nStep 2278, loss: 0.006979580502957106\r\nStep 2279, loss: 0.0070192525163292885\r\nStep 2280, loss: 0.0061452980153262615\r\nStep 2281, loss: 0.007703986018896103\r\nStep 2282, loss: 0.006331630516797304\r\nStep 2283, loss: 0.007644594646990299\r\nStep 2284, loss: 0.008074313402175903\r\nStep 2285, loss: 0.00854454468935728\r\nStep 2286, loss: 0.007954484783113003\r\nStep 2234, loss: 0.007333801127970219\r\nStep 2235, loss: 0.007183652836829424\r\nStep 2236, loss: 0.008483086712658405\r\nStep 2237, loss: 0.007403460331261158\r\nStep 2238, loss: 0.007542226929217577\r\nStep 2239, loss: 0.006474603898823261\r\nStep 2240, loss: 0.006936528254300356\r\nStep 2241, loss: 0.009447640739381313\r\nStep 2242, loss: 0.007099971640855074\r\nStep 2243, loss: 0.00863928534090519\r\nStep 2244, loss: 0.01093036774545908\r\nStep 2245, loss: 0.008053792640566826\r\nStep 2246, loss: 0.008137132041156292\r\nStep 2247, loss: 0.007317351642996073\r\nStep 2248, loss: 0.00731588713824749\r\nStep 2249, loss: 0.006467532832175493\r\nStep 2250, loss: 0.007371681742370129\r\nStep 2251, loss: 0.006458795629441738\r\nStep 2252, loss: 0.007805053610354662\r\nStep 2253, loss: 0.007725644391030073\r\nStep 2254, loss: 0.007198499981313944\r\nStep 2255, loss: 0.00799979642033577\r\nStep 2256, loss: 0.007058672606945038\r\nStep 2257, loss: 0.0069746156223118305\r\nStep 2258, loss: 0.00751868961378932\r\nStep 2259, loss: 0.007431623060256243\r\nStep 2260, loss: 0.0062018707394599915\r\nStep 2234, loss: 0.007333801127970219\r\nStep 2235, loss: 0.007183652836829424\r\nStep 2236, loss: 0.008483086712658405\r\nStep 2237, loss: 0.007403460331261158\r\nStep 2238, loss: 0.007542226929217577\r\nStep 2239, loss: 0.006474603898823261\r\nStep 2240, loss: 0.006936528254300356\r\nStep 2241, loss: 0.009447640739381313\r\nStep 2242, loss: 0.007099971640855074\r\nStep 2243, loss: 0.00863928534090519\r\nStep 2244, loss: 0.01093036774545908\r\nStep 2245, loss: 0.008053792640566826\r\nStep 2246, loss: 0.008137132041156292\r\nStep 2247, loss: 0.007317351642996073\r\nStep 2248, loss: 0.00731588713824749\r\nStep 2249, loss: 0.006467532832175493\r\nStep 2250, loss: 0.007371681742370129\r\nStep 2251, loss: 0.006458795629441738\r\nStep 2252, loss: 0.007805053610354662\r\nStep 2253, loss: 0.007725644391030073\r\nStep 2254, loss: 0.007198499981313944\r\nStep 2255, loss: 0.00799979642033577\r\nStep 2256, loss: 0.007058672606945038\r\nStep 2257, loss: 0.0069746156223118305\r\nStep 2258, loss: 0.00751868961378932\r\nStep 2259, loss: 0.007431623060256243\r\nStep 2260, loss: 0.0062018707394599915\r\nStep 2287, loss: 0.007020978722721338\r\nStep 2288, loss: 
0.00875190831720829\r\nStep 2287, loss: 0.007020978722721338\r\nStep 2288, loss: 0.00875190831720829\r\nStep 2261, loss: 0.008419814519584179\r\nStep 2262, loss: 0.006146540865302086\r\nStep 2263, loss: 0.007318414747714996\r\nStep 2264, loss: 0.009463437832891941\r\nStep 2265, loss: 0.006919173989444971\r\nStep 2266, loss: 0.009040049277245998\r\nStep 2267, loss: 0.007615735754370689\r\nStep 2268, loss: 0.010363750159740448\r\nStep 2269, loss: 0.008556756190955639\r\nStep 2270, loss: 0.009120402857661247\r\nStep 2271, loss: 0.008105048909783363\r\nStep 2272, loss: 0.007221136707812548\r\nStep 2273, loss: 0.008906340226531029\r\nStep 2274, loss: 0.00774586945772171\r\nStep 2275, loss: 0.007926639169454575\r\nStep 2276, loss: 0.007177161052823067\r\nStep 2277, loss: 0.007441888563334942\r\nStep 2278, loss: 0.006979580502957106\r\nStep 2279, loss: 0.0070192525163292885\r\nStep 2280, loss: 0.0061452980153262615\r\nStep 2281, loss: 0.007703986018896103\r\nStep 2282, loss: 0.006331630516797304\r\nStep 2283, loss: 0.007644594646990299\r\nStep 2284, loss: 0.008074313402175903\r\nStep 2285, loss: 0.00854454468935728\r\nStep 2286, loss: 0.007954484783113003\r\nStep 2261, loss: 0.008419814519584179\r\nStep 2262, loss: 0.006146540865302086\r\nStep 2263, loss: 0.007318414747714996\r\nStep 2264, loss: 0.009463437832891941\r\nStep 2265, loss: 0.006919173989444971\r\nStep 2266, loss: 0.009040049277245998\r\nStep 2267, loss: 0.007615735754370689\r\nStep 2268, loss: 0.010363750159740448\r\nStep 2269, loss: 0.008556756190955639\r\nStep 2270, loss: 0.009120402857661247\r\nStep 2271, loss: 0.008105048909783363\r\nStep 2272, loss: 0.007221136707812548\r\nStep 2273, loss: 0.008906340226531029\r\nStep 2274, loss: 0.00774586945772171\r\nStep 2275, loss: 0.007926639169454575\r\nStep 2276, loss: 0.007177161052823067\r\nStep 2277, loss: 0.007441888563334942\r\nStep 2278, loss: 0.006979580502957106\r\nStep 2279, loss: 0.0070192525163292885\r\nStep 2280, loss: 0.0061452980153262615\r\nStep 2281, loss: 0.007703986018896103\r\nStep 2282, loss: 0.006331630516797304\r\nStep 2283, loss: 0.007644594646990299\r\nStep 2284, loss: 0.008074313402175903\r\nStep 2285, loss: 0.00854454468935728\r\nStep 2286, loss: 0.007954484783113003\r\nStep 2182, loss: 0.007047292776405811\r\nStep 2183, loss: 0.005946440622210503\r\nStep 2184, loss: 0.007753191515803337\r\nStep 2185, loss: 0.007971913553774357\r\nStep 2186, loss: 0.006793928798288107\r\nStep 2187, loss: 0.007563348393887281\r\nStep 2188, loss: 0.007450959179550409\r\nStep 2189, loss: 0.007365739438682795\r\nStep 2190, loss: 0.007971013896167278\r\nStep 2191, loss: 0.006419710349291563\r\nStep 2192, loss: 0.007855601608753204\r\nStep 2193, loss: 0.006554008927196264\r\nStep 2194, loss: 0.007919912226498127\r\nStep 2195, loss: 0.009246248751878738\r\nStep 2196, loss: 0.0077072675339877605\r\nStep 2197, loss: 0.009428897872567177\r\nStep 2198, loss: 0.008707528933882713\r\nStep 2199, loss: 0.008782273158431053\r\nStep 2200, loss: 0.011009319685399532\r\nStep 2201, loss: 0.008429359644651413\r\nStep 2202, loss: 0.007610331755131483\r\nStep 2203, loss: 0.007773495279252529\r\nStep 2204, loss: 0.009314494207501411\r\nStep 2205, loss: 0.007566919084638357\r\nStep 2206, loss: 0.00774649903178215\r\nStep 2207, loss: 0.006603968795388937\r\nStep 2289, loss: 0.008151629008352757\r\nStep 2290, loss: 0.007590850815176964\r\nStep 2291, loss: 0.006018661893904209\r\nStep 2292, loss: 0.006589282304048538\r\nStep 2293, loss: 0.007021039258688688\r\nStep 2294, loss: 0.008005218580365181\r\nStep 
2295, loss: 0.007952924817800522\r\nStep 2296, loss: 0.008568134158849716\r\nStep 2297, loss: 0.0067298514768481255\r\nStep 2298, loss: 0.0075162495486438274\r\nStep 2299, loss: 0.006727202795445919\r\nStep 2300, loss: 0.006799233146011829\r\nStep 2301, loss: 0.00684049166738987\r\nStep 2302, loss: 0.007529537659138441\r\nStep 2303, loss: 0.006426770705729723\r\nStep 2304, loss: 0.008691138587892056\r\nStep 2305, loss: 0.006516975816339254\r\nStep 2306, loss: 0.0053546535782516\r\nStep 2307, loss: 0.007579582743346691\r\nStep 2308, loss: 0.007574320770800114\r\nStep 2309, loss: 0.007034412119537592\r\nStep 2310, loss: 0.006250488106161356\r\nStep 2311, loss: 0.008510340936481953\r\nStep 2312, loss: 0.00894845649600029\r\nStep 2313, loss: 0.008291991427540779\r\nStep 2314, loss: 0.009398137219250202\r\nStep 2287, loss: 0.007020978722721338\r\nStep 2288, loss: 0.00875190831720829\r\nStep 2287, loss: 0.007020978722721338\r\nStep 2288, loss: 0.00875190831720829\r\nStep 2208, loss: 0.006329503376036882\r\nStep 2209, loss: 0.007151519879698753\r\nStep 2210, loss: 0.0077710580080747604\r\nStep 2211, loss: 0.006214924156665802\r\nStep 2212, loss: 0.006694791838526726\r\nStep 2213, loss: 0.0073739453218877316\r\nStep 2214, loss: 0.005623674485832453\r\nStep 2215, loss: 0.0078982999548316\r\nStep 2216, loss: 0.007187147159129381\r\nStep 2217, loss: 0.007525425869971514\r\nStep 2218, loss: 0.006596721243113279\r\nStep 2219, loss: 0.007706223987042904\r\nStep 2220, loss: 0.007000356912612915\r\nStep 2221, loss: 0.008878003805875778\r\nStep 2222, loss: 0.006376492790877819\r\nStep 2223, loss: 0.008050079457461834\r\nStep 2224, loss: 0.007886356674134731\r\nStep 2225, loss: 0.0069083841517567635\r\nStep 2226, loss: 0.006824408657848835\r\nStep 2227, loss: 0.006793968845158815\r\nStep 2228, loss: 0.007624398451298475\r\nStep 2229, loss: 0.0081605464220047\r\nStep 2230, loss: 0.006897060200572014\r\nStep 2231, loss: 0.007584972772747278\r\nStep 2232, loss: 0.0069396113976836205\r\nStep 2233, loss: 0.007637462578713894\r\nStep 2315, loss: 0.008755427785217762\r\nStep 2316, loss: 0.007275292184203863\r\nStep 2317, loss: 0.007888562977313995\r\nStep 2318, loss: 0.0072205038741230965\r\nStep 2319, loss: 0.009770158678293228\r\nStep 2320, loss: 0.007986108772456646\r\nStep 2321, loss: 0.008087538182735443\r\nStep 2322, loss: 0.006975209806114435\r\nStep 2323, loss: 0.007179504260420799\r\nStep 2324, loss: 0.007759228814393282\r\nStep 2325, loss: 0.006794561631977558\r\nStep 2326, loss: 0.006587014067918062\r\nStep 2327, loss: 0.0058727082796394825\r\nStep 2328, loss: 0.005745173431932926\r\nStep 2329, loss: 0.005147646646946669\r\nStep 2330, loss: 0.00745925260707736\r\nStep 2331, loss: 0.006531362421810627\r\nStep 2332, loss: 0.007551598362624645\r\nStep 2333, loss: 0.007515294477343559\r\nStep 2334, loss: 0.006597206927835941\r\nStep 2335, loss: 0.006945605855435133\r\nStep 2336, loss: 0.005773642100393772\r\nStep 2337, loss: 0.007051005959510803\r\nStep 2338, loss: 0.006912588141858578\r\nStep 2339, loss: 0.006388429086655378\r\nStep 2340, loss: 0.006413211580365896\r\nStep 2289, loss: 0.008151629008352757\r\nStep 2290, loss: 0.007590850815176964\r\nStep 2291, loss: 0.006018661893904209\r\nStep 2292, loss: 0.006589282304048538\r\nStep 2293, loss: 0.007021039258688688\r\nStep 2294, loss: 0.008005218580365181\r\nStep 2295, loss: 0.007952924817800522\r\nStep 2296, loss: 0.008568134158849716\r\nStep 2297, loss: 0.0067298514768481255\r\nStep 2298, loss: 0.0075162495486438274\r\nStep 2299, loss: 
0.006727202795445919\r\nStep 2300, loss: 0.006799233146011829\r\nStep 2301, loss: 0.00684049166738987\r\nStep 2302, loss: 0.007529537659138441\r\nStep 2303, loss: 0.006426770705729723\r\nStep 2304, loss: 0.008691138587892056\r\nStep 2305, loss: 0.006516975816339254\r\nStep 2306, loss: 0.0053546535782516\r\nStep 2307, loss: 0.007579582743346691\r\nStep 2308, loss: 0.007574320770800114\r\nStep 2309, loss: 0.007034412119537592\r\nStep 2310, loss: 0.006250488106161356\r\nStep 2311, loss: 0.008510340936481953\r\nStep 2312, loss: 0.00894845649600029\r\nStep 2313, loss: 0.008291991427540779\r\nStep 2314, loss: 0.009398137219250202\r\nStep 2396, loss: 0.008082582615315914\r\nStep 2234, loss: 0.007333801127970219\r\nStep 2235, loss: 0.007183652836829424\r\nStep 2236, loss: 0.008483086712658405\r\nStep 2237, loss: 0.007403460331261158\r\nStep 2238, loss: 0.007542226929217577\r\nStep 2239, loss: 0.006474603898823261\r\nStep 2240, loss: 0.006936528254300356\r\nStep 2241, loss: 0.009447640739381313\r\nStep 2242, loss: 0.007099971640855074\r\nStep 2243, loss: 0.00863928534090519\r\nStep 2244, loss: 0.01093036774545908\r\nStep 2245, loss: 0.008053792640566826\r\nStep 2246, loss: 0.008137132041156292\r\nStep 2247, loss: 0.007317351642996073\r\nStep 2248, loss: 0.00731588713824749\r\nStep 2249, loss: 0.006467532832175493\r\nStep 2250, loss: 0.007371681742370129\r\nStep 2251, loss: 0.006458795629441738\r\nStep 2252, loss: 0.007805053610354662\r\nStep 2253, loss: 0.007725644391030073\r\nStep 2254, loss: 0.007198499981313944\r\nStep 2255, loss: 0.00799979642033577\r\nStep 2256, loss: 0.007058672606945038\r\nStep 2257, loss: 0.0069746156223118305\r\nStep 2258, loss: 0.00751868961378932\r\nStep 2259, loss: 0.007431623060256243\r\nStep 2260, loss: 0.0062018707394599915\r\nStep 2341, loss: 0.010007134638726711\r\nStep 2342, loss: 0.008879064582288265\r\nStep 2343, loss: 0.007293196395039558\r\nStep 2344, loss: 0.007867034524679184\r\nStep 2345, loss: 0.007829985581338406\r\nStep 2346, loss: 0.007642874028533697\r\nStep 2347, loss: 0.007404367905110121\r\nStep 2348, loss: 0.00787848886102438\r\nStep 2349, loss: 0.007604194339364767\r\nStep 2350, loss: 0.006056034006178379\r\nStep 2351, loss: 0.009245391003787518\r\nStep 2352, loss: 0.007415136322379112\r\nStep 2353, loss: 0.007526281755417585\r\nStep 2354, loss: 0.005462340544909239\r\nStep 2355, loss: 0.006441247649490833\r\nStep 2356, loss: 0.007708383724093437\r\nStep 2357, loss: 0.008638114668428898\r\nStep 2358, loss: 0.007165662478655577\r\nStep 2359, loss: 0.008650296367704868\r\nStep 2360, loss: 0.007847641594707966\r\nStep 2361, loss: 0.006319018546491861\r\nStep 2362, loss: 0.006305510178208351\r\nStep 2363, loss: 0.008855611085891724\r\nStep 2364, loss: 0.007100534625351429\r\nStep 2365, loss: 0.008646843954920769\r\nStep 2366, loss: 0.008140524849295616\r\nStep 2315, loss: 0.008755427785217762\r\nStep 2316, loss: 0.007275292184203863\r\nStep 2317, loss: 0.007888562977313995\r\nStep 2318, loss: 0.0072205038741230965\r\nStep 2319, loss: 0.009770158678293228\r\nStep 2320, loss: 0.007986108772456646\r\nStep 2321, loss: 0.008087538182735443\r\nStep 2322, loss: 0.006975209806114435\r\nStep 2323, loss: 0.007179504260420799\r\nStep 2324, loss: 0.007759228814393282\r\nStep 2325, loss: 0.006794561631977558\r\nStep 2326, loss: 0.006587014067918062\r\nStep 2327, loss: 0.0058727082796394825\r\nStep 2328, loss: 0.005745173431932926\r\nStep 2329, loss: 0.005147646646946669\r\nStep 2330, loss: 0.00745925260707736\r\nStep 2331, loss: 0.006531362421810627\r\nStep 2332, 
loss: 0.007551598362624645\r\nStep 2333, loss: 0.007515294477343559\r\nStep 2334, loss: 0.006597206927835941\r\nStep 2335, loss: 0.006945605855435133\r\nStep 2336, loss: 0.005773642100393772\r\nStep 2337, loss: 0.007051005959510803\r\nStep 2338, loss: 0.006912588141858578\r\nStep 2339, loss: 0.006388429086655378\r\nStep 2340, loss: 0.006413211580365896\r\nStep 2289, loss: 0.008151629008352757\r\nStep 2290, loss: 0.007590850815176964\r\nStep 2291, loss: 0.006018661893904209\r\nStep 2292, loss: 0.006589282304048538\r\nStep 2293, loss: 0.007021039258688688\r\nStep 2294, loss: 0.008005218580365181\r\nStep 2295, loss: 0.007952924817800522\r\nStep 2296, loss: 0.008568134158849716\r\nStep 2297, loss: 0.0067298514768481255\r\nStep 2298, loss: 0.0075162495486438274\r\nStep 2299, loss: 0.006727202795445919\r\nStep 2300, loss: 0.006799233146011829\r\nStep 2301, loss: 0.00684049166738987\r\nStep 2302, loss: 0.007529537659138441\r\nStep 2303, loss: 0.006426770705729723\r\nStep 2304, loss: 0.008691138587892056\r\nStep 2305, loss: 0.006516975816339254\r\nStep 2306, loss: 0.0053546535782516\r\nStep 2307, loss: 0.007579582743346691\r\nStep 2308, loss: 0.007574320770800114\r\nStep 2309, loss: 0.007034412119537592\r\nStep 2310, loss: 0.006250488106161356\r\nStep 2311, loss: 0.008510340936481953\r\nStep 2312, loss: 0.00894845649600029\r\nStep 2313, loss: 0.008291991427540779\r\nStep 2314, loss: 0.009398137219250202\r\nStep 2261, loss: 0.008419814519584179\r\nStep 2262, loss: 0.006146540865302086\r\nStep 2263, loss: 0.007318414747714996\r\nStep 2264, loss: 0.009463437832891941\r\nStep 2265, loss: 0.006919173989444971\r\nStep 2266, loss: 0.009040049277245998\r\nStep 2267, loss: 0.007615735754370689\r\nStep 2268, loss: 0.010363750159740448\r\nStep 2269, loss: 0.008556756190955639\r\nStep 2270, loss: 0.009120402857661247\r\nStep 2271, loss: 0.008105048909783363\r\nStep 2272, loss: 0.007221136707812548\r\nStep 2273, loss: 0.008906340226531029\r\nStep 2274, loss: 0.00774586945772171\r\nStep 2275, loss: 0.007926639169454575\r\nStep 2276, loss: 0.007177161052823067\r\nStep 2277, loss: 0.007441888563334942\r\nStep 2278, loss: 0.006979580502957106\r\nStep 2279, loss: 0.0070192525163292885\r\nStep 2280, loss: 0.0061452980153262615\r\nStep 2281, loss: 0.007703986018896103\r\nStep 2282, loss: 0.006331630516797304\r\nStep 2283, loss: 0.007644594646990299\r\nStep 2284, loss: 0.008074313402175903\r\nStep 2285, loss: 0.00854454468935728\r\nStep 2286, loss: 0.007954484783113003\r\nStep 2367, loss: 0.006965464912354946\r\nStep 2368, loss: 0.0073202187195420265\r\nStep 2369, loss: 0.008150244131684303\r\nStep 2370, loss: 0.007980136200785637\r\nStep 2371, loss: 0.007754015736281872\r\nStep 2372, loss: 0.00854856614023447\r\nStep 2373, loss: 0.007721675559878349\r\nStep 2374, loss: 0.007285380270332098\r\nStep 2375, loss: 0.007755438331514597\r\nStep 2376, loss: 0.006344699300825596\r\nStep 2377, loss: 0.006605956237763166\r\nStep 2378, loss: 0.005707500036805868\r\nStep 2379, loss: 0.007001930847764015\r\nStep 2380, loss: 0.0059698657132685184\r\nStep 2381, loss: 0.0074415965937078\r\nStep 2382, loss: 0.006967493798583746\r\nStep 2383, loss: 0.008568624034523964\r\nStep 2384, loss: 0.006496841553598642\r\nStep 2385, loss: 0.006507178768515587\r\nStep 2386, loss: 0.008202851749956608\r\nStep 2387, loss: 0.0077567268162965775\r\nStep 2388, loss: 0.010153812356293201\r\nStep 2389, loss: 0.010740460827946663\r\nStep 2390, loss: 0.007692711893469095\r\nStep 2391, loss: 0.010689156129956245\r\nStep 2392, loss: 
0.0076215327717363834\r\nStep 2341, loss: 0.010007134638726711\r\nStep 2342, loss: 0.008879064582288265\r\nStep 2343, loss: 0.007293196395039558\r\nStep 2344, loss: 0.007867034524679184\r\nStep 2345, loss: 0.007829985581338406\r\nStep 2346, loss: 0.007642874028533697\r\nStep 2347, loss: 0.007404367905110121\r\nStep 2348, loss: 0.00787848886102438\r\nStep 2349, loss: 0.007604194339364767\r\nStep 2350, loss: 0.006056034006178379\r\nStep 2351, loss: 0.009245391003787518\r\nStep 2352, loss: 0.007415136322379112\r\nStep 2353, loss: 0.007526281755417585\r\nStep 2354, loss: 0.005462340544909239\r\nStep 2355, loss: 0.006441247649490833\r\nStep 2356, loss: 0.007708383724093437\r\nStep 2357, loss: 0.008638114668428898\r\nStep 2358, loss: 0.007165662478655577\r\nStep 2359, loss: 0.008650296367704868\r\nStep 2360, loss: 0.007847641594707966\r\nStep 2361, loss: 0.006319018546491861\r\nStep 2362, loss: 0.006305510178208351\r\nStep 2363, loss: 0.008855611085891724\r\nStep 2364, loss: 0.007100534625351429\r\nStep 2365, loss: 0.008646843954920769\r\nStep 2366, loss: 0.008140524849295616\r\nStep 2315, loss: 0.008755427785217762\r\nStep 2316, loss: 0.007275292184203863\r\nStep 2317, loss: 0.007888562977313995\r\nStep 2318, loss: 0.0072205038741230965\r\nStep 2319, loss: 0.009770158678293228\r\nStep 2320, loss: 0.007986108772456646\r\nStep 2321, loss: 0.008087538182735443\r\nStep 2322, loss: 0.006975209806114435\r\nStep 2323, loss: 0.007179504260420799\r\nStep 2324, loss: 0.007759228814393282\r\nStep 2325, loss: 0.006794561631977558\r\nStep 2326, loss: 0.006587014067918062\r\nStep 2327, loss: 0.0058727082796394825\r\nStep 2328, loss: 0.005745173431932926\r\nStep 2329, loss: 0.005147646646946669\r\nStep 2330, loss: 0.00745925260707736\r\nStep 2331, loss: 0.006531362421810627\r\nStep 2332, loss: 0.007551598362624645\r\nStep 2333, loss: 0.007515294477343559\r\nStep 2334, loss: 0.006597206927835941\r\nStep 2335, loss: 0.006945605855435133\r\nStep 2336, loss: 0.005773642100393772\r\nStep 2337, loss: 0.007051005959510803\r\nStep 2338, loss: 0.006912588141858578\r\nStep 2339, loss: 0.006388429086655378\r\nStep 2340, loss: 0.006413211580365896\r\nStep 2287, loss: 0.007020978722721338\r\nStep 2288, loss: 0.00875190831720829\r\nStep 2393, loss: 0.006426332518458366\r\nStep 2394, loss: 0.007592188194394112\r\nStep 2395, loss: 0.008529632352292538\r\nStep 2367, loss: 0.006965464912354946\r\nStep 2368, loss: 0.0073202187195420265\r\nStep 2369, loss: 0.008150244131684303\r\nStep 2370, loss: 0.007980136200785637\r\nStep 2371, loss: 0.007754015736281872\r\nStep 2372, loss: 0.00854856614023447\r\nStep 2373, loss: 0.007721675559878349\r\nStep 2374, loss: 0.007285380270332098\r\nStep 2375, loss: 0.007755438331514597\r\nStep 2376, loss: 0.006344699300825596\r\nStep 2377, loss: 0.006605956237763166\r\nStep 2378, loss: 0.005707500036805868\r\nStep 2379, loss: 0.007001930847764015\r\nStep 2380, loss: 0.0059698657132685184\r\nStep 2381, loss: 0.0074415965937078\r\nStep 2382, loss: 0.006967493798583746\r\nStep 2383, loss: 0.008568624034523964\r\nStep 2384, loss: 0.006496841553598642\r\nStep 2385, loss: 0.006507178768515587\r\nStep 2386, loss: 0.008202851749956608\r\nStep 2387, loss: 0.0077567268162965775\r\nStep 2388, loss: 0.010153812356293201\r\nStep 2389, loss: 0.010740460827946663\r\nStep 2390, loss: 0.007692711893469095\r\nStep 2391, loss: 0.010689156129956245\r\nStep 2392, loss: 0.0076215327717363834\r\nStep 2341, loss: 0.010007134638726711\r\nStep 2342, loss: 0.008879064582288265\r\nStep 2343, loss: 
0.007293196395039558\r\nStep 2344, loss: 0.007867034524679184\r\nStep 2345, loss: 0.007829985581338406\r\nStep 2346, loss: 0.007642874028533697\r\nStep 2347, loss: 0.007404367905110121\r\nStep 2348, loss: 0.00787848886102438\r\nStep 2349, loss: 0.007604194339364767\r\nStep 2350, loss: 0.006056034006178379\r\nStep 2351, loss: 0.009245391003787518\r\nStep 2352, loss: 0.007415136322379112\r\nStep 2353, loss: 0.007526281755417585\r\nStep 2354, loss: 0.005462340544909239\r\nStep 2355, loss: 0.006441247649490833\r\nStep 2356, loss: 0.007708383724093437\r\nStep 2357, loss: 0.008638114668428898\r\nStep 2358, loss: 0.007165662478655577\r\nStep 2359, loss: 0.008650296367704868\r\nStep 2360, loss: 0.007847641594707966\r\nStep 2361, loss: 0.006319018546491861\r\nStep 2362, loss: 0.006305510178208351\r\nStep 2363, loss: 0.008855611085891724\r\nStep 2364, loss: 0.007100534625351429\r\nStep 2365, loss: 0.008646843954920769\r\nStep 2366, loss: 0.008140524849295616\r\nStep 2289, loss: 0.008151629008352757\r\nStep 2290, loss: 0.007590850815176964\r\nStep 2291, loss: 0.006018661893904209\r\nStep 2292, loss: 0.006589282304048538\r\nStep 2293, loss: 0.007021039258688688\r\nStep 2294, loss: 0.008005218580365181\r\nStep 2295, loss: 0.007952924817800522\r\nStep 2296, loss: 0.008568134158849716\r\nStep 2297, loss: 0.0067298514768481255\r\nStep 2298, loss: 0.0075162495486438274\r\nStep 2299, loss: 0.006727202795445919\r\nStep 2300, loss: 0.006799233146011829\r\nStep 2301, loss: 0.00684049166738987\r\nStep 2302, loss: 0.007529537659138441\r\nStep 2303, loss: 0.006426770705729723\r\nStep 2304, loss: 0.008691138587892056\r\nStep 2305, loss: 0.006516975816339254\r\nStep 2306, loss: 0.0053546535782516\r\nStep 2307, loss: 0.007579582743346691\r\nStep 2308, loss: 0.007574320770800114\r\nStep 2309, loss: 0.007034412119537592\r\nStep 2310, loss: 0.006250488106161356\r\nStep 2311, loss: 0.008510340936481953\r\nStep 2312, loss: 0.00894845649600029\r\nStep 2313, loss: 0.008291991427540779\r\nStep 2314, loss: 0.009398137219250202\r\nStep 2396, loss: 0.008082582615315914\r\nStep 2393, loss: 0.006426332518458366\r\nStep 2394, loss: 0.007592188194394112\r\nStep 2395, loss: 0.008529632352292538\r\nStep 2367, loss: 0.006965464912354946\r\nStep 2368, loss: 0.0073202187195420265\r\nStep 2369, loss: 0.008150244131684303\r\nStep 2370, loss: 0.007980136200785637\r\nStep 2371, loss: 0.007754015736281872\r\nStep 2372, loss: 0.00854856614023447\r\nStep 2373, loss: 0.007721675559878349\r\nStep 2374, loss: 0.007285380270332098\r\nStep 2375, loss: 0.007755438331514597\r\nStep 2376, loss: 0.006344699300825596\r\nStep 2377, loss: 0.006605956237763166\r\nStep 2378, loss: 0.005707500036805868\r\nStep 2379, loss: 0.007001930847764015\r\nStep 2380, loss: 0.0059698657132685184\r\nStep 2381, loss: 0.0074415965937078\r\nStep 2382, loss: 0.006967493798583746\r\nStep 2383, loss: 0.008568624034523964\r\nStep 2384, loss: 0.006496841553598642\r\nStep 2385, loss: 0.006507178768515587\r\nStep 2386, loss: 0.008202851749956608\r\nStep 2387, loss: 0.0077567268162965775\r\nStep 2388, loss: 0.010153812356293201\r\nStep 2389, loss: 0.010740460827946663\r\nStep 2390, loss: 0.007692711893469095\r\nStep 2391, loss: 0.010689156129956245\r\nStep 2392, loss: 0.0076215327717363834\r\nStep 2315, loss: 0.008755427785217762\r\nStep 2316, loss: 0.007275292184203863\r\nStep 2317, loss: 0.007888562977313995\r\nStep 2318, loss: 0.0072205038741230965\r\nStep 2319, loss: 0.009770158678293228\r\nStep 2320, loss: 0.007986108772456646\r\nStep 2321, loss: 0.008087538182735443\r\nStep 
2322, loss: 0.006975209806114435\r\nStep 2323, loss: 0.007179504260420799\r\nStep 2324, loss: 0.007759228814393282\r\nStep 2325, loss: 0.006794561631977558\r\nStep 2326, loss: 0.006587014067918062\r\nStep 2327, loss: 0.0058727082796394825\r\nStep 2328, loss: 0.005745173431932926\r\nStep 2329, loss: 0.005147646646946669\r\nStep 2330, loss: 0.00745925260707736\r\nStep 2331, loss: 0.006531362421810627\r\nStep 2332, loss: 0.007551598362624645\r\nStep 2333, loss: 0.007515294477343559\r\nStep 2334, loss: 0.006597206927835941\r\nStep 2335, loss: 0.006945605855435133\r\nStep 2336, loss: 0.005773642100393772\r\nStep 2337, loss: 0.007051005959510803\r\nStep 2338, loss: 0.006912588141858578\r\nStep 2339, loss: 0.006388429086655378\r\nStep 2340, loss: 0.006413211580365896\r\nStep 2182, loss: 0.007047292776405811\r\nStep 2183, loss: 0.005946440622210503\r\nStep 2184, loss: 0.007753191515803337\r\nStep 2185, loss: 0.007971913553774357\r\nStep 2186, loss: 0.006793928798288107\r\nStep 2187, loss: 0.007563348393887281\r\nStep 2188, loss: 0.007450959179550409\r\nStep 2189, loss: 0.007365739438682795\r\nStep 2190, loss: 0.007971013896167278\r\nStep 2191, loss: 0.006419710349291563\r\nStep 2192, loss: 0.007855601608753204\r\nStep 2193, loss: 0.006554008927196264\r\nStep 2194, loss: 0.007919912226498127\r\nStep 2195, loss: 0.009246248751878738\r\nStep 2196, loss: 0.0077072675339877605\r\nStep 2197, loss: 0.009428897872567177\r\nStep 2198, loss: 0.008707528933882713\r\nStep 2199, loss: 0.008782273158431053\r\nStep 2200, loss: 0.011009319685399532\r\nStep 2201, loss: 0.008429359644651413\r\nStep 2202, loss: 0.007610331755131483\r\nStep 2203, loss: 0.007773495279252529\r\nStep 2204, loss: 0.009314494207501411\r\nStep 2205, loss: 0.007566919084638357\r\nStep 2206, loss: 0.00774649903178215\r\nStep 2207, loss: 0.006603968795388937\r\nStep 2393, loss: 0.006426332518458366\r\nStep 2394, loss: 0.007592188194394112\r\nStep 2395, loss: 0.008529632352292538\r\nStep 2341, loss: 0.010007134638726711\r\nStep 2342, loss: 0.008879064582288265\r\nStep 2343, loss: 0.007293196395039558\r\nStep 2344, loss: 0.007867034524679184\r\nStep 2345, loss: 0.007829985581338406\r\nStep 2346, loss: 0.007642874028533697\r\nStep 2347, loss: 0.007404367905110121\r\nStep 2348, loss: 0.00787848886102438\r\nStep 2349, loss: 0.007604194339364767\r\nStep 2350, loss: 0.006056034006178379\r\nStep 2351, loss: 0.009245391003787518\r\nStep 2352, loss: 0.007415136322379112\r\nStep 2353, loss: 0.007526281755417585\r\nStep 2354, loss: 0.005462340544909239\r\nStep 2355, loss: 0.006441247649490833\r\nStep 2356, loss: 0.007708383724093437\r\nStep 2357, loss: 0.008638114668428898\r\nStep 2358, loss: 0.007165662478655577\r\nStep 2359, loss: 0.008650296367704868\r\nStep 2360, loss: 0.007847641594707966\r\nStep 2361, loss: 0.006319018546491861\r\nStep 2362, loss: 0.006305510178208351\r\nStep 2363, loss: 0.008855611085891724\r\nStep 2364, loss: 0.007100534625351429\r\nStep 2365, loss: 0.008646843954920769\r\nStep 2366, loss: 0.008140524849295616\r\nStep 2208, loss: 0.006329503376036882\r\nStep 2209, loss: 0.007151519879698753\r\nStep 2210, loss: 0.0077710580080747604\r\nStep 2211, loss: 0.006214924156665802\r\nStep 2212, loss: 0.006694791838526726\r\nStep 2213, loss: 0.0073739453218877316\r\nStep 2214, loss: 0.005623674485832453\r\nStep 2215, loss: 0.0078982999548316\r\nStep 2216, loss: 0.007187147159129381\r\nStep 2217, loss: 0.007525425869971514\r\nStep 2218, loss: 0.006596721243113279\r\nStep 2219, loss: 0.007706223987042904\r\nStep 2220, loss: 
0.007000356912612915\r\nStep 2221, loss: 0.008878003805875778\r\nStep 2222, loss: 0.006376492790877819\r\nStep 2223, loss: 0.008050079457461834\r\nStep 2224, loss: 0.007886356674134731\r\nStep 2225, loss: 0.0069083841517567635\r\nStep 2226, loss: 0.006824408657848835\r\nStep 2227, loss: 0.006793968845158815\r\nStep 2228, loss: 0.007624398451298475\r\nStep 2229, loss: 0.0081605464220047\r\nStep 2230, loss: 0.006897060200572014\r\nStep 2231, loss: 0.007584972772747278\r\nStep 2232, loss: 0.0069396113976836205\r\nStep 2233, loss: 0.007637462578713894\r\nStep 2289, loss: 0.008151629008352757\r\nStep 2290, loss: 0.007590850815176964\r\nStep 2291, loss: 0.006018661893904209\r\nStep 2292, loss: 0.006589282304048538\r\nStep 2293, loss: 0.007021039258688688\r\nStep 2294, loss: 0.008005218580365181\r\nStep 2295, loss: 0.007952924817800522\r\nStep 2296, loss: 0.008568134158849716\r\nStep 2297, loss: 0.0067298514768481255\r\nStep 2298, loss: 0.0075162495486438274\r\nStep 2299, loss: 0.006727202795445919\r\nStep 2300, loss: 0.006799233146011829\r\nStep 2301, loss: 0.00684049166738987\r\nStep 2302, loss: 0.007529537659138441\r\nStep 2303, loss: 0.006426770705729723\r\nStep 2304, loss: 0.008691138587892056\r\nStep 2305, loss: 0.006516975816339254\r\nStep 2306, loss: 0.0053546535782516\r\nStep 2307, loss: 0.007579582743346691\r\nStep 2308, loss: 0.007574320770800114\r\nStep 2309, loss: 0.007034412119537592\r\nStep 2310, loss: 0.006250488106161356\r\nStep 2311, loss: 0.008510340936481953\r\nStep 2312, loss: 0.00894845649600029\r\nStep 2313, loss: 0.008291991427540779\r\nStep 2314, loss: 0.009398137219250202\r\nStep 2367, loss: 0.006965464912354946\r\nStep 2368, loss: 0.0073202187195420265\r\nStep 2369, loss: 0.008150244131684303\r\nStep 2370, loss: 0.007980136200785637\r\nStep 2371, loss: 0.007754015736281872\r\nStep 2372, loss: 0.00854856614023447\r\nStep 2373, loss: 0.007721675559878349\r\nStep 2374, loss: 0.007285380270332098\r\nStep 2375, loss: 0.007755438331514597\r\nStep 2376, loss: 0.006344699300825596\r\nStep 2377, loss: 0.006605956237763166\r\nStep 2378, loss: 0.005707500036805868\r\nStep 2379, loss: 0.007001930847764015\r\nStep 2380, loss: 0.0059698657132685184\r\nStep 2381, loss: 0.0074415965937078\r\nStep 2382, loss: 0.006967493798583746\r\nStep 2383, loss: 0.008568624034523964\r\nStep 2384, loss: 0.006496841553598642\r\nStep 2385, loss: 0.006507178768515587\r\nStep 2386, loss: 0.008202851749956608\r\nStep 2387, loss: 0.0077567268162965775\r\nStep 2388, loss: 0.010153812356293201\r\nStep 2389, loss: 0.010740460827946663\r\nStep 2390, loss: 0.007692711893469095\r\nStep 2391, loss: 0.010689156129956245\r\nStep 2392, loss: 0.0076215327717363834\r\nStep 2234, loss: 0.007333801127970219\r\nStep 2235, loss: 0.007183652836829424\r\nStep 2236, loss: 0.008483086712658405\r\nStep 2237, loss: 0.007403460331261158\r\nStep 2238, loss: 0.007542226929217577\r\nStep 2239, loss: 0.006474603898823261\r\nStep 2240, loss: 0.006936528254300356\r\nStep 2241, loss: 0.009447640739381313\r\nStep 2242, loss: 0.007099971640855074\r\nStep 2243, loss: 0.00863928534090519\r\nStep 2244, loss: 0.01093036774545908\r\nStep 2245, loss: 0.008053792640566826\r\nStep 2246, loss: 0.008137132041156292\r\nStep 2247, loss: 0.007317351642996073\r\nStep 2248, loss: 0.00731588713824749\r\nStep 2249, loss: 0.006467532832175493\r\nStep 2250, loss: 0.007371681742370129\r\nStep 2251, loss: 0.006458795629441738\r\nStep 2252, loss: 0.007805053610354662\r\nStep 2253, loss: 0.007725644391030073\r\nStep 2254, loss: 0.007198499981313944\r\nStep 
2255, loss: 0.00799979642033577\r\nStep 2256, loss: 0.007058672606945038\r\nStep 2257, loss: 0.0069746156223118305\r\nStep 2258, loss: 0.00751868961378932\r\nStep 2259, loss: 0.007431623060256243\r\nStep 2260, loss: 0.0062018707394599915\r\nStep 2315, loss: 0.008755427785217762\r\nStep 2316, loss: 0.007275292184203863\r\nStep 2317, loss: 0.007888562977313995\r\nStep 2318, loss: 0.0072205038741230965\r\nStep 2319, loss: 0.009770158678293228\r\nStep 2320, loss: 0.007986108772456646\r\nStep 2321, loss: 0.008087538182735443\r\nStep 2322, loss: 0.006975209806114435\r\nStep 2323, loss: 0.007179504260420799\r\nStep 2324, loss: 0.007759228814393282\r\nStep 2325, loss: 0.006794561631977558\r\nStep 2326, loss: 0.006587014067918062\r\nStep 2327, loss: 0.0058727082796394825\r\nStep 2328, loss: 0.005745173431932926\r\nStep 2329, loss: 0.005147646646946669\r\nStep 2330, loss: 0.00745925260707736\r\nStep 2331, loss: 0.006531362421810627\r\nStep 2332, loss: 0.007551598362624645\r\nStep 2333, loss: 0.007515294477343559\r\nStep 2334, loss: 0.006597206927835941\r\nStep 2335, loss: 0.006945605855435133\r\nStep 2336, loss: 0.005773642100393772\r\nStep 2337, loss: 0.007051005959510803\r\nStep 2338, loss: 0.006912588141858578\r\nStep 2339, loss: 0.006388429086655378\r\nStep 2340, loss: 0.006413211580365896\r\nStep 2393, loss: 0.006426332518458366\r\nStep 2394, loss: 0.007592188194394112\r\nStep 2395, loss: 0.008529632352292538\r\nStep 2261, loss: 0.008419814519584179\r\nStep 2262, loss: 0.006146540865302086\r\nStep 2263, loss: 0.007318414747714996\r\nStep 2264, loss: 0.009463437832891941\r\nStep 2265, loss: 0.006919173989444971\r\nStep 2266, loss: 0.009040049277245998\r\nStep 2267, loss: 0.007615735754370689\r\nStep 2268, loss: 0.010363750159740448\r\nStep 2269, loss: 0.008556756190955639\r\nStep 2270, loss: 0.009120402857661247\r\nStep 2271, loss: 0.008105048909783363\r\nStep 2272, loss: 0.007221136707812548\r\nStep 2273, loss: 0.008906340226531029\r\nStep 2274, loss: 0.00774586945772171\r\nStep 2275, loss: 0.007926639169454575\r\nStep 2276, loss: 0.007177161052823067\r\nStep 2277, loss: 0.007441888563334942\r\nStep 2278, loss: 0.006979580502957106\r\nStep 2279, loss: 0.0070192525163292885\r\nStep 2280, loss: 0.0061452980153262615\r\nStep 2281, loss: 0.007703986018896103\r\nStep 2282, loss: 0.006331630516797304\r\nStep 2283, loss: 0.007644594646990299\r\nStep 2284, loss: 0.008074313402175903\r\nStep 2285, loss: 0.00854454468935728\r\nStep 2286, loss: 0.007954484783113003\r\nStep 2341, loss: 0.010007134638726711\r\nStep 2342, loss: 0.008879064582288265\r\nStep 2343, loss: 0.007293196395039558\r\nStep 2344, loss: 0.007867034524679184\r\nStep 2345, loss: 0.007829985581338406\r\nStep 2346, loss: 0.007642874028533697\r\nStep 2347, loss: 0.007404367905110121\r\nStep 2348, loss: 0.00787848886102438\r\nStep 2349, loss: 0.007604194339364767\r\nStep 2350, loss: 0.006056034006178379\r\nStep 2351, loss: 0.009245391003787518\r\nStep 2352, loss: 0.007415136322379112\r\nStep 2353, loss: 0.007526281755417585\r\nStep 2354, loss: 0.005462340544909239\r\nStep 2355, loss: 0.006441247649490833\r\nStep 2356, loss: 0.007708383724093437\r\nStep 2357, loss: 0.008638114668428898\r\nStep 2358, loss: 0.007165662478655577\r\nStep 2359, loss: 0.008650296367704868\r\nStep 2360, loss: 0.007847641594707966\r\nStep 2361, loss: 0.006319018546491861\r\nStep 2362, loss: 0.006305510178208351\r\nStep 2363, loss: 0.008855611085891724\r\nStep 2364, loss: 0.007100534625351429\r\nStep 2365, loss: 0.008646843954920769\r\nStep 2366, loss: 
0.008140524849295616\r\nStep 2289, loss: 0.008151629008352757\r\nStep 2290, loss: 0.007590850815176964\r\nStep 2291, loss: 0.006018661893904209\r\nStep 2292, loss: 0.006589282304048538\r\nStep 2293, loss: 0.007021039258688688\r\nStep 2294, loss: 0.008005218580365181\r\nStep 2295, loss: 0.007952924817800522\r\nStep 2296, loss: 0.008568134158849716\r\nStep 2297, loss: 0.0067298514768481255\r\nStep 2298, loss: 0.0075162495486438274\r\nStep 2299, loss: 0.006727202795445919\r\nStep 2300, loss: 0.006799233146011829\r\nStep 2301, loss: 0.00684049166738987\r\nStep 2302, loss: 0.007529537659138441\r\nStep 2303, loss: 0.006426770705729723\r\nStep 2304, loss: 0.008691138587892056\r\nStep 2305, loss: 0.006516975816339254\r\nStep 2306, loss: 0.0053546535782516\r\nStep 2307, loss: 0.007579582743346691\r\nStep 2308, loss: 0.007574320770800114\r\nStep 2309, loss: 0.007034412119537592\r\nStep 2310, loss: 0.006250488106161356\r\nStep 2311, loss: 0.008510340936481953\r\nStep 2312, loss: 0.00894845649600029\r\nStep 2313, loss: 0.008291991427540779\r\nStep 2314, loss: 0.009398137219250202\r\nStep 2287, loss: 0.007020978722721338\r\nStep 2288, loss: 0.00875190831720829\r\nStep 2367, loss: 0.006965464912354946\r\nStep 2368, loss: 0.0073202187195420265\r\nStep 2369, loss: 0.008150244131684303\r\nStep 2370, loss: 0.007980136200785637\r\nStep 2371, loss: 0.007754015736281872\r\nStep 2372, loss: 0.00854856614023447\r\nStep 2373, loss: 0.007721675559878349\r\nStep 2374, loss: 0.007285380270332098\r\nStep 2375, loss: 0.007755438331514597\r\nStep 2376, loss: 0.006344699300825596\r\nStep 2377, loss: 0.006605956237763166\r\nStep 2378, loss: 0.005707500036805868\r\nStep 2379, loss: 0.007001930847764015\r\nStep 2380, loss: 0.0059698657132685184\r\nStep 2381, loss: 0.0074415965937078\r\nStep 2382, loss: 0.006967493798583746\r\nStep 2383, loss: 0.008568624034523964\r\nStep 2384, loss: 0.006496841553598642\r\nStep 2385, loss: 0.006507178768515587\r\nStep 2386, loss: 0.008202851749956608\r\nStep 2387, loss: 0.0077567268162965775\r\nStep 2388, loss: 0.010153812356293201\r\nStep 2389, loss: 0.010740460827946663\r\nStep 2390, loss: 0.007692711893469095\r\nStep 2391, loss: 0.010689156129956245\r\nStep 2392, loss: 0.0076215327717363834\r\nStep 2315, loss: 0.008755427785217762\r\nStep 2316, loss: 0.007275292184203863\r\nStep 2317, loss: 0.007888562977313995\r\nStep 2318, loss: 0.0072205038741230965\r\nStep 2319, loss: 0.009770158678293228\r\nStep 2320, loss: 0.007986108772456646\r\nStep 2321, loss: 0.008087538182735443\r\nStep 2322, loss: 0.006975209806114435\r\nStep 2323, loss: 0.007179504260420799\r\nStep 2324, loss: 0.007759228814393282\r\nStep 2325, loss: 0.006794561631977558\r\nStep 2326, loss: 0.006587014067918062\r\nStep 2327, loss: 0.0058727082796394825\r\nStep 2328, loss: 0.005745173431932926\r\nStep 2329, loss: 0.005147646646946669\r\nStep 2330, loss: 0.00745925260707736\r\nStep 2331, loss: 0.006531362421810627\r\nStep 2332, loss: 0.007551598362624645\r\nStep 2333, loss: 0.007515294477343559\r\nStep 2334, loss: 0.006597206927835941\r\nStep 2335, loss: 0.006945605855435133\r\nStep 2336, loss: 0.005773642100393772\r\nStep 2337, loss: 0.007051005959510803\r\nStep 2338, loss: 0.006912588141858578\r\nStep 2339, loss: 0.006388429086655378\r\nStep 2340, loss: 0.006413211580365896\r\nStep 2182, loss: 0.007047292776405811\r\nStep 2183, loss: 0.005946440622210503\r\nStep 2184, loss: 0.007753191515803337\r\nStep 2185, loss: 0.007971913553774357\r\nStep 2186, loss: 0.006793928798288107\r\nStep 2187, loss: 0.007563348393887281\r\nStep 
2188, loss: 0.007450959179550409\r\nStep 2189, loss: 0.007365739438682795\r\nStep 2190, loss: 0.007971013896167278\r\nStep 2191, loss: 0.006419710349291563\r\nStep 2192, loss: 0.007855601608753204\r\nStep 2193, loss: 0.006554008927196264\r\nStep 2194, loss: 0.007919912226498127\r\nStep 2195, loss: 0.009246248751878738\r\nStep 2196, loss: 0.0077072675339877605\r\nStep 2197, loss: 0.009428897872567177\r\nStep 2198, loss: 0.008707528933882713\r\nStep 2199, loss: 0.008782273158431053\r\nStep 2200, loss: 0.011009319685399532\r\nStep 2201, loss: 0.008429359644651413\r\nStep 2202, loss: 0.007610331755131483\r\nStep 2203, loss: 0.007773495279252529\r\nStep 2204, loss: 0.009314494207501411\r\nStep 2205, loss: 0.007566919084638357\r\nStep 2206, loss: 0.00774649903178215\r\nStep 2207, loss: 0.006603968795388937\r\nStep 2393, loss: 0.006426332518458366\r\nStep 2394, loss: 0.007592188194394112\r\nStep 2395, loss: 0.008529632352292538\r\nStep 2341, loss: 0.010007134638726711\r\nStep 2342, loss: 0.008879064582288265\r\nStep 2343, loss: 0.007293196395039558\r\nStep 2344, loss: 0.007867034524679184\r\nStep 2345, loss: 0.007829985581338406\r\nStep 2346, loss: 0.007642874028533697\r\nStep 2347, loss: 0.007404367905110121\r\nStep 2348, loss: 0.00787848886102438\r\nStep 2349, loss: 0.007604194339364767\r\nStep 2350, loss: 0.006056034006178379\r\nStep 2351, loss: 0.009245391003787518\r\nStep 2352, loss: 0.007415136322379112\r\nStep 2353, loss: 0.007526281755417585\r\nStep 2354, loss: 0.005462340544909239\r\nStep 2355, loss: 0.006441247649490833\r\nStep 2356, loss: 0.007708383724093437\r\nStep 2357, loss: 0.008638114668428898\r\nStep 2358, loss: 0.007165662478655577\r\nStep 2359, loss: 0.008650296367704868\r\nStep 2360, loss: 0.007847641594707966\r\nStep 2361, loss: 0.006319018546491861\r\nStep 2362, loss: 0.006305510178208351\r\nStep 2363, loss: 0.008855611085891724\r\nStep 2364, loss: 0.007100534625351429\r\nStep 2365, loss: 0.008646843954920769\r\nStep 2366, loss: 0.008140524849295616\r\nStep 2208, loss: 0.006329503376036882\r\nStep 2209, loss: 0.007151519879698753\r\nStep 2210, loss: 0.0077710580080747604\r\nStep 2211, loss: 0.006214924156665802\r\nStep 2212, loss: 0.006694791838526726\r\nStep 2213, loss: 0.0073739453218877316\r\nStep 2214, loss: 0.005623674485832453\r\nStep 2215, loss: 0.0078982999548316\r\nStep 2216, loss: 0.007187147159129381\r\nStep 2217, loss: 0.007525425869971514\r\nStep 2218, loss: 0.006596721243113279\r\nStep 2219, loss: 0.007706223987042904\r\nStep 2220, loss: 0.007000356912612915\r\nStep 2221, loss: 0.008878003805875778\r\nStep 2222, loss: 0.006376492790877819\r\nStep 2223, loss: 0.008050079457461834\r\nStep 2224, loss: 0.007886356674134731\r\nStep 2225, loss: 0.0069083841517567635\r\nStep 2226, loss: 0.006824408657848835\r\nStep 2227, loss: 0.006793968845158815\r\nStep 2228, loss: 0.007624398451298475\r\nStep 2229, loss: 0.0081605464220047\r\nStep 2230, loss: 0.006897060200572014\r\nStep 2231, loss: 0.007584972772747278\r\nStep 2232, loss: 0.0069396113976836205\r\nStep 2233, loss: 0.007637462578713894\r\nStep 2396, loss: 0.008082582615315914\r\nStep 2367, loss: 0.006965464912354946\r\nStep 2368, loss: 0.0073202187195420265\r\nStep 2369, loss: 0.008150244131684303\r\nStep 2370, loss: 0.007980136200785637\r\nStep 2371, loss: 0.007754015736281872\r\nStep 2372, loss: 0.00854856614023447\r\nStep 2373, loss: 0.007721675559878349\r\nStep 2374, loss: 0.007285380270332098\r\nStep 2375, loss: 0.007755438331514597\r\nStep 2376, loss: 0.006344699300825596\r\nStep 2377, loss: 
Step 2287, loss: 0.007020978722721338\r\n",,terminal_output
+1591,2920871,"TERMINAL",0,0,"Step 2288, loss: 0.00875190831720829\r\n
[duplicate scrollback: the Steps 2182-2396 loss log reprinted several more times]\r\nStep 
2386, loss: 0.008202851749956608\r\nStep 2387, loss: 0.0077567268162965775\r\nStep 2388, loss: 0.010153812356293201\r\nStep 2389, loss: 0.010740460827946663\r\nStep 2390, loss: 0.007692711893469095\r\nStep 2391, loss: 0.010689156129956245\r\nStep 2392, loss: 0.0076215327717363834\r\nStep 2393, loss: 0.006426332518458366\r\nStep 2394, loss: 0.007592188194394112\r\nStep 2395, loss: 0.008529632352292538\r\nStep 2396, loss: 0.008082582615315914\r\nStep 2396, loss: 0.008082582615315914\r\nStep 2396, loss: 0.008082582615315914\r\nStep 2182, loss: 0.007047292776405811\r\nStep 2183, loss: 0.005946440622210503\r\nStep 2184, loss: 0.007753191515803337\r\nStep 2185, loss: 0.007971913553774357\r\nStep 2186, loss: 0.006793928798288107\r\nStep 2187, loss: 0.007563348393887281\r\nStep 2188, loss: 0.007450959179550409\r\nStep 2189, loss: 0.007365739438682795\r\nStep 2190, loss: 0.007971013896167278\r\nStep 2191, loss: 0.006419710349291563\r\nStep 2192, loss: 0.007855601608753204\r\nStep 2193, loss: 0.006554008927196264\r\nStep 2194, loss: 0.007919912226498127\r\nStep 2195, loss: 0.009246248751878738\r\nStep 2196, loss: 0.0077072675339877605\r\nStep 2197, loss: 0.009428897872567177\r\nStep 2198, loss: 0.008707528933882713\r\nStep 2199, loss: 0.008782273158431053\r\nStep 2200, loss: 0.011009319685399532\r\nStep 2201, loss: 0.008429359644651413\r\nStep 2202, loss: 0.007610331755131483\r\nStep 2203, loss: 0.007773495279252529\r\nStep 2204, loss: 0.009314494207501411\r\nStep 2205, loss: 0.007566919084638357\r\nStep 2206, loss: 0.00774649903178215\r\nStep 2207, loss: 0.006603968795388937\r\nStep 2208, loss: 0.006329503376036882\r\nStep 2209, loss: 0.007151519879698753\r\nStep 2210, loss: 0.0077710580080747604\r\nStep 2211, loss: 0.006214924156665802\r\nStep 2212, loss: 0.006694791838526726\r\nStep 2213, loss: 0.0073739453218877316\r\nStep 2214, loss: 0.005623674485832453\r\nStep 2215, loss: 0.0078982999548316\r\nStep 2216, loss: 0.007187147159129381\r\nStep 2217, loss: 0.007525425869971514\r\nStep 2218, loss: 0.006596721243113279\r\nStep 2219, loss: 0.007706223987042904\r\nStep 2220, loss: 0.007000356912612915\r\nStep 2221, loss: 0.008878003805875778\r\nStep 2222, loss: 0.006376492790877819\r\nStep 2223, loss: 0.008050079457461834\r\nStep 2224, loss: 0.007886356674134731\r\nStep 2225, loss: 0.0069083841517567635\r\nStep 2226, loss: 0.006824408657848835\r\nStep 2227, loss: 0.006793968845158815\r\nStep 2228, loss: 0.007624398451298475\r\nStep 2229, loss: 0.0081605464220047\r\nStep 2230, loss: 0.006897060200572014\r\nStep 2231, loss: 0.007584972772747278\r\nStep 2232, loss: 0.0069396113976836205\r\nStep 2233, loss: 0.007637462578713894\r\nStep 2234, loss: 0.007333801127970219\r\nStep 2235, loss: 0.007183652836829424\r\nStep 2236, loss: 0.008483086712658405\r\nStep 2237, loss: 0.007403460331261158\r\nStep 2238, loss: 0.007542226929217577\r\nStep 2239, loss: 0.006474603898823261\r\nStep 2240, loss: 0.006936528254300356\r\nStep 2241, loss: 0.009447640739381313\r\nStep 2242, loss: 0.007099971640855074\r\nStep 2243, loss: 0.00863928534090519\r\nStep 2244, loss: 0.01093036774545908\r\nStep 2245, loss: 0.008053792640566826\r\nStep 2246, loss: 0.008137132041156292\r\nStep 2247, loss: 0.007317351642996073\r\nStep 2248, loss: 0.00731588713824749\r\nStep 2249, loss: 0.006467532832175493\r\nStep 2250, loss: 0.007371681742370129\r\nStep 2251, loss: 0.006458795629441738\r\nStep 2252, loss: 0.007805053610354662\r\nStep 2253, loss: 0.007725644391030073\r\nStep 2254, loss: 0.007198499981313944\r\nStep 2255, loss: 
0.00799979642033577\r\nStep 2256, loss: 0.007058672606945038\r\nStep 2257, loss: 0.0069746156223118305\r\nStep 2258, loss: 0.00751868961378932\r\nStep 2259, loss: 0.007431623060256243\r\nStep 2260, loss: 0.0062018707394599915\r\nStep 2261, loss: 0.008419814519584179\r\nStep 2262, loss: 0.006146540865302086\r\nStep 2263, loss: 0.007318414747714996\r\nStep 2264, loss: 0.009463437832891941\r\nStep 2265, loss: 0.006919173989444971\r\nStep 2266, loss: 0.009040049277245998\r\nStep 2267, loss: 0.007615735754370689\r\nStep 2268, loss: 0.010363750159740448\r\nStep 2269, loss: 0.008556756190955639\r\nStep 2270, loss: 0.009120402857661247\r\nStep 2271, loss: 0.008105048909783363\r\nStep 2272, loss: 0.007221136707812548\r\nStep 2273, loss: 0.008906340226531029\r\nStep 2274, loss: 0.00774586945772171\r\nStep 2275, loss: 0.007926639169454575\r\nStep 2276, loss: 0.007177161052823067\r\nStep 2277, loss: 0.007441888563334942\r\nStep 2278, loss: 0.006979580502957106\r\nStep 2279, loss: 0.0070192525163292885\r\nStep 2280, loss: 0.0061452980153262615\r\nStep 2281, loss: 0.007703986018896103\r\nStep 2282, loss: 0.006331630516797304\r\nStep 2283, loss: 0.007644594646990299\r\nStep 2284, loss: 0.008074313402175903\r\nStep 2285, loss: 0.00854454468935728\r\nStep 2286, loss: 0.007954484783113003\r\nStep 2287, loss: 0.007020978722721338\r\nStep 2288, loss: 0.00875190831720829\r\nStep 2289, loss: 0.008151629008352757\r\nStep 2290, loss: 0.007590850815176964\r\nStep 2291, loss: 0.006018661893904209\r\nStep 2292, loss: 0.006589282304048538\r\nStep 2293, loss: 0.007021039258688688\r\nStep 2294, loss: 0.008005218580365181\r\nStep 2295, loss: 0.007952924817800522\r\nStep 2296, loss: 0.008568134158849716\r\nStep 2297, loss: 0.0067298514768481255\r\nStep 2298, loss: 0.0075162495486438274\r\nStep 2299, loss: 0.006727202795445919\r\nStep 2300, loss: 0.006799233146011829\r\nStep 2301, loss: 0.00684049166738987\r\nStep 2302, loss: 0.007529537659138441\r\nStep 2303, loss: 0.006426770705729723\r\nStep 2304, loss: 0.008691138587892056\r\nStep 2305, loss: 0.006516975816339254\r\nStep 2306, loss: 0.0053546535782516\r\nStep 2307, loss: 0.007579582743346691\r\nStep 2308, loss: 0.007574320770800114\r\nStep 2309, loss: 0.007034412119537592\r\nStep 2310, loss: 0.006250488106161356\r\nStep 2311, loss: 0.008510340936481953\r\nStep 2312, loss: 0.00894845649600029\r\nStep 2313, loss: 0.008291991427540779\r\nStep 2314, loss: 0.009398137219250202\r\nStep 2315, loss: 0.008755427785217762\r\nStep 2316, loss: 0.007275292184203863\r\nStep 2317, loss: 0.007888562977313995\r\nStep 2318, loss: 0.0072205038741230965\r\nStep 2319, loss: 0.009770158678293228\r\nStep 2320, loss: 0.007986108772456646\r\nStep 2321, loss: 0.008087538182735443\r\nStep 2322, loss: 0.006975209806114435\r\nStep 2323, loss: 0.007179504260420799\r\nStep 2324, loss: 0.007759228814393282\r\nStep 2325, loss: 0.006794561631977558\r\nStep 2326, loss: 0.006587014067918062\r\nStep 2327, loss: 0.0058727082796394825\r\nStep 2328, loss: 0.005745173431932926\r\nStep 2329, loss: 0.005147646646946669\r\nStep 2330, loss: 0.00745925260707736\r\nStep 2331, loss: 0.006531362421810627\r\nStep 2332, loss: 0.007551598362624645\r\nStep 2333, loss: 0.007515294477343559\r\nStep 2334, loss: 0.006597206927835941\r\nStep 2335, loss: 0.006945605855435133\r\nStep 2336, loss: 0.005773642100393772\r\nStep 2337, loss: 0.007051005959510803\r\nStep 2338, loss: 0.006912588141858578\r\nStep 2339, loss: 0.006388429086655378\r\nStep 2340, loss: 0.006413211580365896\r\nStep 2341, loss: 0.010007134638726711\r\nStep 
2342, loss: 0.008879064582288265\r\nStep 2343, loss: 0.007293196395039558\r\nStep 2344, loss: 0.007867034524679184\r\nStep 2345, loss: 0.007829985581338406\r\nStep 2346, loss: 0.007642874028533697\r\nStep 2347, loss: 0.007404367905110121\r\nStep 2348, loss: 0.00787848886102438\r\nStep 2349, loss: 0.007604194339364767\r\nStep 2350, loss: 0.006056034006178379\r\nStep 2351, loss: 0.009245391003787518\r\nStep 2352, loss: 0.007415136322379112\r\nStep 2353, loss: 0.007526281755417585\r\nStep 2354, loss: 0.005462340544909239\r\nStep 2355, loss: 0.006441247649490833\r\nStep 2356, loss: 0.007708383724093437\r\nStep 2357, loss: 0.008638114668428898\r\nStep 2358, loss: 0.007165662478655577\r\nStep 2359, loss: 0.008650296367704868\r\nStep 2360, loss: 0.007847641594707966\r\nStep 2361, loss: 0.006319018546491861\r\nStep 2362, loss: 0.006305510178208351\r\nStep 2363, loss: 0.008855611085891724\r\nStep 2364, loss: 0.007100534625351429\r\nStep 2365, loss: 0.008646843954920769\r\nStep 2366, loss: 0.008140524849295616\r\nStep 2367, loss: 0.006965464912354946\r\nStep 2368, loss: 0.0073202187195420265\r\nStep 2369, loss: 0.008150244131684303\r\nStep 2370, loss: 0.007980136200785637\r\nStep 2371, loss: 0.007754015736281872\r\nStep 2372, loss: 0.00854856614023447\r\nStep 2373, loss: 0.007721675559878349\r\nStep 2374, loss: 0.007285380270332098\r\nStep 2375, loss: 0.007755438331514597\r\nStep 2376, loss: 0.006344699300825596\r\nStep 2377, loss: 0.006605956237763166\r\nStep 2378, loss: 0.005707500036805868\r\nStep 2379, loss: 0.007001930847764015\r\nStep 2380, loss: 0.0059698657132685184\r\nStep 2381, loss: 0.0074415965937078\r\nStep 2382, loss: 0.006967493798583746\r\nStep 2383, loss: 0.008568624034523964\r\nStep 2384, loss: 0.006496841553598642\r\nStep 2385, loss: 0.006507178768515587\r\nStep 2386, loss: 0.008202851749956608\r\nStep 2387, loss: 0.0077567268162965775\r\nStep 2388, loss: 0.010153812356293201\r\nStep 2389, loss: 0.010740460827946663\r\nStep 2390, loss: 0.007692711893469095\r\nStep 2391, loss: 0.010689156129956245\r\nStep 2392, loss: 0.0076215327717363834\r\nStep 2393, loss: 0.006426332518458366\r\nStep 2394, loss: 0.007592188194394112\r\nStep 2395, loss: 0.008529632352292538\r\nStep 2396, loss: 0.008082582615315914\r\n",,terminal_output +1592,2921530,"TERMINAL",0,0,"643:0041",,terminal_output +1593,2922530,"TERMINAL",0,0,"75152",,terminal_output +1594,2923596,"TERMINAL",0,0,"86263",,terminal_output +1595,2924652,"TERMINAL",0,0,"97374",,terminal_output +1596,2925747,"TERMINAL",0,0,"208485",,terminal_output +1597,2926769,"TERMINAL",0,0,"1406407",,terminal_output +1598,2927805,"TERMINAL",0,0,"31718",,terminal_output +1599,2928907,"TERMINAL",0,0,"42829",,terminal_output +1600,2929859,"TERMINAL",0,0,"539310",,terminal_output +1601,2930909,"TERMINAL",0,0,"641041",,terminal_output +1602,2931957,"TERMINAL",0,0,"75152",,terminal_output +1603,2933001,"TERMINAL",0,0,"86263",,terminal_output +1604,2934128,"TERMINAL",0,0,"97374",,terminal_output +1605,2935209,"TERMINAL",0,0,"308485",,terminal_output +1606,2936232,"TERMINAL",0,0,"19596",,terminal_output +1607,2937276,"TERMINAL",0,0,"2506507",,terminal_output +1608,2938424,"TERMINAL",0,0,"31718",,terminal_output +1609,2939384,"TERMINAL",0,0,"42829",,terminal_output +1610,2940477,"TERMINAL",0,0,"539320",,terminal_output +1611,2941478,"TERMINAL",0,0,"642041",,terminal_output +1612,2942526,"TERMINAL",0,0,"75152",,terminal_output +1613,2943551,"TERMINAL",0,0,"86263",,terminal_output +1614,2944607,"TERMINAL",0,0,"97374",,terminal_output 
+1735,3070726,"TERMINAL",0,0,"Step 2397, loss: 0.007011009380221367\r\nStep 2398, loss: 0.009222020395100117\r\n[training log continues through step 2611, loss ~0.0056-0.0104; duplicated scrollback omitted]\r\nStep 2489, 
loss: 0.007770786061882973\r\nStep 2490, loss: 0.006597877014428377\r\nStep 2491, loss: 0.007592851761728525\r\nStep 2492, loss: 0.007470098324120045\r\nStep 2493, loss: 0.007563624531030655\r\nStep 2494, loss: 0.008127234876155853\r\nStep 2495, loss: 0.008192939683794975\r\nStep 2496, loss: 0.007001522928476334\r\nStep 2497, loss: 0.006884061731398106\r\nStep 2498, loss: 0.006947128567844629\r\nStep 2499, loss: 0.007391991559416056\r\nStep 2500, loss: 0.007984338328242302\r\nStep 2609, loss: 0.009414143860340118\r\nStep 2610, loss: 0.006693187635391951\r\nStep 2501, loss: 0.008502310141921043\r\nStep 2502, loss: 0.008302035741508007\r\nStep 2503, loss: 0.007996300235390663\r\nStep 2501, loss: 0.008502310141921043\r\nStep 2502, loss: 0.008302035741508007\r\nStep 2503, loss: 0.007996300235390663\r\nStep 2611, loss: 0.006490799598395824\r\nStep 2397, loss: 0.007011009380221367\r\nStep 2398, loss: 0.009222020395100117\r\nStep 2399, loss: 0.007734660524874926\r\nStep 2400, loss: 0.008362107910215855\r\nStep 2401, loss: 0.007307569030672312\r\nStep 2402, loss: 0.00836886279284954\r\nStep 2403, loss: 0.00897727720439434\r\nStep 2404, loss: 0.008086943067610264\r\nStep 2405, loss: 0.008360551670193672\r\nStep 2406, loss: 0.0066583710722625256\r\nStep 2407, loss: 0.008961153216660023\r\nStep 2408, loss: 0.008784957230091095\r\nStep 2409, loss: 0.0067122094333171844\r\nStep 2410, loss: 0.0077525717206299305\r\nStep 2411, loss: 0.008152220398187637\r\nStep 2412, loss: 0.007576659321784973\r\nStep 2413, loss: 0.007401448208838701\r\nStep 2414, loss: 0.006765714846551418\r\nStep 2415, loss: 0.008701230399310589\r\nStep 2416, loss: 0.0069888136349618435\r\nStep 2417, loss: 0.008154638111591339\r\nStep 2418, loss: 0.007480349857360125\r\nStep 2419, loss: 0.007382364012300968\r\nStep 2420, loss: 0.008214174769818783\r\nStep 2421, loss: 0.006775974296033382\r\nStep 2422, loss: 0.0063680135644972324\r\nStep 2397, loss: 0.007011009380221367\r\nStep 2398, loss: 0.009222020395100117\r\nStep 2399, loss: 0.007734660524874926\r\nStep 2400, loss: 0.008362107910215855\r\nStep 2401, loss: 0.007307569030672312\r\nStep 2402, loss: 0.00836886279284954\r\nStep 2403, loss: 0.00897727720439434\r\nStep 2404, loss: 0.008086943067610264\r\nStep 2405, loss: 0.008360551670193672\r\nStep 2406, loss: 0.0066583710722625256\r\nStep 2407, loss: 0.008961153216660023\r\nStep 2408, loss: 0.008784957230091095\r\nStep 2409, loss: 0.0067122094333171844\r\nStep 2410, loss: 0.0077525717206299305\r\nStep 2411, loss: 0.008152220398187637\r\nStep 2412, loss: 0.007576659321784973\r\nStep 2413, loss: 0.007401448208838701\r\nStep 2414, loss: 0.006765714846551418\r\nStep 2415, loss: 0.008701230399310589\r\nStep 2416, loss: 0.0069888136349618435\r\nStep 2417, loss: 0.008154638111591339\r\nStep 2418, loss: 0.007480349857360125\r\nStep 2419, loss: 0.007382364012300968\r\nStep 2420, loss: 0.008214174769818783\r\nStep 2421, loss: 0.006775974296033382\r\nStep 2422, loss: 0.0063680135644972324\r\nStep 2397, loss: 0.007011009380221367\r\nStep 2398, loss: 0.009222020395100117\r\nStep 2399, loss: 0.007734660524874926\r\nStep 2400, loss: 0.008362107910215855\r\nStep 2401, loss: 0.007307569030672312\r\nStep 2402, loss: 0.00836886279284954\r\nStep 2403, loss: 0.00897727720439434\r\nStep 2404, loss: 0.008086943067610264\r\nStep 2405, loss: 0.008360551670193672\r\nStep 2406, loss: 0.0066583710722625256\r\nStep 2407, loss: 0.008961153216660023\r\nStep 2408, loss: 0.008784957230091095\r\nStep 2409, loss: 0.0067122094333171844\r\nStep 2410, loss: 
0.0077525717206299305\r\nStep 2411, loss: 0.008152220398187637\r\nStep 2412, loss: 0.007576659321784973\r\nStep 2413, loss: 0.007401448208838701\r\nStep 2414, loss: 0.006765714846551418\r\nStep 2415, loss: 0.008701230399310589\r\nStep 2416, loss: 0.0069888136349618435\r\nStep 2417, loss: 0.008154638111591339\r\nStep 2418, loss: 0.007480349857360125\r\nStep 2419, loss: 0.007382364012300968\r\nStep 2420, loss: 0.008214174769818783\r\nStep 2421, loss: 0.006775974296033382\r\nStep 2422, loss: 0.0063680135644972324\r\nStep 2423, loss: 0.007302228827029467\r\nStep 2424, loss: 0.006839169189333916\r\nStep 2425, loss: 0.00781087065115571\r\nStep 2426, loss: 0.007544366642832756\r\nStep 2427, loss: 0.005898088216781616\r\nStep 2428, loss: 0.007514003198593855\r\nStep 2429, loss: 0.007356517482548952\r\nStep 2430, loss: 0.006886976771056652\r\nStep 2431, loss: 0.007959047332406044\r\nStep 2432, loss: 0.007545408792793751\r\nStep 2433, loss: 0.010138574987649918\r\nStep 2434, loss: 0.007403177674859762\r\nStep 2435, loss: 0.007963825948536396\r\nStep 2436, loss: 0.006163692567497492\r\nStep 2437, loss: 0.00650549354031682\r\nStep 2438, loss: 0.007840912789106369\r\nStep 2439, loss: 0.007475205697119236\r\nStep 2440, loss: 0.0070392717607319355\r\nStep 2441, loss: 0.007773024030029774\r\nStep 2442, loss: 0.0057103317230939865\r\nStep 2443, loss: 0.0063034710474312305\r\nStep 2444, loss: 0.0076279351487755775\r\nStep 2445, loss: 0.008959918282926083\r\nStep 2446, loss: 0.008051451295614243\r\nStep 2447, loss: 0.005905593745410442\r\nStep 2448, loss: 0.007637695875018835\r\nStep 2423, loss: 0.007302228827029467\r\nStep 2424, loss: 0.006839169189333916\r\nStep 2425, loss: 0.00781087065115571\r\nStep 2426, loss: 0.007544366642832756\r\nStep 2427, loss: 0.005898088216781616\r\nStep 2428, loss: 0.007514003198593855\r\nStep 2429, loss: 0.007356517482548952\r\nStep 2430, loss: 0.006886976771056652\r\nStep 2431, loss: 0.007959047332406044\r\nStep 2432, loss: 0.007545408792793751\r\nStep 2433, loss: 0.010138574987649918\r\nStep 2434, loss: 0.007403177674859762\r\nStep 2435, loss: 0.007963825948536396\r\nStep 2436, loss: 0.006163692567497492\r\nStep 2437, loss: 0.00650549354031682\r\nStep 2438, loss: 0.007840912789106369\r\nStep 2439, loss: 0.007475205697119236\r\nStep 2440, loss: 0.0070392717607319355\r\nStep 2441, loss: 0.007773024030029774\r\nStep 2442, loss: 0.0057103317230939865\r\nStep 2443, loss: 0.0063034710474312305\r\nStep 2444, loss: 0.0076279351487755775\r\nStep 2445, loss: 0.008959918282926083\r\nStep 2446, loss: 0.008051451295614243\r\nStep 2447, loss: 0.005905593745410442\r\nStep 2448, loss: 0.007637695875018835\r\nStep 2423, loss: 0.007302228827029467\r\nStep 2424, loss: 0.006839169189333916\r\nStep 2425, loss: 0.00781087065115571\r\nStep 2426, loss: 0.007544366642832756\r\nStep 2427, loss: 0.005898088216781616\r\nStep 2428, loss: 0.007514003198593855\r\nStep 2429, loss: 0.007356517482548952\r\nStep 2430, loss: 0.006886976771056652\r\nStep 2431, loss: 0.007959047332406044\r\nStep 2432, loss: 0.007545408792793751\r\nStep 2433, loss: 0.010138574987649918\r\nStep 2434, loss: 0.007403177674859762\r\nStep 2435, loss: 0.007963825948536396\r\nStep 2436, loss: 0.006163692567497492\r\nStep 2437, loss: 0.00650549354031682\r\nStep 2438, loss: 0.007840912789106369\r\nStep 2439, loss: 0.007475205697119236\r\nStep 2440, loss: 0.0070392717607319355\r\nStep 2441, loss: 0.007773024030029774\r\nStep 2442, loss: 0.0057103317230939865\r\nStep 2443, loss: 0.0063034710474312305\r\nStep 2444, loss: 
0.0076279351487755775\r\nStep 2445, loss: 0.008959918282926083\r\nStep 2446, loss: 0.008051451295614243\r\nStep 2447, loss: 0.005905593745410442\r\nStep 2448, loss: 0.007637695875018835\r\nStep 2449, loss: 0.00728316605091095\r\nStep 2450, loss: 0.007219402119517326\r\nStep 2451, loss: 0.006014863960444927\r\nStep 2452, loss: 0.008447017520666122\r\nStep 2453, loss: 0.007927624508738518\r\nStep 2454, loss: 0.007105099502950907\r\nStep 2455, loss: 0.007833367213606834\r\nStep 2456, loss: 0.008120629005134106\r\nStep 2457, loss: 0.007153866346925497\r\nStep 2458, loss: 0.007510841824114323\r\nStep 2459, loss: 0.00722730765119195\r\nStep 2460, loss: 0.007352590095251799\r\nStep 2461, loss: 0.005933630280196667\r\nStep 2462, loss: 0.007000593468546867\r\nStep 2463, loss: 0.007709654048085213\r\nStep 2464, loss: 0.007068892475217581\r\nStep 2465, loss: 0.008513451553881168\r\nStep 2466, loss: 0.006648980546742678\r\nStep 2467, loss: 0.007945581339299679\r\nStep 2468, loss: 0.00866736564785242\r\nStep 2469, loss: 0.008055377751588821\r\nStep 2470, loss: 0.006331093143671751\r\nStep 2471, loss: 0.006056362763047218\r\nStep 2472, loss: 0.008721460588276386\r\nStep 2473, loss: 0.006648453418165445\r\nStep 2474, loss: 0.006934760604053736\r\nStep 2449, loss: 0.00728316605091095\r\nStep 2450, loss: 0.007219402119517326\r\nStep 2451, loss: 0.006014863960444927\r\nStep 2452, loss: 0.008447017520666122\r\nStep 2453, loss: 0.007927624508738518\r\nStep 2454, loss: 0.007105099502950907\r\nStep 2455, loss: 0.007833367213606834\r\nStep 2456, loss: 0.008120629005134106\r\nStep 2457, loss: 0.007153866346925497\r\nStep 2458, loss: 0.007510841824114323\r\nStep 2459, loss: 0.00722730765119195\r\nStep 2460, loss: 0.007352590095251799\r\nStep 2461, loss: 0.005933630280196667\r\nStep 2462, loss: 0.007000593468546867\r\nStep 2463, loss: 0.007709654048085213\r\nStep 2464, loss: 0.007068892475217581\r\nStep 2465, loss: 0.008513451553881168\r\nStep 2466, loss: 0.006648980546742678\r\nStep 2467, loss: 0.007945581339299679\r\nStep 2468, loss: 0.00866736564785242\r\nStep 2469, loss: 0.008055377751588821\r\nStep 2470, loss: 0.006331093143671751\r\nStep 2471, loss: 0.006056362763047218\r\nStep 2472, loss: 0.008721460588276386\r\nStep 2473, loss: 0.006648453418165445\r\nStep 2474, loss: 0.006934760604053736\r\nStep 2449, loss: 0.00728316605091095\r\nStep 2450, loss: 0.007219402119517326\r\nStep 2451, loss: 0.006014863960444927\r\nStep 2452, loss: 0.008447017520666122\r\nStep 2453, loss: 0.007927624508738518\r\nStep 2454, loss: 0.007105099502950907\r\nStep 2455, loss: 0.007833367213606834\r\nStep 2456, loss: 0.008120629005134106\r\nStep 2457, loss: 0.007153866346925497\r\nStep 2458, loss: 0.007510841824114323\r\nStep 2459, loss: 0.00722730765119195\r\nStep 2460, loss: 0.007352590095251799\r\nStep 2461, loss: 0.005933630280196667\r\nStep 2462, loss: 0.007000593468546867\r\nStep 2463, loss: 0.007709654048085213\r\nStep 2464, loss: 0.007068892475217581\r\nStep 2465, loss: 0.008513451553881168\r\nStep 2466, loss: 0.006648980546742678\r\nStep 2467, loss: 0.007945581339299679\r\nStep 2468, loss: 0.00866736564785242\r\nStep 2469, loss: 0.008055377751588821\r\nStep 2470, loss: 0.006331093143671751\r\nStep 2471, loss: 0.006056362763047218\r\nStep 2472, loss: 0.008721460588276386\r\nStep 2473, loss: 0.006648453418165445\r\nStep 2474, loss: 0.006934760604053736\r\nStep 2475, loss: 0.0075280978344380856\r\nStep 2476, loss: 0.0069909850135445595\r\nStep 2477, loss: 0.007902940735220909\r\nStep 2478, loss: 0.008560294285416603\r\nStep 2479, 
loss: 0.006950830575078726\r\nStep 2480, loss: 0.007374444045126438\r\nStep 2481, loss: 0.007784189190715551\r\nStep 2482, loss: 0.007633774541318417\r\nStep 2483, loss: 0.0070148129016160965\r\nStep 2484, loss: 0.008988810703158379\r\nStep 2485, loss: 0.00784060824662447\r\nStep 2486, loss: 0.006766042206436396\r\nStep 2487, loss: 0.00791437178850174\r\nStep 2488, loss: 0.00787375308573246\r\nStep 2489, loss: 0.007770786061882973\r\nStep 2490, loss: 0.006597877014428377\r\nStep 2491, loss: 0.007592851761728525\r\nStep 2492, loss: 0.007470098324120045\r\nStep 2493, loss: 0.007563624531030655\r\nStep 2494, loss: 0.008127234876155853\r\nStep 2495, loss: 0.008192939683794975\r\nStep 2496, loss: 0.007001522928476334\r\nStep 2497, loss: 0.006884061731398106\r\nStep 2498, loss: 0.006947128567844629\r\nStep 2499, loss: 0.007391991559416056\r\nStep 2500, loss: 0.007984338328242302\r\nStep 2475, loss: 0.0075280978344380856\r\nStep 2476, loss: 0.0069909850135445595\r\nStep 2477, loss: 0.007902940735220909\r\nStep 2478, loss: 0.008560294285416603\r\nStep 2479, loss: 0.006950830575078726\r\nStep 2480, loss: 0.007374444045126438\r\nStep 2481, loss: 0.007784189190715551\r\nStep 2482, loss: 0.007633774541318417\r\nStep 2483, loss: 0.0070148129016160965\r\nStep 2484, loss: 0.008988810703158379\r\nStep 2485, loss: 0.00784060824662447\r\nStep 2486, loss: 0.006766042206436396\r\nStep 2487, loss: 0.00791437178850174\r\nStep 2488, loss: 0.00787375308573246\r\nStep 2489, loss: 0.007770786061882973\r\nStep 2490, loss: 0.006597877014428377\r\nStep 2491, loss: 0.007592851761728525\r\nStep 2492, loss: 0.007470098324120045\r\nStep 2493, loss: 0.007563624531030655\r\nStep 2494, loss: 0.008127234876155853\r\nStep 2495, loss: 0.008192939683794975\r\nStep 2496, loss: 0.007001522928476334\r\nStep 2497, loss: 0.006884061731398106\r\nStep 2498, loss: 0.006947128567844629\r\nStep 2499, loss: 0.007391991559416056\r\nStep 2500, loss: 0.007984338328242302\r\nStep 2475, loss: 0.0075280978344380856\r\nStep 2476, loss: 0.0069909850135445595\r\nStep 2477, loss: 0.007902940735220909\r\nStep 2478, loss: 0.008560294285416603\r\nStep 2479, loss: 0.006950830575078726\r\nStep 2480, loss: 0.007374444045126438\r\nStep 2481, loss: 0.007784189190715551\r\nStep 2482, loss: 0.007633774541318417\r\nStep 2483, loss: 0.0070148129016160965\r\nStep 2484, loss: 0.008988810703158379\r\nStep 2485, loss: 0.00784060824662447\r\nStep 2486, loss: 0.006766042206436396\r\nStep 2487, loss: 0.00791437178850174\r\nStep 2488, loss: 0.00787375308573246\r\nStep 2489, loss: 0.007770786061882973\r\nStep 2490, loss: 0.006597877014428377\r\nStep 2491, loss: 0.007592851761728525\r\nStep 2492, loss: 0.007470098324120045\r\nStep 2493, loss: 0.007563624531030655\r\nStep 2494, loss: 0.008127234876155853\r\nStep 2495, loss: 0.008192939683794975\r\nStep 2496, loss: 0.007001522928476334\r\nStep 2497, loss: 0.006884061731398106\r\nStep 2498, loss: 0.006947128567844629\r\nStep 2499, loss: 0.007391991559416056\r\nStep 2500, loss: 0.007984338328242302\r\nStep 2501, loss: 0.008502310141921043\r\nStep 2502, loss: 0.008302035741508007\r\nStep 2503, loss: 0.007996300235390663\r\nStep 2501, loss: 0.008502310141921043\r\nStep 2502, loss: 0.008302035741508007\r\nStep 2503, loss: 0.007996300235390663\r\nStep 2501, loss: 0.008502310141921043\r\nStep 2502, loss: 0.008302035741508007\r\nStep 2503, loss: 0.007996300235390663\r\nStep 2501, loss: 0.008502310141921043\r\nStep 2502, loss: 0.008302035741508007\r\nStep 2503, loss: 0.007996300235390663\r\nStep 2397, loss: 
0.007011009380221367\r\nStep 2398, loss: 0.009222020395100117\r\nStep 2399, loss: 0.007734660524874926\r\nStep 2400, loss: 0.008362107910215855\r\nStep 2401, loss: 0.007307569030672312\r\nStep 2402, loss: 0.00836886279284954\r\nStep 2403, loss: 0.00897727720439434\r\nStep 2404, loss: 0.008086943067610264\r\nStep 2405, loss: 0.008360551670193672\r\nStep 2406, loss: 0.0066583710722625256\r\nStep 2407, loss: 0.008961153216660023\r\nStep 2408, loss: 0.008784957230091095\r\nStep 2409, loss: 0.0067122094333171844\r\nStep 2410, loss: 0.0077525717206299305\r\nStep 2411, loss: 0.008152220398187637\r\nStep 2412, loss: 0.007576659321784973\r\nStep 2413, loss: 0.007401448208838701\r\nStep 2414, loss: 0.006765714846551418\r\nStep 2415, loss: 0.008701230399310589\r\nStep 2416, loss: 0.0069888136349618435\r\nStep 2417, loss: 0.008154638111591339\r\nStep 2418, loss: 0.007480349857360125\r\nStep 2419, loss: 0.007382364012300968\r\nStep 2420, loss: 0.008214174769818783\r\nStep 2421, loss: 0.006775974296033382\r\nStep 2422, loss: 0.0063680135644972324\r\nStep 2504, loss: 0.007510323077440262\r\nStep 2505, loss: 0.006705992389470339\r\nStep 2506, loss: 0.006571953184902668\r\nStep 2507, loss: 0.007087813224643469\r\nStep 2508, loss: 0.007923539727926254\r\nStep 2509, loss: 0.0072900597006082535\r\nStep 2510, loss: 0.00589447608217597\r\nStep 2511, loss: 0.005637349560856819\r\nStep 2512, loss: 0.007145498879253864\r\nStep 2513, loss: 0.008346308022737503\r\nStep 2514, loss: 0.006376652047038078\r\nStep 2515, loss: 0.007614105939865112\r\nStep 2516, loss: 0.006451908033341169\r\nStep 2517, loss: 0.006981202866882086\r\nStep 2518, loss: 0.009066260419785976\r\nStep 2519, loss: 0.008330306969583035\r\nStep 2520, loss: 0.008212791755795479\r\nStep 2521, loss: 0.007823663763701916\r\nStep 2522, loss: 0.008066629059612751\r\nStep 2523, loss: 0.006481446325778961\r\nStep 2524, loss: 0.007869022898375988\r\nStep 2525, loss: 0.006650784518569708\r\nStep 2526, loss: 0.007010226137936115\r\nStep 2527, loss: 0.007853398099541664\r\nStep 2528, loss: 0.0067210146225988865\r\nStep 2529, loss: 0.006319122388958931\r\nStep 2611, loss: 0.006490799598395824\r\nStep 2504, loss: 0.007510323077440262\r\nStep 2505, loss: 0.006705992389470339\r\nStep 2506, loss: 0.006571953184902668\r\nStep 2507, loss: 0.007087813224643469\r\nStep 2508, loss: 0.007923539727926254\r\nStep 2509, loss: 0.0072900597006082535\r\nStep 2510, loss: 0.00589447608217597\r\nStep 2511, loss: 0.005637349560856819\r\nStep 2512, loss: 0.007145498879253864\r\nStep 2513, loss: 0.008346308022737503\r\nStep 2514, loss: 0.006376652047038078\r\nStep 2515, loss: 0.007614105939865112\r\nStep 2516, loss: 0.006451908033341169\r\nStep 2517, loss: 0.006981202866882086\r\nStep 2518, loss: 0.009066260419785976\r\nStep 2519, loss: 0.008330306969583035\r\nStep 2520, loss: 0.008212791755795479\r\nStep 2521, loss: 0.007823663763701916\r\nStep 2522, loss: 0.008066629059612751\r\nStep 2523, loss: 0.006481446325778961\r\nStep 2524, loss: 0.007869022898375988\r\nStep 2525, loss: 0.006650784518569708\r\nStep 2526, loss: 0.007010226137936115\r\nStep 2527, loss: 0.007853398099541664\r\nStep 2528, loss: 0.0067210146225988865\r\nStep 2529, loss: 0.006319122388958931\r\nStep 2423, loss: 0.007302228827029467\r\nStep 2424, loss: 0.006839169189333916\r\nStep 2425, loss: 0.00781087065115571\r\nStep 2426, loss: 0.007544366642832756\r\nStep 2427, loss: 0.005898088216781616\r\nStep 2428, loss: 0.007514003198593855\r\nStep 2429, loss: 0.007356517482548952\r\nStep 2430, loss: 
0.006886976771056652\r\nStep 2431, loss: 0.007959047332406044\r\nStep 2432, loss: 0.007545408792793751\r\nStep 2433, loss: 0.010138574987649918\r\nStep 2434, loss: 0.007403177674859762\r\nStep 2435, loss: 0.007963825948536396\r\nStep 2436, loss: 0.006163692567497492\r\nStep 2437, loss: 0.00650549354031682\r\nStep 2438, loss: 0.007840912789106369\r\nStep 2439, loss: 0.007475205697119236\r\nStep 2440, loss: 0.0070392717607319355\r\nStep 2441, loss: 0.007773024030029774\r\nStep 2442, loss: 0.0057103317230939865\r\nStep 2443, loss: 0.0063034710474312305\r\nStep 2444, loss: 0.0076279351487755775\r\nStep 2445, loss: 0.008959918282926083\r\nStep 2446, loss: 0.008051451295614243\r\nStep 2447, loss: 0.005905593745410442\r\nStep 2448, loss: 0.007637695875018835\r\nStep 2530, loss: 0.007312402129173279\r\nStep 2531, loss: 0.0074559161439538\r\nStep 2532, loss: 0.007933441549539566\r\nStep 2533, loss: 0.006511279381811619\r\nStep 2534, loss: 0.007095860783010721\r\nStep 2535, loss: 0.0072814044542610645\r\nStep 2536, loss: 0.007258410565555096\r\nStep 2537, loss: 0.007107825018465519\r\nStep 2538, loss: 0.007030786480754614\r\nStep 2539, loss: 0.008914768695831299\r\nStep 2540, loss: 0.006776256486773491\r\nStep 2541, loss: 0.006186715327203274\r\nStep 2542, loss: 0.008643125183880329\r\nStep 2543, loss: 0.008657856844365597\r\nStep 2544, loss: 0.006630297750234604\r\nStep 2545, loss: 0.008147209882736206\r\nStep 2546, loss: 0.00733942911028862\r\nStep 2547, loss: 0.00752344261854887\r\nStep 2548, loss: 0.0063140313141047955\r\nStep 2549, loss: 0.007582214195281267\r\nStep 2550, loss: 0.007775105535984039\r\nStep 2551, loss: 0.006107931956648827\r\nStep 2552, loss: 0.006902703549712896\r\nStep 2553, loss: 0.007549134083092213\r\nStep 2554, loss: 0.007413381244987249\r\nStep 2555, loss: 0.006547373719513416\r\nStep 2504, loss: 0.007510323077440262\r\nStep 2505, loss: 0.006705992389470339\r\nStep 2506, loss: 0.006571953184902668\r\nStep 2507, loss: 0.007087813224643469\r\nStep 2508, loss: 0.007923539727926254\r\nStep 2509, loss: 0.0072900597006082535\r\nStep 2510, loss: 0.00589447608217597\r\nStep 2511, loss: 0.005637349560856819\r\nStep 2512, loss: 0.007145498879253864\r\nStep 2513, loss: 0.008346308022737503\r\nStep 2514, loss: 0.006376652047038078\r\nStep 2515, loss: 0.007614105939865112\r\nStep 2516, loss: 0.006451908033341169\r\nStep 2517, loss: 0.006981202866882086\r\nStep 2518, loss: 0.009066260419785976\r\nStep 2519, loss: 0.008330306969583035\r\nStep 2520, loss: 0.008212791755795479\r\nStep 2521, loss: 0.007823663763701916\r\nStep 2522, loss: 0.008066629059612751\r\nStep 2523, loss: 0.006481446325778961\r\nStep 2524, loss: 0.007869022898375988\r\nStep 2525, loss: 0.006650784518569708\r\nStep 2526, loss: 0.007010226137936115\r\nStep 2527, loss: 0.007853398099541664\r\nStep 2528, loss: 0.0067210146225988865\r\nStep 2529, loss: 0.006319122388958931\r\nStep 2530, loss: 0.007312402129173279\r\nStep 2531, loss: 0.0074559161439538\r\nStep 2532, loss: 0.007933441549539566\r\nStep 2533, loss: 0.006511279381811619\r\nStep 2534, loss: 0.007095860783010721\r\nStep 2535, loss: 0.0072814044542610645\r\nStep 2536, loss: 0.007258410565555096\r\nStep 2537, loss: 0.007107825018465519\r\nStep 2538, loss: 0.007030786480754614\r\nStep 2539, loss: 0.008914768695831299\r\nStep 2540, loss: 0.006776256486773491\r\nStep 2541, loss: 0.006186715327203274\r\nStep 2542, loss: 0.008643125183880329\r\nStep 2543, loss: 0.008657856844365597\r\nStep 2544, loss: 0.006630297750234604\r\nStep 2545, loss: 
0.008147209882736206\r\nStep 2546, loss: 0.00733942911028862\r\nStep 2547, loss: 0.00752344261854887\r\nStep 2548, loss: 0.0063140313141047955\r\nStep 2549, loss: 0.007582214195281267\r\nStep 2550, loss: 0.007775105535984039\r\nStep 2551, loss: 0.006107931956648827\r\nStep 2552, loss: 0.006902703549712896\r\nStep 2553, loss: 0.007549134083092213\r\nStep 2554, loss: 0.007413381244987249\r\nStep 2555, loss: 0.006547373719513416\r\nStep 2449, loss: 0.00728316605091095\r\nStep 2450, loss: 0.007219402119517326\r\nStep 2451, loss: 0.006014863960444927\r\nStep 2452, loss: 0.008447017520666122\r\nStep 2453, loss: 0.007927624508738518\r\nStep 2454, loss: 0.007105099502950907\r\nStep 2455, loss: 0.007833367213606834\r\nStep 2456, loss: 0.008120629005134106\r\nStep 2457, loss: 0.007153866346925497\r\nStep 2458, loss: 0.007510841824114323\r\nStep 2459, loss: 0.00722730765119195\r\nStep 2460, loss: 0.007352590095251799\r\nStep 2461, loss: 0.005933630280196667\r\nStep 2462, loss: 0.007000593468546867\r\nStep 2463, loss: 0.007709654048085213\r\nStep 2464, loss: 0.007068892475217581\r\nStep 2465, loss: 0.008513451553881168\r\nStep 2466, loss: 0.006648980546742678\r\nStep 2467, loss: 0.007945581339299679\r\nStep 2468, loss: 0.00866736564785242\r\nStep 2469, loss: 0.008055377751588821\r\nStep 2470, loss: 0.006331093143671751\r\nStep 2471, loss: 0.006056362763047218\r\nStep 2472, loss: 0.008721460588276386\r\nStep 2473, loss: 0.006648453418165445\r\nStep 2474, loss: 0.006934760604053736\r\nStep 2556, loss: 0.007332624867558479\r\nStep 2557, loss: 0.009615802206099033\r\nStep 2558, loss: 0.007685955613851547\r\nStep 2559, loss: 0.008209547027945518\r\nStep 2560, loss: 0.00823100097477436\r\nStep 2561, loss: 0.007637364789843559\r\nStep 2562, loss: 0.007423021364957094\r\nStep 2563, loss: 0.008770793676376343\r\nStep 2564, loss: 0.007807317189872265\r\nStep 2565, loss: 0.0068536303006112576\r\nStep 2566, loss: 0.006555009633302689\r\nStep 2567, loss: 0.008149969391524792\r\nStep 2568, loss: 0.0065345908515155315\r\nStep 2569, loss: 0.007536664605140686\r\nStep 2570, loss: 0.007713436149060726\r\nStep 2571, loss: 0.008231834508478642\r\nStep 2572, loss: 0.008097534067928791\r\nStep 2573, loss: 0.007763379719108343\r\nStep 2574, loss: 0.007971882820129395\r\nStep 2575, loss: 0.008070181123912334\r\nStep 2576, loss: 0.00769038125872612\r\nStep 2577, loss: 0.006096909288316965\r\nStep 2578, loss: 0.0073938630521297455\r\nStep 2579, loss: 0.006474383175373077\r\nStep 2580, loss: 0.007321350276470184\r\nStep 2581, loss: 0.006757260765880346\r\nStep 2530, loss: 0.007312402129173279\r\nStep 2531, loss: 0.0074559161439538\r\nStep 2532, loss: 0.007933441549539566\r\nStep 2533, loss: 0.006511279381811619\r\nStep 2534, loss: 0.007095860783010721\r\nStep 2535, loss: 0.0072814044542610645\r\nStep 2536, loss: 0.007258410565555096\r\nStep 2537, loss: 0.007107825018465519\r\nStep 2538, loss: 0.007030786480754614\r\nStep 2539, loss: 0.008914768695831299\r\nStep 2540, loss: 0.006776256486773491\r\nStep 2541, loss: 0.006186715327203274\r\nStep 2542, loss: 0.008643125183880329\r\nStep 2543, loss: 0.008657856844365597\r\nStep 2544, loss: 0.006630297750234604\r\nStep 2545, loss: 0.008147209882736206\r\nStep 2546, loss: 0.00733942911028862\r\nStep 2547, loss: 0.00752344261854887\r\nStep 2548, loss: 0.0063140313141047955\r\nStep 2549, loss: 0.007582214195281267\r\nStep 2550, loss: 0.007775105535984039\r\nStep 2551, loss: 0.006107931956648827\r\nStep 2552, loss: 0.006902703549712896\r\nStep 2553, loss: 0.007549134083092213\r\nStep 
2554, loss: 0.007413381244987249\r\nStep 2555, loss: 0.006547373719513416\r\nStep 2556, loss: 0.007332624867558479\r\nStep 2557, loss: 0.009615802206099033\r\nStep 2558, loss: 0.007685955613851547\r\nStep 2559, loss: 0.008209547027945518\r\nStep 2560, loss: 0.00823100097477436\r\nStep 2561, loss: 0.007637364789843559\r\nStep 2562, loss: 0.007423021364957094\r\nStep 2563, loss: 0.008770793676376343\r\nStep 2564, loss: 0.007807317189872265\r\nStep 2565, loss: 0.0068536303006112576\r\nStep 2566, loss: 0.006555009633302689\r\nStep 2567, loss: 0.008149969391524792\r\nStep 2568, loss: 0.0065345908515155315\r\nStep 2569, loss: 0.007536664605140686\r\nStep 2570, loss: 0.007713436149060726\r\nStep 2571, loss: 0.008231834508478642\r\nStep 2572, loss: 0.008097534067928791\r\nStep 2573, loss: 0.007763379719108343\r\nStep 2574, loss: 0.007971882820129395\r\nStep 2575, loss: 0.008070181123912334\r\nStep 2576, loss: 0.00769038125872612\r\nStep 2577, loss: 0.006096909288316965\r\nStep 2578, loss: 0.0073938630521297455\r\nStep 2579, loss: 0.006474383175373077\r\nStep 2580, loss: 0.007321350276470184\r\nStep 2581, loss: 0.006757260765880346\r\nStep 2475, loss: 0.0075280978344380856\r\nStep 2476, loss: 0.0069909850135445595\r\nStep 2477, loss: 0.007902940735220909\r\nStep 2478, loss: 0.008560294285416603\r\nStep 2479, loss: 0.006950830575078726\r\nStep 2480, loss: 0.007374444045126438\r\nStep 2481, loss: 0.007784189190715551\r\nStep 2482, loss: 0.007633774541318417\r\nStep 2483, loss: 0.0070148129016160965\r\nStep 2484, loss: 0.008988810703158379\r\nStep 2485, loss: 0.00784060824662447\r\nStep 2486, loss: 0.006766042206436396\r\nStep 2487, loss: 0.00791437178850174\r\nStep 2488, loss: 0.00787375308573246\r\nStep 2489, loss: 0.007770786061882973\r\nStep 2490, loss: 0.006597877014428377\r\nStep 2491, loss: 0.007592851761728525\r\nStep 2492, loss: 0.007470098324120045\r\nStep 2493, loss: 0.007563624531030655\r\nStep 2494, loss: 0.008127234876155853\r\nStep 2495, loss: 0.008192939683794975\r\nStep 2496, loss: 0.007001522928476334\r\nStep 2497, loss: 0.006884061731398106\r\nStep 2498, loss: 0.006947128567844629\r\nStep 2499, loss: 0.007391991559416056\r\nStep 2500, loss: 0.007984338328242302\r\nStep 2582, loss: 0.007891835644841194\r\nStep 2583, loss: 0.006814071908593178\r\nStep 2584, loss: 0.005922597832977772\r\nStep 2585, loss: 0.007624792400747538\r\nStep 2586, loss: 0.005838159937411547\r\nStep 2587, loss: 0.007284690625965595\r\nStep 2588, loss: 0.006778536830097437\r\nStep 2589, loss: 0.00751010375097394\r\nStep 2590, loss: 0.00926201231777668\r\nStep 2591, loss: 0.007845702581107616\r\nStep 2592, loss: 0.008334299549460411\r\nStep 2593, loss: 0.007459577172994614\r\nStep 2594, loss: 0.006296036299318075\r\nStep 2595, loss: 0.008171480149030685\r\nStep 2596, loss: 0.0080800149589777\r\nStep 2597, loss: 0.007184814196079969\r\nStep 2598, loss: 0.007581953890621662\r\nStep 2599, loss: 0.008107693865895271\r\nStep 2600, loss: 0.006956801284104586\r\nStep 2601, loss: 0.0060848877765238285\r\nStep 2602, loss: 0.007925634272396564\r\nStep 2603, loss: 0.007370885461568832\r\nStep 2604, loss: 0.007229670882225037\r\nStep 2605, loss: 0.00792711041867733\r\nStep 2606, loss: 0.007798486389219761\r\nStep 2607, loss: 0.008043816313147545\r\nStep 2608, loss: 0.007629571948200464\r\nStep 2556, loss: 0.007332624867558479\r\nStep 2557, loss: 0.009615802206099033\r\nStep 2558, loss: 0.007685955613851547\r\nStep 2559, loss: 0.008209547027945518\r\nStep 2560, loss: 0.00823100097477436\r\nStep 2561, loss: 
0.007637364789843559\r\nStep 2562, loss: 0.007423021364957094\r\nStep 2563, loss: 0.008770793676376343\r\nStep 2564, loss: 0.007807317189872265\r\nStep 2565, loss: 0.0068536303006112576\r\nStep 2566, loss: 0.006555009633302689\r\nStep 2567, loss: 0.008149969391524792\r\nStep 2568, loss: 0.0065345908515155315\r\nStep 2569, loss: 0.007536664605140686\r\nStep 2570, loss: 0.007713436149060726\r\nStep 2571, loss: 0.008231834508478642\r\nStep 2572, loss: 0.008097534067928791\r\nStep 2573, loss: 0.007763379719108343\r\nStep 2574, loss: 0.007971882820129395\r\nStep 2575, loss: 0.008070181123912334\r\nStep 2576, loss: 0.00769038125872612\r\nStep 2577, loss: 0.006096909288316965\r\nStep 2578, loss: 0.0073938630521297455\r\nStep 2579, loss: 0.006474383175373077\r\nStep 2580, loss: 0.007321350276470184\r\nStep 2581, loss: 0.006757260765880346\r\nStep 2582, loss: 0.007891835644841194\r\nStep 2583, loss: 0.006814071908593178\r\nStep 2584, loss: 0.005922597832977772\r\nStep 2585, loss: 0.007624792400747538\r\nStep 2586, loss: 0.005838159937411547\r\nStep 2587, loss: 0.007284690625965595\r\nStep 2588, loss: 0.006778536830097437\r\nStep 2589, loss: 0.00751010375097394\r\nStep 2590, loss: 0.00926201231777668\r\nStep 2591, loss: 0.007845702581107616\r\nStep 2592, loss: 0.008334299549460411\r\nStep 2593, loss: 0.007459577172994614\r\nStep 2594, loss: 0.006296036299318075\r\nStep 2595, loss: 0.008171480149030685\r\nStep 2596, loss: 0.0080800149589777\r\nStep 2597, loss: 0.007184814196079969\r\nStep 2598, loss: 0.007581953890621662\r\nStep 2599, loss: 0.008107693865895271\r\nStep 2600, loss: 0.006956801284104586\r\nStep 2601, loss: 0.0060848877765238285\r\nStep 2602, loss: 0.007925634272396564\r\nStep 2603, loss: 0.007370885461568832\r\nStep 2604, loss: 0.007229670882225037\r\nStep 2605, loss: 0.00792711041867733\r\nStep 2606, loss: 0.007798486389219761\r\nStep 2607, loss: 0.008043816313147545\r\nStep 2608, loss: 0.007629571948200464\r\nStep 2501, loss: 0.008502310141921043\r\nStep 2502, loss: 0.008302035741508007\r\nStep 2503, loss: 0.007996300235390663\r\nStep 2609, loss: 0.009414143860340118\r\nStep 2610, loss: 0.006693187635391951\r\nStep 2582, loss: 0.007891835644841194\r\nStep 2583, loss: 0.006814071908593178\r\nStep 2584, loss: 0.005922597832977772\r\nStep 2585, loss: 0.007624792400747538\r\nStep 2586, loss: 0.005838159937411547\r\nStep 2587, loss: 0.007284690625965595\r\nStep 2588, loss: 0.006778536830097437\r\nStep 2589, loss: 0.00751010375097394\r\nStep 2590, loss: 0.00926201231777668\r\nStep 2591, loss: 0.007845702581107616\r\nStep 2592, loss: 0.008334299549460411\r\nStep 2593, loss: 0.007459577172994614\r\nStep 2594, loss: 0.006296036299318075\r\nStep 2595, loss: 0.008171480149030685\r\nStep 2596, loss: 0.0080800149589777\r\nStep 2597, loss: 0.007184814196079969\r\nStep 2598, loss: 0.007581953890621662\r\nStep 2599, loss: 0.008107693865895271\r\nStep 2600, loss: 0.006956801284104586\r\nStep 2601, loss: 0.0060848877765238285\r\nStep 2602, loss: 0.007925634272396564\r\nStep 2603, loss: 0.007370885461568832\r\nStep 2604, loss: 0.007229670882225037\r\nStep 2605, loss: 0.00792711041867733\r\nStep 2606, loss: 0.007798486389219761\r\nStep 2607, loss: 0.008043816313147545\r\nStep 2608, loss: 0.007629571948200464\r\nStep 2609, loss: 0.009414143860340118\r\nStep 2610, loss: 0.006693187635391951\r\nStep 2504, loss: 0.007510323077440262\r\nStep 2505, loss: 0.006705992389470339\r\nStep 2506, loss: 0.006571953184902668\r\nStep 2507, loss: 0.007087813224643469\r\nStep 2508, loss: 0.007923539727926254\r\nStep 2509, 
loss: 0.0072900597006082535\r\nStep 2510, loss: 0.00589447608217597\r\nStep 2511, loss: 0.005637349560856819\r\nStep 2512, loss: 0.007145498879253864\r\nStep 2513, loss: 0.008346308022737503\r\nStep 2514, loss: 0.006376652047038078\r\nStep 2515, loss: 0.007614105939865112\r\nStep 2516, loss: 0.006451908033341169\r\nStep 2517, loss: 0.006981202866882086\r\nStep 2518, loss: 0.009066260419785976\r\nStep 2519, loss: 0.008330306969583035\r\nStep 2520, loss: 0.008212791755795479\r\nStep 2521, loss: 0.007823663763701916\r\nStep 2522, loss: 0.008066629059612751\r\nStep 2523, loss: 0.006481446325778961\r\nStep 2524, loss: 0.007869022898375988\r\nStep 2525, loss: 0.006650784518569708\r\nStep 2526, loss: 0.007010226137936115\r\nStep 2527, loss: 0.007853398099541664\r\nStep 2528, loss: 0.0067210146225988865\r\nStep 2529, loss: 0.006319122388958931\r\nStep 2609, loss: 0.009414143860340118\r\nStep 2610, loss: 0.006693187635391951\r\nStep 2611, loss: 0.006490799598395824\r\nStep 2530, loss: 0.007312402129173279\r\nStep 2531, loss: 0.0074559161439538\r\nStep 2532, loss: 0.007933441549539566\r\nStep 2533, loss: 0.006511279381811619\r\nStep 2534, loss: 0.007095860783010721\r\nStep 2535, loss: 0.0072814044542610645\r\nStep 2536, loss: 0.007258410565555096\r\nStep 2537, loss: 0.007107825018465519\r\nStep 2538, loss: 0.007030786480754614\r\nStep 2539, loss: 0.008914768695831299\r\nStep 2540, loss: 0.006776256486773491\r\nStep 2541, loss: 0.006186715327203274\r\nStep 2542, loss: 0.008643125183880329\r\nStep 2543, loss: 0.008657856844365597\r\nStep 2544, loss: 0.006630297750234604\r\nStep 2545, loss: 0.008147209882736206\r\nStep 2546, loss: 0.00733942911028862\r\nStep 2547, loss: 0.00752344261854887\r\nStep 2548, loss: 0.0063140313141047955\r\nStep 2549, loss: 0.007582214195281267\r\nStep 2550, loss: 0.007775105535984039\r\nStep 2551, loss: 0.006107931956648827\r\nStep 2552, loss: 0.006902703549712896\r\nStep 2553, loss: 0.007549134083092213\r\nStep 2554, loss: 0.007413381244987249\r\nStep 2555, loss: 0.006547373719513416\r\nStep 2397, loss: 0.007011009380221367\r\nStep 2398, loss: 0.009222020395100117\r\nStep 2399, loss: 0.007734660524874926\r\nStep 2400, loss: 0.008362107910215855\r\nStep 2401, loss: 0.007307569030672312\r\nStep 2402, loss: 0.00836886279284954\r\nStep 2403, loss: 0.00897727720439434\r\nStep 2404, loss: 0.008086943067610264\r\nStep 2405, loss: 0.008360551670193672\r\nStep 2406, loss: 0.0066583710722625256\r\nStep 2407, loss: 0.008961153216660023\r\nStep 2408, loss: 0.008784957230091095\r\nStep 2409, loss: 0.0067122094333171844\r\nStep 2410, loss: 0.0077525717206299305\r\nStep 2411, loss: 0.008152220398187637\r\nStep 2412, loss: 0.007576659321784973\r\nStep 2413, loss: 0.007401448208838701\r\nStep 2414, loss: 0.006765714846551418\r\nStep 2415, loss: 0.008701230399310589\r\nStep 2416, loss: 0.0069888136349618435\r\nStep 2417, loss: 0.008154638111591339\r\nStep 2418, loss: 0.007480349857360125\r\nStep 2419, loss: 0.007382364012300968\r\nStep 2420, loss: 0.008214174769818783\r\nStep 2421, loss: 0.006775974296033382\r\nStep 2422, loss: 0.0063680135644972324\r\nStep 2556, loss: 0.007332624867558479\r\nStep 2557, loss: 0.009615802206099033\r\nStep 2558, loss: 0.007685955613851547\r\nStep 2559, loss: 0.008209547027945518\r\nStep 2560, loss: 0.00823100097477436\r\nStep 2561, loss: 0.007637364789843559\r\nStep 2562, loss: 0.007423021364957094\r\nStep 2563, loss: 0.008770793676376343\r\nStep 2564, loss: 0.007807317189872265\r\nStep 2565, loss: 0.0068536303006112576\r\nStep 2566, loss: 
0.006555009633302689\r\nStep 2567, loss: 0.008149969391524792\r\nStep 2568, loss: 0.0065345908515155315\r\nStep 2569, loss: 0.007536664605140686\r\nStep 2570, loss: 0.007713436149060726\r\nStep 2571, loss: 0.008231834508478642\r\nStep 2572, loss: 0.008097534067928791\r\nStep 2573, loss: 0.007763379719108343\r\nStep 2574, loss: 0.007971882820129395\r\nStep 2575, loss: 0.008070181123912334\r\nStep 2576, loss: 0.00769038125872612\r\nStep 2577, loss: 0.006096909288316965\r\nStep 2578, loss: 0.0073938630521297455\r\nStep 2579, loss: 0.006474383175373077\r\nStep 2580, loss: 0.007321350276470184\r\nStep 2581, loss: 0.006757260765880346\r\nStep 2423, loss: 0.007302228827029467\r\nStep 2424, loss: 0.006839169189333916\r\nStep 2425, loss: 0.00781087065115571\r\nStep 2426, loss: 0.007544366642832756\r\nStep 2427, loss: 0.005898088216781616\r\nStep 2428, loss: 0.007514003198593855\r\nStep 2429, loss: 0.007356517482548952\r\nStep 2430, loss: 0.006886976771056652\r\nStep 2431, loss: 0.007959047332406044\r\nStep 2432, loss: 0.007545408792793751\r\nStep 2433, loss: 0.010138574987649918\r\nStep 2434, loss: 0.007403177674859762\r\nStep 2435, loss: 0.007963825948536396\r\nStep 2436, loss: 0.006163692567497492\r\nStep 2437, loss: 0.00650549354031682\r\nStep 2438, loss: 0.007840912789106369\r\nStep 2439, loss: 0.007475205697119236\r\nStep 2440, loss: 0.0070392717607319355\r\nStep 2441, loss: 0.007773024030029774\r\nStep 2442, loss: 0.0057103317230939865\r\nStep 2443, loss: 0.0063034710474312305\r\nStep 2444, loss: 0.0076279351487755775\r\nStep 2445, loss: 0.008959918282926083\r\nStep 2446, loss: 0.008051451295614243\r\nStep 2447, loss: 0.005905593745410442\r\nStep 2448, loss: 0.007637695875018835\r\nStep 2582, loss: 0.007891835644841194\r\nStep 2583, loss: 0.006814071908593178\r\nStep 2584, loss: 0.005922597832977772\r\nStep 2585, loss: 0.007624792400747538\r\nStep 2586, loss: 0.005838159937411547\r\nStep 2587, loss: 0.007284690625965595\r\nStep 2588, loss: 0.006778536830097437\r\nStep 2589, loss: 0.00751010375097394\r\nStep 2590, loss: 0.00926201231777668\r\nStep 2591, loss: 0.007845702581107616\r\nStep 2592, loss: 0.008334299549460411\r\nStep 2593, loss: 0.007459577172994614\r\nStep 2594, loss: 0.006296036299318075\r\nStep 2595, loss: 0.008171480149030685\r\nStep 2596, loss: 0.0080800149589777\r\nStep 2597, loss: 0.007184814196079969\r\nStep 2598, loss: 0.007581953890621662\r\nStep 2599, loss: 0.008107693865895271\r\nStep 2600, loss: 0.006956801284104586\r\nStep 2601, loss: 0.0060848877765238285\r\nStep 2602, loss: 0.007925634272396564\r\nStep 2603, loss: 0.007370885461568832\r\nStep 2604, loss: 0.007229670882225037\r\nStep 2605, loss: 0.00792711041867733\r\nStep 2606, loss: 0.007798486389219761\r\nStep 2607, loss: 0.008043816313147545\r\nStep 2608, loss: 0.007629571948200464\r\nStep 2449, loss: 0.00728316605091095\r\nStep 2450, loss: 0.007219402119517326\r\nStep 2451, loss: 0.006014863960444927\r\nStep 2452, loss: 0.008447017520666122\r\nStep 2453, loss: 0.007927624508738518\r\nStep 2454, loss: 0.007105099502950907\r\nStep 2455, loss: 0.007833367213606834\r\nStep 2456, loss: 0.008120629005134106\r\nStep 2457, loss: 0.007153866346925497\r\nStep 2458, loss: 0.007510841824114323\r\nStep 2459, loss: 0.00722730765119195\r\nStep 2460, loss: 0.007352590095251799\r\nStep 2461, loss: 0.005933630280196667\r\nStep 2462, loss: 0.007000593468546867\r\nStep 2463, loss: 0.007709654048085213\r\nStep 2464, loss: 0.007068892475217581\r\nStep 2465, loss: 0.008513451553881168\r\nStep 2466, loss: 0.006648980546742678\r\nStep 
2467, loss: 0.007945581339299679\r\nStep 2468, loss: 0.00866736564785242\r\nStep 2469, loss: 0.008055377751588821\r\nStep 2470, loss: 0.006331093143671751\r\nStep 2471, loss: 0.006056362763047218\r\nStep 2472, loss: 0.008721460588276386\r\nStep 2473, loss: 0.006648453418165445\r\nStep 2474, loss: 0.006934760604053736\r\nStep 2609, loss: 0.009414143860340118\r\nStep 2610, loss: 0.006693187635391951\r\nStep 2475, loss: 0.0075280978344380856\r\nStep 2476, loss: 0.0069909850135445595\r\nStep 2477, loss: 0.007902940735220909\r\nStep 2478, loss: 0.008560294285416603\r\nStep 2479, loss: 0.006950830575078726\r\nStep 2480, loss: 0.007374444045126438\r\nStep 2481, loss: 0.007784189190715551\r\nStep 2482, loss: 0.007633774541318417\r\nStep 2483, loss: 0.0070148129016160965\r\nStep 2484, loss: 0.008988810703158379\r\nStep 2485, loss: 0.00784060824662447\r\nStep 2486, loss: 0.006766042206436396\r\nStep 2487, loss: 0.00791437178850174\r\nStep 2488, loss: 0.00787375308573246\r\nStep 2489, loss: 0.007770786061882973\r\nStep 2490, loss: 0.006597877014428377\r\nStep 2491, loss: 0.007592851761728525\r\nStep 2492, loss: 0.007470098324120045\r\nStep 2493, loss: 0.007563624531030655\r\nStep 2494, loss: 0.008127234876155853\r\nStep 2495, loss: 0.008192939683794975\r\nStep 2496, loss: 0.007001522928476334\r\nStep 2497, loss: 0.006884061731398106\r\nStep 2498, loss: 0.006947128567844629\r\nStep 2499, loss: 0.007391991559416056\r\nStep 2500, loss: 0.007984338328242302\r\nStep 2504, loss: 0.007510323077440262\r\nStep 2505, loss: 0.006705992389470339\r\nStep 2506, loss: 0.006571953184902668\r\nStep 2507, loss: 0.007087813224643469\r\nStep 2508, loss: 0.007923539727926254\r\nStep 2509, loss: 0.0072900597006082535\r\nStep 2510, loss: 0.00589447608217597\r\nStep 2511, loss: 0.005637349560856819\r\nStep 2512, loss: 0.007145498879253864\r\nStep 2513, loss: 0.008346308022737503\r\nStep 2514, loss: 0.006376652047038078\r\nStep 2515, loss: 0.007614105939865112\r\nStep 2516, loss: 0.006451908033341169\r\nStep 2517, loss: 0.006981202866882086\r\nStep 2518, loss: 0.009066260419785976\r\nStep 2519, loss: 0.008330306969583035\r\nStep 2520, loss: 0.008212791755795479\r\nStep 2521, loss: 0.007823663763701916\r\nStep 2522, loss: 0.008066629059612751\r\nStep 2523, loss: 0.006481446325778961\r\nStep 2524, loss: 0.007869022898375988\r\nStep 2525, loss: 0.006650784518569708\r\nStep 2526, loss: 0.007010226137936115\r\nStep 2527, loss: 0.007853398099541664\r\nStep 2528, loss: 0.0067210146225988865\r\nStep 2529, loss: 0.006319122388958931\r\nStep 2501, loss: 0.008502310141921043\r\nStep 2502, loss: 0.008302035741508007\r\nStep 2503, loss: 0.007996300235390663\r\nStep 2530, loss: 0.007312402129173279\r\nStep 2531, loss: 0.0074559161439538\r\nStep 2532, loss: 0.007933441549539566\r\nStep 2533, loss: 0.006511279381811619\r\nStep 2534, loss: 0.007095860783010721\r\nStep 2535, loss: 0.0072814044542610645\r\nStep 2536, loss: 0.007258410565555096\r\nStep 2537, loss: 0.007107825018465519\r\nStep 2538, loss: 0.007030786480754614\r\nStep 2539, loss: 0.008914768695831299\r\nStep 2540, loss: 0.006776256486773491\r\nStep 2541, loss: 0.006186715327203274\r\nStep 2542, loss: 0.008643125183880329\r\nStep 2543, loss: 0.008657856844365597\r\nStep 2544, loss: 0.006630297750234604\r\nStep 2545, loss: 0.008147209882736206\r\nStep 2546, loss: 0.00733942911028862\r\nStep 2547, loss: 0.00752344261854887\r\nStep 2548, loss: 0.0063140313141047955\r\nStep 2549, loss: 0.007582214195281267\r\nStep 2550, loss: 0.007775105535984039\r\nStep 2551, loss: 
0.006107931956648827\r\nStep 2552, loss: 0.006902703549712896\r\nStep 2553, loss: 0.007549134083092213\r\nStep 2554, loss: 0.007413381244987249\r\nStep 2555, loss: 0.006547373719513416\r\nStep 2504, loss: 0.007510323077440262\r\nStep 2505, loss: 0.006705992389470339\r\nStep 2506, loss: 0.006571953184902668\r\nStep 2507, loss: 0.007087813224643469\r\nStep 2508, loss: 0.007923539727926254\r\nStep 2509, loss: 0.0072900597006082535\r\nStep 2510, loss: 0.00589447608217597\r\nStep 2511, loss: 0.005637349560856819\r\nStep 2512, loss: 0.007145498879253864\r\nStep 2513, loss: 0.008346308022737503\r\nStep 2514, loss: 0.006376652047038078\r\nStep 2515, loss: 0.007614105939865112\r\nStep 2516, loss: 0.006451908033341169\r\nStep 2517, loss: 0.006981202866882086\r\nStep 2518, loss: 0.009066260419785976\r\nStep 2519, loss: 0.008330306969583035\r\nStep 2520, loss: 0.008212791755795479\r\nStep 2521, loss: 0.007823663763701916\r\nStep 2522, loss: 0.008066629059612751\r\nStep 2523, loss: 0.006481446325778961\r\nStep 2524, loss: 0.007869022898375988\r\nStep 2525, loss: 0.006650784518569708\r\nStep 2526, loss: 0.007010226137936115\r\nStep 2527, loss: 0.007853398099541664\r\nStep 2528, loss: 0.0067210146225988865\r\nStep 2529, loss: 0.006319122388958931\r\nStep 2556, loss: 0.007332624867558479\r\nStep 2557, loss: 0.009615802206099033\r\nStep 2558, loss: 0.007685955613851547\r\nStep 2559, loss: 0.008209547027945518\r\nStep 2560, loss: 0.00823100097477436\r\nStep 2561, loss: 0.007637364789843559\r\nStep 2562, loss: 0.007423021364957094\r\nStep 2563, loss: 0.008770793676376343\r\nStep 2564, loss: 0.007807317189872265\r\nStep 2565, loss: 0.0068536303006112576\r\nStep 2566, loss: 0.006555009633302689\r\nStep 2567, loss: 0.008149969391524792\r\nStep 2568, loss: 0.0065345908515155315\r\nStep 2569, loss: 0.007536664605140686\r\nStep 2570, loss: 0.007713436149060726\r\nStep 2571, loss: 0.008231834508478642\r\nStep 2572, loss: 0.008097534067928791\r\nStep 2573, loss: 0.007763379719108343\r\nStep 2574, loss: 0.007971882820129395\r\nStep 2575, loss: 0.008070181123912334\r\nStep 2576, loss: 0.00769038125872612\r\nStep 2577, loss: 0.006096909288316965\r\nStep 2578, loss: 0.0073938630521297455\r\nStep 2579, loss: 0.006474383175373077\r\nStep 2580, loss: 0.007321350276470184\r\nStep 2581, loss: 0.006757260765880346\r\nStep 2530, loss: 0.007312402129173279\r\nStep 2531, loss: 0.0074559161439538\r\nStep 2532, loss: 0.007933441549539566\r\nStep 2533, loss: 0.006511279381811619\r\nStep 2534, loss: 0.007095860783010721\r\nStep 2535, loss: 0.0072814044542610645\r\nStep 2536, loss: 0.007258410565555096\r\nStep 2537, loss: 0.007107825018465519\r\nStep 2538, loss: 0.007030786480754614\r\nStep 2539, loss: 0.008914768695831299\r\nStep 2540, loss: 0.006776256486773491\r\nStep 2541, loss: 0.006186715327203274\r\nStep 2542, loss: 0.008643125183880329\r\nStep 2543, loss: 0.008657856844365597\r\nStep 2544, loss: 0.006630297750234604\r\nStep 2545, loss: 0.008147209882736206\r\nStep 2546, loss: 0.00733942911028862\r\nStep 2547, loss: 0.00752344261854887\r\nStep 2548, loss: 0.0063140313141047955\r\nStep 2549, loss: 0.007582214195281267\r\nStep 2550, loss: 0.007775105535984039\r\nStep 2551, loss: 0.006107931956648827\r\nStep 2552, loss: 0.006902703549712896\r\nStep 2553, loss: 0.007549134083092213\r\nStep 2554, loss: 0.007413381244987249\r\nStep 2555, loss: 0.006547373719513416\r\nStep 2582, loss: 0.007891835644841194\r\nStep 2583, loss: 0.006814071908593178\r\nStep 2584, loss: 0.005922597832977772\r\nStep 2585, loss: 0.007624792400747538\r\nStep 
2586, loss: 0.005838159937411547\r\n[... training-run log, Steps 2397-2611 repeated out of order by terminal re-rendering (losses ~0.005-0.010); duplicate lines elided ...]\r\nStep 2426, loss: 0.007544366642832756\r\n",,terminal_output
+1736,3070831,"TERMINAL",0,0,"Step 2427, loss: 0.005898088216781616\r\n[... training-run log, Steps 2397-2611 repeated out of order by terminal re-rendering (losses ~0.005-0.010); duplicate lines elided ...]\r\nStep 2611, loss: 0.006490799598395824\r\n",,terminal_output
+1737,3071443,"TERMINAL",0,0,"643041",,terminal_output
+1738,3072470,"TERMINAL",0,0,"75152",,terminal_output
+1739,3073492,"TERMINAL",0,0,"86263",,terminal_output
+1740,3074517,"TERMINAL",0,0,"97374",,terminal_output
+1741,3075542,"TERMINAL",0,0,"508485",,terminal_output
+1742,3076563,"TERMINAL",0,0,"19596",,terminal_output
+1743,3077604,"TERMINAL",0,0,"2106107",,terminal_output
+1744,3078700,"TERMINAL",0,0,"31718",,terminal_output
+1745,3079697,"TERMINAL",0,0,"42829",,terminal_output
+1746,3080776,"TERMINAL",0,0,"5440441",,terminal_output
+1747,3081800,"TERMINAL",0,0,"75152",,terminal_output
+1748,3082914,"TERMINAL",0,0,"86263",,terminal_output
+1749,3083937,"TERMINAL",0,0,"97374",,terminal_output
+1750,3084956,"TERMINAL",0,0,"6:008485",,terminal_output
+1751,3086088,"TERMINAL",0,0,"19596",,terminal_output
+1752,3087099,"TERMINAL",0,0,"2206207",,terminal_output
+1753,3088165,"TERMINAL",0,0,"31718",,terminal_output
+1754,3089159,"TERMINAL",0,0,"42829",,terminal_output
+1755,3090204,"TERMINAL",0,0,"539350",,terminal_output
+1756,3091238,"TERMINAL",0,0,"645041",,terminal_output +1757,3091702,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1011,0,"",shellscript,selection_mouse +1758,3092272,"TERMINAL",0,0,"75152",,terminal_output +1759,3092876,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1011,1,"1",shellscript,selection_command +1760,3093109,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1011,1,"1",shellscript,selection_command +1761,3093299,"TERMINAL",0,0,"86263",,terminal_output +1762,3094326,"TERMINAL",0,0,"97374",,terminal_output +1763,3094531,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1011,0,"",shellscript,selection_command +1764,3095259,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1031,1,"",shellscript,content +1765,3095259,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1011,1,"",shellscript,content +1766,3095359,"TERMINAL",0,0,"108485",,terminal_output +1767,3095413,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1030,1,"",shellscript,content +1768,3095413,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1011,1,"",shellscript,content +1769,3095541,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1029,1,"",shellscript,content +1770,3095542,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1011,1,"",shellscript,content +1771,3095679,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1028,1,"",shellscript,content +1772,3095680,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1011,1,"",shellscript,content +1773,3096432,"TERMINAL",0,0,"19596",,terminal_output +1774,3096970,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1027,0,"7",shellscript,content +1775,3096970,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1011,0,"7",shellscript,content +1776,3096972,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1012,0,"",shellscript,selection_keyboard +1777,3097395,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1029,0,"e",shellscript,content +1778,3097395,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1012,0,"e",shellscript,content +1779,3097396,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1013,0,"",shellscript,selection_keyboard +1780,3097471,"TERMINAL",0,0,"2306307",,terminal_output +1781,3097949,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1031,0,"-",shellscript,content +1782,3097949,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1013,0,"-",shellscript,content +1783,3097950,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1014,0,"",shellscript,selection_keyboard +1784,3098107,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1033,0,"5",shellscript,content +1785,3098108,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1014,0,"5",shellscript,content +1786,3098108,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1015,0,"",shellscript,selection_keyboard +1787,3098501,"TERMINAL",0,0,"31718",,terminal_output +1788,3098860,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1014,0,"",shellscript,selection_command +1789,3099555,"TERMINAL",0,0,"42829",,terminal_output +1790,3100611,"TERMINAL",0,0,"53931:00",,terminal_output 
+1791,3101616,"TERMINAL",0,0,"646:0041",,terminal_output +1792,3102671,"TERMINAL",0,0,"75152",,terminal_output +1793,3103724,"TERMINAL",0,0,"87374",,terminal_output +1794,3104785,"TERMINAL",0,0,"208485",,terminal_output +1795,3105833,"TERMINAL",0,0,"19596",,terminal_output +1796,3106978,"TERMINAL",0,0,"2406407",,terminal_output +1797,3107935,"TERMINAL",0,0,"31718",,terminal_output +1798,3109029,"TERMINAL",0,0,"42829",,terminal_output +1799,3110049,"TERMINAL",0,0,"539310",,terminal_output +1800,3111175,"TERMINAL",0,0,"641041",,terminal_output +1801,3112144,"TERMINAL",0,0,"75152",,terminal_output +1802,3113178,"TERMINAL",0,0,"86263",,terminal_output +1803,3114199,"TERMINAL",0,0,"97374",,terminal_output +1804,3115251,"TERMINAL",0,0,"308485",,terminal_output +1805,3116305,"TERMINAL",0,0,"19596",,terminal_output +1806,3117345,"TERMINAL",0,0,"2506507",,terminal_output +1807,3118446,"TERMINAL",0,0,"31718",,terminal_output +1808,3119470,"TERMINAL",0,0,"42829",,terminal_output +1809,3120609,"TERMINAL",0,0,"539320",,terminal_output +1810,3121622,"TERMINAL",0,0,"642041",,terminal_output +1811,3122649,"TERMINAL",0,0,"75152",,terminal_output +1812,3123644,"TERMINAL",0,0,"86263",,terminal_output +1813,3124787,"TERMINAL",0,0,"97374",,terminal_output +1814,3125734,"TERMINAL",0,0,"409596",,terminal_output +1815,3126845,"TERMINAL",0,0,"21:0061:007",,terminal_output +1816,3127842,"TERMINAL",0,0,"31718",,terminal_output +1817,3128899,"TERMINAL",0,0,"42829",,terminal_output +1818,3129948,"TERMINAL",0,0,"539330",,terminal_output +1819,3130853,"TERMINAL",0,0,"^Csrun: interrupt (one more within 1 sec to abort)\r\nsrun: StepId=3316923.1 tasks 0-15: running\r\n",,terminal_output +1820,3131001,"TERMINAL",0,0,"643041",,terminal_output +1821,3131630,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3316923.1\r\nsrun: forcing job termination\r\nStep 2612, loss: 0.006791561841964722\r\nStep 2613, loss: 0.008019826374948025\r\nStep 2614, loss: 0.0063033089973032475\r\nStep 2615, loss: 0.006482157856225967\r\nStep 2616, loss: 0.007781367748975754\r\nStep 2617, loss: 0.007318116724491119\r\nStep 2618, loss: 0.0067173088900744915\r\nStep 2619, loss: 0.005896591115742922\r\nStep 2620, loss: 0.006044264417141676\r\nStep 2621, loss: 0.00743854558095336\r\nStep 2622, loss: 0.006313743535429239\r\nStep 2623, loss: 0.006164529826492071\r\nStep 2624, loss: 0.007657760288566351\r\nStep 2625, loss: 0.008027573116123676\r\nStep 2626, loss: 0.007742432411760092\r\nStep 2627, loss: 0.008176776580512524\r\nStep 2628, loss: 0.008598809130489826\r\nStep 2629, loss: 0.006883498281240463\r\nStep 2630, loss: 0.007491798605769873\r\nStep 2631, loss: 0.007078538183122873\r\nStep 2632, loss: 0.006278125569224358\r\nStep 2633, loss: 0.007349437568336725\r\nStep 2634, loss: 0.0065256329253315926\r\nStep 2635, loss: 0.006988814566284418\r\nStep 2636, loss: 0.009450173005461693\r\nStep 2637, loss: 0.007395721971988678\r\nStep 2638, loss: 0.006597508210688829\r\nStep 2639, loss: 0.008348207920789719\r\nStep 2640, loss: 0.008197437971830368\r\nStep 2641, loss: 0.007939758710563183\r\nStep 2642, loss: 0.008184662088751793\r\nStep 2643, loss: 0.007472028490155935\r\nStep 2644, loss: 0.006815717555582523\r\nStep 2645, loss: 0.006325994152575731\r\nStep 2646, loss: 0.006622747518122196\r\nStep 2647, loss: 0.006518843583762646\r\nStep 2648, loss: 0.00710684759542346\r\nStep 2649, loss: 0.00870906189084053\r\nStep 2650, loss: 0.006959839258342981\r\nStep 2651, loss: 0.007403410971164703\r\nStep 2652, loss: 0.007567410357296467\r\nStep 2653, loss: 
0.007206164766103029\r\nStep 2654, loss: 0.006868516094982624\r\nStep 2655, loss: 0.008095523342490196\r\nStep 2656, loss: 0.0078019737266004086\r\nStep 2657, loss: 0.006755751557648182\r\nStep 2658, loss: 0.009217513725161552\r\nStep 2659, loss: 0.006370591931045055\r\nStep 2660, loss: 0.007040577474981546\r\nStep 2661, loss: 0.008188849315047264\r\nStep 2662, loss: 0.0076986681669950485\r\nStep 2663, loss: 0.0073158456943929195\r\nStep 2664, loss: 0.007145834621042013\r\nStep 2665, loss: 0.008665402419865131\r\nStep 2666, loss: 0.009344651363790035\r\nStep 2667, loss: 0.00942978449165821\r\nStep 2668, loss: 0.0073892222717404366\r\nStep 2669, loss: 0.007335971109569073\r\nStep 2670, loss: 0.007843065075576305\r\nStep 2671, loss: 0.007722088601440191\r\nStep 2672, loss: 0.0072429911233484745\r\nStep 2673, loss: 0.00785690825432539\r\nStep 2674, loss: 0.007569270674139261\r\nStep 2675, loss: 0.00658267131075263\r\nStep 2676, loss: 0.0062832823023200035\r\nStep 2677, loss: 0.008188135921955109\r\nStep 2678, loss: 0.007272929884493351\r\nStep 2679, loss: 0.00608580419793725\r\nStep 2680, loss: 0.006660285405814648\r\nStep 2681, loss: 0.00541948014870286\r\nStep 2682, loss: 0.007597365416586399\r\nStep 2683, loss: 0.007903697900474072\r\nStep 2684, loss: 0.0070220655761659145\r\nStep 2685, loss: 0.006794515065848827\r\nStep 2686, loss: 0.007172796875238419\r\nStep 2687, loss: 0.00825575552880764\r\nStep 2688, loss: 0.008619176223874092\r\nStep 2689, loss: 0.00754340598359704\r\nStep 2690, loss: 0.007522198371589184\r\nStep 2691, loss: 0.00805687252432108\r\nStep 2692, loss: 0.008638708852231503\r\nStep 2693, loss: 0.007212290074676275\r\nStep 2694, loss: 0.0073548839427530766\r\nStep 2695, loss: 0.0077036721631884575\r\nStep 2696, loss: 0.006982068531215191\r\nStep 2697, loss: 0.007031261455267668\r\nStep 2698, loss: 0.008332055993378162\r\nStep 2699, loss: 0.008362887427210808\r\nStep 2700, loss: 0.00808885507285595\r\nStep 2701, loss: 0.006484369747340679\r\nStep 2702, loss: 0.006959190126508474\r\nStep 2703, loss: 0.007829486392438412\r\nStep 2704, loss: 0.006844683084636927\r\nStep 2705, loss: 0.007465291768312454\r\nStep 2706, loss: 0.006647314876317978\r\nStep 2707, loss: 0.007918104529380798\r\nStep 2708, loss: 0.0074280197732150555\r\nStep 2709, loss: 0.007273267488926649\r\nStep 2612, loss: 0.006791561841964722\r\nStep 2613, loss: 0.008019826374948025\r\nStep 2614, loss: 0.0063033089973032475\r\nStep 2615, loss: 0.006482157856225967\r\nStep 2616, loss: 0.007781367748975754\r\nStep 2617, loss: 0.007318116724491119\r\nStep 2618, loss: 0.0067173088900744915\r\nStep 2619, loss: 0.005896591115742922\r\nStep 2620, loss: 0.006044264417141676\r\nStep 2621, loss: 0.00743854558095336\r\nStep 2622, loss: 0.006313743535429239\r\nStep 2623, loss: 0.006164529826492071\r\nStep 2624, loss: 0.007657760288566351\r\nStep 2625, loss: 0.008027573116123676\r\nStep 2626, loss: 0.007742432411760092\r\nStep 2627, loss: 0.008176776580512524\r\nStep 2628, loss: 0.008598809130489826\r\nStep 2629, loss: 0.006883498281240463\r\nStep 2630, loss: 0.007491798605769873\r\nStep 2631, loss: 0.007078538183122873\r\nStep 2632, loss: 0.006278125569224358\r\nStep 2633, loss: 0.007349437568336725\r\nStep 2634, loss: 0.0065256329253315926\r\nStep 2635, loss: 0.006988814566284418\r\nStep 2636, loss: 0.009450173005461693\r\nStep 2637, loss: 0.007395721971988678\r\nStep 2638, loss: 0.006597508210688829\r\nStep 2639, loss: 0.008348207920789719\r\nStep 2640, loss: 0.008197437971830368\r\nStep 2641, loss: 
0.007939758710563183\r\nStep 2642, loss: 0.008184662088751793\r\nStep 2643, loss: 0.007472028490155935\r\nStep 2644, loss: 0.006815717555582523\r\nStep 2645, loss: 0.006325994152575731\r\nStep 2646, loss: 0.006622747518122196\r\nStep 2647, loss: 0.006518843583762646\r\nStep 2648, loss: 0.00710684759542346\r\nStep 2649, loss: 0.00870906189084053\r\nStep 2650, loss: 0.006959839258342981\r\nStep 2651, loss: 0.007403410971164703\r\nStep 2652, loss: 0.007567410357296467\r\nStep 2653, loss: 0.007206164766103029\r\nStep 2654, loss: 0.006868516094982624\r\nStep 2655, loss: 0.008095523342490196\r\nStep 2656, loss: 0.0078019737266004086\r\nStep 2657, loss: 0.006755751557648182\r\nStep 2658, loss: 0.009217513725161552\r\nStep 2659, loss: 0.006370591931045055\r\nStep 2660, loss: 0.007040577474981546\r\nStep 2661, loss: 0.008188849315047264\r\nStep 2662, loss: 0.0076986681669950485\r\nStep 2663, loss: 0.0073158456943929195\r\nStep 2664, loss: 0.007145834621042013\r\nStep 2665, loss: 0.008665402419865131\r\nStep 2666, loss: 0.009344651363790035\r\nStep 2667, loss: 0.00942978449165821\r\nStep 2668, loss: 0.0073892222717404366\r\nStep 2669, loss: 0.007335971109569073\r\nStep 2670, loss: 0.007843065075576305\r\nStep 2671, loss: 0.007722088601440191\r\nStep 2672, loss: 0.0072429911233484745\r\nStep 2673, loss: 0.00785690825432539\r\nStep 2674, loss: 0.007569270674139261\r\nStep 2675, loss: 0.00658267131075263\r\nStep 2676, loss: 0.0062832823023200035\r\nStep 2677, loss: 0.008188135921955109\r\nStep 2678, loss: 0.007272929884493351\r\nStep 2679, loss: 0.00608580419793725\r\nStep 2680, loss: 0.006660285405814648\r\nStep 2681, loss: 0.00541948014870286\r\nStep 2682, loss: 0.007597365416586399\r\nStep 2683, loss: 0.007903697900474072\r\nStep 2684, loss: 0.0070220655761659145\r\nStep 2685, loss: 0.006794515065848827\r\nStep 2686, loss: 0.007172796875238419\r\nStep 2687, loss: 0.00825575552880764\r\nStep 2688, loss: 0.008619176223874092\r\nStep 2689, loss: 0.00754340598359704\r\nStep 2690, loss: 0.007522198371589184\r\nStep 2691, loss: 0.00805687252432108\r\nStep 2692, loss: 0.008638708852231503\r\nStep 2693, loss: 0.007212290074676275\r\nStep 2694, loss: 0.0073548839427530766\r\nStep 2695, loss: 0.0077036721631884575\r\nStep 2696, loss: 0.006982068531215191\r\nStep 2697, loss: 0.007031261455267668\r\nStep 2698, loss: 0.008332055993378162\r\nStep 2699, loss: 0.008362887427210808\r\nStep 2700, loss: 0.00808885507285595\r\nStep 2701, loss: 0.006484369747340679\r\nStep 2702, loss: 0.006959190126508474\r\nStep 2703, loss: 0.007829486392438412\r\nStep 2704, loss: 0.006844683084636927\r\nStep 2705, loss: 0.007465291768312454\r\nStep 2706, loss: 0.006647314876317978\r\nStep 2707, loss: 0.007918104529380798\r\nStep 2708, loss: 0.0074280197732150555\r\nStep 2709, loss: 0.007273267488926649\r\nStep 2612, loss: 0.006791561841964722\r\nStep 2613, loss: 0.008019826374948025\r\nStep 2614, loss: 0.0063033089973032475\r\nStep 2615, loss: 0.006482157856225967\r\nStep 2616, loss: 0.007781367748975754\r\nStep 2617, loss: 0.007318116724491119\r\nStep 2618, loss: 0.0067173088900744915\r\nStep 2619, loss: 0.005896591115742922\r\nStep 2620, loss: 0.006044264417141676\r\nStep 2621, loss: 0.00743854558095336\r\nStep 2622, loss: 0.006313743535429239\r\nStep 2623, loss: 0.006164529826492071\r\nStep 2624, loss: 0.007657760288566351\r\nStep 2625, loss: 0.008027573116123676\r\nStep 2626, loss: 0.007742432411760092\r\nStep 2627, loss: 0.008176776580512524\r\nStep 2628, loss: 0.008598809130489826\r\nStep 2629, loss: 0.006883498281240463\r\nStep 
2630, loss: 0.007491798605769873\r\nStep 2631, loss: 0.007078538183122873\r\nStep 2632, loss: 0.006278125569224358\r\nStep 2633, loss: 0.007349437568336725\r\nStep 2634, loss: 0.0065256329253315926\r\nStep 2635, loss: 0.006988814566284418\r\nStep 2636, loss: 0.009450173005461693\r\nStep 2637, loss: 0.007395721971988678\r\nStep 2638, loss: 0.006597508210688829\r\nStep 2639, loss: 0.008348207920789719\r\nStep 2640, loss: 0.008197437971830368\r\nStep 2641, loss: 0.007939758710563183\r\nStep 2642, loss: 0.008184662088751793\r\nStep 2643, loss: 0.007472028490155935\r\nStep 2644, loss: 0.006815717555582523\r\nStep 2645, loss: 0.006325994152575731\r\nStep 2646, loss: 0.006622747518122196\r\nStep 2647, loss: 0.006518843583762646\r\nStep 2648, loss: 0.00710684759542346\r\nStep 2649, loss: 0.00870906189084053\r\nStep 2650, loss: 0.006959839258342981\r\nStep 2651, loss: 0.007403410971164703\r\nStep 2652, loss: 0.007567410357296467\r\nStep 2653, loss: 0.007206164766103029\r\nStep 2654, loss: 0.006868516094982624\r\nStep 2655, loss: 0.008095523342490196\r\nStep 2656, loss: 0.0078019737266004086\r\nStep 2657, loss: 0.006755751557648182\r\nStep 2658, loss: 0.009217513725161552\r\nStep 2659, loss: 0.006370591931045055\r\nStep 2660, loss: 0.007040577474981546\r\nStep 2661, loss: 0.008188849315047264\r\nStep 2662, loss: 0.0076986681669950485\r\nStep 2663, loss: 0.0073158456943929195\r\nStep 2664, loss: 0.007145834621042013\r\nStep 2665, loss: 0.008665402419865131\r\nStep 2666, loss: 0.009344651363790035\r\nStep 2667, loss: 0.00942978449165821\r\nStep 2668, loss: 0.0073892222717404366\r\nStep 2669, loss: 0.007335971109569073\r\nStep 2670, loss: 0.007843065075576305\r\nStep 2671, loss: 0.007722088601440191\r\nStep 2672, loss: 0.0072429911233484745\r\nStep 2673, loss: 0.00785690825432539\r\nStep 2674, loss: 0.007569270674139261\r\nStep 2675, loss: 0.00658267131075263\r\nStep 2676, loss: 0.0062832823023200035\r\nStep 2677, loss: 0.008188135921955109\r\nStep 2678, loss: 0.007272929884493351\r\nStep 2679, loss: 0.00608580419793725\r\nStep 2680, loss: 0.006660285405814648\r\nStep 2681, loss: 0.00541948014870286\r\nStep 2682, loss: 0.007597365416586399\r\nStep 2683, loss: 0.007903697900474072\r\nStep 2684, loss: 0.0070220655761659145\r\nStep 2685, loss: 0.006794515065848827\r\nStep 2686, loss: 0.007172796875238419\r\nStep 2687, loss: 0.00825575552880764\r\nStep 2688, loss: 0.008619176223874092\r\nStep 2689, loss: 0.00754340598359704\r\nStep 2690, loss: 0.007522198371589184\r\nStep 2691, loss: 0.00805687252432108\r\nStep 2692, loss: 0.008638708852231503\r\nStep 2693, loss: 0.007212290074676275\r\nStep 2694, loss: 0.0073548839427530766\r\nStep 2695, loss: 0.0077036721631884575\r\nStep 2696, loss: 0.006982068531215191\r\nStep 2697, loss: 0.007031261455267668\r\nStep 2698, loss: 0.008332055993378162\r\nStep 2699, loss: 0.008362887427210808\r\nStep 2700, loss: 0.00808885507285595\r\nStep 2701, loss: 0.006484369747340679\r\nStep 2702, loss: 0.006959190126508474\r\nStep 2703, loss: 0.007829486392438412\r\nStep 2704, loss: 0.006844683084636927\r\nStep 2705, loss: 0.007465291768312454\r\nStep 2706, loss: 0.006647314876317978\r\nStep 2707, loss: 0.007918104529380798\r\nStep 2708, loss: 0.0074280197732150555\r\nStep 2709, loss: 0.007273267488926649\r\nStep 2612, loss: 0.006791561841964722\r\nStep 2613, loss: 0.008019826374948025\r\nStep 2614, loss: 0.0063033089973032475\r\nStep 2615, loss: 0.006482157856225967\r\nStep 2616, loss: 0.007781367748975754\r\nStep 2617, loss: 0.007318116724491119\r\nStep 2618, loss: 
0.0067173088900744915\r\nStep 2619, loss: 0.005896591115742922\r\nStep 2620, loss: 0.006044264417141676\r\nStep 2621, loss: 0.00743854558095336\r\nStep 2622, loss: 0.006313743535429239\r\nStep 2623, loss: 0.006164529826492071\r\nStep 2624, loss: 0.007657760288566351\r\nStep 2625, loss: 0.008027573116123676\r\nStep 2626, loss: 0.007742432411760092\r\nStep 2627, loss: 0.008176776580512524\r\nStep 2628, loss: 0.008598809130489826\r\nStep 2629, loss: 0.006883498281240463\r\nStep 2630, loss: 0.007491798605769873\r\nStep 2631, loss: 0.007078538183122873\r\nStep 2632, loss: 0.006278125569224358\r\nStep 2633, loss: 0.007349437568336725\r\nStep 2634, loss: 0.0065256329253315926\r\nStep 2635, loss: 0.006988814566284418\r\nStep 2636, loss: 0.009450173005461693\r\nStep 2637, loss: 0.007395721971988678\r\nStep 2612, loss: 0.006791561841964722\r\nStep 2613, loss: 0.008019826374948025\r\nStep 2614, loss: 0.0063033089973032475\r\nStep 2615, loss: 0.006482157856225967\r\nStep 2616, loss: 0.007781367748975754\r\nStep 2617, loss: 0.007318116724491119\r\nStep 2618, loss: 0.0067173088900744915\r\nStep 2619, loss: 0.005896591115742922\r\nStep 2620, loss: 0.006044264417141676\r\nStep 2621, loss: 0.00743854558095336\r\nStep 2622, loss: 0.006313743535429239\r\nStep 2623, loss: 0.006164529826492071\r\nStep 2624, loss: 0.007657760288566351\r\nStep 2625, loss: 0.008027573116123676\r\nStep 2626, loss: 0.007742432411760092\r\nStep 2627, loss: 0.008176776580512524\r\nStep 2628, loss: 0.008598809130489826\r\nStep 2629, loss: 0.006883498281240463\r\nStep 2630, loss: 0.007491798605769873\r\nStep 2631, loss: 0.007078538183122873\r\nStep 2632, loss: 0.006278125569224358\r\nStep 2633, loss: 0.007349437568336725\r\nStep 2634, loss: 0.0065256329253315926\r\nStep 2635, loss: 0.006988814566284418\r\nStep 2636, loss: 0.009450173005461693\r\nStep 2637, loss: 0.007395721971988678\r\nStep 2638, loss: 0.006597508210688829\r\nStep 2639, loss: 0.008348207920789719\r\nStep 2640, loss: 0.008197437971830368\r\nStep 2641, loss: 0.007939758710563183\r\nStep 2642, loss: 0.008184662088751793\r\nStep 2643, loss: 0.007472028490155935\r\nStep 2644, loss: 0.006815717555582523\r\nStep 2645, loss: 0.006325994152575731\r\nStep 2646, loss: 0.006622747518122196\r\nStep 2647, loss: 0.006518843583762646\r\nStep 2648, loss: 0.00710684759542346\r\nStep 2649, loss: 0.00870906189084053\r\nStep 2650, loss: 0.006959839258342981\r\nStep 2651, loss: 0.007403410971164703\r\nStep 2652, loss: 0.007567410357296467\r\nStep 2653, loss: 0.007206164766103029\r\nStep 2654, loss: 0.006868516094982624\r\nStep 2655, loss: 0.008095523342490196\r\nStep 2656, loss: 0.0078019737266004086\r\nStep 2657, loss: 0.006755751557648182\r\nStep 2658, loss: 0.009217513725161552\r\nStep 2659, loss: 0.006370591931045055\r\nStep 2660, loss: 0.007040577474981546\r\nStep 2661, loss: 0.008188849315047264\r\nStep 2662, loss: 0.0076986681669950485\r\nStep 2663, loss: 0.0073158456943929195\r\nStep 2612, loss: 0.006791561841964722\r\nStep 2613, loss: 0.008019826374948025\r\nStep 2614, loss: 0.0063033089973032475\r\nStep 2615, loss: 0.006482157856225967\r\nStep 2616, loss: 0.007781367748975754\r\nStep 2617, loss: 0.007318116724491119\r\nStep 2618, loss: 0.0067173088900744915\r\nStep 2619, loss: 0.005896591115742922\r\nStep 2620, loss: 0.006044264417141676\r\nStep 2621, loss: 0.00743854558095336\r\nStep 2622, loss: 0.006313743535429239\r\nStep 2623, loss: 0.006164529826492071\r\nStep 2624, loss: 0.007657760288566351\r\nStep 2625, loss: 0.008027573116123676\r\nStep 2626, loss: 
0.007742432411760092\r\nStep 2627, loss: 0.008176776580512524\r\nStep 2628, loss: 0.008598809130489826\r\nStep 2629, loss: 0.006883498281240463\r\nStep 2630, loss: 0.007491798605769873\r\nStep 2631, loss: 0.007078538183122873\r\nStep 2632, loss: 0.006278125569224358\r\nStep 2633, loss: 0.007349437568336725\r\nStep 2634, loss: 0.0065256329253315926\r\nStep 2635, loss: 0.006988814566284418\r\nStep 2636, loss: 0.009450173005461693\r\nStep 2637, loss: 0.007395721971988678\r\nStep 2638, loss: 0.006597508210688829\r\nStep 2639, loss: 0.008348207920789719\r\nStep 2640, loss: 0.008197437971830368\r\nStep 2641, loss: 0.007939758710563183\r\nStep 2642, loss: 0.008184662088751793\r\nStep 2643, loss: 0.007472028490155935\r\nStep 2644, loss: 0.006815717555582523\r\nStep 2645, loss: 0.006325994152575731\r\nStep 2646, loss: 0.006622747518122196\r\nStep 2647, loss: 0.006518843583762646\r\nStep 2648, loss: 0.00710684759542346\r\nStep 2649, loss: 0.00870906189084053\r\nStep 2650, loss: 0.006959839258342981\r\nStep 2651, loss: 0.007403410971164703\r\nStep 2652, loss: 0.007567410357296467\r\nStep 2653, loss: 0.007206164766103029\r\nStep 2654, loss: 0.006868516094982624\r\nStep 2655, loss: 0.008095523342490196\r\nStep 2656, loss: 0.0078019737266004086\r\nStep 2657, loss: 0.006755751557648182\r\nStep 2658, loss: 0.009217513725161552\r\nStep 2659, loss: 0.006370591931045055\r\nStep 2660, loss: 0.007040577474981546\r\nStep 2661, loss: 0.008188849315047264\r\nStep 2662, loss: 0.0076986681669950485\r\nStep 2663, loss: 0.0073158456943929195\r\nStep 2664, loss: 0.007145834621042013\r\nStep 2665, loss: 0.008665402419865131\r\nStep 2666, loss: 0.009344651363790035\r\nStep 2667, loss: 0.00942978449165821\r\nStep 2668, loss: 0.0073892222717404366\r\nStep 2669, loss: 0.007335971109569073\r\nStep 2670, loss: 0.007843065075576305\r\nStep 2671, loss: 0.007722088601440191\r\nStep 2672, loss: 0.0072429911233484745\r\nStep 2673, loss: 0.00785690825432539\r\nStep 2674, loss: 0.007569270674139261\r\nStep 2675, loss: 0.00658267131075263\r\nStep 2676, loss: 0.0062832823023200035\r\nStep 2677, loss: 0.008188135921955109\r\nStep 2678, loss: 0.007272929884493351\r\nStep 2679, loss: 0.00608580419793725\r\nStep 2680, loss: 0.006660285405814648\r\nStep 2681, loss: 0.00541948014870286\r\nStep 2682, loss: 0.007597365416586399\r\nStep 2683, loss: 0.007903697900474072\r\nStep 2684, loss: 0.0070220655761659145\r\nStep 2685, loss: 0.006794515065848827\r\nStep 2686, loss: 0.007172796875238419\r\nStep 2687, loss: 0.00825575552880764\r\nStep 2688, loss: 0.008619176223874092\r\nStep 2689, loss: 0.00754340598359704\r\nStep 2690, loss: 0.007522198371589184\r\nStep 2638, loss: 0.006597508210688829\r\nStep 2639, loss: 0.008348207920789719\r\nStep 2640, loss: 0.008197437971830368\r\nStep 2641, loss: 0.007939758710563183\r\nStep 2642, loss: 0.008184662088751793\r\nStep 2643, loss: 0.007472028490155935\r\nStep 2644, loss: 0.006815717555582523\r\nStep 2645, loss: 0.006325994152575731\r\nStep 2646, loss: 0.006622747518122196\r\nStep 2647, loss: 0.006518843583762646\r\nStep 2648, loss: 0.00710684759542346\r\nStep 2649, loss: 0.00870906189084053\r\nStep 2650, loss: 0.006959839258342981\r\nStep 2651, loss: 0.007403410971164703\r\nStep 2652, loss: 0.007567410357296467\r\nStep 2653, loss: 0.007206164766103029\r\nStep 2654, loss: 0.006868516094982624\r\nStep 2655, loss: 0.008095523342490196\r\nStep 2656, loss: 0.0078019737266004086\r\nStep 2657, loss: 0.006755751557648182\r\nStep 2658, loss: 0.009217513725161552\r\nStep 2659, loss: 0.006370591931045055\r\nStep 
2660, loss: 0.007040577474981546\r\nStep 2661, loss: 0.008188849315047264\r\nStep 2662, loss: 0.0076986681669950485\r\nStep 2663, loss: 0.0073158456943929195\r\nStep 2664, loss: 0.007145834621042013\r\nStep 2665, loss: 0.008665402419865131\r\nStep 2666, loss: 0.009344651363790035\r\nStep 2667, loss: 0.00942978449165821\r\nStep 2668, loss: 0.0073892222717404366\r\nStep 2669, loss: 0.007335971109569073\r\nStep 2670, loss: 0.007843065075576305\r\nStep 2671, loss: 0.007722088601440191\r\nStep 2672, loss: 0.0072429911233484745\r\nStep 2673, loss: 0.00785690825432539\r\nStep 2674, loss: 0.007569270674139261\r\nStep 2675, loss: 0.00658267131075263\r\nStep 2676, loss: 0.0062832823023200035\r\nStep 2677, loss: 0.008188135921955109\r\nStep 2678, loss: 0.007272929884493351\r\nStep 2679, loss: 0.00608580419793725\r\nStep 2680, loss: 0.006660285405814648\r\nStep 2681, loss: 0.00541948014870286\r\nStep 2682, loss: 0.007597365416586399\r\nStep 2683, loss: 0.007903697900474072\r\nStep 2684, loss: 0.0070220655761659145\r\nStep 2685, loss: 0.006794515065848827\r\nStep 2686, loss: 0.007172796875238419\r\nStep 2687, loss: 0.00825575552880764\r\nStep 2688, loss: 0.008619176223874092\r\nStep 2689, loss: 0.00754340598359704\r\nStep 2690, loss: 0.007522198371589184\r\nStep 2691, loss: 0.00805687252432108\r\nStep 2692, loss: 0.008638708852231503\r\nStep 2693, loss: 0.007212290074676275\r\nStep 2694, loss: 0.0073548839427530766\r\nStep 2695, loss: 0.0077036721631884575\r\nStep 2696, loss: 0.006982068531215191\r\nStep 2697, loss: 0.007031261455267668\r\nStep 2698, loss: 0.008332055993378162\r\nStep 2699, loss: 0.008362887427210808\r\nStep 2700, loss: 0.00808885507285595\r\nStep 2701, loss: 0.006484369747340679\r\nStep 2702, loss: 0.006959190126508474\r\nStep 2703, loss: 0.007829486392438412\r\nStep 2704, loss: 0.006844683084636927\r\nStep 2705, loss: 0.007465291768312454\r\nStep 2706, loss: 0.006647314876317978\r\nStep 2707, loss: 0.007918104529380798\r\nStep 2708, loss: 0.0074280197732150555\r\nStep 2709, loss: 0.007273267488926649\r\nStep 2664, loss: 0.007145834621042013\r\nStep 2665, loss: 0.008665402419865131\r\nStep 2666, loss: 0.009344651363790035\r\nStep 2667, loss: 0.00942978449165821\r\nStep 2668, loss: 0.0073892222717404366\r\nStep 2669, loss: 0.007335971109569073\r\nStep 2670, loss: 0.007843065075576305\r\nStep 2671, loss: 0.007722088601440191\r\nStep 2672, loss: 0.0072429911233484745\r\nStep 2673, loss: 0.00785690825432539\r\nStep 2674, loss: 0.007569270674139261\r\nStep 2675, loss: 0.00658267131075263\r\nStep 2676, loss: 0.0062832823023200035\r\nStep 2677, loss: 0.008188135921955109\r\nStep 2678, loss: 0.007272929884493351\r\nStep 2679, loss: 0.00608580419793725\r\nStep 2680, loss: 0.006660285405814648\r\nStep 2681, loss: 0.00541948014870286\r\nStep 2682, loss: 0.007597365416586399\r\nStep 2683, loss: 0.007903697900474072\r\nStep 2684, loss: 0.0070220655761659145\r\nStep 2685, loss: 0.006794515065848827\r\nStep 2686, loss: 0.007172796875238419\r\nStep 2687, loss: 0.00825575552880764\r\nStep 2688, loss: 0.008619176223874092\r\nStep 2689, loss: 0.00754340598359704\r\nStep 2690, loss: 0.007522198371589184\r\nStep 2691, loss: 0.00805687252432108\r\nStep 2692, loss: 0.008638708852231503\r\nStep 2693, loss: 0.007212290074676275\r\nStep 2694, loss: 0.0073548839427530766\r\nStep 2695, loss: 0.0077036721631884575\r\nStep 2696, loss: 0.006982068531215191\r\nStep 2697, loss: 0.007031261455267668\r\nStep 2698, loss: 0.008332055993378162\r\nStep 2699, loss: 0.008362887427210808\r\nStep 2700, loss: 
0.00808885507285595\r\nStep 2701, loss: 0.006484369747340679\r\nStep 2702, loss: 0.006959190126508474\r\nStep 2703, loss: 0.007829486392438412\r\nStep 2704, loss: 0.006844683084636927\r\nStep 2705, loss: 0.007465291768312454\r\nStep 2706, loss: 0.006647314876317978\r\nStep 2707, loss: 0.007918104529380798\r\nStep 2708, loss: 0.0074280197732150555\r\nStep 2709, loss: 0.007273267488926649\r\nStep 2612, loss: 0.006791561841964722\r\nStep 2613, loss: 0.008019826374948025\r\nStep 2614, loss: 0.0063033089973032475\r\nStep 2615, loss: 0.006482157856225967\r\nStep 2616, loss: 0.007781367748975754\r\nStep 2617, loss: 0.007318116724491119\r\nStep 2618, loss: 0.0067173088900744915\r\nStep 2619, loss: 0.005896591115742922\r\nStep 2620, loss: 0.006044264417141676\r\nStep 2621, loss: 0.00743854558095336\r\nStep 2622, loss: 0.006313743535429239\r\nStep 2623, loss: 0.006164529826492071\r\nStep 2624, loss: 0.007657760288566351\r\nStep 2625, loss: 0.008027573116123676\r\nStep 2626, loss: 0.007742432411760092\r\nStep 2627, loss: 0.008176776580512524\r\nStep 2628, loss: 0.008598809130489826\r\nStep 2629, loss: 0.006883498281240463\r\nStep 2630, loss: 0.007491798605769873\r\nStep 2631, loss: 0.007078538183122873\r\nStep 2632, loss: 0.006278125569224358\r\nStep 2633, loss: 0.007349437568336725\r\nStep 2634, loss: 0.0065256329253315926\r\nStep 2635, loss: 0.006988814566284418\r\nStep 2636, loss: 0.009450173005461693\r\nStep 2637, loss: 0.007395721971988678\r\nStep 2691, loss: 0.00805687252432108\r\nStep 2692, loss: 0.008638708852231503\r\nStep 2693, loss: 0.007212290074676275\r\nStep 2694, loss: 0.0073548839427530766\r\nStep 2695, loss: 0.0077036721631884575\r\nStep 2696, loss: 0.006982068531215191\r\nStep 2697, loss: 0.007031261455267668\r\nStep 2698, loss: 0.008332055993378162\r\nStep 2699, loss: 0.008362887427210808\r\nStep 2700, loss: 0.00808885507285595\r\nStep 2701, loss: 0.006484369747340679\r\nStep 2702, loss: 0.006959190126508474\r\nStep 2703, loss: 0.007829486392438412\r\nStep 2704, loss: 0.006844683084636927\r\nStep 2705, loss: 0.007465291768312454\r\nStep 2706, loss: 0.006647314876317978\r\nStep 2707, loss: 0.007918104529380798\r\nStep 2708, loss: 0.0074280197732150555\r\nStep 2709, loss: 0.007273267488926649\r\nStep 2638, loss: 0.006597508210688829\r\nStep 2639, loss: 0.008348207920789719\r\nStep 2640, loss: 0.008197437971830368\r\nStep 2641, loss: 0.007939758710563183\r\nStep 2642, loss: 0.008184662088751793\r\nStep 2643, loss: 0.007472028490155935\r\nStep 2644, loss: 0.006815717555582523\r\nStep 2645, loss: 0.006325994152575731\r\nStep 2646, loss: 0.006622747518122196\r\nStep 2647, loss: 0.006518843583762646\r\nStep 2648, loss: 0.00710684759542346\r\nStep 2649, loss: 0.00870906189084053\r\nStep 2650, loss: 0.006959839258342981\r\nStep 2651, loss: 0.007403410971164703\r\nStep 2652, loss: 0.007567410357296467\r\nStep 2653, loss: 0.007206164766103029\r\nStep 2654, loss: 0.006868516094982624\r\nStep 2655, loss: 0.008095523342490196\r\nStep 2656, loss: 0.0078019737266004086\r\nStep 2657, loss: 0.006755751557648182\r\nStep 2658, loss: 0.009217513725161552\r\nStep 2659, loss: 0.006370591931045055\r\nStep 2660, loss: 0.007040577474981546\r\nStep 2661, loss: 0.008188849315047264\r\nStep 2662, loss: 0.0076986681669950485\r\nStep 2663, loss: 0.0073158456943929195\r\nStep 2612, loss: 0.006791561841964722\r\nStep 2613, loss: 0.008019826374948025\r\nStep 2614, loss: 0.0063033089973032475\r\nStep 2615, loss: 0.006482157856225967\r\nStep 2616, loss: 0.007781367748975754\r\nStep 2617, loss: 
0.007318116724491119\r\nStep 2618, loss: 0.0067173088900744915\r\nStep 2619, loss: 0.005896591115742922\r\nStep 2620, loss: 0.006044264417141676\r\nStep 2621, loss: 0.00743854558095336\r\nStep 2622, loss: 0.006313743535429239\r\nStep 2623, loss: 0.006164529826492071\r\nStep 2624, loss: 0.007657760288566351\r\nStep 2625, loss: 0.008027573116123676\r\nStep 2626, loss: 0.007742432411760092\r\nStep 2627, loss: 0.008176776580512524\r\nStep 2628, loss: 0.008598809130489826\r\nStep 2629, loss: 0.006883498281240463\r\nStep 2630, loss: 0.007491798605769873\r\nStep 2631, loss: 0.007078538183122873\r\nStep 2632, loss: 0.006278125569224358\r\nStep 2633, loss: 0.007349437568336725\r\nStep 2634, loss: 0.0065256329253315926\r\nStep 2635, loss: 0.006988814566284418\r\nStep 2636, loss: 0.009450173005461693\r\nStep 2637, loss: 0.007395721971988678\r\nStep 2664, loss: 0.007145834621042013\r\nStep 2665, loss: 0.008665402419865131\r\nStep 2666, loss: 0.009344651363790035\r\nStep 2667, loss: 0.00942978449165821\r\nStep 2668, loss: 0.0073892222717404366\r\nStep 2669, loss: 0.007335971109569073\r\nStep 2670, loss: 0.007843065075576305\r\nStep 2671, loss: 0.007722088601440191\r\nStep 2672, loss: 0.0072429911233484745\r\nStep 2673, loss: 0.00785690825432539\r\nStep 2674, loss: 0.007569270674139261\r\nStep 2675, loss: 0.00658267131075263\r\nStep 2676, loss: 0.0062832823023200035\r\nStep 2677, loss: 0.008188135921955109\r\nStep 2678, loss: 0.007272929884493351\r\nStep 2679, loss: 0.00608580419793725\r\nStep 2680, loss: 0.006660285405814648\r\nStep 2681, loss: 0.00541948014870286\r\nStep 2682, loss: 0.007597365416586399\r\nStep 2683, loss: 0.007903697900474072\r\nStep 2684, loss: 0.0070220655761659145\r\nStep 2685, loss: 0.006794515065848827\r\nStep 2686, loss: 0.007172796875238419\r\nStep 2687, loss: 0.00825575552880764\r\nStep 2688, loss: 0.008619176223874092\r\nStep 2689, loss: 0.00754340598359704\r\nStep 2690, loss: 0.007522198371589184\r\nStep 2638, loss: 0.006597508210688829\r\nStep 2639, loss: 0.008348207920789719\r\nStep 2640, loss: 0.008197437971830368\r\nStep 2641, loss: 0.007939758710563183\r\nStep 2642, loss: 0.008184662088751793\r\nStep 2643, loss: 0.007472028490155935\r\nStep 2644, loss: 0.006815717555582523\r\nStep 2645, loss: 0.006325994152575731\r\nStep 2646, loss: 0.006622747518122196\r\nStep 2647, loss: 0.006518843583762646\r\nStep 2648, loss: 0.00710684759542346\r\nStep 2649, loss: 0.00870906189084053\r\nStep 2650, loss: 0.006959839258342981\r\nStep 2651, loss: 0.007403410971164703\r\nStep 2652, loss: 0.007567410357296467\r\nStep 2653, loss: 0.007206164766103029\r\nStep 2654, loss: 0.006868516094982624\r\nStep 2655, loss: 0.008095523342490196\r\nStep 2656, loss: 0.0078019737266004086\r\nStep 2657, loss: 0.006755751557648182\r\nStep 2658, loss: 0.009217513725161552\r\nStep 2659, loss: 0.006370591931045055\r\nStep 2660, loss: 0.007040577474981546\r\nStep 2661, loss: 0.008188849315047264\r\nStep 2662, loss: 0.0076986681669950485\r\nStep 2663, loss: 0.0073158456943929195\r\nStep 2691, loss: 0.00805687252432108\r\nStep 2692, loss: 0.008638708852231503\r\nStep 2693, loss: 0.007212290074676275\r\nStep 2694, loss: 0.0073548839427530766\r\nStep 2695, loss: 0.0077036721631884575\r\nStep 2696, loss: 0.006982068531215191\r\nStep 2697, loss: 0.007031261455267668\r\nStep 2698, loss: 0.008332055993378162\r\nStep 2699, loss: 0.008362887427210808\r\nStep 2700, loss: 0.00808885507285595\r\nStep 2701, loss: 0.006484369747340679\r\nStep 2702, loss: 0.006959190126508474\r\nStep 2703, loss: 0.007829486392438412\r\nStep 
2704, loss: 0.006844683084636927\r\nStep 2705, loss: 0.007465291768312454\r\nStep 2706, loss: 0.006647314876317978\r\nStep 2707, loss: 0.007918104529380798\r\nStep 2708, loss: 0.0074280197732150555\r\nStep 2709, loss: 0.007273267488926649\r\nStep 2664, loss: 0.007145834621042013\r\nStep 2665, loss: 0.008665402419865131\r\nStep 2666, loss: 0.009344651363790035\r\nStep 2667, loss: 0.00942978449165821\r\nStep 2668, loss: 0.0073892222717404366\r\nStep 2669, loss: 0.007335971109569073\r\nStep 2670, loss: 0.007843065075576305\r\nStep 2671, loss: 0.007722088601440191\r\nStep 2672, loss: 0.0072429911233484745\r\nStep 2673, loss: 0.00785690825432539\r\nStep 2674, loss: 0.007569270674139261\r\nStep 2675, loss: 0.00658267131075263\r\nStep 2676, loss: 0.0062832823023200035\r\nStep 2677, loss: 0.008188135921955109\r\nStep 2678, loss: 0.007272929884493351\r\nStep 2679, loss: 0.00608580419793725\r\nStep 2680, loss: 0.006660285405814648\r\nStep 2681, loss: 0.00541948014870286\r\nStep 2682, loss: 0.007597365416586399\r\nStep 2683, loss: 0.007903697900474072\r\nStep 2684, loss: 0.0070220655761659145\r\nStep 2685, loss: 0.006794515065848827\r\nStep 2686, loss: 0.007172796875238419\r\nStep 2687, loss: 0.00825575552880764\r\nStep 2688, loss: 0.008619176223874092\r\nStep 2689, loss: 0.00754340598359704\r\nStep 2690, loss: 0.007522198371589184\r\nStep 2691, loss: 0.00805687252432108\r\nStep 2692, loss: 0.008638708852231503\r\nStep 2693, loss: 0.007212290074676275\r\nStep 2694, loss: 0.0073548839427530766\r\nStep 2695, loss: 0.0077036721631884575\r\nStep 2696, loss: 0.006982068531215191\r\nStep 2697, loss: 0.007031261455267668\r\nStep 2698, loss: 0.008332055993378162\r\nStep 2699, loss: 0.008362887427210808\r\nStep 2700, loss: 0.00808885507285595\r\nStep 2701, loss: 0.006484369747340679\r\nStep 2702, loss: 0.006959190126508474\r\nStep 2703, loss: 0.007829486392438412\r\nStep 2704, loss: 0.006844683084636927\r\nStep 2705, loss: 0.007465291768312454\r\nStep 2706, loss: 0.006647314876317978\r\nStep 2707, loss: 0.007918104529380798\r\nStep 2708, loss: 0.0074280197732150555\r\nStep 2709, loss: 0.007273267488926649\r\nStep 2612, loss: 0.006791561841964722\r\nStep 2613, loss: 0.008019826374948025\r\nStep 2614, loss: 0.0063033089973032475\r\nStep 2615, loss: 0.006482157856225967\r\nStep 2616, loss: 0.007781367748975754\r\nStep 2617, loss: 0.007318116724491119\r\nStep 2618, loss: 0.0067173088900744915\r\nStep 2619, loss: 0.005896591115742922\r\nStep 2620, loss: 0.006044264417141676\r\nStep 2621, loss: 0.00743854558095336\r\nStep 2622, loss: 0.006313743535429239\r\nStep 2623, loss: 0.006164529826492071\r\nStep 2624, loss: 0.007657760288566351\r\nStep 2625, loss: 0.008027573116123676\r\nStep 2626, loss: 0.007742432411760092\r\nStep 2627, loss: 0.008176776580512524\r\nStep 2628, loss: 0.008598809130489826\r\nStep 2629, loss: 0.006883498281240463\r\nStep 2630, loss: 0.007491798605769873\r\nStep 2631, loss: 0.007078538183122873\r\nStep 2632, loss: 0.006278125569224358\r\nStep 2633, loss: 0.007349437568336725\r\nStep 2634, loss: 0.0065256329253315926\r\nStep 2635, loss: 0.006988814566284418\r\nStep 2636, loss: 0.009450173005461693\r\nStep 2637, loss: 0.007395721971988678\r\nStep 2638, loss: 0.006597508210688829\r\nStep 2639, loss: 0.008348207920789719\r\nStep 2640, loss: 0.008197437971830368\r\nStep 2641, loss: 0.007939758710563183\r\nStep 2642, loss: 0.008184662088751793\r\nStep 2643, loss: 0.007472028490155935\r\nStep 2644, loss: 0.006815717555582523\r\nStep 2645, loss: 0.006325994152575731\r\nStep 2646, loss: 
0.006622747518122196\r\nStep 2647, loss: 0.006518843583762646\r\nStep 2648, loss: 0.00710684759542346\r\nStep 2649, loss: 0.00870906189084053\r\nStep 2650, loss: 0.006959839258342981\r\nStep 2651, loss: 0.007403410971164703\r\nStep 2652, loss: 0.007567410357296467\r\nStep 2653, loss: 0.007206164766103029\r\nStep 2654, loss: 0.006868516094982624\r\nStep 2655, loss: 0.008095523342490196\r\nStep 2656, loss: 0.0078019737266004086\r\nStep 2657, loss: 0.006755751557648182\r\nStep 2658, loss: 0.009217513725161552\r\nStep 2659, loss: 0.006370591931045055\r\nStep 2660, loss: 0.007040577474981546\r\nStep 2661, loss: 0.008188849315047264\r\nStep 2662, loss: 0.0076986681669950485\r\nStep 2663, loss: 0.0073158456943929195\r\nStep 2664, loss: 0.007145834621042013\r\nStep 2665, loss: 0.008665402419865131\r\nStep 2666, loss: 0.009344651363790035\r\nStep 2667, loss: 0.00942978449165821\r\nStep 2668, loss: 0.0073892222717404366\r\nStep 2669, loss: 0.007335971109569073\r\nStep 2670, loss: 0.007843065075576305\r\nStep 2671, loss: 0.007722088601440191\r\nStep 2672, loss: 0.0072429911233484745\r\nStep 2673, loss: 0.00785690825432539\r\nStep 2674, loss: 0.007569270674139261\r\nStep 2675, loss: 0.00658267131075263\r\nStep 2676, loss: 0.0062832823023200035\r\nStep 2677, loss: 0.008188135921955109\r\nStep 2678, loss: 0.007272929884493351\r\nStep 2679, loss: 0.00608580419793725\r\nStep 2680, loss: 0.006660285405814648\r\nStep 2681, loss: 0.00541948014870286\r\nStep 2682, loss: 0.007597365416586399\r\nStep 2683, loss: 0.007903697900474072\r\nStep 2684, loss: 0.0070220655761659145\r\nStep 2685, loss: 0.006794515065848827\r\nStep 2686, loss: 0.007172796875238419\r\nStep 2687, loss: 0.00825575552880764\r\nStep 2688, loss: 0.008619176223874092\r\nStep 2689, loss: 0.00754340598359704\r\nStep 2690, loss: 0.007522198371589184\r\nStep 2691, loss: 0.00805687252432108\r\nStep 2692, loss: 0.008638708852231503\r\nStep 2693, loss: 0.007212290074676275\r\nStep 2694, loss: 0.0073548839427530766\r\nStep 2695, loss: 0.0077036721631884575\r\nStep 2696, loss: 0.006982068531215191\r\nStep 2697, loss: 0.007031261455267668\r\nStep 2698, loss: 0.008332055993378162\r\nStep 2699, loss: 0.008362887427210808\r\nStep 2700, loss: 0.00808885507285595\r\nStep 2701, loss: 0.006484369747340679\r\nStep 2702, loss: 0.006959190126508474\r\nStep 2703, loss: 0.007829486392438412\r\nStep 2704, loss: 0.006844683084636927\r\nStep 2705, loss: 0.007465291768312454\r\nStep 2706, loss: 0.006647314876317978\r\nStep 2707, loss: 0.007918104529380798\r\nStep 2708, loss: 0.0074280197732150555\r\nStep 2709, loss: 0.007273267488926649\r\nsrun: Job step aborted: Waiting up to 32 seconds for job step to finish.\r\nStep 2612, loss: 0.006791561841964722\r\nStep 2613, loss: 0.008019826374948025\r\nStep 2614, loss: 0.0063033089973032475\r\nStep 2615, loss: 0.006482157856225967\r\nStep 2616, loss: 0.007781367748975754\r\nStep 2617, loss: 0.007318116724491119\r\nStep 2618, loss: 0.0067173088900744915\r\nStep 2619, loss: 0.005896591115742922\r\nStep 2620, loss: 0.006044264417141676\r\nStep 2621, loss: 0.00743854558095336\r\nStep 2622, loss: 0.006313743535429239\r\nStep 2623, loss: 0.006164529826492071\r\nStep 2624, loss: 0.007657760288566351\r\nStep 2625, loss: 0.008027573116123676\r\nStep 2626, loss: 0.007742432411760092\r\nStep 2627, loss: 0.008176776580512524\r\nStep 2628, loss: 0.008598809130489826\r\nStep 2629, loss: 0.006883498281240463\r\nStep 2630, loss: 0.007491798605769873\r\nStep 2631, loss: 0.007078538183122873\r\nStep 2632, loss: 0.006278125569224358\r\nStep 2633, 
loss: 0.007349437568336725\r\nStep 2634, loss: 0.0065256329253315926\r\nStep 2635, loss: 0.006988814566284418\r\nStep 2636, loss: 0.009450173005461693\r\nStep 2637, loss: 0.007395721971988678\r\nStep 2638, loss: 0.006597508210688829\r\nStep 2639, loss: 0.008348207920789719\r\nStep 2640, loss: 0.008197437971830368\r\nStep 2641, loss: 0.007939758710563183\r\nStep 2642, loss: 0.008184662088751793\r\nStep 2643, loss: 0.007472028490155935\r\nStep 2644, loss: 0.006815717555582523\r\nStep 2645, loss: 0.006325994152575731\r\nStep 2646, loss: 0.006622747518122196\r\nStep 2647, loss: 0.006518843583762646\r\nStep 2648, loss: 0.00710684759542346\r\nStep 2649, loss: 0.00870906189084053\r\nStep 2650, loss: 0.006959839258342981\r\nStep 2651, loss: 0.007403410971164703\r\nStep 2652, loss: 0.007567410357296467\r\nStep 2653, loss: 0.007206164766103029\r\nStep 2654, loss: 0.006868516094982624\r\nStep 2655, loss: 0.008095523342490196\r\nStep 2656, loss: 0.0078019737266004086\r\nStep 2657, loss: 0.006755751557648182\r\nStep 2658, loss: 0.009217513725161552\r\nStep 2659, loss: 0.006370591931045055\r\nStep 2660, loss: 0.007040577474981546\r\nStep 2661, loss: 0.008188849315047264\r\nStep 2662, loss: 0.0076986681669950485\r\nStep 2663, loss: 0.0073158456943929195\r\nStep 2664, loss: 0.007145834621042013\r\nStep 2665, loss: 0.008665402419865131\r\nStep 2666, loss: 0.009344651363790035\r\nStep 2667, loss: 0.00942978449165821\r\nStep 2668, loss: 0.0073892222717404366\r\nStep 2669, loss: 0.007335971109569073\r\nStep 2670, loss: 0.007843065075576305\r\nStep 2671, loss: 0.007722088601440191\r\nStep 2672, loss: 0.0072429911233484745\r\nStep 2673, loss: 0.00785690825432539\r\nStep 2674, loss: 0.007569270674139261\r\nStep 2675, loss: 0.00658267131075263\r\nStep 2676, loss: 0.0062832823023200035\r\nStep 2677, loss: 0.008188135921955109\r\nStep 2678, loss: 0.007272929884493351\r\nStep 2679, loss: 0.00608580419793725\r\nStep 2680, loss: 0.006660285405814648\r\nStep 2681, loss: 0.00541948014870286\r\nStep 2682, loss: 0.007597365416586399\r\nStep 2683, loss: 0.007903697900474072\r\nStep 2684, loss: 0.0070220655761659145\r\nStep 2685, loss: 0.006794515065848827\r\nStep 2686, loss: 0.007172796875238419\r\nStep 2687, loss: 0.00825575552880764\r\nStep 2688, loss: 0.008619176223874092\r\nStep 2689, loss: 0.00754340598359704\r\nStep 2690, loss: 0.007522198371589184\r\nStep 2691, loss: 0.00805687252432108\r\nStep 2692, loss: 0.008638708852231503\r\nStep 2693, loss: 0.007212290074676275\r\nStep 2694, loss: 0.0073548839427530766\r\nStep 2695, loss: 0.0077036721631884575\r\nStep 2696, loss: 0.006982068531215191\r\nStep 2697, loss: 0.007031261455267668\r\nStep 2698, loss: 0.008332055993378162\r\nStep 2699, loss: 0.008362887427210808\r\nStep 2700, loss: 0.00808885507285595\r\nStep 2701, loss: 0.006484369747340679\r\nStep 2702, loss: 0.006959190126508474\r\nStep 2703, loss: 0.007829486392438412\r\nStep 2704, loss: 0.006844683084636927\r\nStep 2705, loss: 0.007465291768312454\r\nStep 2706, loss: 0.006647314876317978\r\nStep 2707, loss: 0.007918104529380798\r\nStep 2708, loss: 0.0074280197732150555\r\nStep 2709, loss: 0.007273267488926649\r\nStep 2612, loss: 0.006791561841964722\r\nStep 2613, loss: 0.008019826374948025\r\nStep 2614, loss: 0.0063033089973032475\r\nStep 2615, loss: 0.006482157856225967\r\nStep 2616, loss: 0.007781367748975754\r\nStep 2617, loss: 0.007318116724491119\r\nStep 2618, loss: 0.0067173088900744915\r\nStep 2619, loss: 0.005896591115742922\r\nStep 2620, loss: 0.006044264417141676\r\nStep 2621, loss: 
0.00743854558095336\r\nStep 2622, loss: 0.006313743535429239\r\nStep 2623, loss: 0.006164529826492071\r\nStep 2624, loss: 0.007657760288566351\r\nStep 2625, loss: 0.008027573116123676\r\nStep 2626, loss: 0.007742432411760092\r\nStep 2627, loss: 0.008176776580512524\r\nStep 2628, loss: 0.008598809130489826\r\nStep 2629, loss: 0.006883498281240463\r\nStep 2630, loss: 0.007491798605769873\r\nStep 2631, loss: 0.007078538183122873\r\nStep 2632, loss: 0.006278125569224358\r\nStep 2633, loss: 0.007349437568336725\r\nStep 2634, loss: 0.0065256329253315926\r\nStep 2635, loss: 0.006988814566284418\r\nStep 2636, loss: 0.009450173005461693\r\nStep 2637, loss: 0.007395721971988678\r\nStep 2638, loss: 0.006597508210688829\r\nStep 2639, loss: 0.008348207920789719\r\nStep 2640, loss: 0.008197437971830368\r\nStep 2641, loss: 0.007939758710563183\r\nStep 2642, loss: 0.008184662088751793\r\nStep 2643, loss: 0.007472028490155935\r\nStep 2644, loss: 0.006815717555582523\r\nStep 2645, loss: 0.006325994152575731\r\nStep 2646, loss: 0.006622747518122196\r\nStep 2647, loss: 0.006518843583762646\r\nStep 2648, loss: 0.00710684759542346\r\nStep 2649, loss: 0.00870906189084053\r\nStep 2650, loss: 0.006959839258342981\r\nStep 2651, loss: 0.007403410971164703\r\nStep 2652, loss: 0.007567410357296467\r\nStep 2653, loss: 0.007206164766103029\r\nStep 2654, loss: 0.006868516094982624\r\nStep 2655, loss: 0.008095523342490196\r\nStep 2656, loss: 0.0078019737266004086\r\nStep 2657, loss: 0.006755751557648182\r\nStep 2658, loss: 0.009217513725161552\r\nStep 2659, loss: 0.006370591931045055\r\nStep 2660, loss: 0.007040577474981546\r\nStep 2661, loss: 0.008188849315047264\r\nStep 2662, loss: 0.0076986681669950485\r\nStep 2663, loss: 0.0073158456943929195\r\nStep 2664, loss: 0.007145834621042013\r\nStep 2665, loss: 0.008665402419865131\r\nStep 2666, loss: 0.009344651363790035\r\nStep 2667, loss: 0.00942978449165821\r\nStep 2668, loss: 0.0073892222717404366\r\nStep 2669, loss: 0.007335971109569073\r\nStep 2670, loss: 0.007843065075576305\r\nStep 2671, loss: 0.007722088601440191\r\nStep 2672, loss: 0.0072429911233484745\r\nStep 2673, loss: 0.00785690825432539\r\nStep 2674, loss: 0.007569270674139261\r\nStep 2675, loss: 0.00658267131075263\r\nStep 2676, loss: 0.0062832823023200035\r\nStep 2677, loss: 0.008188135921955109\r\nStep 2678, loss: 0.007272929884493351\r\nStep 2679, loss: 0.00608580419793725\r\nStep 2680, loss: 0.006660285405814648\r\nStep 2681, loss: 0.00541948014870286\r\nStep 2682, loss: 0.007597365416586399\r\nStep 2683, loss: 0.007903697900474072\r\nStep 2684, loss: 0.0070220655761659145\r\nStep 2685, loss: 0.006794515065848827\r\nStep 2686, loss: 0.007172796875238419\r\nStep 2687, loss: 0.00825575552880764\r\nStep 2688, loss: 0.008619176223874092\r\nStep 2689, loss: 0.00754340598359704\r\nStep 2690, loss: 0.007522198371589184\r\nStep 2691, loss: 0.00805687252432108\r\nStep 2692, loss: 0.008638708852231503\r\nStep 2693, loss: 0.007212290074676275\r\nStep 2694, loss: 0.0073548839427530766\r\nStep 2695, loss: 0.0077036721631884575\r\nStep 2696, loss: 0.006982068531215191\r\nStep 2697, loss: 0.007031261455267668\r\nStep 2698, loss: 0.008332055993378162\r\nStep 2699, loss: 0.008362887427210808\r\nStep 2700, loss: 0.00808885507285595\r\nStep 2701, loss: 0.006484369747340679\r\nStep 2702, loss: 0.006959190126508474\r\nStep 2703, loss: 0.007829486392438412\r\nStep 2704, loss: 0.006844683084636927\r\nStep 2705, loss: 0.007465291768312454\r\nStep 2706, loss: 0.006647314876317978\r\nStep 2707, loss: 0.007918104529380798\r\nStep 
2708, loss: 0.0074280197732150555\r\nStep 2709, loss: 0.007273267488926649\r\nStep 2612, loss: 0.006791561841964722\r\nStep 2613, loss: 0.008019826374948025\r\nStep 2614, loss: 0.0063033089973032475\r\nStep 2615, loss: 0.006482157856225967\r\nStep 2616, loss: 0.007781367748975754\r\nStep 2617, loss: 0.007318116724491119\r\nStep 2618, loss: 0.0067173088900744915\r\nStep 2619, loss: 0.005896591115742922\r\nStep 2620, loss: 0.006044264417141676\r\nStep 2621, loss: 0.00743854558095336\r\nStep 2622, loss: 0.006313743535429239\r\nStep 2623, loss: 0.006164529826492071\r\nStep 2624, loss: 0.007657760288566351\r\nStep 2625, loss: 0.008027573116123676\r\nStep 2626, loss: 0.007742432411760092\r\nStep 2627, loss: 0.008176776580512524\r\nStep 2628, loss: 0.008598809130489826\r\nStep 2629, loss: 0.006883498281240463\r\nStep 2630, loss: 0.007491798605769873\r\nStep 2631, loss: 0.007078538183122873\r\nStep 2632, loss: 0.006278125569224358\r\nStep 2633, loss: 0.007349437568336725\r\nStep 2634, loss: 0.0065256329253315926\r\nStep 2635, loss: 0.006988814566284418\r\nStep 2636, loss: 0.009450173005461693\r\nStep 2637, loss: 0.007395721971988678\r\nStep 2638, loss: 0.006597508210688829\r\nStep 2639, loss: 0.008348207920789719\r\nStep 2640, loss: 0.008197437971830368\r\nStep 2641, loss: 0.007939758710563183\r\nStep 2642, loss: 0.008184662088751793\r\nStep 2643, loss: 0.007472028490155935\r\nStep 2644, loss: 0.006815717555582523\r\nStep 2645, loss: 0.006325994152575731\r\nStep 2646, loss: 0.006622747518122196\r\nStep 2647, loss: 0.006518843583762646\r\nStep 2648, loss: 0.00710684759542346\r\nStep 2649, loss: 0.00870906189084053\r\nStep 2650, loss: 0.006959839258342981\r\nStep 2651, loss: 0.007403410971164703\r\nStep 2652, loss: 0.007567410357296467\r\nStep 2653, loss: 0.007206164766103029\r\nStep 2654, loss: 0.006868516094982624\r\nStep 2655, loss: 0.008095523342490196\r\nStep 2656, loss: 0.0078019737266004086\r\nStep 2657, loss: 0.006755751557648182\r\nStep 2658, loss: 0.009217513725161552\r\nStep 2659, loss: 0.006370591931045055\r\nStep 2660, loss: 0.007040577474981546\r\nStep 2661, loss: 0.008188849315047264\r\nStep 2662, loss: 0.0076986681669950485\r\nStep 2663, loss: 0.0073158456943929195\r\nStep 2664, loss: 0.007145834621042013\r\nStep 2665, loss: 0.008665402419865131\r\nStep 2666, loss: 0.009344651363790035\r\nStep 2667, loss: 0.00942978449165821\r\nStep 2668, loss: 0.0073892222717404366\r\nStep 2669, loss: 0.007335971109569073\r\nStep 2670, loss: 0.007843065075576305\r\nStep 2671, loss: 0.007722088601440191\r\nStep 2672, loss: 0.0072429911233484745\r\nStep 2673, loss: 0.00785690825432539\r\nStep 2674, loss: 0.007569270674139261\r\nStep 2675, loss: 0.00658267131075263\r\nStep 2676, loss: 0.0062832823023200035\r\nStep 2677, loss: 0.008188135921955109\r\nStep 2678, loss: 0.007272929884493351\r\nStep 2679, loss: 0.00608580419793725\r\nStep 2680, loss: 0.006660285405814648\r\nStep 2681, loss: 0.00541948014870286\r\nStep 2682, loss: 0.007597365416586399\r\nStep 2683, loss: 0.007903697900474072\r\nStep 2684, loss: 0.0070220655761659145\r\nStep 2685, loss: 0.006794515065848827\r\nStep 2686, loss: 0.007172796875238419\r\nStep 2687, loss: 0.00825575552880764\r\nStep 2688, loss: 0.008619176223874092\r\nStep 2689, loss: 0.00754340598359704\r\nStep 2690, loss: 0.007522198371589184\r\nStep 2691, loss: 0.00805687252432108\r\nStep 2692, loss: 0.008638708852231503\r\nStep 2693, loss: 0.007212290074676275\r\nStep 2694, loss: 0.0073548839427530766\r\nStep 2695, loss: 0.0077036721631884575\r\nStep 2696, loss: 
0.006982068531215191\r\nStep 2697, loss: 0.007031261455267668\r\nStep 2698, loss: 0.008332055993378162\r\nStep 2699, loss: 0.008362887427210808\r\nStep 2700, loss: 0.00808885507285595\r\nStep 2701, loss: 0.006484369747340679\r\nStep 2702, loss: 0.006959190126508474\r\nStep 2703, loss: 0.007829486392438412\r\nStep 2704, loss: 0.006844683084636927\r\nStep 2705, loss: 0.007465291768312454\r\nStep 2706, loss: 0.006647314876317978\r\nStep 2707, loss: 0.007918104529380798\r\nStep 2708, loss: 0.0074280197732150555\r\nStep 2709, loss: 0.007273267488926649\r\nslurmstepd: error: *** STEP 3316923.1 ON hkn0625 CANCELLED AT 2025-07-04T11:26:46 ***\r\n",,terminal_output +1822,3132066,"TERMINAL",0,0,"75152",,terminal_output +1823,3132760,"TERMINAL",0,0,"]0;tum_cte0515@hkn0625:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0625 jafar]$ ",,terminal_output +1824,3132943,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0625:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0625 jafar]$ ",,terminal_output +1825,3133097,"TERMINAL",0,0,"86263",,terminal_output +1826,3134215,"TERMINAL",0,0,"97374",,terminal_output +1827,3135223,"TERMINAL",0,0,"508485",,terminal_output +1828,3136266,"TERMINAL",0,0,"19596",,terminal_output +1829,3137289,"TERMINAL",0,0,"2106107",,terminal_output +1830,3138363,"TERMINAL",0,0,"31718",,terminal_output +1831,3139386,"TERMINAL",0,0,"42829",,terminal_output +1832,3140483,"TERMINAL",0,0,"539340",,terminal_output +1833,3141472,"TERMINAL",0,0,"644041",,terminal_output +1834,3142522,"TERMINAL",0,0,"75152",,terminal_output +1835,3143646,"TERMINAL",0,0,"86263",,terminal_output +1836,3144661,"TERMINAL",0,0,"97374",,terminal_output +1837,3145683,"TERMINAL",0,0,"7:008485",,terminal_output +1838,3146711,"TERMINAL",0,0,"19596",,terminal_output +1839,3147764,"TERMINAL",0,0,"2217218",,terminal_output +1840,3148812,"TERMINAL",0,0,"42829",,terminal_output +1841,3149860,"TERMINAL",0,0,"539350",,terminal_output +1842,3150905,"TERMINAL",0,0,"645041",,terminal_output +1843,3152033,"TERMINAL",0,0,"75152",,terminal_output +1844,3153002,"TERMINAL",0,0,"86263",,terminal_output +1845,3154082,"TERMINAL",0,0,"97374",,terminal_output +1846,3155097,"TERMINAL",0,0,"108485",,terminal_output +1847,3156138,"TERMINAL",0,0,"19596",,terminal_output +1848,3157257,"TERMINAL",0,0,"2306307",,terminal_output +1849,3158243,"TERMINAL",0,0,"31718",,terminal_output +1850,3159335,"TERMINAL",0,0,"42829",,terminal_output +1851,3160353,"TERMINAL",0,0,"53932:00",,terminal_output +1852,3161455,"TERMINAL",0,0,"647:0041",,terminal_output +1853,3162430,"TERMINAL",0,0,"75152",,terminal_output +1854,3163479,"TERMINAL",0,0,"86263",,terminal_output +1855,3164559,"TERMINAL",0,0,"97374",,terminal_output +1856,3165574,"TERMINAL",0,0,"208485",,terminal_output +1857,3166637,"TERMINAL",0,0,"19596",,terminal_output +1858,3167664,"TERMINAL",0,0,"2406407",,terminal_output +1859,3168727,"TERMINAL",0,0,"32829",,terminal_output +1860,3169773,"TERMINAL",0,0,"539310",,terminal_output +1861,3170818,"TERMINAL",0,0,"641041",,terminal_output +1862,3171981,"TERMINAL",0,0,"75152",,terminal_output +1863,3173026,"TERMINAL",0,0,"86263",,terminal_output +1864,3174048,"TERMINAL",0,0,"97374",,terminal_output +1865,3175043,"TERMINAL",0,0,"308485",,terminal_output +1866,3176205,"TERMINAL",0,0,"19596",,terminal_output +1867,3177181,"TERMINAL",0,0,"2506507",,terminal_output +1868,3178225,"TERMINAL",0,0,"31718",,terminal_output +1869,3179252,"TERMINAL",0,0,"42829",,terminal_output +1870,3180316,"TERMINAL",0,0,"539320",,terminal_output 
+1989,3305389,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",0,0,"",shellscript,tab
+1990,3305390,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1408,0,"",shellscript,selection_mouse
+1991,3305602,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1378,30,"m=64 \\n --num_latents=2048\n",shellscript,selection_mouse
+1992,3305689,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1275,133,"_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n
--num_latents=2048\n",shellscript,selection_mouse
+2015,3306576,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",453,955," $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource
.venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=7e-5 \\n --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2016,3306635,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",457,951,"\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=7e-5 \\n --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2017,3306969,"TERMINAL",0,0,"24:0064:007",,terminal_output +2018,3307009,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",489,919,"odule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=7e-5 \\n --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2019,3307050,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",520,888,"urce .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python 
train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=7e-5 \\n --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2020,3307051,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",714,694,"\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=7e-5 \\n --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2021,3307116,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",767,641,"\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=7e-5 \\n --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2022,3307116,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",903,505,"SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=7e-5 \\n --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2023,3307117,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",989,419,"ize=96 \\n --min_lr=7e-5 \\n --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2024,3307117,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1116,292,"\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse 
+2025,3307139,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1233,175,"tant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2026,3307161,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1259,149,"far \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2027,3307234,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1281,127,"tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2028,3307306,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1280,128,"$tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2029,3307435,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1257,151,"jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2030,3307436,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1228,180,"y instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2031,3307437,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1176,232,"gs tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2032,3307437,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1123,285,"name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2033,3307437,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1109,299,"--log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2034,3307438,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1072,336," --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2035,3307438,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1041,367," --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n 
--num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2036,3307466,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1021,387," --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2037,3307503,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",1001,407," --min_lr=7e-5 \\n --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2038,3307643,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",978,430," --batch_size=96 \\n --min_lr=7e-5 \\n --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2039,3307756,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",977,431," --batch_size=96 \\n --min_lr=7e-5 \\n --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2040,3307814,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",944,464," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=7e-5 \\n --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2041,3307841,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",943,465," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=7e-5 \\n --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2042,3307971,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",910,498,"srun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=7e-5 \\n --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n 
--name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2043,3307982,"TERMINAL",0,0,"31718",,terminal_output +2044,3309117,"TERMINAL",0,0,"42829",,terminal_output +2045,3310064,"TERMINAL",0,0,"539330",,terminal_output +2046,3311079,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,0,"\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,tab +2047,3311368,"TERMINAL",0,0,"643041",,terminal_output +2048,3311747,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",828,0,"",shellscript,selection_mouse +2049,3311934,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",818,10,"cords_dir\n",shellscript,selection_mouse +2050,3312063,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",718,110,"nizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +2051,3312064,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",654,174,"tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +2052,3312065,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",640,188," \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +2053,3312065,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",604,224,"og_checkpoint_interval=5 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +2054,3312065,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",603,225,"log_checkpoint_interval=5 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +2055,3312065,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",571,257,"-log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +2056,3312066,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",570,258,"--log_image_interval=500 \\n 
--log_checkpoint_interval=5 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +2057,3312098,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",549,279," --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +2058,3312124,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",548,280," --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +2059,3312150,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",547,281," --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +2060,3312179,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",527,301," --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +2061,3312204,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",526,302," --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +2062,3312230,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",504,324," --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +2063,3312231,"TERMINAL",0,0,"75152",,terminal_output +2064,3312318,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",471,357," --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse +2065,3312726,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",438,390,"srun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=5 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,selection_mouse 
+2066,3313213,"TERMINAL",0,0,"86263",,terminal_output +2067,3314220,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",438,390,"",shellscript,content +2068,3314309,"TERMINAL",0,0,"97374",,terminal_output +2069,3314694,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",438,0,"srun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=7e-5 \\n --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,content +2070,3315275,"TERMINAL",0,0,"508485",,terminal_output +2071,3315734,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",477,0,"",shellscript,selection_mouse +2072,3316338,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",440,0,"",shellscript,selection_mouse +2073,3316350,"TERMINAL",0,0,"19596",,terminal_output +2074,3316866,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",439,0,"",shellscript,selection_mouse +2075,3317413,"TERMINAL",0,0,"2106107",,terminal_output +2076,3318575,"TERMINAL",0,0,"31718",,terminal_output +2077,3319569,"TERMINAL",0,0,"42829",,terminal_output +2078,3320541,"TERMINAL",0,0,"539340",,terminal_output +2079,3321646,"TERMINAL",0,0,"644041",,terminal_output +2080,3322641,"TERMINAL",0,0,"75152",,terminal_output +2081,3323661,"TERMINAL",0,0,"86263",,terminal_output +2082,3324740,"TERMINAL",0,0,"97374",,terminal_output +2083,3325754,"TERMINAL",0,0,"30:009596",,terminal_output +2084,3326797,"TERMINAL",0,0,"2206207",,terminal_output +2085,3327850,"TERMINAL",0,0,"31718",,terminal_output +2086,3328906,"TERMINAL",0,0,"42829",,terminal_output +2087,3329951,"TERMINAL",0,0,"539350",,terminal_output +2088,3331032,"TERMINAL",0,0,"645041",,terminal_output +2089,3332095,"TERMINAL",0,0,"75152",,terminal_output +2090,3333114,"TERMINAL",0,0,"86263",,terminal_output +2091,3334158,"TERMINAL",0,0,"97374",,terminal_output +2092,3335230,"TERMINAL",0,0,"108485",,terminal_output +2093,3336264,"TERMINAL",0,0,"19596",,terminal_output +2094,3337302,"TERMINAL",0,0,"2306307",,terminal_output +2095,3338405,"TERMINAL",0,0,"31718",,terminal_output +2096,3339399,"TERMINAL",0,0,"42829",,terminal_output +2097,3340448,"TERMINAL",0,0,"53935:00",,terminal_output +2098,3341493,"TERMINAL",0,0,"6410:0041",,terminal_output +2099,3342602,"TERMINAL",0,0,"75152",,terminal_output +2100,3343571,"TERMINAL",0,0,"86263",,terminal_output +2101,3344652,"TERMINAL",0,0,"97374",,terminal_output +2102,3345673,"TERMINAL",0,0,"208485",,terminal_output +2103,3346732,"TERMINAL",0,0,"19596",,terminal_output +2104,3347757,"TERMINAL",0,0,"2417418",,terminal_output +2105,3348800,"TERMINAL",0,0,"42829",,terminal_output +2106,3349829,"TERMINAL",0,0,"539310",,terminal_output +2107,3350897,"TERMINAL",0,0,"641041",,terminal_output +2108,3351931,"TERMINAL",0,0,"75152",,terminal_output +2109,3353047,"TERMINAL",0,0,"86263",,terminal_output +2110,3354075,"TERMINAL",0,0,"97374",,terminal_output +2111,3355088,"TERMINAL",0,0,"308485",,terminal_output +2112,3356211,"TERMINAL",0,0,"19596",,terminal_output +2113,3357189,"TERMINAL",0,0,"2506507",,terminal_output +2114,3358270,"TERMINAL",0,0,"31718",,terminal_output +2115,3359282,"TERMINAL",0,0,"42829",,terminal_output +2116,3360328,"TERMINAL",0,0,"539320",,terminal_output +2117,3361444,"TERMINAL",0,0,"642041",,terminal_output 
+2118,3362467,"TERMINAL",0,0,"75152",,terminal_output +2119,3363491,"TERMINAL",0,0,"86263",,terminal_output +2120,3364514,"TERMINAL",0,0,"97374",,terminal_output +2121,3365541,"TERMINAL",0,0,"408485",,terminal_output +2122,3366667,"TERMINAL",0,0,"19596",,terminal_output +2123,3367610,"TERMINAL",0,0,"25:0065:007",,terminal_output +2124,3368660,"TERMINAL",0,0,"31718",,terminal_output +2125,3369755,"TERMINAL",0,0,"439330",,terminal_output +2126,3370769,"TERMINAL",0,0,"643041",,terminal_output +2127,3371827,"TERMINAL",0,0,"75152",,terminal_output +2128,3372881,"TERMINAL",0,0,"86263",,terminal_output +2129,3373920,"TERMINAL",0,0,"97374",,terminal_output +2130,3374970,"TERMINAL",0,0,"508485",,terminal_output +2131,3376142,"TERMINAL",0,0,"19596",,terminal_output +2132,3377088,"TERMINAL",0,0,"2106107",,terminal_output +2133,3378134,"TERMINAL",0,0,"31718",,terminal_output +2134,3379261,"TERMINAL",0,0,"42829",,terminal_output +2135,3380229,"TERMINAL",0,0,"539340",,terminal_output +2136,3381248,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",763,0,"",shellscript,selection_mouse +2137,3381318,"TERMINAL",0,0,"644041",,terminal_output +2138,3381824,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",704,0,"",shellscript,selection_mouse +2139,3382365,"TERMINAL",0,0,"75152",,terminal_output +2140,3383360,"TERMINAL",0,0,"86263",,terminal_output +2141,3384512,"TERMINAL",0,0,"97374",,terminal_output +2142,3385510,"TERMINAL",0,0,"1:008485",,terminal_output +2143,3386506,"TERMINAL",0,0,"19596",,terminal_output +2144,3387559,"TERMINAL",0,0,"2206207",,terminal_output +2145,3388681,"TERMINAL",0,0,"31718",,terminal_output +2146,3389477,"scripts_horeka/modelsize_scaling/lam/tester.sh",0,0,"",shellscript,tab +2147,3389768,"TERMINAL",0,0,"42829",,terminal_output +2148,3390369,"scripts_horeka/modelsize_scaling/lam/tester.sh",439,0,"",shellscript,selection_mouse +2149,3390370,"scripts_horeka/modelsize_scaling/lam/tester.sh",438,9,"XLA_FLAGS",shellscript,selection_mouse +2150,3390697,"scripts_horeka/modelsize_scaling/lam/tester.sh",438,11,"XLA_FLAGS=-",shellscript,selection_mouse +2151,3390709,"TERMINAL",0,0,"539350",,terminal_output +2152,3390752,"scripts_horeka/modelsize_scaling/lam/tester.sh",438,34,"XLA_FLAGS=--xla_gpu_autotune_level",shellscript,selection_mouse +2153,3390753,"scripts_horeka/modelsize_scaling/lam/tester.sh",438,79,"XLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir ",shellscript,selection_mouse +2154,3390753,"scripts_horeka/modelsize_scaling/lam/tester.sh",438,80,"XLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $",shellscript,selection_mouse +2155,3390780,"scripts_horeka/modelsize_scaling/lam/tester.sh",438,94,"XLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR",shellscript,selection_mouse +2156,3391000,"scripts_horeka/modelsize_scaling/lam/tester.sh",438,95,"XLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR ",shellscript,selection_mouse +2157,3391072,"scripts_horeka/modelsize_scaling/lam/tester.sh",438,34,"XLA_FLAGS=--xla_gpu_autotune_level",shellscript,selection_mouse +2158,3391560,"scripts_horeka/modelsize_scaling/lam/tester.sh",438,35,"XLA_FLAGS=--xla_gpu_autotune_level=",shellscript,selection_mouse +2159,3391739,"TERMINAL",0,0,"655152",,terminal_output +2160,3391973,"scripts_horeka/modelsize_scaling/lam/tester.sh",438,36,"XLA_FLAGS=--xla_gpu_autotune_level=0",shellscript,selection_mouse +2161,3392783,"TERMINAL",0,0,"86263",,terminal_output 
+2162,3393836,"TERMINAL",0,0,"97374",,terminal_output +2163,3394902,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,0,"",shellscript,tab +2164,3395147,"TERMINAL",0,0,"108485",,terminal_output +2165,3395612,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",476,0,"",shellscript,selection_mouse +2166,3395901,"TERMINAL",0,0,"19596",,terminal_output +2167,3396060,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",446,0,"",shellscript,selection_mouse +2168,3396977,"TERMINAL",0,0,"2306307",,terminal_output +2169,3397691,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",445,0,"",shellscript,selection_command +2170,3397970,"TERMINAL",0,0,"31718",,terminal_output +2171,3398358,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",438,0,"",shellscript,selection_command +2172,3399132,"TERMINAL",0,0,"42829",,terminal_output +2173,3399166,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",438,0,"XLA_FLAGS=--xla_gpu_autotune_level=0",shellscript,content +2174,3399459,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",474,0," ",shellscript,content +2175,3399461,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",475,0,"",shellscript,selection_keyboard +2176,3399780,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",474,0,"",shellscript,selection_command +2177,3400069,"TERMINAL",0,0,"53936:00",,terminal_output +2178,3401113,"TERMINAL",0,0,"641:0041",,terminal_output +2179,3402153,"TERMINAL",0,0,"75152",,terminal_output +2180,3403224,"TERMINAL",0,0,"86263",,terminal_output +2181,3404250,"TERMINAL",0,0,"97374",,terminal_output +2182,3405336,"TERMINAL",0,0,"208485",,terminal_output +2183,3406345,"TERMINAL",0,0,"19596",,terminal_output +2184,3407404,"TERMINAL",0,0,"2406407",,terminal_output +2185,3408454,"TERMINAL",0,0,"31718",,terminal_output +2186,3409572,"TERMINAL",0,0,"42829",,terminal_output +2187,3410602,"TERMINAL",0,0,"539310",,terminal_output +2188,3411723,"TERMINAL",0,0,"641041",,terminal_output +2189,3412752,"TERMINAL",0,0,"75152",,terminal_output +2190,3413717,"TERMINAL",0,0,"87374",,terminal_output +2191,3414768,"TERMINAL",0,0,"308485",,terminal_output +2192,3415823,"TERMINAL",0,0,"19596",,terminal_output +2193,3416871,"TERMINAL",0,0,"2506507",,terminal_output +2194,3417913,"TERMINAL",0,0,"31718",,terminal_output +2195,3418962,"TERMINAL",0,0,"42829",,terminal_output +2196,3420119,"TERMINAL",0,0,"539320",,terminal_output +2197,3421150,"TERMINAL",0,0,"642041",,terminal_output +2198,3422108,"TERMINAL",0,0,"75152",,terminal_output +2199,3423192,"TERMINAL",0,0,"86263",,terminal_output +2200,3424219,"TERMINAL",0,0,"97374",,terminal_output +2201,3425378,"TERMINAL",0,0,"408485",,terminal_output +2202,3426535,"TERMINAL",0,0,"19596",,terminal_output +2203,3427345,"TERMINAL",0,0,"26:0066:007",,terminal_output +2204,3428413,"TERMINAL",0,0,"31718",,terminal_output +2205,3429539,"TERMINAL",0,0,"42829",,terminal_output +2206,3430565,"TERMINAL",0,0,"539330",,terminal_output +2207,3431546,"TERMINAL",0,0,"643041",,terminal_output +2208,3432598,"TERMINAL",0,0,"75152",,terminal_output +2209,3433650,"TERMINAL",0,0,"86263",,terminal_output +2210,3434765,"TERMINAL",0,0,"97374",,terminal_output +2211,3435786,"TERMINAL",0,0,"509596",,terminal_output +2212,3436816,"TERMINAL",0,0,"2106107",,terminal_output +2213,3437855,"TERMINAL",0,0,"31718",,terminal_output +2214,3438905,"TERMINAL",0,0,"42829",,terminal_output +2215,3439985,"TERMINAL",0,0,"539340",,terminal_output +2216,3441004,"TERMINAL",0,0,"644041",,terminal_output +2217,3442129,"TERMINAL",0,0,"75152",,terminal_output 
+2218,3443161,"TERMINAL",0,0,"86263",,terminal_output +2219,3444186,"TERMINAL",0,0,"97374",,terminal_output +2220,3445219,"TERMINAL",0,0,"2:008485",,terminal_output +2221,3446335,"TERMINAL",0,0,"19596",,terminal_output +2222,3447358,"TERMINAL",0,0,"2206207",,terminal_output +2223,3448398,"TERMINAL",0,0,"31718",,terminal_output +2224,3449400,"TERMINAL",0,0,"42829",,terminal_output +2225,3450533,"TERMINAL",0,0,"539350",,terminal_output +2226,3451558,"TERMINAL",0,0,"645041",,terminal_output +2227,3452564,"TERMINAL",0,0,"75152",,terminal_output +2228,3453706,"TERMINAL",0,0,"86263",,terminal_output +2229,3454660,"TERMINAL",0,0,"97374",,terminal_output +2230,3455704,"TERMINAL",0,0,"108485",,terminal_output +2231,3456782,"TERMINAL",0,0,"1306307",,terminal_output +2232,3457786,"TERMINAL",0,0,"31718",,terminal_output +2233,3458827,"TERMINAL",0,0,"42829",,terminal_output +2234,3459885,"TERMINAL",0,0,"53937:00",,terminal_output +2235,3460932,"TERMINAL",0,0,"642:0041",,terminal_output +2236,3462004,"TERMINAL",0,0,"75152",,terminal_output +2237,3463127,"TERMINAL",0,0,"86263",,terminal_output +2238,3464152,"TERMINAL",0,0,"97374",,terminal_output +2239,3465486,"TERMINAL",0,0,"208485",,terminal_output +2240,3466511,"TERMINAL",0,0,"19596",,terminal_output +2241,3467539,"TERMINAL",0,0,"2406407",,terminal_output +2242,3468657,"TERMINAL",0,0,"31718",,terminal_output +2243,3469676,"TERMINAL",0,0,"42829",,terminal_output +2244,3470705,"TERMINAL",0,0,"539310",,terminal_output +2245,3471731,"TERMINAL",0,0,"641041",,terminal_output +2246,3472756,"TERMINAL",0,0,"76263",,terminal_output +2247,3473810,"TERMINAL",0,0,"97374",,terminal_output +2248,3474858,"TERMINAL",0,0,"308485",,terminal_output +2249,3475910,"TERMINAL",0,0,"19596",,terminal_output +2250,3476953,"TERMINAL",0,0,"2506507",,terminal_output +2251,3478078,"TERMINAL",0,0,"31718",,terminal_output +2252,3479041,"TERMINAL",0,0,"42829",,terminal_output +2253,3480131,"TERMINAL",0,0,"539320",,terminal_output +2254,3481150,"TERMINAL",0,0,"642041",,terminal_output +2255,3482175,"TERMINAL",0,0,"75152",,terminal_output +2256,3483301,"TERMINAL",0,0,"86263",,terminal_output +2257,3484327,"TERMINAL",0,0,"97374",,terminal_output +2258,3485289,"TERMINAL",0,0,"408485",,terminal_output +2259,3486332,"TERMINAL",0,0,"19596",,terminal_output +2260,3487396,"TERMINAL",0,0,"27:0067:007",,terminal_output +2261,3488522,"TERMINAL",0,0,"31718",,terminal_output +2262,3489566,"TERMINAL",0,0,"42829",,terminal_output +2263,3490570,"TERMINAL",0,0,"539330",,terminal_output +2264,3491595,"TERMINAL",0,0,"643041",,terminal_output +2265,3492640,"TERMINAL",0,0,"75152",,terminal_output +2266,3493646,"TERMINAL",0,0,"86263",,terminal_output +2267,3494771,"TERMINAL",0,0,"97374",,terminal_output +2268,3495806,"TERMINAL",0,0,"509596",,terminal_output +2269,3496836,"TERMINAL",0,0,"2106107",,terminal_output +2270,3497874,"TERMINAL",0,0,"31718",,terminal_output +2271,3498912,"TERMINAL",0,0,"42829",,terminal_output +2272,3499955,"TERMINAL",0,0,"539340",,terminal_output +2273,3501015,"TERMINAL",0,0,"644041",,terminal_output +2274,3502040,"TERMINAL",0,0,"75152",,terminal_output +2275,3503070,"TERMINAL",0,0,"86263",,terminal_output +2276,3504191,"TERMINAL",0,0,"97374",,terminal_output +2277,3505163,"TERMINAL",0,0,"3:008485",,terminal_output +2278,3506242,"TERMINAL",0,0,"19596",,terminal_output +2279,3507262,"TERMINAL",0,0,"2206207",,terminal_output +2280,3508395,"TERMINAL",0,0,"31718",,terminal_output +2281,3509315,"TERMINAL",0,0,"42829",,terminal_output 
+2282,3510361,"TERMINAL",0,0,"539350",,terminal_output +2283,3511406,"TERMINAL",0,0,"645041",,terminal_output +2284,3512485,"TERMINAL",0,0,"75152",,terminal_output +2285,3513513,"TERMINAL",0,0,"86263",,terminal_output +2286,3514554,"TERMINAL",0,0,"97374",,terminal_output +2287,3515596,"TERMINAL",0,0,"108485",,terminal_output +2288,3516632,"TERMINAL",0,0,"19596",,terminal_output +2289,3517711,"TERMINAL",0,0,"2306307",,terminal_output +2290,3518730,"TERMINAL",0,0,"31718",,terminal_output +2291,3519744,"TERMINAL",0,0,"43938:00",,terminal_output +2292,3520881,"TERMINAL",0,0,"643:0041",,terminal_output +2293,3521829,"TERMINAL",0,0,"75152",,terminal_output +2294,3522869,"TERMINAL",0,0,"86263",,terminal_output +2295,3523909,"TERMINAL",0,0,"97374",,terminal_output +2296,3524954,"TERMINAL",0,0,"208485",,terminal_output +2297,3525995,"TERMINAL",0,0,"19596",,terminal_output +2298,3527129,"TERMINAL",0,0,"2406407",,terminal_output +2299,3528064,"TERMINAL",0,0,"31718",,terminal_output +2300,3530213,"TERMINAL",0,0,"439310",,terminal_output +2301,3531328,"TERMINAL",0,0,"641041",,terminal_output +2302,3532268,"TERMINAL",0,0,"75152",,terminal_output +2303,3533309,"TERMINAL",0,0,"86263",,terminal_output +2304,3534401,"TERMINAL",0,0,"97374",,terminal_output +2305,3535408,"TERMINAL",0,0,"308485",,terminal_output +2306,3536457,"TERMINAL",0,0,"19596",,terminal_output +2307,3537575,"TERMINAL",0,0,"2506507",,terminal_output +2308,3538542,"TERMINAL",0,0,"31718",,terminal_output +2309,3539597,"TERMINAL",0,0,"42829",,terminal_output +2310,3540650,"TERMINAL",0,0,"539320",,terminal_output +2311,3541172,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,0,"",shellscript,tab +2312,3541173,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",582,0,"",shellscript,selection_mouse +2313,3541313,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",581,0,"",shellscript,selection_command +2314,3541696,"TERMINAL",0,0,"642041",,terminal_output +2315,3542751,"TERMINAL",0,0,"76263",,terminal_output +2316,3543841,"TERMINAL",0,0,"97374",,terminal_output +2317,3544844,"TERMINAL",0,0,"408485",,terminal_output +2318,3545894,"TERMINAL",0,0,"19596",,terminal_output +2319,3546924,"TERMINAL",0,0,"28:0068:007",,terminal_output +2320,3547980,"TERMINAL",0,0,"31718",,terminal_output +2321,3549039,"TERMINAL",0,0,"42829",,terminal_output +2322,3550225,"TERMINAL",0,0,"539330",,terminal_output +2323,3551183,"TERMINAL",0,0,"643041",,terminal_output +2324,3552219,"TERMINAL",0,0,"75152",,terminal_output +2325,3553347,"TERMINAL",0,0,"86263",,terminal_output +2326,3554366,"TERMINAL",0,0,"97374",,terminal_output +2327,3555336,"TERMINAL",0,0,"508485",,terminal_output +2328,3556416,"TERMINAL",0,0,"19596",,terminal_output +2329,3557541,"TERMINAL",0,0,"2106107",,terminal_output +2330,3558483,"TERMINAL",0,0,"31718",,terminal_output +2331,3559593,"TERMINAL",0,0,"42829",,terminal_output +2332,3560613,"TERMINAL",0,0,"539340",,terminal_output +2333,3561638,"TERMINAL",0,0,"644041",,terminal_output +2334,3562771,"TERMINAL",0,0,"75152",,terminal_output +2335,3563374,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",973,0,"",shellscript,selection_mouse +2336,3563706,"TERMINAL",0,0,"86263",,terminal_output +2337,3564814,"TERMINAL",0,0,"98485",,terminal_output +2338,3565063,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",949,0,"",shellscript,selection_mouse +2339,3565081,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",948,0,"",shellscript,selection_command 
+2340,3565541,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",973,0,"",shellscript,selection_mouse +2341,3565809,"TERMINAL",0,0,"4:019596",,terminal_output +2342,3566063,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",972,0,"",shellscript,selection_mouse +2343,3566077,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",971,0,"",shellscript,selection_command +2344,3566861,"TERMINAL",0,0,"2206207",,terminal_output +2345,3567909,"TERMINAL",0,0,"31718",,terminal_output +2346,3568960,"TERMINAL",0,0,"42829",,terminal_output +2347,3570008,"TERMINAL",0,0,"539350",,terminal_output +2348,3571064,"TERMINAL",0,0,"645041",,terminal_output +2349,3572122,"TERMINAL",0,0,"75152",,terminal_output +2350,3573219,"TERMINAL",0,0,"86263",,terminal_output +2351,3574202,"TERMINAL",0,0,"97374",,terminal_output +2352,3575263,"TERMINAL",0,0,"108485",,terminal_output +2353,3576382,"TERMINAL",0,0,"19596",,terminal_output +2354,3577352,"TERMINAL",0,0,"2306307",,terminal_output +2355,3578375,"TERMINAL",0,0,"31718",,terminal_output +2356,3579426,"TERMINAL",0,0,"42829",,terminal_output +2357,3580586,"TERMINAL",0,0,"53939:00",,terminal_output +2358,3581537,"TERMINAL",0,0,"644:0041",,terminal_output +2359,3582631,"TERMINAL",0,0,"75152",,terminal_output +2360,3583655,"TERMINAL",0,0,"86263",,terminal_output +2361,3584681,"TERMINAL",0,0,"97374",,terminal_output +2362,3585730,"TERMINAL",0,0,"209596",,terminal_output +2363,3586779,"TERMINAL",0,0,"2406407",,terminal_output +2364,3587824,"TERMINAL",0,0,"31718",,terminal_output +2365,3588872,"TERMINAL",0,0,"42829",,terminal_output +2366,3589904,"TERMINAL",0,0,"539310",,terminal_output +2367,3590929,"TERMINAL",0,0,"641041",,terminal_output +2368,3591980,"TERMINAL",0,0,"75152",,terminal_output +2369,3593034,"TERMINAL",0,0,"86263",,terminal_output +2370,3594081,"TERMINAL",0,0,"97374",,terminal_output +2371,3595224,"TERMINAL",0,0,"308485",,terminal_output +2372,3596248,"TERMINAL",0,0,"19596",,terminal_output +2373,3597223,"TERMINAL",0,0,"2506507",,terminal_output +2374,3598299,"TERMINAL",0,0,"31718",,terminal_output +2375,3599327,"TERMINAL",0,0,"42829",,terminal_output +2376,3600376,"TERMINAL",0,0,"539320",,terminal_output +2377,3601418,"TERMINAL",0,0,"642041",,terminal_output +2378,3602461,"TERMINAL",0,0,"75152",,terminal_output +2379,3603523,"TERMINAL",0,0,"86263",,terminal_output +2380,3604651,"TERMINAL",0,0,"97374",,terminal_output +2381,3605616,"TERMINAL",0,0,"408485",,terminal_output +2382,3606693,"TERMINAL",0,0,"19596",,terminal_output +2383,3607720,"TERMINAL",0,0,"29:0069:007",,terminal_output +2384,3608788,"TERMINAL",0,0,"32829",,terminal_output +2385,3609869,"TERMINAL",0,0,"539330",,terminal_output +2386,3610857,"TERMINAL",0,0,"643041",,terminal_output +2387,3611895,"TERMINAL",0,0,"75152",,terminal_output +2388,3612942,"TERMINAL",0,0,"86263",,terminal_output +2389,3613990,"TERMINAL",0,0,"97374",,terminal_output +2390,3615035,"TERMINAL",0,0,"508485",,terminal_output +2391,3616114,"TERMINAL",0,0,"19596",,terminal_output +2392,3617138,"TERMINAL",0,0,"2106107",,terminal_output +2393,3618268,"TERMINAL",0,0,"31718",,terminal_output +2394,3619293,"TERMINAL",0,0,"42829",,terminal_output +2395,3620266,"TERMINAL",0,0,"539340",,terminal_output +2396,3621339,"TERMINAL",0,0,"644041",,terminal_output +2397,3622465,"TERMINAL",0,0,"75152",,terminal_output +2398,3623411,"TERMINAL",0,0,"86263",,terminal_output +2399,3624448,"TERMINAL",0,0,"97374",,terminal_output +2400,3625537,"TERMINAL",0,0,"5:008485",,terminal_output +2401,3626663,"TERMINAL",0,0,"19596",,terminal_output 
+2402,3627690,"TERMINAL",0,0,"2206207",,terminal_output +2403,3628647,"TERMINAL",0,0,"31718",,terminal_output +2404,3629737,"TERMINAL",0,0,"42829",,terminal_output +2405,3630758,"TERMINAL",0,0,"5450451",,terminal_output +2406,3631784,"TERMINAL",0,0,"75152",,terminal_output +2407,3632804,"TERMINAL",0,0,"86263",,terminal_output +2408,3633867,"TERMINAL",0,0,"97374",,terminal_output +2409,3634894,"TERMINAL",0,0,"108485",,terminal_output +2410,3635942,"TERMINAL",0,0,"19596",,terminal_output +2411,3636989,"TERMINAL",0,0,"2306307",,terminal_output +2412,3638036,"TERMINAL",0,0,"31718",,terminal_output +2413,3639086,"TERMINAL",0,0,"42829",,terminal_output +2414,3640214,"TERMINAL",0,0,"53931:00:00",,terminal_output +2415,3641203,"TERMINAL",0,0,"645:0041",,terminal_output +2416,3642233,"TERMINAL",0,0,"75152",,terminal_output +2417,3643280,"TERMINAL",0,0,"86263",,terminal_output +2418,3644325,"TERMINAL",0,0,"97374",,terminal_output +2419,3645129,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",973,0,"",shellscript,selection_mouse +2420,3645378,"TERMINAL",0,0,"208485",,terminal_output +2421,3645642,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",972,0,"",shellscript,selection_mouse +2422,3645656,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",971,0,"",shellscript,selection_command +2423,3646426,"TERMINAL",0,0,"19596",,terminal_output +2424,3647460,"TERMINAL",0,0,"2406407",,terminal_output +2425,3648509,"TERMINAL",0,0,"31718",,terminal_output +2426,3649560,"TERMINAL",0,0,"42829",,terminal_output +2427,3650237,"TERMINAL",0,0,"[?25lsh[?25h[?25lh[?25h",,terminal_output +2428,3650341,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +2429,3650517,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +2430,3650579,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +2431,3650638,"TERMINAL",0,0,"539310",,terminal_output +2432,3651178,"TERMINAL",0,0,"ripts_",,terminal_output +2433,3651652,"TERMINAL",0,0,"641041",,terminal_output +2434,3651705,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +2435,3651767,"TERMINAL",0,0,"oreka/",,terminal_output +2436,3652759,"TERMINAL",0,0,"75152",,terminal_output +2437,3652811,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +2438,3652950,"TERMINAL",0,0,"odelsize_scaling/",,terminal_output +2439,3653478,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +2440,3653600,"TERMINAL",0,0,"okenizer/",,terminal_output +2441,3653738,"TERMINAL",0,0,"87374",,terminal_output +2442,3654777,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +2443,3654786,"TERMINAL",0,0,"308485",,terminal_output +2444,3654894,"TERMINAL",0,0,"ester.sh ",,terminal_output +2445,3655951,"TERMINAL",0,0,"19596",,terminal_output +2446,3656180,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\r\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\r\n\r\njob_name=""debug""\r\nslurm_job_id=""debug-mihir""\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\nenv | grep SLURM\r\n\r\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_tokenizer.py \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=96 \\r\n --min_lr=7e-5 \\r\n --max_lr=7e-5 \\r\n --log_image_interval=500 \\r\n --log_checkpoint_interval=500 \\r\n --log \\r\n --name=tokenizer-modelsize-80M-$slurm_job_id \\r\n --tags tokenizer model-size-scaling L1 80M \\r\n --entity instant-uv \\r\n 
--project jafar \\r\n --data_dir $tf_records_dir \\r\n --model_dim=768 \\r\n --num_blocks=12 \\r\n --num_heads=12 \\r\n --latent_dim=64 \\r\n --num_latents=2048\r\n",,terminal_output +2447,3656337,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x4)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=3172857\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0625\r\nSLURM_JOB_START_TIME=1751618115\r\nSLURM_STEP_NODELIST=hkn0625\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751654115\r\nSLURM_PMI2_SRUN_PORT=38731\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x4)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=4\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3316923\r\nSLURM_PTY_PORT=39987\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=50\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=16\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e30.hkn0625\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=108\r\nSLURM_NODELIST=hkn[0625-0628]\r\nSLURM_SRUN_COMM_PORT=39403\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=16\r\nSLURM_NNODES=4\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3316923\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0625\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=39403\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0625-0628]\r\n",,terminal_output +2448,3656514,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +2449,3656589,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +2450,3656975,"TERMINAL",0,0,"2506507",,terminal_output +2451,3657935,"TERMINAL",0,0,"31718",,terminal_output +2452,3658984,"TERMINAL",0,0,"42829",,terminal_output +2453,3660028,"TERMINAL",0,0,"539320",,terminal_output +2454,3661073,"TERMINAL",0,0,"642041",,terminal_output +2455,3662129,"TERMINAL",0,0,"75152",,terminal_output +2456,3663221,"TERMINAL",0,0,"86263",,terminal_output +2457,3664313,"TERMINAL",0,0,"97374",,terminal_output +2458,3665256,"TERMINAL",0,0,"408485",,terminal_output +2459,3666335,"TERMINAL",0,0,"19596",,terminal_output +2460,3667357,"TERMINAL",0,0,"2025-07-04 11:35:42.539804: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 11:35:42.540033: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 11:35:42.540171: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 11:35:42.540203: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for 
plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751621742.552847 1036168 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\n[3 identical cuDNN factory errors (PIDs 1036169-1036171, each preceded by the same absl::InitializeLog warning) elided]\r\nE0000 00:00:1751621742.557242 1036168 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\n[3 identical cuBLAS factory errors (PIDs 1036169-1036171) elided]\r\nW0000 00:00:1751621742.571154 1036168 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n[15 identical computation placer warnings (4x each, PIDs 1036168-1036171) elided]\r\n",,terminal_output +2461,3667373,"TERMINAL",0,0,"230:0061:00:007",,terminal_output +2462,3668290,"TERMINAL",0,0,"2025-07-04 11:35:43.500998: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n[3 identical cuFFT factory errors elided]\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751621743.513993 3206555 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\n[3 identical cuDNN factory errors (PIDs 3206556-3206558, each preceded by the same absl::InitializeLog warning) elided]\r\nE0000 00:00:1751621743.518438 3206555 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\n[3 identical cuBLAS factory errors (PIDs 3206556-3206558) elided]\r\nW0000 00:00:1751621743.532540 3206555 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n[15 identical computation placer warnings (4x each, PIDs 3206555-3206558) elided]\r\n2025-07-04 11:35:43.538751: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n[7 identical cuFFT factory errors elided]\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751621743.552108 1403082 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\n[7 identical cuDNN factory errors (PIDs 1403079-1403081 and 1558823-1558826, each preceded by the same absl::InitializeLog warning) elided]\r\nE0000 00:00:1751621743.557014 1558826 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\n[7 identical cuBLAS factory errors (PIDs 1403079-1403082 and 1558823-1558825) elided]\r\nW0000 00:00:1751621743.570348 1403079 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n[31 identical computation placer warnings (4x each, PIDs 1403079-1403082 and 1558823-1558826) elided]\r\n",,terminal_output +2463,3668393,"TERMINAL",0,0,"31718",,terminal_output +2464,3669465,"TERMINAL",0,0,"42829",,terminal_output +2465,3670490,"TERMINAL",0,0,"539330",,terminal_output +2466,3671719,"TERMINAL",0,0,"643041",,terminal_output +2467,3672742,"TERMINAL",0,0,"75152",,terminal_output +2468,3673780,"TERMINAL",0,0,"87374",,terminal_output +2469,3674901,"TERMINAL",0,0,"508485",,terminal_output +2470,3675932,"TERMINAL",0,0,"19596",,terminal_output +2471,3676942,"TERMINAL",0,0,"2106107",,terminal_output +2472,3677966,"TERMINAL",0,0,"31718",,terminal_output +2473,3678990,"TERMINAL",0,0,"42829",,terminal_output +2474,3680045,"TERMINAL",0,0,"539340",,terminal_output +2475,3681139,"TERMINAL",0,0,"644041",,terminal_output +2476,3682140,"TERMINAL",0,0,"W0000 00:00:1751621757.426359 1036168 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n[3 identical gpu_device warnings (PIDs 1036169-1036171) elided]\r\n",,terminal_output +2477,3682151,"TERMINAL",0,0,"75152",,terminal_output +2478,3682678,"TERMINAL",0,0,"W0000 00:00:1751621757.908976 1558823 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. 
Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n[11 identical gpu_device warnings (PIDs 1558824-1558826, 3206555-3206558, and 1403079-1403082) elided]\r\n",,terminal_output +2479,3683289,"TERMINAL",0,0,"86263",,terminal_output +2480,3684238,"TERMINAL",0,0,"97374",,terminal_output +2481,3685285,"TERMINAL",0,0,"6:008485",,terminal_output +2482,3686262,"TERMINAL",0,0,"2025-07-04 11:36:01.467923: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n[15 identical dot_search_space warnings (timestamps 11:36:01.474307-11:36:01.505495) elided]\r\n",,terminal_output +2483,3686357,"TERMINAL",0,0,"19596",,terminal_output +2484,3687487,"TERMINAL",0,0,"2206207",,terminal_output +2485,3688448,"TERMINAL",0,0,"31718",,terminal_output +2486,3689470,"TERMINAL",0,0,"42829",,terminal_output +2487,3690501,"TERMINAL",0,0,"539350",,terminal_output +2488,3691585,"TERMINAL",0,0,"645041",,terminal_output +2489,3692608,"TERMINAL",0,0,"75152",,terminal_output +2490,3693673,"TERMINAL",0,0,"86263",,terminal_output +2491,3694764,"TERMINAL",0,0,"97374",,terminal_output +2492,3695783,"TERMINAL",0,0,"109596",,terminal_output +2493,3696806,"TERMINAL",0,0,"2306307",,terminal_output +2494,3697934,"TERMINAL",0,0,"31718",,terminal_output +2495,3698651,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. 
Use `wandb login --relogin` to force relogin\r\n",,terminal_output +2496,3698944,"TERMINAL",0,0,"42829",,terminal_output +2497,3699163,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250704_113613-l9baaggl\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run tokenizer-modelsize-80M-debug-mihir\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/l9baaggl\r\n",,terminal_output +2498,3699931,"TERMINAL",0,0,"53931:00",,terminal_output +2499,3700980,"TERMINAL",0,0,"646:0041",,terminal_output +2500,3702022,"TERMINAL",0,0,"75152",,terminal_output +2501,3703071,"TERMINAL",0,0,"86263",,terminal_output +2502,3704089,"TERMINAL",0,0,"97374",,terminal_output +2503,3705111,"TERMINAL",0,0,"208485",,terminal_output +2504,3706593,"TERMINAL",0,0,"19596",,terminal_output +2505,3707662,"TERMINAL",0,0,"2406407",,terminal_output +2506,3708788,"TERMINAL",0,0,"31718",,terminal_output +2507,3709731,"TERMINAL",0,0,"439310",,terminal_output +2508,3710775,"TERMINAL",0,0,"641041",,terminal_output +2509,3711863,"TERMINAL",0,0,"75152",,terminal_output +2510,3712894,"TERMINAL",0,0,"86263",,terminal_output +2511,3713926,"TERMINAL",0,0,"97374",,terminal_output +2512,3714970,"TERMINAL",0,0,"308485",,terminal_output +2513,3716019,"TERMINAL",0,0,"19596",,terminal_output +2514,3717096,"TERMINAL",0,0,"2506507",,terminal_output +2515,3718112,"TERMINAL",0,0,"31718",,terminal_output +2516,3719161,"TERMINAL",0,0,"42829",,terminal_output +2517,3720218,"TERMINAL",0,0,"539320",,terminal_output +2518,3721265,"TERMINAL",0,0,"642041",,terminal_output +2519,3722410,"TERMINAL",0,0,"75152",,terminal_output +2520,3723391,"TERMINAL",0,0,"86263",,terminal_output +2521,3724456,"TERMINAL",0,0,"97374",,terminal_output +2522,3725585,"TERMINAL",0,0,"408485",,terminal_output +2523,3726529,"TERMINAL",0,0,"19596",,terminal_output +2524,3727631,"TERMINAL",0,0,"21:0061:007",,terminal_output +2525,3728674,"TERMINAL",0,0,"31718",,terminal_output +2526,3729783,"TERMINAL",0,0,"42829",,terminal_output +2527,3730717,"TERMINAL",0,0,"5430431",,terminal_output +2528,3731764,"TERMINAL",0,0,"75152",,terminal_output +2529,3732814,"TERMINAL",0,0,"86263",,terminal_output +2530,3733979,"TERMINAL",0,0,"97374",,terminal_output +2531,3735005,"TERMINAL",0,0,"508485",,terminal_output +2532,3736027,"TERMINAL",0,0,"19596",,terminal_output +2533,3737019,"TERMINAL",0,0,"2106107",,terminal_output +2534,3737871,"TERMINAL",0,0,"2025-07-04 11:36:53.119030: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n[1 identical warning elided]\r\n",,terminal_output +2535,3738055,"TERMINAL",0,0,"31718",,terminal_output +2536,3738114,"TERMINAL",0,0,"2025-07-04 11:36:53.389299: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n[1 identical warning elided]\r\n",,terminal_output +2537,3738179,"TERMINAL",0,0,"[2 identical dot_search_space warnings (11:36:53.432480/.433026) elided]",,terminal_output +2538,3738343,"TERMINAL",0,0,"[2 identical dot_search_space warnings (11:36:53.627076/.627587) elided]",,terminal_output +2539,3738463,"TERMINAL",0,0,"[2 identical dot_search_space warnings (11:36:53.713442/.713975) elided]",,terminal_output +2540,3738534,"TERMINAL",0,0,"[2 identical dot_search_space warnings (11:36:53.819233/.819758) elided]",,terminal_output +2541,3738650,"TERMINAL",0,0,"[2 identical dot_search_space warnings (11:36:53.893482/.894000) elided]",,terminal_output +2542,3738998,"TERMINAL",0,0,"[2 identical dot_search_space warnings (11:36:54.229035/.229548) elided]",,terminal_output +2543,3739103,"TERMINAL",0,0,"42829",,terminal_output +2544,3739816,"TERMINAL",0,0,"[2 identical dot_search_space warnings (11:36:55.051761/.052297) elided]",,terminal_output +2545,3739934,"TERMINAL",0,0,"[4 identical dot_search_space warnings (11:36:55.196100-.209393) elided]",,terminal_output +2546,3740089,"TERMINAL",0,0,"[2 identical dot_search_space warnings (11:36:55.371141/.371692) elided]",,terminal_output +2547,3740148,"TERMINAL",0,0,"539340",,terminal_output +2548,3740635,"TERMINAL",0,0,"[4 identical dot_search_space warnings (11:36:55.837745-.839240) elided]",,terminal_output +2549,3741044,"TERMINAL",0,0,"[2 identical dot_search_space warnings (11:36:56.329235/.329774) elided]",,terminal_output +2550,3741202,"TERMINAL",0,0,"644041",,terminal_output +2551,3742251,"TERMINAL",0,0,"75152",,terminal_output +2552,3742751,"TERMINAL",0,0,"2025-07-04 11:36:57.995276: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-04 11:36:57.996129: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2553,3743401,"TERMINAL",0,0,"86263",,terminal_output +2554,3744423,"TERMINAL",0,0,"97374",,terminal_output +2555,3745403,"TERMINAL",0,0,"7:008485",,terminal_output +2556,3746473,"TERMINAL",0,0,"19596",,terminal_output +2557,3747499,"TERMINAL",0,0,"2206207",,terminal_output +2558,3748547,"TERMINAL",0,0,"31718",,terminal_output +2559,3749580,"TERMINAL",0,0,"42829",,terminal_output +2560,3750633,"TERMINAL",0,0,"539350",,terminal_output +2561,3751693,"TERMINAL",0,0,"645041",,terminal_output +2562,3752819,"TERMINAL",0,0,"76263",,terminal_output +2563,3753846,"TERMINAL",0,0,"97374",,terminal_output +2564,3754871,"TERMINAL",0,0,"108485",,terminal_output +2565,3755893,"TERMINAL",0,0,"19596",,terminal_output +2566,3757020,"TERMINAL",0,0,"2306307",,terminal_output +2567,3758045,"TERMINAL",0,0,"31718",,terminal_output +2568,3759012,"TERMINAL",0,0,"42829",,terminal_output +2569,3760063,"TERMINAL",0,0,"53932:00",,terminal_output +2570,3761220,"TERMINAL",0,0,"647:0041",,terminal_output +2571,3762166,"TERMINAL",0,0,"75152",,terminal_output +2572,3763267,"TERMINAL",0,0,"86263",,terminal_output +2573,3764292,"TERMINAL",0,0,"97374",,terminal_output +2574,3765337,"TERMINAL",0,0,"208485",,terminal_output +2575,3766370,"TERMINAL",0,0,"19596",,terminal_output +2576,3767471,"TERMINAL",0,0,"2406407",,terminal_output +2577,3768466,"TERMINAL",0,0,"31718",,terminal_output +2578,3769518,"TERMINAL",0,0,"42829",,terminal_output +2579,3770640,"TERMINAL",0,0,"539310",,terminal_output +2580,3771627,"TERMINAL",0,0,"641041",,terminal_output +2581,3772664,"TERMINAL",0,0,"75152",,terminal_output +2582,3773719,"TERMINAL",0,0,"87374",,terminal_output +2583,3774758,"TERMINAL",0,0,"308485",,terminal_output +2584,3775862,"TERMINAL",0,0,"19596",,terminal_output +2585,3776884,"TERMINAL",0,0,"2506507",,terminal_output +2586,3777909,"TERMINAL",0,0,"31718",,terminal_output +2587,3779038,"TERMINAL",0,0,"42829",,terminal_output +2588,3779978,"TERMINAL",0,0,"539320",,terminal_output +2589,3781031,"TERMINAL",0,0,"642041",,terminal_output +2590,3782112,"TERMINAL",0,0,"75152",,terminal_output +2591,3783112,"TERMINAL",0,0,"86263",,terminal_output +2592,3784259,"TERMINAL",0,0,"97374",,terminal_output +2593,3785229,"TERMINAL",0,0,"408485",,terminal_output +2594,3786257,"TERMINAL",0,0,"19596",,terminal_output +2595,3787306,"TERMINAL",0,0,"22:0062:007",,terminal_output +2596,3788458,"TERMINAL",0,0,"31718",,terminal_output +2597,3789482,"TERMINAL",0,0,"42829",,terminal_output +2598,3790424,"TERMINAL",0,0,"539330",,terminal_output +2599,3791527,"TERMINAL",0,0,"643041",,terminal_output +2600,3792511,"TERMINAL",0,0,"75152",,terminal_output +2601,3793552,"TERMINAL",0,0,"86263",,terminal_output +2602,3794601,"TERMINAL",0,0,"97374",,terminal_output +2603,3795746,"TERMINAL",0,0,"508485",,terminal_output +2604,3796752,"TERMINAL",0,0,"19596",,terminal_output +2605,3797774,"TERMINAL",0,0,"2117118",,terminal_output +2606,3798781,"TERMINAL",0,0,"42829",,terminal_output +2607,3799830,"TERMINAL",0,0,"539340",,terminal_output +2608,3800869,"TERMINAL",0,0,"644041",,terminal_output +2609,3801974,"TERMINAL",0,0,"75152",,terminal_output +2610,3803002,"TERMINAL",0,0,"86263",,terminal_output +2611,3804020,"TERMINAL",0,0,"97374",,terminal_output +2612,3805066,"TERMINAL",0,0,"8:008485",,terminal_output +2613,3806104,"TERMINAL",0,0,"19596",,terminal_output +2614,3807152,"TERMINAL",0,0,"2206207",,terminal_output 
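The run of `dot_search_space.cc:200` messages above is XLA's GPU dot autotuner falling back to its full config set, warned once per compiled dot fusion per process; the `tester.sh` captured later in this recording sidesteps the autotuner entirely by launching with `XLA_FLAGS=--xla_gpu_autotune_level=0`. A minimal sketch of applying the same flag programmatically, assuming `train_tokenizer.py` is a JAX program (the `external/xla` log paths suggest so); the flag must be in the environment before the XLA backend first initializes:

```python
# Minimal sketch (assumption: train_tokenizer.py is a JAX program).
# XLA_FLAGS must be set before the first `import jax`, otherwise the
# backend has already been configured and the flag is ignored.
import os

os.environ["XLA_FLAGS"] = (
    os.environ.get("XLA_FLAGS", "") + " --xla_gpu_autotune_level=0"
).strip()

import jax  # noqa: E402  (deliberately imported after setting XLA_FLAGS)

print(jax.devices())  # confirm which backend/devices picked up the flag
```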
+2615,3808221,"TERMINAL",0,0,"31718",,terminal_output +2616,3809247,"TERMINAL",0,0,"42829",,terminal_output +2617,3810280,"TERMINAL",0,0,"539350",,terminal_output +2618,3811395,"TERMINAL",0,0,"645041",,terminal_output +2619,3812370,"TERMINAL",0,0,"75152",,terminal_output +2620,3813445,"TERMINAL",0,0,"86263",,terminal_output +2621,3814467,"TERMINAL",0,0,"97374",,terminal_output +2622,3815595,"TERMINAL",0,0,"108485",,terminal_output +2623,3816539,"TERMINAL",0,0,"19596",,terminal_output +2624,3817649,"TERMINAL",0,0,"2306307",,terminal_output +2625,3818666,"TERMINAL",0,0,"31718",,terminal_output +2626,3819668,"TERMINAL",0,0,"42829",,terminal_output +2627,3820712,"TERMINAL",0,0,"53933:00",,terminal_output +2628,3821839,"TERMINAL",0,0,"658:0152",,terminal_output +2629,3822809,"TERMINAL",0,0,"86263",,terminal_output +2630,3823886,"TERMINAL",0,0,"97374",,terminal_output +2631,3824911,"TERMINAL",0,0,"208485",,terminal_output +2632,3826038,"TERMINAL",0,0,"19596",,terminal_output +2633,3826991,"TERMINAL",0,0,"2406407",,terminal_output +2634,3828046,"TERMINAL",0,0,"31718",,terminal_output +2635,3829090,"TERMINAL",0,0,"42829",,terminal_output +2636,3830146,"TERMINAL",0,0,"539310",,terminal_output +2637,3831198,"TERMINAL",0,0,"641041",,terminal_output +2638,3832286,"TERMINAL",0,0,"75152",,terminal_output +2639,3833276,"TERMINAL",0,0,"86263",,terminal_output +2640,3834328,"TERMINAL",0,0,"97374",,terminal_output +2641,3835376,"TERMINAL",0,0,"308485",,terminal_output +2642,3836423,"TERMINAL",0,0,"19596",,terminal_output +2643,3837471,"TERMINAL",0,0,"2506507",,terminal_output +2644,3838516,"TERMINAL",0,0,"31718",,terminal_output +2645,3839567,"TERMINAL",0,0,"42829",,terminal_output +2646,3840595,"TERMINAL",0,0,"539320",,terminal_output +2647,3841705,"TERMINAL",0,0,"642041",,terminal_output +2648,3842730,"TERMINAL",0,0,"75152",,terminal_output +2649,3843854,"TERMINAL",0,0,"87374",,terminal_output +2650,3844881,"TERMINAL",0,0,"408485",,terminal_output +2651,3845847,"TERMINAL",0,0,"19596",,terminal_output +2652,3846926,"TERMINAL",0,0,"23:0063:007",,terminal_output +2653,3847950,"TERMINAL",0,0,"31718",,terminal_output +2654,3849092,"TERMINAL",0,0,"42829",,terminal_output +2655,3850013,"TERMINAL",0,0,"539330",,terminal_output +2656,3851064,"TERMINAL",0,0,"643041",,terminal_output +2657,3852093,"TERMINAL",0,0,"75152",,terminal_output +2658,3853141,"TERMINAL",0,0,"86263",,terminal_output +2659,3854198,"TERMINAL",0,0,"97374",,terminal_output +2660,3855236,"TERMINAL",0,0,"508485",,terminal_output +2661,3856349,"TERMINAL",0,0,"19596",,terminal_output +2662,3857372,"TERMINAL",0,0,"2106107",,terminal_output +2663,3858565,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_80M.sbatch",0,0,"",shellscript,tab +2664,3858620,"TERMINAL",0,0,"31718",,terminal_output +2665,3859564,"TERMINAL",0,0,"42829",,terminal_output +2666,3860574,"TERMINAL",0,0,"539340",,terminal_output +2667,3861570,"TERMINAL",0,0,"644041",,terminal_output +2668,3862698,"TERMINAL",0,0,"75152",,terminal_output +2669,3863639,"TERMINAL",0,0,"86263",,terminal_output +2670,3864746,"TERMINAL",0,0,"97374",,terminal_output +2671,3865344,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=4\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH 
--error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_80M\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=7e-5 \\n --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,tab +2672,3865761,"TERMINAL",0,0,"9:009596",,terminal_output +2673,3866800,"TERMINAL",0,0,"2206207",,terminal_output +2674,3867845,"TERMINAL",0,0,"31718",,terminal_output +2675,3868296,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,0,"",shellscript,tab +2676,3868297,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",973,0,"",shellscript,selection_mouse +2677,3868893,"TERMINAL",0,0,"42829",,terminal_output +2678,3869922,"TERMINAL",0,0,"539350",,terminal_output +2679,3870991,"TERMINAL",0,0,"645041",,terminal_output +2680,3871859,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,973,"\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=7e-5 \\n --max_lr=7e-5 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-modelsize-80M-$slurm_job_id \\n --tags tokenizer model-size-scaling L1 80M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir \\n --model_dim=768 \\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_command +2681,3872004,"TERMINAL",0,0,"75152",,terminal_output +2682,3873154,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,0,"",shellscript,selection_command +2683,3873185,"TERMINAL",0,0,"86263",,terminal_output +2684,3874101,"TERMINAL",0,0,"97374",,terminal_output +2685,3875158,"TERMINAL",0,0,"108485",,terminal_output +2686,3876138,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",0,0,"",shellscript,tab +2687,3876383,"TERMINAL",0,0,"19596",,terminal_output +2688,3876636,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",1194,0,"",shellscript,selection_mouse +2689,3877062,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",1408,0,"",shellscript,selection_mouse +2690,3877241,"TERMINAL",0,0,"2306307",,terminal_output 
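Record 2497 above shows the run syncing as `tokenizer-modelsize-80M-debug-mihir` under `instant-uv/jafar`, driven by the `--log --name ... --entity instant-uv --project jafar --tags ...` flags in the launch commands recorded here (records 2671 and 2680). A hedged sketch of the `wandb.init` call those flags presumably map onto inside `train_tokenizer.py`; the values come from the recorded command line, but the exact wiring is an assumption:

```python
# Hedged sketch of the wandb setup implied by the recorded launch flags;
# how train_tokenizer.py actually wires its CLI flags into wandb may differ.
import wandb

run = wandb.init(
    entity="instant-uv",                          # --entity instant-uv
    project="jafar",                              # --project jafar
    name="tokenizer-modelsize-80M-debug-mihir",   # --name=tokenizer-modelsize-80M-$slurm_job_id
    tags=["tokenizer", "model-size-scaling", "L1", "80M"],  # --tags ...
    config={                                      # hyperparameters from the same command line
        "batch_size": 96,
        "min_lr": 7e-5,
        "max_lr": 7e-5,
        "model_dim": 768,
        "num_blocks": 12,
        "num_heads": 12,
        "latent_dim": 64,
        "num_latents": 2048,
    },
)
```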
+2691,3877381,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",1407,1,"\n",shellscript,selection_mouse +2692,3877382,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",1319,89,"\n --num_blocks=12 \\n --num_heads=12 \\n --latent_dim=64 \\n --num_latents=2048\n",shellscript,selection_mouse +2693,3877382,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",1017,391,"[growing drag selection; text identical to the corresponding span of record 2671]",shellscript,selection_mouse +2694,3877382,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",660,748,"[growing drag selection; text identical to the corresponding span of record 2671]",shellscript,selection_mouse +2695,3877382,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",26,1382,"[growing drag selection; text identical to the corresponding span of record 2671]",shellscript,selection_mouse +2696,3877383,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",0,1408,"[full-file selection; text identical to record 2671]",shellscript,selection_mouse +2697,3877395,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",1407,1,"\n",shellscript,selection_command +2698,3877440,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",0,1408,"[full-file selection; text identical to record 2671]",shellscript,selection_mouse +2699,3878228,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",0,0,"",shellscript,selection_command +2700,3878334,"TERMINAL",0,0,"31718",,terminal_output +2701,3879357,"TERMINAL",0,0,"42829",,terminal_output +2702,3880436,"TERMINAL",0,0,"53934:00",,terminal_output +2703,3881437,"TERMINAL",0,0,"649:0041",,terminal_output +2704,3882508,"TERMINAL",0,0,"75152",,terminal_output +2705,3883732,"TERMINAL",0,0,"87374",,terminal_output +2706,3884877,"TERMINAL",0,0,"208485",,terminal_output +2707,3885845,"TERMINAL",0,0,"19596",,terminal_output +2708,3886881,"TERMINAL",0,0,"2406407",,terminal_output
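Records 2691-2698 above are a single mouse drag: each intermediate event re-captures the growing selection, so the full script body lands in the log several times over. When post-processing a recording in this format, collapsing consecutive selection events on the same file down to the final one removes that duplication without losing the end state. A minimal sketch over this CSV's schema (`Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type`); the helper name is mine:

```python
# Minimal sketch: collapse bursts of consecutive selection events on the
# same file down to the last one. Column names follow this CSV's header:
# Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type.
import csv

SELECTION_TYPES = {"selection_mouse", "selection_command"}

def dedupe_selections(rows):
    out = []
    for row in rows:
        if (out
                and row["Type"] in SELECTION_TYPES
                and out[-1]["Type"] in SELECTION_TYPES
                and out[-1]["File"] == row["File"]):
            out[-1] = row  # keep only the final selection of the burst
        else:
            out.append(row)
    return out

with open("source.csv", newline="") as f:
    deduped = dedupe_selections(list(csv.DictReader(f)))
```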
+2709,3888115,"TERMINAL",0,0,"31718",,terminal_output +2710,3889008,"TERMINAL",0,0,"42829",,terminal_output +2711,3890023,"TERMINAL",0,0,"539310",,terminal_output +2712,3891063,"TERMINAL",0,0,"641041",,terminal_output +2713,3892087,"TERMINAL",0,0,"75152",,terminal_output +2714,3893176,"TERMINAL",0,0,"86263",,terminal_output +2715,3894243,"TERMINAL",0,0,"97374",,terminal_output +2716,3895266,"TERMINAL",0,0,"308485",,terminal_output +2717,3896409,"TERMINAL",0,0,"19596",,terminal_output +2718,3897399,"TERMINAL",0,0,"2506507",,terminal_output +2719,3898428,"TERMINAL",0,0,"31718",,terminal_output +2720,3899404,"TERMINAL",0,0,"42829",,terminal_output +2721,3900454,"TERMINAL",0,0,"539320",,terminal_output +2722,3900586,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",367,0,"#SBATCH --job-name=train_tokenizer_model_size_scaling_127M\n",shellscript,content +2723,3900588,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",426,58,"",shellscript,content +2724,3903745,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",1118,0," --name=tokenizer-modelsize-127M-$slurm_job_id \\n",shellscript,content +2725,3903746,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",1170,0," --tags tokenizer model-size-scaling L1 127M \\n",shellscript,content +2726,3903747,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",1220,100,"",shellscript,content +2727,3903748,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",1301,0," --model_dim=1024 \\n",shellscript,content +2728,3903748,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",1324,0," --num_blocks=16 \\n",shellscript,content +2729,3903749,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",1346,0," --num_heads=16 \\n",shellscript,content +2730,3903749,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",1367,0," --latent_dim=64 \\n",shellscript,content +2731,3903750,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_127M.sbatch",1389,87,"",shellscript,content +2732,3903912,"TERMINAL",0,0,"6420417515286263",,terminal_output +2733,3904633,"TERMINAL",0,0,"97374",,terminal_output +2734,3905671,"TERMINAL",0,0,"408485",,terminal_output +2735,3906720,"TERMINAL",0,0,"14:0064:007",,terminal_output +2736,3907795,"TERMINAL",0,0,"31718",,terminal_output +2737,3908796,"TERMINAL",0,0,"42829",,terminal_output +2738,3909905,"TERMINAL",0,0,"539330",,terminal_output +2739,3910888,"TERMINAL",0,0,"643041",,terminal_output +2740,3911935,"TERMINAL",0,0,"75152",,terminal_output +2741,3912979,"TERMINAL",0,0,"86263",,terminal_output +2742,3914103,"TERMINAL",0,0,"97374",,terminal_output +2743,3915124,"TERMINAL",0,0,"508485",,terminal_output +2744,3916151,"TERMINAL",0,0,"19596",,terminal_output +2745,3917148,"TERMINAL",0,0,"2106107",,terminal_output +2746,3918377,"TERMINAL",0,0,"31718",,terminal_output +2747,3919328,"TERMINAL",0,0,"42829",,terminal_output +2748,3920293,"TERMINAL",0,0,"539340",,terminal_output +2749,3921373,"TERMINAL",0,0,"644041",,terminal_output +2750,3922399,"TERMINAL",0,0,"75152",,terminal_output +2751,3923525,"TERMINAL",0,0,"86263",,terminal_output +2752,3924552,"TERMINAL",0,0,"97374",,terminal_output +2753,3925571,"TERMINAL",0,0,"40:008485",,terminal_output +2754,3926636,"TERMINAL",0,0,"19596",,terminal_output +2755,3927619,"TERMINAL",0,0,"2206207",,terminal_output +2756,3928638,"TERMINAL",0,0,"31718",,terminal_output +2757,3929686,"TERMINAL",0,0,"42829",,terminal_output 
+2758,3930737,"TERMINAL",0,0,"5450451",,terminal_output +2759,3931817,"TERMINAL",0,0,"75152",,terminal_output +2760,3932944,"TERMINAL",0,0,"86263",,terminal_output +2761,3933867,"TERMINAL",0,0,"97374",,terminal_output +2762,3934991,"TERMINAL",0,0,"108485",,terminal_output +2763,3935969,"TERMINAL",0,0,"19596",,terminal_output +2764,3937007,"TERMINAL",0,0,"2306307",,terminal_output +2765,3938038,"TERMINAL",0,0,"31718",,terminal_output +2766,3939087,"TERMINAL",0,0,"42829",,terminal_output +2767,3940134,"TERMINAL",0,0,"53935:00",,terminal_output +2768,3941157,"TERMINAL",0,0,"6420:0041",,terminal_output +2769,3941814,"scripts_horeka/modelsize_scaling/model_sizes.md",0,0,"# Genie 1 - Model Sizes and their configs\n\n## Tokenizer model: sizes\n\ndefault: \n| Model | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|-------|-----------|------------|-----------|------------|-------------|-------------|\n| default | 512 | 8 | 8 | 32 | 1024 | ~38M |\n\n### scaling up \n#### (not tested yet - TODO @mihir)\n\n| Model | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|-------|-----------|------------|-----------|------------|-------------|-------------|\n| L1 | 768 | 12 | 12 | 64 | 2048 | ~80M |\n| L2 | 1024 | 12 | 16 | 128 | 2048 | ~140M |\n| L3 | 1152 | 16 | 16 | 128 | 4096 | ~200M |\n| L4 | 896 | 16 | 14 | 96 | 4096 | ~120M |\n| L5 | 1536 | 12 | 24 | 256 | 2048 | ~190M |\n\n\n### tiny models\n| Model | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|-------|-----------|------------|-----------|------------|-------------|-------------|\n| S1 | 128 | 2 | 2 | 8 | 128 | ~0.6M |\n| S2 | 192 | 2 | 3 | 16 | 128 | ~1.3M |\n| S3 | 256 | 3 | 4 | 16 | 256 | ~3.6M |\n| S4 | 320 | 4 | 5 | 24 | 256 | ~7.4M |\n| S5 | 384 | 4 | 6 | 32 | 512 | ~10M |\n\n\n## Latent Action model: sizes\ndefault: \n| Model | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|-------|-----------|------------|-----------|------------|-------------|-------------|\n| default | 512 | 8 | 8 | 32 | 6 | ~39M |\n\n### scaling up \n#### (not tested yet - TODO @mihir)\n\n| Name | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|--------------|-----------|------------|-----------|------------|-------------|-------------|\n| XL | 1024 | 12 | 16 | 64 | 12 | ~200M |\n| L | 896 | 12 | 14 | 48 | 8 | ~150M |\n| M+ | 768 | 10 | 12 | 48 | 8 | ~100M |\n| M | 640 | 10 | 10 | 32 | 8 | ~70M |\n| Base+ | 512 | 12 | 8 | 32 | 8 | ~55M |\n\n\n### tiny models\n| Name | model_dim | num_blocks | num_heads | latent_dim | num_latents | Est. Params |\n|--------------|-----------|------------|-----------|------------|-------------|-------------|\n| XS | 128 | 2 | 2 | 8 | 4 | ~0.9M |\n| S | 160 | 2 | 2 | 8 | 4 | ~1.3M |\n| S+ | 192 | 3 | 3 | 8 | 4 | ~2.4M |\n| M- | 256 | 4 | 4 | 16 | 6 | ~5.4M |\n| M | 320 | 6 | 4 | 16 | 6 | ~12M |\n\n\n## Dynamics model: sizes \n\n| Config | dyna_dim | dyna_num_blocks | dyna_num_heads | Approx. Params |\n|--------|----------|-----------------|---------------|----------------|\n| 1 | 512 | 12 | 8 | ~36M |\n| 2 | 768 | 16 | 12 | ~110M |\n| 3 | 1024 | 16 | 16 | ~180M |\n| 4 | 1024 | 24 | 16 | ~270M |\n| 5 | 1536 | 24 | 24 | ~500M |\n\n\n### tiny models\n| Config | dyna_dim | dyna_num_blocks | dyna_num_heads | Approx. 
Params |\n|--------|----------|-----------------|---------------|----------------|\n| A | 128 | 2 | 4 | ~1.5M |\n| B | 256 | 2 | 4 | ~3.5M |\n| C | 256 | 4 | 4 | ~6M |\n| D | 384 | 4 | 6 | ~12M |\n| E | 512 | 4 | 8 | ~18M |",markdown,tab
+2770,3942266,"TERMINAL",0,0,"75152",,terminal_output
+2771,3942851,"scripts_horeka/modelsize_scaling/model_sizes.md",980,0,"",markdown,selection_mouse
+2772,3943257,"TERMINAL",0,0,"86263",,terminal_output
+2773,3943998,"scripts_horeka/modelsize_scaling/model_sizes.md",1092,0,"",markdown,selection_mouse
+2774,3944296,"TERMINAL",0,0,"[rows 2774-2777: per-second watch/squeue redraw fragments, elided]",,terminal_output
+2778,3947891,"scripts_horeka/modelsize_scaling/model_sizes.md",567,0,"",markdown,selection_mouse
+2779,3948510,"TERMINAL",0,0,"31718",,terminal_output
+2780,3949356,"scripts_horeka/modelsize_scaling/model_sizes.md",567,1,"",markdown,content
+2781,3949474,"scripts_horeka/modelsize_scaling/model_sizes.md",567,1,"",markdown,content
+2782,3949600,"TERMINAL",0,0,"42829",,terminal_output
+2783,3949700,"scripts_horeka/modelsize_scaling/model_sizes.md",567,0,"1",markdown,content
+2784,3949701,"scripts_horeka/modelsize_scaling/model_sizes.md",568,0,"",markdown,selection_keyboard
+2785,3950097,"scripts_horeka/modelsize_scaling/model_sizes.md",568,0,"2",markdown,content
+2786,3950098,"scripts_horeka/modelsize_scaling/model_sizes.md",569,0,"",markdown,selection_keyboard
+2787,3950609,"TERMINAL",0,0,"539310",,terminal_output
+2788,3950695,"scripts_horeka/modelsize_scaling/model_sizes.md",569,0,"7",markdown,content
+2789,3950696,"scripts_horeka/modelsize_scaling/model_sizes.md",570,0,"",markdown,selection_keyboard
+2790,3950835,"scripts_horeka/modelsize_scaling/model_sizes.md",569,0,"",markdown,selection_command
+2791,3951629,"TERMINAL",0,0,"[rows 2791-2820: per-second watch/squeue redraw fragments, elided]",,terminal_output
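Rows 2780-2790 retype the L2 tokenizer estimate in model_sizes.md from ~140M to ~127M, matching the sbatch edited above (the md row's other columns, num_blocks=12 and latent_dim=128, still differ from the sbatch's 16 and 64). A sketch of turning one such table row into the flag list the sbatch scripts pass to train_tokenizer.py; the values below mirror the 127M sbatch rather than the md row:

```python
# config taken from the edited train_tokenizer_127M.sbatch above
cfg = {"model_dim": 1024, "num_blocks": 16, "num_heads": 16,
       "latent_dim": 64, "num_latents": 2048}

# render the flags in the same backslash-continued style as the sbatch
flags = " \\\n    ".join(f"--{k}={v}" for k, v in cfg.items())
print("srun python train_tokenizer.py \\\n    " + flags)
```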
+2821,3983014,"TERMINAL",0,0,"86263",,terminal_output +2822,3984147,"TERMINAL",0,0,"97374",,terminal_output +2823,3985117,"TERMINAL",0,0,"1:008485",,terminal_output +2824,3986193,"TERMINAL",0,0,"19596",,terminal_output +2825,3987217,"TERMINAL",0,0,"2206207",,terminal_output +2826,3988343,"TERMINAL",0,0,"31718",,terminal_output +2827,3989313,"TERMINAL",0,0,"42829",,terminal_output +2828,3990351,"TERMINAL",0,0,"539350",,terminal_output +2829,3991380,"TERMINAL",0,0,"645041",,terminal_output +2830,3992444,"TERMINAL",0,0,"75152",,terminal_output +2831,3993566,"TERMINAL",0,0,"86263",,terminal_output +2832,3994535,"TERMINAL",0,0,"97374",,terminal_output +2833,3995615,"TERMINAL",0,0,"108485",,terminal_output +2834,3996619,"TERMINAL",0,0,"19596",,terminal_output +2835,3997662,"TERMINAL",0,0,"2306307",,terminal_output +2836,3998680,"TERMINAL",0,0,"31718",,terminal_output +2837,3999713,"TERMINAL",0,0,"42829",,terminal_output +2838,4000745,"TERMINAL",0,0,"541:0046:01",,terminal_output +2839,4001861,"TERMINAL",0,0,"75152",,terminal_output +2840,4002717,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,0,"",shellscript,tab +2841,4002864,"TERMINAL",0,0,"86263",,terminal_output +2842,4003908,"TERMINAL",0,0,"97374",,terminal_output +2843,4004305,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",577,0,"",shellscript,selection_mouse +2844,4005037,"TERMINAL",0,0,"208485",,terminal_output +2845,4005260,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",576,0,"",shellscript,selection_command +2846,4005726,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",576,1,"7",shellscript,selection_command +2847,4005987,"TERMINAL",0,0,"19596",,terminal_output +2848,4007083,"TERMINAL",0,0,"2406407",,terminal_output +2849,4007327,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",576,1,"7",shellscript,selection_command +2850,4008114,"TERMINAL",0,0,"31718",,terminal_output +2851,4008153,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",576,0,"",shellscript,selection_command +2852,4008826,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",596,1,"",shellscript,content +2853,4008827,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",576,1,"",shellscript,content +2854,4009143,"TERMINAL",0,0,"42829",,terminal_output +2855,4009821,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",595,0,"5",shellscript,content +2856,4009822,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",576,0,"5",shellscript,content +2857,4009822,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",577,0,"",shellscript,selection_keyboard +2858,4010212,"TERMINAL",0,0,"539310",,terminal_output +2859,4010349,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",576,0,"",shellscript,selection_command +2860,4011367,"TERMINAL",0,0,"641041",,terminal_output +2861,4012397,"TERMINAL",0,0,"75152",,terminal_output +2862,4012830,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",579,0,"",shellscript,selection_mouse +2863,4013432,"TERMINAL",0,0,"86263",,terminal_output +2864,4014373,"TERMINAL",0,0,"97374",,terminal_output +2865,4014741,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",579,1,"6",shellscript,content +2866,4015424,"TERMINAL",0,0,"308485",,terminal_output +2867,4016470,"TERMINAL",0,0,"19596",,terminal_output +2868,4017495,"TERMINAL",0,0,"2506507",,terminal_output +2869,4018557,"TERMINAL",0,0,"31718",,terminal_output +2870,4019680,"TERMINAL",0,0,"42829",,terminal_output +2871,4020738,"TERMINAL",0,0,"539320",,terminal_output +2872,4021678,"TERMINAL",0,0,"642041",,terminal_output 
+2873,4022726,"TERMINAL",0,0,"76263",,terminal_output +2874,4023774,"TERMINAL",0,0,"97374",,terminal_output +2875,4024823,"TERMINAL",0,0,"408485",,terminal_output +2876,4025872,"TERMINAL",0,0,"19596",,terminal_output +2877,4026948,"TERMINAL",0,0,"26:0066:007",,terminal_output +2878,4027963,"TERMINAL",0,0,"31718",,terminal_output +2879,4029099,"TERMINAL",0,0,"42829",,terminal_output +2880,4030124,"TERMINAL",0,0,"539330",,terminal_output +2881,4031166,"TERMINAL",0,0,"643041",,terminal_output +2882,4032181,"TERMINAL",0,0,"75152",,terminal_output +2883,4033195,"TERMINAL",0,0,"86263",,terminal_output +2884,4034241,"TERMINAL",0,0,"97374",,terminal_output +2885,4035277,"TERMINAL",0,0,"508485",,terminal_output +2886,4036370,"TERMINAL",0,0,"19596",,terminal_output +2887,4037394,"TERMINAL",0,0,"2106107",,terminal_output +2888,4038422,"TERMINAL",0,0,"31718",,terminal_output +2889,4039452,"TERMINAL",0,0,"42829",,terminal_output +2890,4040497,"TERMINAL",0,0,"539340",,terminal_output +2891,4041596,"TERMINAL",0,0,"644041",,terminal_output +2892,4042585,"TERMINAL",0,0,"75152",,terminal_output +2893,4043656,"TERMINAL",0,0,"86263",,terminal_output +2894,4044696,"TERMINAL",0,0,"97374",,terminal_output +2895,4045742,"TERMINAL",0,0,"2:009596",,terminal_output +2896,4046794,"TERMINAL",0,0,"2206207",,terminal_output +2897,4047835,"TERMINAL",0,0,"31718",,terminal_output +2898,4048885,"TERMINAL",0,0,"42829",,terminal_output +2899,4049991,"TERMINAL",0,0,"539350",,terminal_output +2900,4051025,"TERMINAL",0,0,"67115laCG 21:50407,04136022to7:36:24507,052041",,terminal_output +2901,4052026,"TERMINAL",0,0,"7552",,terminal_output +2902,4053081,"TERMINAL",0,0,"8663",,terminal_output +2903,4054188,"TERMINAL",0,0,"9774",,terminal_output +2904,4055215,"TERMINAL",0,0,"10885",,terminal_output +2905,4056235,"TERMINAL",0,0,"1996",,terminal_output +2906,4057261,"TERMINAL",0,0,"230307",,terminal_output +2907,4058388,"TERMINAL",0,0,"3118",,terminal_output +2908,4059411,"TERMINAL",0,0,"4229",,terminal_output +2909,4060407,"TERMINAL",0,0,"5337:00",,terminal_output +2910,4061560,"TERMINAL",0,0,"\r66022to R 7:36:34507,0520924interact1:06:34\t 1 hkn073337:01\t 4 hkn[0625-0628]",,terminal_output +2911,4062588,"TERMINAL",0,0,"7552",,terminal_output +2912,4063620,"TERMINAL",0,0,"8663",,terminal_output +2913,4064591,"TERMINAL",0,0,"9774",,terminal_output +2914,4065626,"TERMINAL",0,0,"20885",,terminal_output +2915,4066681,"TERMINAL",0,0,"1996",,terminal_output +2916,4067711,"TERMINAL",0,0,"240407",,terminal_output +2917,4068763,"TERMINAL",0,0,"3229",,terminal_output +2918,4069798,"TERMINAL",0,0,"53310",,terminal_output +2919,4070878,"TERMINAL",0,0,"6441",,terminal_output +2920,4071896,"TERMINAL",0,0,"7552",,terminal_output +2921,4072946,"TERMINAL",0,0,"8663",,terminal_output +2922,4073991,"TERMINAL",0,0,"9774",,terminal_output +2923,4075057,"TERMINAL",0,0,"30885",,terminal_output +2924,4076089,"TERMINAL",0,0,"1996",,terminal_output +2925,4077135,"TERMINAL",0,0,"250507",,terminal_output +2926,4078178,"TERMINAL",0,0,"3118",,terminal_output +2927,4079274,"TERMINAL",0,0,"4229",,terminal_output +2928,4080284,"TERMINAL",0,0,"53320",,terminal_output +2929,4081322,"TERMINAL",0,0,"6441",,terminal_output +2930,4082451,"TERMINAL",0,0,"7552",,terminal_output +2931,4083475,"TERMINAL",0,0,"8663",,terminal_output +2932,4084498,"TERMINAL",0,0,"9774",,terminal_output +2933,4085523,"TERMINAL",0,0,"40885",,terminal_output +2934,4086544,"TERMINAL",0,0,"1996",,terminal_output +2935,4087554,"TERMINAL",0,0,"27:007:007",,terminal_output 
+2936,4088707,"TERMINAL",0,0,"3118",,terminal_output +2937,4089720,"TERMINAL",0,0,"4229",,terminal_output +2938,4090688,"TERMINAL",0,0,"53330",,terminal_output +2939,4091721,"TERMINAL",0,0,"6552",,terminal_output +2940,4092773,"TERMINAL",0,0,"8663",,terminal_output +2941,4093827,"TERMINAL",0,0,"9774",,terminal_output +2942,4094871,"TERMINAL",0,0,"50885",,terminal_output +2943,4095967,"TERMINAL",0,0,"1996",,terminal_output +2944,4096976,"TERMINAL",0,0,"210107",,terminal_output +2945,4098121,"TERMINAL",0,0,"3118",,terminal_output +2946,4099141,"TERMINAL",0,0,"4229",,terminal_output +2947,4100165,"TERMINAL",0,0,"53340",,terminal_output +2948,4101188,"TERMINAL",0,0,"6441",,terminal_output +2949,4102225,"TERMINAL",0,0,"7552",,terminal_output +2950,4103339,"TERMINAL",0,0,"8663",,terminal_output +2951,4104362,"TERMINAL",0,0,"9774",,terminal_output +2952,4105305,"TERMINAL",0,0,"3:00885",,terminal_output +2953,4106416,"TERMINAL",0,0,"1996",,terminal_output +2954,4107435,"TERMINAL",0,0,"220207",,terminal_output +2955,4108482,"TERMINAL",0,0,"3118",,terminal_output +2956,4109588,"TERMINAL",0,0,"4229",,terminal_output +2957,4110621,"TERMINAL",0,0,"53350",,terminal_output +2958,4111644,"TERMINAL",0,0,"6441",,terminal_output +2959,4112656,"TERMINAL",0,0,"7552",,terminal_output +2960,4113715,"TERMINAL",0,0,"8663",,terminal_output +2961,4114770,"TERMINAL",0,0,"9885",,terminal_output +2962,4115809,"TERMINAL",0,0,"11996",,terminal_output +2963,4116857,"TERMINAL",0,0,"230307",,terminal_output +2964,4117986,"TERMINAL",0,0,"3118",,terminal_output +2965,4119006,"TERMINAL",0,0,"4229",,terminal_output +2966,4120030,"TERMINAL",0,0,"5338:00",,terminal_output +2967,4121066,"TERMINAL",0,0,"6441",,terminal_output +2968,4122073,"TERMINAL",0,0,"7552",,terminal_output +2969,4123206,"TERMINAL",0,0,"8663",,terminal_output +2970,4124208,"TERMINAL",0,0,"9774",,terminal_output +2971,4125210,"TERMINAL",0,0,"20885",,terminal_output +2972,4126253,"TERMINAL",0,0,"1996",,terminal_output +2973,4127303,"TERMINAL",0,0,"240407",,terminal_output +2974,4128340,"TERMINAL",0,0,"3118",,terminal_output +2975,4129453,"TERMINAL",0,0,"4229",,terminal_output +2976,4130419,"TERMINAL",0,0,"53310",,terminal_output +2977,4131500,"TERMINAL",0,0,"6441",,terminal_output +2978,4132526,"TERMINAL",0,0,"7552",,terminal_output +2979,4133551,"TERMINAL",0,0,"8663",,terminal_output +2980,4134572,"TERMINAL",0,0,"9774",,terminal_output +2981,4135698,"TERMINAL",0,0,"30885",,terminal_output +2982,4136722,"TERMINAL",0,0,"1996",,terminal_output +2983,4137715,"TERMINAL",0,0,"250507",,terminal_output +2984,4138773,"TERMINAL",0,0,"4229",,terminal_output +2985,4139794,"TERMINAL",0,0,"53320",,terminal_output +2986,4140845,"TERMINAL",0,0,"6441",,terminal_output +2987,4141884,"TERMINAL",0,0,"7552",,terminal_output +2988,4142924,"TERMINAL",0,0,"8663",,terminal_output +2989,4143994,"TERMINAL",0,0,"9774",,terminal_output +2990,4145117,"TERMINAL",0,0,"40885",,terminal_output +2991,4146141,"TERMINAL",0,0,"1996",,terminal_output +2992,4147185,"TERMINAL",0,0,"28:008:007",,terminal_output +2993,4148371,"TERMINAL",0,0,"3118",,terminal_output +2994,4149217,"TERMINAL",0,0,"4229",,terminal_output +2995,4150249,"TERMINAL",0,0,"53330",,terminal_output +2996,4151306,"TERMINAL",0,0,"6441",,terminal_output +2997,4151619,"TERMINAL",0,0,"Every 1.0s: squeue --mehkn1993.localdomain: Fri Jul 4 11:43:46 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3316022 accelerat train_to tum_cte0 R 7:38:04\t 2 hkn[0507,0520]3316924 accelerat interact tum_cte0 R 1:08:04\t 1 hkn07333316923 
accelerat interact tum_cte0 R 1:08:31\t 4 hkn[0625-0628]",,terminal_output
+2998,4151902,"TERMINAL",0,0,"Every 1.0s: squeue --mehkn1993.localdomain: Fri Jul 4 11:43:47 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3316022 accelerat train_to tum_cte0 R 7:38:05\t 2 hkn[0507,0520]3316924 accelerat interact tum_cte0 R 1:08:05\t 1 hkn07333316923 accelerat interact tum_cte0 R 1:08:32\t 4 hkn[0625-0628]",,terminal_output
+2999,4153013,"TERMINAL",0,0,"[rows 2999-3043: per-second watch/squeue redraw fragments, elided]",,terminal_output
+3044,4199886,"train_tokenizer.py",0,0,"",python,tab
+3045,4200170,"TERMINAL",0,0,"53320",,terminal_output
+3046,4201008,"TERMINAL",0,0,"6441",,terminal_output
+3047,4202058,"TERMINAL",0,0,"7552",,terminal_output
+3048,4202428,"train_tokenizer.py",0,0,"",python,tab
+3049,4203038,"TERMINAL",0,0,"[rows 3049-3056: per-second watch/squeue redraw fragments, elided]",,terminal_output
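Rows 2997-2998 preserve two intact refreshes of the watch: job 3316022 (the tokenizer training) running for ~7:38 on hkn[0507,0520], plus two interactive jobs. A sketch of parsing such a snapshot into records; the column layout is squeue's default header (JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON)), with names truncated by squeue itself:

```python
# snapshot reconstructed from rows 2997-2998 above
snapshot = """\
3316022 accelerat train_to tum_cte0 R 7:38:05 2 hkn[0507,0520]
3316924 accelerat interact tum_cte0 R 1:08:05 1 hkn0733
3316923 accelerat interact tum_cte0 R 1:08:32 4 hkn[0625-0628]"""

for line in snapshot.splitlines():
    jobid, partition, name, user, state, elapsed, nodes, nodelist = line.split()
    print(f"{jobid}: {name} {state} for {elapsed} on {nodelist}")
```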
+3057,4211476,"TERMINAL",0,0,"6441",,terminal_output +3058,4212607,"TERMINAL",0,0,"7552",,terminal_output +3059,4213511,"TERMINAL",0,0,"8663",,terminal_output +3060,4214548,"TERMINAL",0,0,"9774",,terminal_output +3061,4215144,"train_tokenizer.py",3824,0,"",python,selection_mouse +3062,4215582,"TERMINAL",0,0,"50885",,terminal_output +3063,4216699,"TERMINAL",0,0,"1996",,terminal_output +3064,4217620,"TERMINAL",0,0,"210107",,terminal_output +3065,4217933,"train_tokenizer.py",5425,0,"",python,selection_command +3066,4218653,"TERMINAL",0,0,"3118",,terminal_output +3067,4219698,"TERMINAL",0,0,"4229",,terminal_output +3068,4219745,"train_tokenizer.py",5471,0,"",python,selection_mouse +3069,4220490,"train_tokenizer.py",5456,0,"",python,selection_mouse +3070,4220742,"TERMINAL",0,0,"54441",,terminal_output +3071,4221090,"train_tokenizer.py",5479,0,"",python,selection_mouse +3072,4221639,"train_tokenizer.py",5571,0,"",python,selection_mouse +3073,4221763,"TERMINAL",0,0,"7552",,terminal_output +3074,4222189,"train_tokenizer.py",5490,0,"",python,selection_mouse +3075,4222810,"TERMINAL",0,0,"8663",,terminal_output +3076,4223868,"TERMINAL",0,0,"9774",,terminal_output +3077,4224875,"TERMINAL",0,0,"5:00885",,terminal_output +3078,4226018,"TERMINAL",0,0,"1996",,terminal_output +3079,4226980,"TERMINAL",0,0,"220207",,terminal_output +3080,4228064,"TERMINAL",0,0,"3118",,terminal_output +3081,4229040,"TERMINAL",0,0,"4229",,terminal_output +3082,4230113,"TERMINAL",0,0,"53350",,terminal_output +3083,4231135,"TERMINAL",0,0,"6441",,terminal_output +3084,4232164,"TERMINAL",0,0,"7552",,terminal_output +3085,4233207,"TERMINAL",0,0,"8663",,terminal_output +3086,4234278,"TERMINAL",0,0,"9774",,terminal_output +3087,4235289,"TERMINAL",0,0,"10885",,terminal_output +3088,4236337,"TERMINAL",0,0,"1996",,terminal_output +3089,4237390,"TERMINAL",0,0,"230307",,terminal_output +3090,4238436,"TERMINAL",0,0,"3118",,terminal_output +3091,4239538,"TERMINAL",0,0,"4229",,terminal_output +3092,4239665,"train_tokenizer.py",5326,0,"",python,selection_mouse +3093,4240181,".venv/lib/python3.10/site-packages/optax/__init__.py",0,0,"# Copyright 2019 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the ""License"");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an ""AS IS"" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n""""""Optax: composable gradient processing and optimization, in JAX.""""""\n\n# pylint: disable=wrong-import-position\n# pylint: disable=g-importing-member\n\nfrom optax import assignment\nfrom optax import contrib\nfrom optax import losses\nfrom optax import monte_carlo\nfrom optax import perturbations\nfrom optax import projections\nfrom optax import schedules\nfrom optax import second_order\nfrom optax import transforms\nfrom optax import tree_utils\nfrom optax._src.alias import adabelief\nfrom optax._src.alias import adadelta\nfrom optax._src.alias import adafactor\nfrom optax._src.alias import adagrad\nfrom optax._src.alias import adam\nfrom optax._src.alias import adamax\nfrom optax._src.alias import adamaxw\nfrom optax._src.alias import adamw\nfrom optax._src.alias import adan\nfrom optax._src.alias import amsgrad\nfrom optax._src.alias import fromage\nfrom optax._src.alias import lamb\nfrom optax._src.alias import lars\nfrom optax._src.alias import lbfgs\nfrom optax._src.alias import lion\nfrom optax._src.alias import MaskOrFn\nfrom optax._src.alias import nadam\nfrom optax._src.alias import nadamw\nfrom optax._src.alias import noisy_sgd\nfrom optax._src.alias import novograd\nfrom optax._src.alias import optimistic_adam\nfrom optax._src.alias import optimistic_gradient_descent\nfrom optax._src.alias import polyak_sgd\nfrom optax._src.alias import radam\nfrom optax._src.alias import rmsprop\nfrom optax._src.alias import rprop\nfrom optax._src.alias import sgd\nfrom optax._src.alias import sign_sgd\nfrom optax._src.alias import sm3\nfrom optax._src.alias import yogi\nfrom optax._src.base import EmptyState\nfrom optax._src.base import GradientTransformation\nfrom optax._src.base import GradientTransformationExtraArgs\nfrom optax._src.base import identity\nfrom optax._src.base import OptState\nfrom optax._src.base import Params\nfrom optax._src.base import ScalarOrSchedule\nfrom optax._src.base import Schedule\nfrom optax._src.base import set_to_zero\nfrom optax._src.base import stateless\nfrom optax._src.base import stateless_with_tree_map\nfrom optax._src.base import TransformInitFn\nfrom optax._src.base import TransformUpdateExtraArgsFn\nfrom optax._src.base import TransformUpdateFn\nfrom optax._src.base import Updates\nfrom optax._src.base import with_extra_args_support\nfrom optax._src.clipping import adaptive_grad_clip\nfrom optax._src.clipping import AdaptiveGradClipState\nfrom optax._src.clipping import clip\nfrom optax._src.clipping import clip_by_block_rms\nfrom optax._src.clipping import clip_by_global_norm\nfrom optax._src.clipping import ClipByGlobalNormState\nfrom optax._src.clipping import ClipState\nfrom optax._src.clipping import per_example_global_norm_clip\nfrom optax._src.clipping import per_example_layer_norm_clip\nfrom optax._src.combine import chain\nfrom optax._src.combine import multi_transform\nfrom 
optax._src.combine import MultiTransformState\nfrom optax._src.combine import named_chain\nfrom optax._src.constrain import keep_params_nonnegative\nfrom optax._src.constrain import NonNegativeParamsState\nfrom optax._src.constrain import zero_nans\nfrom optax._src.constrain import ZeroNansState\nfrom optax._src.factorized import FactoredState\nfrom optax._src.factorized import scale_by_factored_rms\nfrom optax._src.linear_algebra import global_norm\nfrom optax._src.linear_algebra import matrix_inverse_pth_root\nfrom optax._src.linear_algebra import power_iteration\nfrom optax._src.linesearch import scale_by_backtracking_linesearch\nfrom optax._src.linesearch import scale_by_zoom_linesearch\nfrom optax._src.linesearch import ScaleByBacktrackingLinesearchState\nfrom optax._src.linesearch import ScaleByZoomLinesearchState\nfrom optax._src.linesearch import ZoomLinesearchInfo\nfrom optax._src.lookahead import lookahead\nfrom optax._src.lookahead import LookaheadParams\nfrom optax._src.lookahead import LookaheadState\nfrom optax._src.numerics import safe_increment\nfrom optax._src.numerics import safe_int32_increment\nfrom optax._src.numerics import safe_norm\nfrom optax._src.numerics import safe_root_mean_squares\nfrom optax._src.transform import add_decayed_weights\nfrom optax._src.transform import add_noise\nfrom optax._src.transform import AddDecayedWeightsState\nfrom optax._src.transform import AddNoiseState\nfrom optax._src.transform import apply_every\nfrom optax._src.transform import ApplyEvery\nfrom optax._src.transform import centralize\nfrom optax._src.transform import ema\nfrom optax._src.transform import EmaState\nfrom optax._src.transform import normalize_by_update_norm\nfrom optax._src.transform import scale\nfrom optax._src.transform import scale_by_adadelta\nfrom optax._src.transform import scale_by_adam\nfrom optax._src.transform import scale_by_adamax\nfrom optax._src.transform import scale_by_adan\nfrom optax._src.transform import scale_by_amsgrad\nfrom optax._src.transform import scale_by_belief\nfrom optax._src.transform import scale_by_distance_over_gradients\nfrom optax._src.transform import scale_by_lbfgs\nfrom optax._src.transform import scale_by_learning_rate\nfrom optax._src.transform import scale_by_lion\nfrom optax._src.transform import scale_by_novograd\nfrom optax._src.transform import scale_by_optimistic_gradient\nfrom optax._src.transform import scale_by_param_block_norm\nfrom optax._src.transform import scale_by_param_block_rms\nfrom optax._src.transform import scale_by_polyak\nfrom optax._src.transform import scale_by_radam\nfrom optax._src.transform import scale_by_rms\nfrom optax._src.transform import scale_by_rprop\nfrom optax._src.transform import scale_by_rss\nfrom optax._src.transform import scale_by_schedule\nfrom optax._src.transform import scale_by_sign\nfrom optax._src.transform import scale_by_sm3\nfrom optax._src.transform import scale_by_stddev\nfrom optax._src.transform import scale_by_trust_ratio\nfrom optax._src.transform import scale_by_yogi\nfrom optax._src.transform import ScaleByAdaDeltaState\nfrom optax._src.transform import ScaleByAdamState\nfrom optax._src.transform import ScaleByAdanState\nfrom optax._src.transform import ScaleByAmsgradState\nfrom optax._src.transform import ScaleByBeliefState\nfrom optax._src.transform import ScaleByLBFGSState\nfrom optax._src.transform import ScaleByLionState\nfrom optax._src.transform import ScaleByNovogradState\nfrom optax._src.transform import ScaleByRmsState\nfrom optax._src.transform import 
ScaleByRpropState\nfrom optax._src.transform import ScaleByRssState\nfrom optax._src.transform import ScaleByRStdDevState\nfrom optax._src.transform import ScaleByScheduleState\nfrom optax._src.transform import ScaleBySM3State\nfrom optax._src.transform import ScaleByTrustRatioState\nfrom optax._src.transform import ScaleState\nfrom optax._src.transform import trace\nfrom optax._src.transform import TraceState\nfrom optax._src.update import apply_updates\nfrom optax._src.update import incremental_update\nfrom optax._src.update import periodic_update\nfrom optax._src.utils import multi_normal\nfrom optax._src.utils import scale_gradient\nfrom optax._src.utils import value_and_grad_from_state\nfrom optax._src.wrappers import apply_if_finite\nfrom optax._src.wrappers import ApplyIfFiniteState\nfrom optax._src.wrappers import conditionally_mask\nfrom optax._src.wrappers import conditionally_transform\nfrom optax._src.wrappers import ConditionallyMaskState\nfrom optax._src.wrappers import ConditionallyTransformState\nfrom optax._src.wrappers import flatten\nfrom optax._src.wrappers import masked\nfrom optax._src.wrappers import MaskedNode\nfrom optax._src.wrappers import MaskedState\nfrom optax._src.wrappers import maybe_update\nfrom optax._src.wrappers import MaybeUpdateState\nfrom optax._src.wrappers import MultiSteps\nfrom optax._src.wrappers import MultiStepsState\nfrom optax._src.wrappers import ShouldSkipUpdateFunction\nfrom optax._src.wrappers import skip_large_updates\nfrom optax._src.wrappers import skip_not_finite\n\n\n# TODO(mtthss): remove tree_utils aliases after updates.\ntree_map_params = tree_utils.tree_map_params\nbias_correction = tree_utils.tree_bias_correction\nupdate_infinity_moment = tree_utils.tree_update_infinity_moment\nupdate_moment = tree_utils.tree_update_moment\nupdate_moment_per_elem_norm = tree_utils.tree_update_moment_per_elem_norm\n\n# TODO(mtthss): remove schedules alises from flat namespaces after user updates.\nconstant_schedule = schedules.constant_schedule\ncosine_decay_schedule = schedules.cosine_decay_schedule\ncosine_onecycle_schedule = schedules.cosine_onecycle_schedule\nexponential_decay = schedules.exponential_decay\ninject_hyperparams = schedules.inject_hyperparams\nInjectHyperparamsState = schedules.InjectHyperparamsState\njoin_schedules = schedules.join_schedules\nlinear_onecycle_schedule = schedules.linear_onecycle_schedule\nlinear_schedule = schedules.linear_schedule\npiecewise_constant_schedule = schedules.piecewise_constant_schedule\npiecewise_interpolate_schedule = schedules.piecewise_interpolate_schedule\npolynomial_schedule = schedules.polynomial_schedule\nsgdr_schedule = schedules.sgdr_schedule\nwarmup_constant_schedule = schedules.warmup_constant_schedule\nwarmup_cosine_decay_schedule = schedules.warmup_cosine_decay_schedule\nwarmup_exponential_decay_schedule = schedules.warmup_exponential_decay_schedule\ninject_stateful_hyperparams = schedules.inject_stateful_hyperparams\nInjectStatefulHyperparamsState = schedules.InjectStatefulHyperparamsState\nWrappedSchedule = schedules.WrappedSchedule\n\n# TODO(mtthss): remove loss aliases from flat namespace once users have updated.\nconvex_kl_divergence = losses.convex_kl_divergence\ncosine_distance = losses.cosine_distance\ncosine_similarity = losses.cosine_similarity\nctc_loss = losses.ctc_loss\nctc_loss_with_forward_probs = losses.ctc_loss_with_forward_probs\nhinge_loss = losses.hinge_loss\nhuber_loss = losses.huber_loss\nkl_divergence = losses.kl_divergence\nl2_loss = losses.l2_loss\nlog_cosh = 
losses.log_cosh\nntxent = losses.ntxent\nsigmoid_binary_cross_entropy = losses.sigmoid_binary_cross_entropy\nsmooth_labels = losses.smooth_labels\nsafe_softmax_cross_entropy = losses.safe_softmax_cross_entropy\nsoftmax_cross_entropy = losses.softmax_cross_entropy\nsoftmax_cross_entropy_with_integer_labels = (\n losses.softmax_cross_entropy_with_integer_labels\n)\nsquared_error = losses.squared_error\nsigmoid_focal_loss = losses.sigmoid_focal_loss\n\n# pylint: disable=g-import-not-at-top\n# TODO(mtthss): remove contrib aliases from flat namespace once users updated.\n# Deprecated modules\nfrom optax.contrib import differentially_private_aggregate as _deprecated_differentially_private_aggregate\nfrom optax.contrib import DifferentiallyPrivateAggregateState as _deprecated_DifferentiallyPrivateAggregateState\nfrom optax.contrib import dpsgd as _deprecated_dpsgd\n\n_deprecations = {\n # Added Apr 2024\n ""differentially_private_aggregate"": (\n (\n ""optax.differentially_private_aggregate is deprecated: use""\n "" optax.contrib.differentially_private_aggregate (optax v0.1.8 or""\n "" newer).""\n ),\n _deprecated_differentially_private_aggregate,\n ),\n ""DifferentiallyPrivateAggregateState"": (\n (\n ""optax.DifferentiallyPrivateAggregateState is deprecated: use""\n "" optax.contrib.DifferentiallyPrivateAggregateState (optax v0.1.8""\n "" or newer).""\n ),\n _deprecated_DifferentiallyPrivateAggregateState,\n ),\n ""dpsgd"": (\n (\n ""optax.dpsgd is deprecated: use optax.contrib.dpsgd (optax v0.1.8""\n "" or newer).""\n ),\n _deprecated_dpsgd,\n ),\n}\n# pylint: disable=g-bad-import-order\nimport typing as _typing\n\nif _typing.TYPE_CHECKING:\n # pylint: disable=reimported\n from optax.contrib import differentially_private_aggregate\n from optax.contrib import DifferentiallyPrivateAggregateState\n from optax.contrib import dpsgd\n # pylint: enable=reimported\n\nelse:\n from optax._src.deprecations import deprecation_getattr as _deprecation_getattr\n\n __getattr__ = _deprecation_getattr(__name__, _deprecations)\n del _deprecation_getattr\ndel _typing\n# pylint: enable=g-bad-import-order\n# pylint: enable=g-import-not-at-top\n# pylint: enable=g-importing-member\n\n\n__version__ = ""0.2.4""\n\n__all__ = (\n ""adabelief"",\n ""adadelta"",\n ""adafactor"",\n ""adagrad"",\n ""adam"",\n ""adamax"",\n ""adamaxw"",\n ""adamw"",\n ""adan"",\n ""adaptive_grad_clip"",\n ""AdaptiveGradClipState"",\n ""add_decayed_weights"",\n ""add_noise"",\n ""AddDecayedWeightsState"",\n ""AddNoiseState"",\n ""amsgrad"",\n ""apply_every"",\n ""apply_if_finite"",\n ""apply_updates"",\n ""ApplyEvery"",\n ""ApplyIfFiniteState"",\n ""assignment"",\n ""centralize"",\n ""chain"",\n ""clip_by_block_rms"",\n ""clip_by_global_norm"",\n ""clip"",\n ""ClipByGlobalNormState"",\n ""ClipState"",\n ""conditionally_mask"",\n ""ConditionallyMaskState"",\n ""conditionally_transform"",\n ""ConditionallyTransformState"",\n ""constant_schedule"",\n ""ctc_loss"",\n ""ctc_loss_with_forward_probs"",\n ""convex_kl_divergence"",\n ""cosine_decay_schedule"",\n ""cosine_distance"",\n ""cosine_onecycle_schedule"",\n ""cosine_similarity"",\n ""differentially_private_aggregate"",\n ""DifferentiallyPrivateAggregateState"",\n ""dpsgd"",\n ""ema"",\n ""EmaState"",\n ""EmptyState"",\n ""exponential_decay"",\n ""FactoredState"",\n ""flatten"",\n ""fromage"",\n ""global_norm"",\n ""GradientTransformation"",\n ""GradientTransformationExtraArgs"",\n ""hinge_loss"",\n ""huber_loss"",\n ""identity"",\n ""incremental_update"",\n ""inject_hyperparams"",\n 
""InjectHyperparamsState"",\n ""join_schedules"",\n ""keep_params_nonnegative"",\n ""kl_divergence"",\n ""l2_loss"",\n ""lamb"",\n ""lars"",\n ""lbfgs"",\n ""lion"",\n ""linear_onecycle_schedule"",\n ""linear_schedule"",\n ""log_cosh"",\n ""lookahead"",\n ""LookaheadParams"",\n ""LookaheadState"",\n ""masked"",\n ""MaskOrFn"",\n ""MaskedState"",\n ""matrix_inverse_pth_root"",\n ""maybe_update"",\n ""MaybeUpdateState"",\n ""multi_normal"",\n ""multi_transform"",\n ""MultiSteps"",\n ""MultiStepsState"",\n ""MultiTransformState"",\n ""nadam"",\n ""nadamw"",\n ""noisy_sgd"",\n ""novograd"",\n ""NonNegativeParamsState"",\n ""ntxent"",\n ""OptState"",\n ""Params"",\n ""periodic_update"",\n ""per_example_global_norm_clip"",\n ""per_example_layer_norm_clip"",\n ""piecewise_constant_schedule"",\n ""piecewise_interpolate_schedule"",\n ""polynomial_schedule"",\n ""power_iteration"",\n ""polyak_sgd"",\n ""radam"",\n ""rmsprop"",\n ""rprop"",\n ""safe_increment"",\n ""safe_int32_increment"",\n ""safe_norm"",\n ""safe_root_mean_squares"",\n ""ScalarOrSchedule"",\n ""scale_by_adadelta"",\n ""scale_by_adam"",\n ""scale_by_adamax"",\n ""scale_by_adan"",\n ""scale_by_amsgrad"",\n ""scale_by_backtracking_linesearch"",\n ""scale_by_belief"",\n ""scale_by_lbfgs"",\n ""scale_by_lion"",\n ""scale_by_factored_rms"",\n ""scale_by_novograd"",\n ""scale_by_param_block_norm"",\n ""scale_by_param_block_rms"",\n ""scale_by_polyak"",\n ""scale_by_radam"",\n ""scale_by_rms"",\n ""scale_by_rprop"",\n ""scale_by_rss"",\n ""scale_by_schedule"",\n ""scale_by_sign"",\n ""scale_by_sm3"",\n ""scale_by_stddev"",\n ""scale_by_trust_ratio"",\n ""scale_by_yogi"",\n ""scale_by_zoom_linesearch"",\n ""scale_gradient"",\n ""scale"",\n ""ScaleByAdaDeltaState"",\n ""ScaleByAdamState"",\n ""ScaleByAdanState"",\n ""ScaleByAmsgradState"",\n ""ScaleByBacktrackingLinesearchState"",\n ""ScaleByBeliefState"",\n ""ScaleByLBFGSState"",\n ""ScaleByLionState"",\n ""ScaleByNovogradState"",\n ""ScaleByRmsState"",\n ""ScaleByRpropState"",\n ""ScaleByRssState"",\n ""ScaleByRStdDevState"",\n ""ScaleByScheduleState"",\n ""ScaleBySM3State"",\n ""ScaleByTrustRatioState"",\n ""ScaleByZoomLinesearchState"",\n ""ScaleState"",\n ""Schedule"",\n ""set_to_zero"",\n ""sgd"",\n ""sgdr_schedule"",\n ""ShouldSkipUpdateFunction"",\n ""sigmoid_binary_cross_entropy"",\n ""sign_sgd"",\n ""skip_large_updates"",\n ""skip_not_finite"",\n ""sm3"",\n ""smooth_labels"",\n ""softmax_cross_entropy"",\n ""softmax_cross_entropy_with_integer_labels"",\n ""stateless"",\n ""stateless_with_tree_map"",\n ""trace"",\n ""TraceState"",\n ""TransformInitFn"",\n ""TransformUpdateFn"",\n ""TransformUpdateExtraArgsFn"",\n ""Updates"",\n ""value_and_grad_from_state"",\n ""warmup_cosine_decay_schedule"",\n ""warmup_exponential_decay_schedule"",\n ""yogi"",\n ""zero_nans"",\n ""ZeroNansState"",\n ""ZoomLinesearchInfo"",\n)\n\n# _________________________________________\n# / Please don't use symbols in `_src` they \\n# \ are not part of the Optax public API. /\n# -----------------------------------------\n# \ ^__^\n# \ (oo)\_______\n# (__)\ )\/\\n# ||----w |\n# || ||\n#\n",python,tab +3094,4240739,"TERMINAL",0,0,"53310:00",,terminal_output +3095,4241587,"TERMINAL",0,0,"6441",,terminal_output +3096,4241891,".venv/lib/python3.10/site-packages/optax/__init__.py",9735,0,"",python,selection_mouse +3097,4242281,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",0,0,"# Copyright 2019 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the ""License"");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an ""AS IS"" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n""""""Optax Schedules.\n\nSchedules may be used to anneal the value of a hyper-parameter over time; for\ninstance, they may be used to anneal the learning rate used to update an agent's\nparameters or the exploration factor used to select actions.\n""""""\n\nfrom typing import Iterable, Optional, Union\n\nfrom absl import logging\nimport chex\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom optax._src import base\nfrom optax.schedules import _join\n\n\ndef constant_schedule(value: Union[float, int]) -> base.Schedule:\n """"""Constructs a constant schedule.\n\n Args:\n value: value to be held constant throughout.\n\n Returns:\n schedule\n A function that maps step counts to values.\n\n Examples:\n >>> schedule_fn = optax.constant_schedule(5)\n >>> schedule_fn(0)\n 5\n >>> schedule_fn(100)\n 5\n """"""\n return lambda count: value\n\n\ndef polynomial_schedule(\n init_value: chex.Scalar,\n end_value: chex.Scalar,\n power: chex.Scalar,\n transition_steps: int,\n transition_begin: int = 0,\n) -> base.Schedule:\n r""""""Constructs a schedule with polynomial transition from init to end value.\n\n This function transitions the learning rate from an initial value\n (``init_value``) to a final value (``end_value``) over a specified number of\n steps (``transition_steps``) with a polynomial function of power ``power``.\n The transition can optionally begin after a specified number of initial steps\n (``transition_begin``).\n\n More precisely, the learning rate at iteration :math:`t` is given by:\n\n .. math::\n \begin{cases}\n I, & \text{if } t < B \\\n (I - E) \left( 1 - \frac{t - B}{T} \right)^{P} + E, &\n \text{if } B \leq t < B + T \\\n E, & \text{if } t \geq B + T\n \end{cases}\n\n where :math:`I` is the initial value, :math:`E` is the end value,\n :math:`B` is the transition begin, :math:`T` is the transition steps,\n and :math:`P` is the power used for the polynomial transition.\n\n Args:\n init_value: initial value for the scalar to be annealed.\n end_value: end value of the scalar to be annealed.\n power: the power of the polynomial used to transition from init to end.\n transition_steps: number of steps over which annealing takes place.\n The scalar starts changing at ``transition_begin`` steps and completes\n the transition by ``transition_begin + transition_steps`` steps.\n If ``transition_steps <= 0``, then the entire annealing process is\n disabled and the value is held fixed at ``init_value``.\n transition_begin: must be positive. After how many steps to start annealing\n (before this many steps the scalar value is held fixed at ``init_value``).\n\n Returns:\n schedule\n A function that maps step counts to values.\n\n Examples:\n >>> schedule_fn = optax.polynomial_schedule(\n ... 
init_value=1.0, end_value=0.01, transition_steps=100, power=2)\n >>> schedule_fn(0) # learning rate on the first iteration\n Array(1., dtype=float32, weak_type=True)\n >>> schedule_fn(100) # learning rate on the last iteration\n Array(0.01, dtype=float32, weak_type=True)\n\n The following example uses a non-zero ``transition_begin``. In this case\n the learning rate is kept constant for the first ``transition_begin``\n iterations:\n\n >>> schedule_fn = optax.polynomial_schedule(\n ... init_value=1.0,\n ... end_value=0.01,\n ... transition_steps=100,\n ... transition_begin=5,\n ... power=2,\n ... )\n >>> counts = [0, 5, 6, 104, 105, 110]\n >>> print(\n ... *[f'count:{i} value:{schedule_fn(i):.4f}' for i in counts],\n ... sep='\n')\n count:0 value:1.0000\n count:5 value:1.0000\n count:6 value:0.9803\n count:104 value:0.0101\n count:105 value:0.0100\n count:110 value:0.0100\n """"""\n if transition_steps <= 0:\n logging.info(\n 'A polynomial schedule was set with a non-positive `transition_steps` '\n 'value; this results in a constant schedule with value `init_value`.'\n )\n return lambda count: init_value\n\n if transition_begin < 0:\n logging.info(\n 'A polynomial schedule was set with a negative `transition_begin` '\n 'value; this will result in `transition_begin` falling back to `0`.'\n )\n transition_begin = 0\n\n def schedule(count):\n count = jnp.clip(count - transition_begin, 0, transition_steps)\n frac = 1 - count / transition_steps\n return (init_value - end_value) * (frac**power) + end_value\n\n return schedule\n\n\ndef linear_schedule(\n init_value: chex.Scalar,\n end_value: chex.Scalar,\n transition_steps: int,\n transition_begin: int = 0,\n) -> base.Schedule:\n r""""""Schedule with linear transition from ``init_value`` to ``end_value``.\n\n More precisely, the learning rate at iteration :math:`t` is given by:\n\n .. math::\n \begin{cases}\n I, & \text{if } t < B \\\n I + \frac{t - B}{T} (E - I), & \text{if } B \leq t < B + T \\\n E, & \text{if } t \geq B + T\n \end{cases}\n\n where :math:`I` is the initial value, :math:`E` is the end value,\n :math:`B` is the transition begin, and :math:`T` is the transition steps.\n\n This schedule is equivalent to :func:`optax.polynomial_schedule` with\n ``power=1``.\n\n Args:\n init_value: initial value for the scalar to be annealed.\n end_value: end value of the scalar to be annealed.\n transition_steps: number of steps over which annealing takes place. The\n scalar starts changing at ``transition_begin`` steps and completes the\n transition by ``transition_begin + transition_steps`` steps. If\n ``transition_steps <= 0``, then the entire annealing process is disabled\n and the value is held fixed at ``init_value``.\n transition_begin: must be positive. After how many steps to start annealing\n (before this many steps the scalar value is held fixed at ``init_value``).\n\n Returns:\n schedule\n A function that maps step counts to values.\n\n Examples:\n >>> schedule_fn = optax.linear_schedule(\n ... 
init_value=1.0, end_value=0.01, transition_steps=100)\n >>> schedule_fn(0) # learning rate on the first iteration\n Array(1., dtype=float32, weak_type=True)\n >>> schedule_fn(100) # learning rate on the last iteration\n Array(0.01, dtype=float32, weak_type=True)\n """"""\n return polynomial_schedule(\n init_value=init_value,\n end_value=end_value,\n power=1,\n transition_steps=transition_steps,\n transition_begin=transition_begin,\n )\n\n\ndef piecewise_constant_schedule(\n init_value: float, boundaries_and_scales: Optional[dict[int, float]] = None\n) -> base.Schedule:\n """"""Returns a function which implements a piecewise constant schedule.\n\n Args:\n init_value: An initial value ``init_v``.\n boundaries_and_scales: A map from boundaries ``b_i`` to non-negative scaling\n factors ``f_i``. For any step count `s`, the schedule returns ``init_v``\n scaled by the product of all factors ``f_i`` such that ``b_i < s``.\n\n Returns:\n schedule\n A function that maps step counts to values.\n """"""\n if boundaries_and_scales is not None:\n all_positive = all(scale >= 0.0 for scale in boundaries_and_scales.values())\n if not all_positive:\n raise ValueError(\n '`piecewise_constant_schedule` expects non-negative scale factors'\n )\n\n def schedule(count):\n v = init_value\n if boundaries_and_scales is not None:\n for threshold, scale in sorted(boundaries_and_scales.items()):\n indicator = jnp.maximum(0.0, jnp.sign(threshold - count))\n v = v * indicator + (1 - indicator) * scale * v\n return v\n\n return schedule\n\n\ndef exponential_decay(\n init_value: float,\n transition_steps: int,\n decay_rate: float,\n transition_begin: int = 0,\n staircase: bool = False,\n end_value: Optional[float] = None,\n) -> base.Schedule:\n """"""Constructs a schedule with either continuous or discrete exponential decay.\n\n This function applies an exponential decay function to a provided initial\n value. When ``count >= transition_begin`` the function returns the decayed\n value as:\n\n .. code-block::\n\n rate_factor = ((count - transition_begin) / transition_steps)\n decayed_value = init_value * (decay_rate ** rate_factor)\n\n If the argument ``staircase`` is ``True`` then ``count / transition_steps`` is\n an integer division and the decayed value follows a staircase function.\n\n Args:\n init_value: the initial learning rate.\n transition_steps: must be positive. See the decay computation above.\n decay_rate: must not be zero. The decay rate.\n transition_begin: must be positive. After how many steps to start annealing\n (before this many steps the scalar value is held fixed at `init_value`).\n staircase: if ``True``, decay the values at discrete intervals.\n end_value: the value at which the exponential decay stops. When ``decay_rate\n < 1``, ``end_value`` is treated as a lower bound, otherwise as an upper\n bound. 
Has no effect when ``decay_rate = 0``.\n\n Returns:\n schedule\n A function that maps step counts to values.\n """"""\n\n if transition_steps <= 0:\n logging.info(\n 'An exponential schedule was set with a non-positive `transition_steps`'\n ' value; this will result in a constant schedule with value '\n '`init_value`.'\n )\n return lambda count: init_value\n\n if decay_rate == 0:\n logging.info(\n 'An exponential schedule was set with a zero `decay_rate` value; '\n 'this will result in a constant schedule with value `init_value`.'\n )\n return lambda count: init_value\n\n if transition_begin < 0:\n logging.info(\n 'An exponential schedule was set with a negative `transition_begin` '\n 'value; this will result in `transition_begin` falling back to `0`.'\n )\n transition_begin = 0\n\n if end_value is not None:\n clip_fn = jnp.maximum if decay_rate < 1.0 else jnp.minimum\n\n def schedule(count):\n decreased_count = count - transition_begin\n p = decreased_count / transition_steps\n if staircase:\n p = jnp.floor(p)\n decayed_value = jnp.where(\n decreased_count <= 0, init_value, init_value * jnp.power(decay_rate, p)\n )\n if end_value is not None:\n decayed_value = clip_fn(decayed_value, end_value) # pylint: disable=undefined-variable\n return decayed_value\n\n return schedule\n\n\ndef cosine_decay_schedule(\n init_value: float,\n decay_steps: int,\n alpha: float = 0.0,\n exponent: float = 1.0,\n) -> base.Schedule:\n r""""""Returns a function which implements cosine learning rate decay.\n\n This schedule smoothly decreases the learning rate over a specified number of\n steps (``decay_steps``). The decay follows a cosine function, with an optional\n exponent to modify the decay curve. A minimum value (``alpha``) ensures the\n learning rate does not drop entirely to zero.\n\n More precisely, the learning rate at iteration :math:`t` is given by:\n\n .. math::\n \begin{cases}\n \frac{I (1 - \alpha)}{2}(1+\cos(\pi\,\frac{t}{T})^p) + I \alpha\, \n & \text{if } t \leq T \\\n I \alpha, & \text{if } t > T \n \end{cases}\n\n where :math:`T` is the number of decay steps (``decay_steps``), :math:`p` is\n the ``exponent`` and :math:`I` is the initial value (``init_value``).\n\n Args:\n init_value: An initial value for the learning rate.\n decay_steps: Positive integer - the number of steps for which to apply\n the decay for.\n alpha: The minimum value of the multiplier used to adjust the\n learning rate. Defaults to 0.0.\n exponent: The default decay is ``0.5 * (1 + cos(pi * t/T))``, where \n ``t`` is the current timestep and ``T`` is the ``decay_steps``. 
The\n exponent modifies this to be ``(0.5 * (1 + cos(pi * t/T))) ** exponent``.\n Defaults to 1.0.\n\n Returns:\n schedule\n A function that maps step counts to values.\n\n References:\n Loshchilov et al., `SGDR: Stochastic Gradient Descent with Warm Restarts\n `_, 2017\n """"""\n if not decay_steps > 0:\n raise ValueError(\n 'The cosine_decay_schedule requires positive decay_steps, got'\n f' {decay_steps=}.'\n )\n\n def schedule(count):\n # Avoid int -> int32 overflow in jitted code.\n nonlocal decay_steps\n decay_steps, count = jax.tree.map(\n lambda x: float(x) if isinstance(x, int) else x, (decay_steps, count)\n )\n\n count = jnp.minimum(count, decay_steps)\n cosine_decay = 0.5 * (1 + jnp.cos(jnp.pi * count / decay_steps))\n decayed = (1 - alpha) * cosine_decay**exponent + alpha\n return init_value * decayed\n\n return schedule\n\n\ndef _linear_interpolate(start: float, end: float, pct: float):\n return (end - start) * pct + start\n\n\ndef _cosine_interpolate(start: float, end: float, pct: float):\n return end + (start - end) / 2.0 * (jnp.cos(jnp.pi * pct) + 1)\n\n\ndef piecewise_interpolate_schedule(\n interpolate_type: str,\n init_value: float,\n boundaries_and_scales: Optional[dict[int, float]] = None,\n) -> base.Schedule:\n """"""Returns a function which implements a piecewise interpolated schedule.\n\n Args:\n interpolate_type: 'linear' or 'cosine', specifying the interpolation\n strategy.\n init_value: An initial value ``init_v``.\n boundaries_and_scales: A map from boundaries ``b_i`` to non-negative scaling\n factors ``f_i``. At boundary step ``b_i``, the schedule returns ``init_v``\n scaled by the product of all factors ``f_j`` such that ``b_j <= b_i``. The\n values in between each boundary will be interpolated as per ``type``.\n\n Returns:\n schedule\n A function that maps step counts to values.\n """"""\n if interpolate_type == 'linear':\n interpolate_fn = _linear_interpolate\n elif interpolate_type == 'cosine':\n interpolate_fn = _cosine_interpolate\n else:\n raise ValueError(""`interpolate_type` must be either 'cos' or 'linear'"")\n\n if boundaries_and_scales:\n boundaries, scales = zip(*sorted(boundaries_and_scales.items()))\n if not all(scale >= 0.0 for scale in scales):\n raise ValueError(\n '`piecewise_interpolate_schedule` expects non-negative scale factors'\n )\n else:\n boundaries, scales = (), ()\n\n bounds = np.stack((0,) + boundaries)\n values = np.cumprod(np.stack((init_value,) + scales))\n interval_sizes = bounds[1:] - bounds[:-1]\n\n def schedule(count):\n indicator = (bounds[:-1] <= count) & (count < bounds[1:])\n pct = (count - bounds[:-1]) / interval_sizes\n interp_vals = interpolate_fn(values[:-1], values[1:], pct)\n return indicator.dot(interp_vals) + (bounds[-1] <= count) * values[-1]\n\n return schedule\n\n\ndef linear_onecycle_schedule(\n transition_steps: int,\n peak_value: float,\n pct_start: float = 0.3,\n pct_final: float = 0.85,\n div_factor: float = 25.0,\n final_div_factor: float = 1e4,\n) -> base.Schedule:\n r""""""Returns a learning rate with three linear phases.\n\n * *Phase 1*, from iteration 0 to ``pct_start * transition_steps``. The\n learning rate increases linearly from ``peak_value / div_factor`` to\n ``peak_value``.\n * *Phase 2*, from iteration ``pct_start * transition_steps`` to\n ``pct_final * transition_steps``. 
The learning rate decreases linearly from\n ``peak_value`` back to the initial ``peak_value/div_factor``.\n * *Phase 3*: For the remaining steps, the learning rate interpolates between\n ``peak_value/div_factor`` and ``peak_value / final_div_factor``. If\n ``final_div_factor`` is larger than ``div_factor``, this is a decreasing\n phase.\n\n Args:\n transition_steps: Number of steps over which annealing takes place.\n peak_value: Maximum value attained by schedule at pct_start percent of the\n cycle (in number of steps).\n pct_start: The percentage of the cycle (in number of steps) spent increasing\n the learning rate.\n pct_final: The percentage of the cycle (in number of steps) spent increasing\n to ``peak_value`` then decreasing back to ``init_value``.\n div_factor: Determines the initial value via ``init_value = peak_value /\n div_factor``.\n final_div_factor: Determines the final value via ``final_value = init_value\n / final_div_factor``.\n\n Returns:\n schedule\n A function that maps step counts to values\n\n References:\n Smith et al, `Super-Convergence: Very Fast Training of Neural Networks Using\n Large Learning Rates `_, 2017\n """"""\n if transition_steps <= 0:\n raise ValueError(\n 'A linear onecycle schedule was set with a non-positive '\n '`transition_steps`'\n )\n\n return piecewise_interpolate_schedule(\n 'linear',\n peak_value / div_factor,\n {\n int(pct_start * transition_steps): div_factor,\n int(pct_final * transition_steps): 1.0 / div_factor,\n transition_steps: 1.0 / final_div_factor,\n },\n )\n\n\ndef cosine_onecycle_schedule(\n transition_steps: int,\n peak_value: float,\n pct_start: float = 0.3,\n div_factor: float = 25.0,\n final_div_factor: float = 1e4,\n) -> base.Schedule:\n """"""Returns a function which implements the onecycle learning rate schedule.\n\n This schedule increases the learning rate and then decreases it in a\n cosine-like manner. The number of steps over which the learning rate increases\n is determined by the ``pct_start`` argument. 
The maximum value of the learning\n rate is determined by the ``peak_value`` argument, the initial value of the\n learning rate is determined through the formula ``init_value = peak_value /\n div_factor``, and the final value is determined by the ``final_div_factor``\n argument.\n\n Args:\n transition_steps: Number of steps over which annealing takes place.\n peak_value: Maximum value attained by schedule at pct_start percent of the\n cycle (in number of steps).\n pct_start: The percentage of the cycle (in number of steps) spent increasing\n the learning rate.\n div_factor: Determines the initial value via ``init_value = peak_value /\n div_factor``.\n final_div_factor: Determines the final value via ``final_value = init_value\n / final_div_factor``.\n\n Returns:\n schedule\n A function that maps step counts to values\n\n References:\n Smith et al, `Super-Convergence: Very Fast Training of Neural Networks Using\n Large Learning Rates `_, 2017\n """"""\n if transition_steps <= 0:\n raise ValueError(\n 'A cosine onecycle schedule was set with a non-positive '\n '`transition_steps`'\n )\n\n return piecewise_interpolate_schedule(\n 'cosine',\n peak_value / div_factor,\n {\n int(pct_start * transition_steps): div_factor,\n int(transition_steps): 1.0 / (div_factor * final_div_factor),\n },\n )\n\n\ndef warmup_constant_schedule(\n init_value: float,\n peak_value: float,\n warmup_steps: int,\n) -> base.Schedule:\n r""""""Linear warmup followed by constant schedule, i.e. no decay.\n\n Args:\n init_value: Initial value for the scalar to be annealed.\n peak_value: Peak value for scalar to be annealed at end of warmup.\n warmup_steps: Positive integer, the length of the linear warmup.\n\n Returns:\n schedule\n A function that maps step counts to values\n """"""\n return linear_schedule(\n init_value=init_value,\n end_value=peak_value,\n transition_steps=warmup_steps,\n )\n\n\ndef warmup_cosine_decay_schedule(\n init_value: float,\n peak_value: float,\n warmup_steps: int,\n decay_steps: int,\n end_value: float = 0.0,\n exponent: float = 1.0,\n) -> base.Schedule:\n r""""""Linear warmup followed by cosine decay.\n\n Args:\n init_value: Initial value for the scalar to be annealed.\n peak_value: Peak value for scalar to be annealed at end of warmup.\n warmup_steps: Positive integer, the length of the linear warmup.\n decay_steps: Positive integer, the total length of the schedule. Note that\n this includes the warmup time, so the number of steps during which cosine\n annealing is applied is ``decay_steps - warmup_steps``.\n end_value: End value of the scalar to be annealed.\n exponent: The default decay is ``0.5 * (1 + cos(pi t/T))``, where ``t`` is\n the current timestep and ``T`` is ``decay_steps``. The exponent modifies\n this to be ``(0.5 * (1 + cos(pi * t/T))) ** exponent``. 
Defaults to 1.0.\n\n Returns:\n schedule\n A function that maps step counts to values\n """"""\n alpha = 0.0 if peak_value == 0.0 else end_value / peak_value\n schedules = [\n linear_schedule(\n init_value=init_value,\n end_value=peak_value,\n transition_steps=warmup_steps,\n ),\n cosine_decay_schedule(\n init_value=peak_value,\n decay_steps=decay_steps - warmup_steps,\n alpha=alpha,\n exponent=exponent,\n ),\n ]\n return _join.join_schedules(schedules, [warmup_steps])\n\n\ndef warmup_exponential_decay_schedule(\n init_value: float,\n peak_value: float,\n warmup_steps: int,\n transition_steps: int,\n decay_rate: float,\n transition_begin: int = 0,\n staircase: bool = False,\n end_value: Optional[float] = None,\n) -> base.Schedule:\n """"""Linear warmup followed by exponential decay.\n\n Args:\n init_value: Initial value for the scalar to be annealed.\n peak_value: Peak value for scalar to be annealed at end of warmup.\n warmup_steps: Positive integer, the length of the linear warmup.\n transition_steps: must be positive. See :func:`optax.exponential_decay` for\n more details.\n decay_rate: must not be zero. The decay rate.\n transition_begin: must be positive. After how many steps to start annealing\n (before this many steps the scalar value is held fixed at ``peak_value``).\n staircase: if ``True``, decay the values at discrete intervals.\n end_value: the value at which the exponential decay stops. When ``decay_rate\n < 1``, ``end_value`` is treated as a lower bound, otherwise as an upper\n bound. Has no effect when ``decay_rate = 0``.\n\n Returns:\n schedule\n A function that maps step counts to values\n """"""\n schedules = [\n linear_schedule(\n init_value=init_value,\n end_value=peak_value,\n transition_steps=warmup_steps,\n ),\n exponential_decay(\n init_value=peak_value,\n transition_steps=transition_steps,\n decay_rate=decay_rate,\n transition_begin=transition_begin,\n staircase=staircase,\n end_value=end_value,\n ),\n ]\n return _join.join_schedules(schedules, [warmup_steps])\n\n\ndef sgdr_schedule(\n cosine_kwargs: Iterable[dict[str, chex.Numeric]],\n) -> base.Schedule:\n """"""SGD with warm restarts.\n\n This learning rate schedule applies multiple joined cosine decay cycles.\n\n Args:\n cosine_kwargs: An Iterable of dicts, where each element specifies the\n arguments to pass to each cosine decay cycle. 
The ``decay_steps`` kwarg\n will specify how long each cycle lasts for, and therefore when to\n transition to the next cycle.\n\n Returns:\n schedule\n A function that maps step counts to values\n\n References:\n Loshchilov et al., `SGDR: Stochastic Gradient Descent with Warm Restarts\n `_, 2017\n """"""\n boundaries = []\n schedules = []\n step = 0\n for kwargs in cosine_kwargs:\n schedules += [warmup_cosine_decay_schedule(**kwargs)]\n boundaries += [step + kwargs['decay_steps']]\n step += kwargs['decay_steps']\n return _join.join_schedules(schedules, boundaries[:-1])\n",python,tab +3098,4242677,"TERMINAL",0,0,"7552",,terminal_output +3099,4243689,"TERMINAL",0,0,"8663",,terminal_output +3100,4244718,"TERMINAL",0,0,"9885",,terminal_output +3101,4245783,"TERMINAL",0,0,"21996",,terminal_output +3102,4246814,"TERMINAL",0,0,"240407",,terminal_output +3103,4247947,"TERMINAL",0,0,"3118",,terminal_output +3104,4248899,"TERMINAL",0,0,"4229",,terminal_output +3105,4249949,"TERMINAL",0,0,"53310",,terminal_output +3106,4251104,"TERMINAL",0,0,"6441",,terminal_output +3107,4252057,"TERMINAL",0,0,"7552",,terminal_output +3108,4253154,"TERMINAL",0,0,"8663",,terminal_output +3109,4254189,"TERMINAL",0,0,"9774",,terminal_output +3110,4255078,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21080,0,"",python,selection_mouse +3111,4255222,"TERMINAL",0,0,"30885",,terminal_output +3112,4256241,"TERMINAL",0,0,"1996",,terminal_output +3113,4256600,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21131,0,"",python,selection_mouse +3114,4257202,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21103,0,"",python,selection_mouse +3115,4257300,"TERMINAL",0,0,"250507",,terminal_output +3116,4257346,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21097,10,"init_value",python,selection_mouse +3117,4258041,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21168,0,"",python,selection_mouse +3118,4258204,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21162,16,"transition_steps",python,selection_mouse +3119,4258357,"TERMINAL",0,0,"3118",,terminal_output +3120,4259134,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21329,0,"",python,selection_mouse +3121,4259297,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21324,5,"alpha",python,selection_mouse +3122,4259393,"TERMINAL",0,0,"4229",,terminal_output +3123,4259914,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21281,0,"",python,selection_mouse +3124,4260062,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21274,11,"decay_steps",python,selection_mouse +3125,4260436,"TERMINAL",0,0,"53320",,terminal_output +3126,4260613,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21216,0,"",python,selection_mouse +3127,4260854,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21216,29,"ecay_schedule(\n init",python,selection_mouse +3128,4260931,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21216,62,"ecay_schedule(\n init_value=peak_value,\n deca",python,selection_mouse +3129,4261321,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21278,0,"",python,selection_mouse +3130,4261322,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21274,11,"decay_steps",python,selection_mouse +3131,4261493,"TERMINAL",0,0,"6441",,terminal_output +3132,4261878,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21246,0,"",python,selection_mouse 
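A quick sanity check on the schedule definitions dumped above is to evaluate them at a few step counts. A minimal sketch, assuming the optax version captured in this trace; the hyperparameter values are purely illustrative:

import optax

# Exponential decay, clipped from below by end_value since decay_rate < 1.
exp_sched = optax.exponential_decay(
    init_value=1e-3, transition_steps=1000, decay_rate=0.5, end_value=1e-5)
# Cosine decay down to 10% of the initial value (alpha=0.1).
cos_sched = optax.cosine_decay_schedule(
    init_value=1e-3, decay_steps=1000, alpha=0.1)
for step in (0, 500, 1000, 5000):
    print(step, exp_sched(step), cos_sched(step))

Both exponential_decay and cosine_decay_schedule are public optax entry points; only the numbers here are made up.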
+3133,4262035,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21241,10,"init_value",python,selection_mouse +3134,4262574,"TERMINAL",0,0,"7552",,terminal_output +3135,4263690,"TERMINAL",0,0,"8663",,terminal_output +3136,4264508,".venv/lib/python3.10/site-packages/optax/__init__.py",0,0,"",python,tab +3137,4264717,"TERMINAL",0,0,"9774",,terminal_output +3138,4265748,"TERMINAL",0,0,"40885",,terminal_output +3139,4266368,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",0,0,"",python,tab +3140,4266745,"TERMINAL",0,0,"140:0010:007",,terminal_output +3141,4267796,"TERMINAL",0,0,"3118",,terminal_output +3142,4268819,"TERMINAL",0,0,"4229",,terminal_output +3143,4269869,"TERMINAL",0,0,"53330",,terminal_output +3144,4270975,"TERMINAL",0,0,"6441",,terminal_output +3145,4271995,"TERMINAL",0,0,"7552",,terminal_output +3146,4273120,"TERMINAL",0,0,"8663",,terminal_output +3147,4274052,"TERMINAL",0,0,"9774",,terminal_output +3148,4275090,"TERMINAL",0,0,"50885",,terminal_output +3149,4276194,"TERMINAL",0,0,"1996",,terminal_output +3150,4277197,"TERMINAL",0,0,"210107",,terminal_output +3151,4278343,"TERMINAL",0,0,"3118",,terminal_output +3152,4279303,"TERMINAL",0,0,"4229",,terminal_output +3153,4279593,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",20182,0,"",python,selection_mouse +3154,4280355,"TERMINAL",0,0,"53340",,terminal_output +3155,4281398,"TERMINAL",0,0,"6441",,terminal_output +3156,4282045,"train_tokenizer.py",0,0,"",python,tab +3157,4282434,"TERMINAL",0,0,"7552",,terminal_output +3158,4283475,"TERMINAL",0,0,"8663",,terminal_output +3159,4284590,"TERMINAL",0,0,"9774",,terminal_output +3160,4285614,"TERMINAL",0,0,"6:00885",,terminal_output +3161,4286259,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",0,0,"",python,tab +3162,4286611,"TERMINAL",0,0,"1996",,terminal_output +3163,4287666,"TERMINAL",0,0,"220207",,terminal_output +3164,4288725,"TERMINAL",0,0,"3118",,terminal_output +3165,4289812,"TERMINAL",0,0,"53350",,terminal_output +3166,4290811,"TERMINAL",0,0,"6441",,terminal_output +3167,4291856,"TERMINAL",0,0,"7552",,terminal_output +3168,4292630,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",20088,0,"",python,selection_mouse +3169,4292649,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",20087,0,"",python,selection_command +3170,4292905,"TERMINAL",0,0,"8663",,terminal_output +3171,4293957,"TERMINAL",0,0,"9774",,terminal_output +3172,4295037,"TERMINAL",0,0,"10885",,terminal_output +3173,4296041,"TERMINAL",0,0,"1996",,terminal_output +3174,4297090,"TERMINAL",0,0,"230307",,terminal_output +3175,4297253,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21255,0,"",python,selection_mouse +3176,4297818,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21246,0,"",python,selection_mouse +3177,4298111,"TERMINAL",0,0,"3118",,terminal_output +3178,4298699,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21469,0,"",python,selection_mouse +3179,4299662,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21249,0,"",python,selection_mouse +3180,4300493,"TERMINAL",0,0,"42295331:00",,terminal_output +3181,4301298,"TERMINAL",0,0,"6441",,terminal_output +3182,4302292,"TERMINAL",0,0,"7552",,terminal_output +3183,4303436,"TERMINAL",0,0,"8663",,terminal_output +3184,4304376,"TERMINAL",0,0,"9774",,terminal_output +3185,4305081,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21254,0,"",python,selection_mouse 
+3186,4305418,"TERMINAL",0,0,"20885",,terminal_output +3187,4305780,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21249,0,"",python,selection_mouse +3188,4305939,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21241,10,"init_value",python,selection_mouse +3189,4306503,"TERMINAL",0,0,"1996",,terminal_output +3190,4306540,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21254,0,"",python,selection_mouse +3191,4306692,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21252,10,"peak_value",python,selection_mouse +3192,4307511,"TERMINAL",0,0,"240407",,terminal_output +3193,4308567,"TERMINAL",0,0,"3118",,terminal_output +3194,4309678,"TERMINAL",0,0,"4229",,terminal_output +3195,4310746,"TERMINAL",0,0,"53310",,terminal_output +3196,4311763,"TERMINAL",0,0,"6441",,terminal_output +3197,4312767,"TERMINAL",0,0,"7663",,terminal_output +3198,4313795,"TERMINAL",0,0,"9774",,terminal_output +3199,4314845,"TERMINAL",0,0,"30885",,terminal_output +3200,4315885,"TERMINAL",0,0,"1996",,terminal_output +3201,4316928,"TERMINAL",0,0,"250507",,terminal_output +3202,4317973,"TERMINAL",0,0,"3118",,terminal_output +3203,4319012,"TERMINAL",0,0,"4229",,terminal_output +3204,4320122,"TERMINAL",0,0,"53320",,terminal_output +3205,4321147,"TERMINAL",0,0,"6441",,terminal_output +3206,4322113,"TERMINAL",0,0,"7552",,terminal_output +3207,4323197,"TERMINAL",0,0,"8663",,terminal_output +3208,4324200,"TERMINAL",0,0,"9774",,terminal_output +3209,4325251,"TERMINAL",0,0,"40885",,terminal_output +3210,4326199,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21714,0,"",python,selection_mouse +3211,4326237,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21713,0,"",python,selection_command +3212,4326298,"TERMINAL",0,0,"1996",,terminal_output +3213,4326789,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21476,0,"",python,selection_mouse +3214,4326794,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21475,0,"",python,selection_command +3215,4327396,"TERMINAL",0,0,"21:001:007",,terminal_output +3216,4327437,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21063,0,"",python,selection_mouse +3217,4327448,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",21062,0,"",python,selection_command +3218,4328379,"TERMINAL",0,0,"3118",,terminal_output +3219,4329440,"TERMINAL",0,0,"4229",,terminal_output +3220,4330475,"TERMINAL",0,0,"53330",,terminal_output +3221,4331518,"genie.py",0,0,"from typing import Dict, Any\n\nimport optax\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nfrom jax import NamedSharding\nfrom flax.training.train_state import TrainState\nfrom flax.training import orbax_utils\nfrom orbax.checkpoint import PyTreeCheckpointer\n\nfrom models.dynamics import DynamicsMaskGIT\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\n\nclass Genie(nn.Module):\n """"""Genie model""""""\n\n # --- Tokenizer ---\n in_dim: int\n tokenizer_dim: int\n latent_patch_dim: int\n num_patch_latents: int\n patch_size: int\n tokenizer_num_blocks: int\n tokenizer_num_heads: int\n # --- LAM ---\n lam_dim: int\n latent_action_dim: int\n num_latent_actions: int\n lam_patch_size: int\n lam_num_blocks: int\n lam_num_heads: int\n # --- Dynamics ---\n dyna_dim: int\n dyna_num_blocks: int\n dyna_num_heads: int\n dropout: float = 0.0\n mask_limit: float = 0.0\n\n def setup(self):\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n 
model_dim=self.tokenizer_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n )\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n num_latents=self.num_patch_latents,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\n lam_outputs = self.lam.vq_encode(batch[""videos""], training=False)\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(tokenizer_outputs[""indices""]),\n latent_actions=jax.lax.stop_gradient(lam_outputs[""z_q""]),\n )\n outputs[""mask_rng""] = batch[""mask_rng""]\n dyna_outputs = self.dynamics(outputs, training)\n outputs.update(dyna_outputs)\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )\n return outputs\n\n @nn.compact\n def sample_mihir(\n self,\n batch: Dict[str, Any],\n seq_len: int,\n steps: int,\n temperature: float,\n sample_argmax: bool,\n ) -> Any:\n # B == batch_size\n # T == num_frames (input)\n # N == num_patches\n # S == seq_len\n # A == action_space\n # D == latent_dim\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""] # (B, T, N)\n B, T, N = token_idxs.shape\n S = seq_len\n print(""token_idxs shape:"", token_idxs.shape)\n pad_shape = (B, S - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs.dtype)\n token_idxs = jnp.concatenate([token_idxs, pad], axis=1) # shape (B, S, N)\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # begin potential forloop (from T to S)\n initial_T = T\n for T in range(initial_T, S):\n print(f""sampling frame {T}"")\n # Create a mask that is 1 (True) where we just padded\n # token_idxs shape: (B, S, N), T = original length, S = seq_len\n # mask is True for padded positions (i.e., t >= T)\n mask = (jnp.arange(S)[None, :, None] >= T) # shape (1, S, 1)\n mask = jnp.broadcast_to(mask, (B, S, N)) # shape (B, S, N)\n init_mask = mask.astype(bool)\n token_idxs *= ~init_mask\n #print(""token_idxs[0,:,0]:"", token_idxs[0,:,0])\n #print(""init_mask[0,:,0]:"", init_mask[0,:,0])\n\n assert init_mask.shape == (B, S, N), ""Wrong mask shape""\n\n # --- Initialize MaskGIT ---\n init_carry = (\n batch[""rng""],\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStepMihir,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n token_idxs = final_carry[2]\n\n new_frame_pixels = self.tokenizer.decode(\n token_idxs,\n video_hw=batch[""videos""].shape[2:4],\n )\n return 
new_frame_pixels\n\n @nn.compact\n def sample(\n self,\n batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # B == batch_size\n # T == num_frames (input)\n # N == num_patches\n # S == seq_len\n # A == action_space\n # D == latent_dim\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]# (B, T, N)\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0] # (B, N) \n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""]) # (B, S, A, D)\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0] # (B, N)\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStepMihir(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, mask, token_idxs, action_tokens = carry\n step = x\n B, S, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_embed = self.dynamics.patch_embed(token_idxs)\n # Mask vid_embed: set to mask_token where mask==1, else keep vid_embed\n # mask: (B, S, N), vid_embed: (B, S, N, D), mask_token: (D,)\n mask_token = self.dynamics.mask_token # (1,1, 1, D,)\n # Expand mask to (B, S, N, 1) for broadcasting\n mask_expanded = mask[..., None]\n vid_embed = jnp.where(mask_expanded, mask_token, vid_embed)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits_old = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n final_logits= self.dynamics.dynamics(vid_embed) / step_temp\n # jax.debug.print(""final_logits shape: {}"", final_logits.shape)\n # jax.debug.print(""final_logits_tmp shape: {}"", final_logits_tmp.shape)\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs_old = jnp.argmax(final_logits_old, axis=-1)\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n # jax.debug.print(""sampled_token_idxs shape: {}"", sampled_token_idxs.shape)\n # jax.debug.print(""sampled_token_idxs_tmp shape: {}"", sampled_token_idxs_tmp.shape)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n gather_fn_tmp = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs_old = 
gather_fn(jax.nn.softmax(final_logits_old), sampled_token_idxs_old)\n final_token_probs = gather_fn_tmp(jax.nn.softmax(final_logits), sampled_token_idxs)\n # jax.debug.print(""final_token_probs shape: {}"", final_token_probs.shape)\n # jax.debug.print(""final_token_probs_tmp shape: {}"", final_token_probs_tmp.shape)\n final_token_probs += ~mask\n # Update masked tokens only\n token_idxs = jnp.where(mask, sampled_token_idxs, token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_mask, token_idxs, action_tokens)\n return new_carry, None\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n ) # (B, T+1, N)\n vid_embed = self.dynamics.patch_embed(vid_token_idxs) # (B, T+1, N, D)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1), # (B, N, 1)\n self.dynamics.mask_token[0], # (B, 1, D)\n vid_embed[:, -1], # (B, N, D)\n ) # (B, N, D)\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None\n\n\ndef restore_genie_components(\n train_state: TrainState,\n sharding: NamedSharding,\n inputs: Dict[str, jax.Array],\n rng: jax.Array,\n args,\n):\n """"""Restore pre-trained Genie components""""""\n rng, _rng = jax.random.split(rng)\n\n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n num_blocks=args.tokenizer_num_blocks,\n 
num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n )\n dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n )\n tokenizer_init_params = dummy_tokenizer.init(_rng, inputs)\n lam_init_params = dummy_lam.init(_rng, inputs)\n\n # dummy values since we only use tx to initialize the dummy train states\n dummy_tx = optax.adamw(\n learning_rate=optax.constant_schedule(args.max_lr),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n )\n\n dummy_tokenizer_train_state = TrainState.create(\n apply_fn=dummy_tokenizer.apply, params=tokenizer_init_params, tx=dummy_tx\n )\n dummy_lam_train_state = TrainState.create(\n apply_fn=dummy_lam.apply, params=lam_init_params, tx=dummy_tx\n )\n\n def create_abstract_sharded_pytree(pytree_template, sharding_spec):\n """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, ""shape"") and hasattr(leaf_template, ""dtype""):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)\n\n abstract_sharded_tokenizer_state = create_abstract_sharded_pytree(\n dummy_tokenizer_train_state, sharding\n )\n abstract_sharded_lam_state = create_abstract_sharded_pytree(\n dummy_lam_train_state, sharding\n )\n\n tokenizer_restore_target = {""model"": abstract_sharded_tokenizer_state}\n lam_restore_target = {""model"": abstract_sharded_lam_state}\n\n tokenizer_restore_args = orbax_utils.restore_args_from_target(\n tokenizer_restore_target\n )\n lam_restore_args = orbax_utils.restore_args_from_target(lam_restore_target)\n\n restored_tokenizer_params = (\n PyTreeCheckpointer()\n .restore(\n args.tokenizer_checkpoint,\n item=tokenizer_restore_target,\n restore_args=tokenizer_restore_args,\n )[""model""]\n .params[""params""]\n )\n restored_lam_params = (\n PyTreeCheckpointer()\n .restore(\n args.lam_checkpoint, item=lam_restore_target, restore_args=lam_restore_args\n )[""model""]\n .params[""params""]\n )\n # Genie does not initialize all LAM modules, thus we omit those extra modules during restoration\n # (f.srambical) FIXME: Currently, this is a small HBM memory crunch since the LAM's decoder is loaded into HBM and immediately discarded.\n # A workaround would be to restore to host memory first, and only move the weights to HBM after pruning the decoder\n restored_lam_params = {\n k: v\n for k, v in restored_lam_params.items()\n if k in train_state.params[""params""][""lam""]\n }\n\n train_state.params[""params""][""tokenizer""].update(restored_tokenizer_params)\n train_state.params[""params""][""lam""].update(restored_lam_params)\n\n return train_state\n",python,tab +3222,4331722,"TERMINAL",0,0,"6441",,terminal_output +3223,4332606,"TERMINAL",0,0,"7552",,terminal_output +3224,4333188,"scripts_horeka/batchsize_scaling/adjusted_lr/run_sbatch.sh",0,0,"sbatch /home/hk-project-p0023960/tum_cte0515/Projects/jafar/scripts_horeka/batchsize_scaling/adjusted_lr/train_tokenizer_1_nodes.sbatch\nsbatch /home/hk-project-p0023960/tum_cte0515/Projects/jafar/scripts_horeka/batchsize_scaling/adjusted_lr/train_tokenizer_2_nodes.sbatch\nsbatch 
/home/hk-project-p0023960/tum_cte0515/Projects/jafar/scripts_horeka/batchsize_scaling/adjusted_lr/train_tokenizer_4_nodes.sbatch",shellscript,tab +3225,4333807,"TERMINAL",0,0,"8663",,terminal_output +3226,4334419,"genie.py",0,0,"",python,tab +3227,4334698,"TERMINAL",0,0,"9774",,terminal_output +3228,4335919,"TERMINAL",0,0,"50896",,terminal_output +3229,4336762,"TERMINAL",0,0,"210107",,terminal_output +3230,4337850,"TERMINAL",0,0,"3118",,terminal_output +3231,4338864,"TERMINAL",0,0,"4229",,terminal_output +3232,4339917,"TERMINAL",0,0,"53340",,terminal_output +3233,4341013,"TERMINAL",0,0,"6441",,terminal_output +3234,4341977,"TERMINAL",0,0,"7552",,terminal_output +3235,4343062,"TERMINAL",0,0,"8663",,terminal_output +3236,4344098,"TERMINAL",0,0,"9774",,terminal_output +3237,4345215,"TERMINAL",0,0,"7:00885",,terminal_output +3238,4346141,"TERMINAL",0,0,"1996",,terminal_output +3239,4347178,"TERMINAL",0,0,"220207",,terminal_output +3240,4348284,"TERMINAL",0,0,"3118",,terminal_output +3241,4349247,"TERMINAL",0,0,"4229",,terminal_output +3242,4350290,"TERMINAL",0,0,"53350",,terminal_output +3243,4351333,"TERMINAL",0,0,"6441",,terminal_output +3244,4352483,"TERMINAL",0,0,"7552",,terminal_output +3245,4353507,"TERMINAL",0,0,"8663",,terminal_output +3246,4354549,"TERMINAL",0,0,"9774",,terminal_output +3247,4355575,"TERMINAL",0,0,"10885",,terminal_output +3248,4356551,"TERMINAL",0,0,"1996",,terminal_output +3249,4357603,"TERMINAL",0,0,"230307",,terminal_output +3250,4358628,"TERMINAL",0,0,"3118",,terminal_output +3251,4359678,"TERMINAL",0,0,"4229",,terminal_output +3252,4360728,"TERMINAL",0,0,"5442:01",,terminal_output +3253,4361816,"TERMINAL",0,0,"7552",,terminal_output +3254,4362827,"TERMINAL",0,0,"8663",,terminal_output +3255,4363857,"TERMINAL",0,0,"9774",,terminal_output +3256,4364895,"TERMINAL",0,0,"20885",,terminal_output +3257,4365942,"TERMINAL",0,0,"1996",,terminal_output +3258,4366982,"TERMINAL",0,0,"240407",,terminal_output +3259,4368047,"TERMINAL",0,0,"3118",,terminal_output +3260,4369073,"TERMINAL",0,0,"4229",,terminal_output +3261,4370212,"TERMINAL",0,0,"53310",,terminal_output +3262,4371224,"TERMINAL",0,0,"6441",,terminal_output +3263,4371993,"scripts_horeka/batchsize_scaling/adjusted_lr/train_tokenizer_2_nodes.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=05:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=logs/logs_training/%x_%j.log\n#SBATCH --error=logs/logs_training/%x_%j.log\n#SBATCH --job-name=train_tokenizer_batch_size_scaling_2_node\n#SBATCH --mem=100G\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=250 \\n --log \\n --name=tokenizer-batch-size-scaling-2-node-$slurm_job_id \\n --tags tokenizer batch-size-scaling 2-node \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,tab +3264,4372196,"TERMINAL",0,0,"7552",,terminal_output 
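Both MaskGIT step modules in the genie.py buffer above share one masking curve: unmasked_ratio = cos(pi * (step + 1) / (steps * 2)), with round(N * (1 - unmasked_ratio)) tokens committed after each step. A self-contained sketch of that curve (N and steps are illustrative; genie.py's sample() defaults to steps=25):

import math

N, steps = 256, 25  # illustrative patch count and number of MaskGIT iterations
for step in range(steps):
    unmasked_ratio = math.cos(math.pi * (step + 1) / (steps * 2))
    kept = round(N * (1.0 - unmasked_ratio))
    print(f"step {step:2d}: keep {kept:3d}/{N} tokens")

Early steps keep almost nothing, since the ratio starts near cos(pi / (2 * steps)) ~ 1; at the final step unmasked_ratio = cos(pi/2) = 0, so all N tokens are fixed.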
+3265,4373274,"TERMINAL",0,0,"8663",,terminal_output +3266,4374359,"genie.py",0,0,"",python,tab +3267,4374553,"TERMINAL",0,0,"9774",,terminal_output +3268,4375378,"TERMINAL",0,0,"30885",,terminal_output +3269,4376443,"TERMINAL",0,0,"1996",,terminal_output +3270,4377766,".venv/lib/python3.10/site-packages/optax/schedules/_schedule.py",0,0,"",python,tab +3271,4377954,"TERMINAL",0,0,"250507",,terminal_output +3272,4378504,"TERMINAL",0,0,"3118",,terminal_output +3273,4379649,"TERMINAL",0,0,"4229",,terminal_output +3274,4380643,"TERMINAL",0,0,"53320",,terminal_output +3275,4381664,"TERMINAL",0,0,"6441",,terminal_output +3276,4382715,"TERMINAL",0,0,"7552",,terminal_output +3277,4383263,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,0,"",shellscript,tab +3278,4383752,"TERMINAL",0,0,"8774",,terminal_output +3279,4384822,"TERMINAL",0,0,"40885",,terminal_output +3280,4385864,"TERMINAL",0,0,"1996",,terminal_output +3281,4386293,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",949,0,"",shellscript,selection_mouse +3282,4386312,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",948,0,"",shellscript,selection_command +3283,4386901,"TERMINAL",0,0,"22:002:007",,terminal_output +3284,4387934,"TERMINAL",0,0,"3118",,terminal_output +3285,4388989,"TERMINAL",0,0,"4229",,terminal_output +3286,4390031,"TERMINAL",0,0,"53330",,terminal_output +3287,4391187,"TERMINAL",0,0,"6441",,terminal_output +3288,4392135,"TERMINAL",0,0,"7552",,terminal_output +3289,4393185,"TERMINAL",0,0,"8663",,terminal_output +3290,4394259,"TERMINAL",0,0,"9774",,terminal_output +3291,4395280,"TERMINAL",0,0,"50885",,terminal_output +3292,4396339,"TERMINAL",0,0,"1996",,terminal_output +3293,4397379,"TERMINAL",0,0,"210107",,terminal_output +3294,4398461,"TERMINAL",0,0,"3118",,terminal_output +3295,4399485,"TERMINAL",0,0,"4229",,terminal_output +3296,4400622,"TERMINAL",0,0,"53340",,terminal_output +3297,4401572,"TERMINAL",0,0,"6441",,terminal_output +3298,4402625,"TERMINAL",0,0,"7552",,terminal_output +3299,4403666,"TERMINAL",0,0,"8663",,terminal_output +3300,4404713,"TERMINAL",0,0,"9774",,terminal_output +3301,4405754,"TERMINAL",0,0,"8:00996",,terminal_output +3302,4406800,"TERMINAL",0,0,"220207",,terminal_output +3303,4407846,"TERMINAL",0,0,"3118",,terminal_output +3304,4408895,"TERMINAL",0,0,"4229",,terminal_output +3305,4409946,"TERMINAL",0,0,"53350",,terminal_output +3306,4410988,"TERMINAL",0,0,"6441",,terminal_output +3307,4412037,"TERMINAL",0,0,"7552",,terminal_output +3308,4413092,"TERMINAL",0,0,"8663",,terminal_output +3309,4414137,"TERMINAL",0,0,"9774",,terminal_output +3310,4415223,"TERMINAL",0,0,"10885",,terminal_output +3311,4416277,"TERMINAL",0,0,"1996",,terminal_output +3312,4417276,"TERMINAL",0,0,"230307",,terminal_output +3313,4418315,"TERMINAL",0,0,"3118",,terminal_output +3314,4419365,"TERMINAL",0,0,"4229",,terminal_output +3315,4420413,"TERMINAL",0,0,"5333:00",,terminal_output +3316,4421458,"TERMINAL",0,0,"6441",,terminal_output +3317,4422524,"TERMINAL",0,0,"7552",,terminal_output +3318,4423912,"TERMINAL",0,0,"8774",,terminal_output +3319,4424947,"TERMINAL",0,0,"20885",,terminal_output +3320,4426002,"TERMINAL",0,0,"1996",,terminal_output +3321,4427045,"TERMINAL",0,0,"240407",,terminal_output +3322,4428090,"TERMINAL",0,0,"3118",,terminal_output +3323,4429181,"TERMINAL",0,0,"4229",,terminal_output +3324,4430205,"TERMINAL",0,0,"53310",,terminal_output +3325,4431230,"TERMINAL",0,0,"6441",,terminal_output +3326,4432274,"TERMINAL",0,0,"7552",,terminal_output +3327,4433379,"TERMINAL",0,0,"8663",,terminal_output 
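The train_tokenizer_2_nodes.sbatch buffer above pins --min_lr and --max_lr to 4.24e-4. That value matches square-root batch-size scaling from a 3e-4 single-node baseline; the baseline itself is not visible in this section, so treat it as an assumption. A sketch of the rule:

import math

base_lr, base_nodes = 3e-4, 1  # assumed single-node baseline, not shown in the recording
for nodes in (1, 2, 4):
    lr = base_lr * math.sqrt(nodes / base_nodes)
    print(f"{nodes} node(s): lr ~ {lr:.2e}")

This prints 3.00e-04, 4.24e-04, and 6.00e-04, one per script submitted by run_sbatch.sh.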
+3328,4434404,"TERMINAL",0,0,"9774",,terminal_output +3329,4435416,"TERMINAL",0,0,"30885",,terminal_output +3330,4436555,"TERMINAL",0,0,"1996",,terminal_output +3331,4437578,"TERMINAL",0,0,"250507",,terminal_output +3332,4438609,"TERMINAL",0,0,"3118",,terminal_output +3333,4439627,"TERMINAL",0,0,"4229",,terminal_output +3334,4440649,"TERMINAL",0,0,"53320",,terminal_output +3335,4441689,"TERMINAL",0,0,"6441",,terminal_output +3336,4442800,"TERMINAL",0,0,"7663",,terminal_output +3337,4443782,"TERMINAL",0,0,"9774",,terminal_output +3338,4444844,"TERMINAL",0,0,"40885",,terminal_output +3339,4445304,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",906,0,"",shellscript,selection_mouse +3340,4445305,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",905,0,"",shellscript,selection_command +3341,4445887,"TERMINAL",0,0,"1996",,terminal_output +3342,4445923,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",855,0,"",shellscript,selection_mouse +3343,4446667,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",595,0,"",shellscript,selection_mouse +3344,4446911,"TERMINAL",0,0,"23:003:007",,terminal_output +3345,4447735,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",576,0,"",shellscript,selection_mouse +3346,4447962,"TERMINAL",0,0,"3118",,terminal_output +3347,4448622,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",576,1,"",shellscript,content +3348,4448760,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",576,1,"",shellscript,content +3349,4448974,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",576,1,"",shellscript,content +3350,4449058,"TERMINAL",0,0,"4229",,terminal_output +3351,4449165,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",576,1,"",shellscript,content +3352,4449993,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",576,0,"0",shellscript,content +3353,4449994,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",577,0,"",shellscript,selection_keyboard +3354,4450072,"TERMINAL",0,0,"53330",,terminal_output +3355,4450969,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",576,0,"",shellscript,selection_command +3356,4450970,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",724,0,"",shellscript,selection_mouse +3357,4451110,"TERMINAL",0,0,"6441",,terminal_output +3358,4452221,"TERMINAL",0,0,"7552",,terminal_output +3359,4453214,"TERMINAL",0,0,"8663",,terminal_output +3360,4454372,"TERMINAL",0,0,"9774",,terminal_output +3361,4455341,"TERMINAL",0,0,"50885",,terminal_output +3362,4456258,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,0,"",shellscript,tab +3363,4456259,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",419,0,"",shellscript,selection_mouse +3364,4456359,"TERMINAL",0,0,"1996",,terminal_output +3365,4457446,"TERMINAL",0,0,"210107",,terminal_output +3366,4458448,"TERMINAL",0,0,"3118",,terminal_output +3367,4459498,"TERMINAL",0,0,"4229",,terminal_output +3368,4460618,"TERMINAL",0,0,"53340",,terminal_output +3369,4461585,"TERMINAL",0,0,"6441",,terminal_output +3370,4462621,"TERMINAL",0,0,"7552",,terminal_output +3371,4463691,"TERMINAL",0,0,"8663",,terminal_output +3372,4464702,"TERMINAL",0,0,"9774",,terminal_output +3373,4465741,"TERMINAL",0,0,"9:00996",,terminal_output +3374,4466874,"TERMINAL",0,0,"220207",,terminal_output +3375,4467855,"TERMINAL",0,0,"3118",,terminal_output +3376,4468918,"TERMINAL",0,0,"4229",,terminal_output +3377,4469883,"TERMINAL",0,0,"53350",,terminal_output +3378,4470930,"TERMINAL",0,0,"6441",,terminal_output +3379,4471716,"TERMINAL",0,0,"^Csrun: interrupt (one more within 1 sec to 
abort)\r\nsrun: StepId=3316923.2 tasks 0-15: running\r\n",,terminal_output +3380,4471955,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3316923.2\r\nsrun: forcing job termination\r\nsrun: Job step aborted: Waiting up to 32 seconds for job step to finish.\r\nslurmstepd: error: *** STEP 3316923.2 ON hkn0625 CANCELLED AT 2025-07-04T11:49:07 ***\r\n",,terminal_output +3381,4471966,"TERMINAL",0,0,"7552",,terminal_output +3382,4472144,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3316923.2\r\nsrun: job abort in progress\r\n",,terminal_output +3383,4472678,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3316923.2\r\n",,terminal_output +3384,4473003,"TERMINAL",0,0,"8663",,terminal_output +3385,4473296,"TERMINAL",0,0,"]0;tum_cte0515@hkn0625:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0625 jafar]$ ^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0625:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0625 jafar]$ ",,terminal_output +3386,4473826,"TERMINAL",0,0,"sh scripts_horeka/modelsize_scaling/tokenizer/tester.sh ",,terminal_output +3387,4474101,"TERMINAL",0,0,"9774",,terminal_output +3388,4474330,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\r\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\r\n\r\njob_name=""debug""\r\nslurm_job_id=""debug-mihir""\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\nenv | grep SLURM\r\n\r\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_tokenizer.py \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=96 \\r\n --min_lr=0 \\r\n --max_lr=5e-5 \\r\n --log_image_interval=500 \\r\n --log_checkpoint_interval=500 \\r\n --log \\r\n --name=tokenizer-modelsize-80M-$slurm_job_id \\r\n --tags tokenizer model-size-scaling L1 80M \\r\n --entity instant-uv \\r\n --project jafar \\r\n --data_dir $tf_records_dir \\r\n --model_dim=768 \\r\n --num_blocks=12 \\r\n --num_heads=12 \\r\n --latent_dim=64 \\r\n --num_latents=2048\r\n",,terminal_output 
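The tester.sh invocation above tags the run as 80M with --model_dim=768 --num_blocks=12 --num_heads=12. A rough cross-check under the standard transformer approximation of 12*d^2 parameters per block (4*d^2 for attention plus 8*d^2 for a 4x MLP; embeddings, codebook, and norms ignored, so this is only an estimate):

d, blocks = 768, 12
approx_params = blocks * 12 * d * d  # ~12*d^2 per transformer block
print(f"~{approx_params / 1e6:.1f}M parameters")  # ~84.9M

which is close enough to the 80M label for the block/dim settings to be consistent.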
+3389,4474438,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x4)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=3172857\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0625\r\nSLURM_JOB_START_TIME=1751618115\r\nSLURM_STEP_NODELIST=hkn0625\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751654115\r\nSLURM_PMI2_SRUN_PORT=38731\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x4)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=4\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3316923\r\nSLURM_PTY_PORT=39987\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=50\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=16\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e30.hkn0625\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=108\r\nSLURM_NODELIST=hkn[0625-0628]\r\nSLURM_SRUN_COMM_PORT=39403\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=16\r\nSLURM_NNODES=4\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3316923\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0625\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=39403\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0625-0628]\r\n",,terminal_output +3390,4474590,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +3391,4475080,"TERMINAL",0,0,"10885",,terminal_output +3392,4475580,"TERMINAL",0,0,"^Csrun: interrupt (one more within 1 sec to abort)\r\nsrun: StepId=3316923.3 tasks 0-15: running\r\n",,terminal_output +3393,4475752,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3316923.3\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 6, in \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 6, in \r\nsrun: forcing job termination\r\nTraceback (most recent call last):\r\n from flax.training import orbax_utils\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/__init__.py"", line 24, in \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 6, in \r\n from flax.training import orbax_utils\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/__init__.py"", line 24, in \r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 6, in \r\n from flax.training import orbax_utils\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/__init__.py"", line 24, in \r\n from flax import core\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/__init__.py"", line 15, in \r\n from flax.training import orbax_utils\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/__init__.py"", line 24, in \r\n from flax import core\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/__init__.py"", line 15, in \r\n from flax import core\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/__init__.py"", line 15, in \r\n from .axes_scan import broadcast as broadcast\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/axes_scan.py"", line 20, in \r\n from .axes_scan import broadcast as broadcast\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/axes_scan.py"", line 20, in \r\n from flax import core\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/__init__.py"", line 15, in \r\n from .axes_scan import broadcast as broadcast\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/axes_scan.py"", line 20, in \r\n from .axes_scan import broadcast as broadcast\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/axes_scan.py"", line 20, in \r\nsrun: Job step aborted: Waiting up to 32 seconds for job step to finish.\r\nException ignored in: \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lib/__init__.py"", line 128, in _xla_gc_callback\r\nException ignored in: \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lib/__init__.py"", line 128, in _xla_gc_callback\r\nException ignored in: \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lib/__init__.py"", line 128, in _xla_gc_callback\r\nException ignored in: \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lib/__init__.py"", line 128, in _xla_gc_callback\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 6, in \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 6, in \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 6, in \r\n from flax.training import orbax_utils\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/__init__.py"", line 24, in \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 6, in \r\n from flax.training import orbax_utils\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/__init__.py"", line 24, in \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 6, in \r\n from flax.training import orbax_utils\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/__init__.py"", line 24, in \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 6, in \r\n from flax.training import orbax_utils\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/__init__.py"", line 24, in \r\n from flax.training import orbax_utils\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/__init__.py"", line 24, in \r\n from flax import core\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/__init__.py"", line 15, in \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 6, in \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 6, in \r\n from flax.training import orbax_utils\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/__init__.py"", line 24, in \r\n from flax import core\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/__init__.py"", line 15, in \r\n from flax import core\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/__init__.py"", line 15, in \r\n from flax import core\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/__init__.py"", line 15, in \r\n from flax.training import orbax_utils\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/__init__.py"", line 24, in \r\n from flax.training import orbax_utils\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/__init__.py"", line 24, in \r\n from flax import core\r\n from flax import core\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/__init__.py"", line 15, in \r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/__init__.py"", line 15, in \r\n from flax import core\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/__init__.py"", line 15, in \r\n from .axes_scan import broadcast as broadcast\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/axes_scan.py"", line 20, in \r\n from .axes_scan import broadcast as broadcast\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/axes_scan.py"", line 20, in \r\n from flax import core\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/__init__.py"", line 15, in \r\n from .axes_scan import broadcast as broadcast\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/axes_scan.py"", line 20, in \r\n from .axes_scan import broadcast as broadcast\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/axes_scan.py"", line 20, in \r\nslurmstepd: error: *** STEP 3316923.3 ON hkn0625 CANCELLED AT 2025-07-04T11:49:11 ***\r\n from .axes_scan import broadcast as broadcast\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/axes_scan.py"", line 20, in \r\n from .axes_scan import broadcast as broadcast\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/axes_scan.py"", line 20, in \r\n from .axes_scan import broadcast as broadcast\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/axes_scan.py"", line 20, in \r\n from .axes_scan import broadcast as broadcast\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/axes_scan.py"", line 20, in \r\n",,terminal_output +3394,4475925,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3316923.3\r\nsrun: job abort in progress\r\n]0;tum_cte0515@hkn0625:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0625 jafar]$ ",,terminal_output +3395,4476111,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0625:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0625 jafar]$ ",,terminal_output +3396,4476127,"TERMINAL",0,0,"1996",,terminal_output +3397,4476888,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0625:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0625 jafar]$ ",,terminal_output +3398,4477207,"TERMINAL",0,0,"230307",,terminal_output +3399,4477631,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,0,"",shellscript,tab +3400,4477632,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",627,0,"",shellscript,selection_mouse +3401,4478230,"TERMINAL",0,0,"3118",,terminal_output +3402,4478551,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",626,1,"",shellscript,content +3403,4478668,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",625,1,"",shellscript,content +3404,4478797,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",625,0,"1",shellscript,content +3405,4478798,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",626,0,"",shellscript,selection_keyboard +3406,4479238,"TERMINAL",0,0,"4229",,terminal_output +3407,4479277,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",625,0,"",shellscript,selection_command +3408,4480291,"TERMINAL",0,0,"5334:00",,terminal_output +3409,4480419,"TERMINAL",0,0,"sh scripts_horeka/modelsize_scaling/tokenizer/tester.sh ",,terminal_output +3410,4481065,"TERMINAL",0,0,"\r\n[?2004l\r\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource 
.venv/bin/activate\r\n\r\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\r\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\r\n\r\njob_name=""debug""\r\nslurm_job_id=""debug-mihir""\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\nenv | grep SLURM\r\n\r\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_tokenizer.py \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=96 \\r\n --min_lr=0 \\r\n --max_lr=5e-5 \\r\n --log_image_interval=10 \\r\n --log_checkpoint_interval=500 \\r\n --log \\r\n --name=tokenizer-modelsize-80M-$slurm_job_id \\r\n --tags tokenizer model-size-scaling L1 80M \\r\n --entity instant-uv \\r\n --project jafar \\r\n --data_dir $tf_records_dir \\r\n --model_dim=768 \\r\n --num_blocks=12 \\r\n --num_heads=12 \\r\n --latent_dim=64 \\r\n --num_latents=2048\r\n",,terminal_output +3411,4481172,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x4)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=3172857\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0625\r\nSLURM_JOB_START_TIME=1751618115\r\nSLURM_STEP_NODELIST=hkn0625\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751654115\r\nSLURM_PMI2_SRUN_PORT=38731\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x4)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=4\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3316923\r\nSLURM_PTY_PORT=39987\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=50\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=16\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e30.hkn0625\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=108\r\nSLURM_NODELIST=hkn[0625-0628]\r\nSLURM_SRUN_COMM_PORT=39403\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=16\r\nSLURM_NNODES=4\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3316923\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0625\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=39403\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0625-0628]\r\n",,terminal_output +3412,4481305,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +3413,4481377,"TERMINAL",0,0,"6441",,terminal_output +3414,4482430,"TERMINAL",0,0,"7552",,terminal_output +3415,4483416,"TERMINAL",0,0,"8663",,terminal_output +3416,4483859,"TERMINAL",0,0,"2025-07-04 11:49:19.086760: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 11:49:19.086820: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-04 11:49:19.087275: E 
external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751622559.100038 1040286 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751622559.104713 1040286 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1751622559.117559 3210930 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +3417,4483920,"TERMINAL",0,0,"W0000 00:00:1751622559.159138 3210928 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +3418,4484468,"TERMINAL",0,0,"9774",,terminal_output +3419,4485508,"TERMINAL",0,0,"20885",,terminal_output +3420,4486554,"TERMINAL",0,0,"1996",,terminal_output +3421,4487708,"TERMINAL",0,0,"240407",,terminal_output +3422,4488673,"TERMINAL",0,0,"3118",,terminal_output +3423,4489800,"TERMINAL",0,0,"4229",,terminal_output +3424,4490079,"TERMINAL",0,0,"W0000 00:00:1751622565.361635 1040286 gpu_device.cc:2341] Cannot dlopen some GPU libraries. 
Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +3425,4490732,"TERMINAL",0,0,"54411",,terminal_output +3426,4491796,"TERMINAL",0,0,"7552",,terminal_output +3427,4492895,"TERMINAL",0,0,"8663",,terminal_output +3428,4493664,"TERMINAL",0,0,"2025-07-04 11:49:28.879706: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3429,4493726,"TERMINAL",0,0,"2025-07-04 11:49:28.980410: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3430,4493946,"TERMINAL",0,0,"9774",,terminal_output +3431,4494991,"TERMINAL",0,0,"30885",,terminal_output +3432,4496023,"TERMINAL",0,0,"1996",,terminal_output +3433,4497077,"TERMINAL",0,0,"250507",,terminal_output +3434,4498218,"TERMINAL",0,0,"3118",,terminal_output +3435,4499226,"TERMINAL",0,0,"4229",,terminal_output +3436,4500221,"TERMINAL",0,0,"53320",,terminal_output +3437,4501270,"TERMINAL",0,0,"6441",,terminal_output +3438,4502299,"TERMINAL",0,0,"7552",,terminal_output +3439,4503358,"TERMINAL",0,0,"8663",,terminal_output +3440,4504444,"TERMINAL",0,0,"9774",,terminal_output +3441,4505453,"TERMINAL",0,0,"40885",,terminal_output +3442,4506463,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. 
Use `wandb login --relogin` to force relogin\r\n",,terminal_output +3443,4506522,"TERMINAL",0,0,"1996",,terminal_output +3444,4507218,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250704_114941-vnl3uzrg\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run tokenizer-modelsize-80M-debug-mihir\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/vnl3uzrg\r\n",,terminal_output +3445,4507536,"TERMINAL",0,0,"24:004:007",,terminal_output +3446,4508642,"TERMINAL",0,0,"3118",,terminal_output +3447,4509631,"TERMINAL",0,0,"4229",,terminal_output +3448,4510678,"TERMINAL",0,0,"53330",,terminal_output +3449,4511708,"TERMINAL",0,0,"6441",,terminal_output +3450,4512950,"TERMINAL",0,0,"7663",,terminal_output +3451,4513873,"TERMINAL",0,0,"9774",,terminal_output +3452,4514835,"TERMINAL",0,0,"50885",,terminal_output +3453,4515913,"TERMINAL",0,0,"1996",,terminal_output +3454,4516938,"TERMINAL",0,0,"210107",,terminal_output +3455,4517977,"TERMINAL",0,0,"3118",,terminal_output +3456,4519019,"TERMINAL",0,0,"4229",,terminal_output +3457,4520066,"TERMINAL",0,0,"53340",,terminal_output +3458,4521135,"TERMINAL",0,0,"6441",,terminal_output +3459,4522280,"TERMINAL",0,0,"7552",,terminal_output +3460,4523179,"TERMINAL",0,0,"8663",,terminal_output +3461,4524317,"TERMINAL",0,0,"9774",,terminal_output +3462,4525274,"TERMINAL",0,0,"50:00885",,terminal_output +3463,4526322,"TERMINAL",0,0,"1996",,terminal_output +3464,4527383,"TERMINAL",0,0,"220207",,terminal_output +3465,4528441,"TERMINAL",0,0,"3118",,terminal_output +3466,4529449,"TERMINAL",0,0,"4229",,terminal_output +3467,4530557,"TERMINAL",0,0,"53350",,terminal_output +3468,4531545,"TERMINAL",0,0,"6441",,terminal_output +3469,4532605,"TERMINAL",0,0,"7552",,terminal_output +3470,4533638,"TERMINAL",0,0,"8663",,terminal_output +3471,4534687,"TERMINAL",0,0,"9774",,terminal_output +3472,4535728,"TERMINAL",0,0,"10996",,terminal_output +3473,4536803,"TERMINAL",0,0,"230307",,terminal_output +3474,4537827,"TERMINAL",0,0,"3118",,terminal_output +3475,4538853,"TERMINAL",0,0,"4229",,terminal_output +3476,4539897,"TERMINAL",0,0,"5335:00",,terminal_output +3477,4540951,"TERMINAL",0,0,"6441",,terminal_output +3478,4541991,"TERMINAL",0,0,"7552",,terminal_output +3479,4543036,"TERMINAL",0,0,"8663",,terminal_output +3480,4544079,"TERMINAL",0,0,"9774",,terminal_output +3481,4545207,"TERMINAL",0,0,"20885",,terminal_output +3482,4545617,"TERMINAL",0,0,"2025-07-04 11:50:20.814510: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-04 11:50:20.815052: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-04 11:50:20.888467: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3483,4545772,"TERMINAL",0,0,"2025-07-04 11:50:21.046002: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3484,4545938,"TERMINAL",0,0,"2025-07-04 11:50:21.158519: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3485,4546001,"TERMINAL",0,0,"2025-07-04 11:50:21.248712: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3486,4546224,"TERMINAL",0,0,"1996",,terminal_output +3487,4546368,"TERMINAL",0,0,"2025-07-04 11:50:21.652634: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3488,4546555,"TERMINAL",0,0,"2025-07-04 11:50:21.836360: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3489,4547187,"TERMINAL",0,0,"2025-07-04 11:50:22.453078: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3490,4547248,"TERMINAL",0,0,"240407",,terminal_output +3491,4547396,"TERMINAL",0,0,"2025-07-04 11:50:22.665406: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3492,4547445,"TERMINAL",0,0,"2025-07-04 11:50:22.710282: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3493,4547876,"TERMINAL",0,0,"2025-07-04 11:50:23.065343: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3494,4548272,"TERMINAL",0,0,"2025-07-04 11:50:23.478349: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3495,4548292,"TERMINAL",0,0,"3118",,terminal_output +3496,4548521,"TERMINAL",0,0,"2025-07-04 11:50:23.805156: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3497,4548581,"TERMINAL",0,0,"2025-07-04 11:50:23.860904: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3498,4549329,"TERMINAL",0,0,"2025-07-04 11:50:24.615410: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3499,4549342,"TERMINAL",0,0,"4229",,terminal_output +3500,4550397,"TERMINAL",0,0,"53310",,terminal_output +3501,4551449,"TERMINAL",0,0,"6441",,terminal_output +3502,4552536,"TERMINAL",0,0,"7552",,terminal_output +3503,4553597,"TERMINAL",0,0,"8663",,terminal_output +3504,4554603,"TERMINAL",0,0,"9774",,terminal_output +3505,4555645,"TERMINAL",0,0,"30885",,terminal_output +3506,4556676,"TERMINAL",0,0,"1996",,terminal_output +3507,4557721,"TERMINAL",0,0,"251518",,terminal_output +3508,4558768,"TERMINAL",0,0,"4229",,terminal_output +3509,4559819,"TERMINAL",0,0,"53320",,terminal_output +3510,4560971,"TERMINAL",0,0,"6441",,terminal_output +3511,4561920,"TERMINAL",0,0,"7552",,terminal_output +3512,4562975,"TERMINAL",0,0,"8663",,terminal_output +3513,4564014,"TERMINAL",0,0,"9774",,terminal_output +3514,4565046,"TERMINAL",0,0,"40885",,terminal_output +3515,4566191,"TERMINAL",0,0,"1996",,terminal_output +3516,4567216,"TERMINAL",0,0,"25:005:007",,terminal_output +3517,4568241,"TERMINAL",0,0,"3118",,terminal_output +3518,4569280,"TERMINAL",0,0,"4229",,terminal_output +3519,4570289,"TERMINAL",0,0,"53330",,terminal_output +3520,4571417,"TERMINAL",0,0,"6441",,terminal_output +3521,4572378,"TERMINAL",0,0,"7552",,terminal_output +3522,4573465,"TERMINAL",0,0,"8663",,terminal_output +3523,4574593,"TERMINAL",0,0,"9774",,terminal_output +3524,4575616,"TERMINAL",0,0,"50885",,terminal_output +3525,4576641,"TERMINAL",0,0,"1996",,terminal_output +3526,4577662,"TERMINAL",0,0,"210107",,terminal_output +3527,4578686,"TERMINAL",0,0,"3118",,terminal_output +3528,4579710,"TERMINAL",0,0,"4229",,terminal_output +3529,4580835,"TERMINAL",0,0,"54441",,terminal_output +3530,4581866,"TERMINAL",0,0,"7552",,terminal_output +3531,4582883,"TERMINAL",0,0,"8663",,terminal_output +3532,4583910,"TERMINAL",0,0,"9774",,terminal_output +3533,4584934,"TERMINAL",0,0,"1:00885",,terminal_output +3534,4585956,"TERMINAL",0,0,"1996",,terminal_output +3535,4586981,"TERMINAL",0,0,"220207",,terminal_output +3536,4588015,"TERMINAL",0,0,"3118",,terminal_output +3537,4589067,"TERMINAL",0,0,"4229",,terminal_output +3538,4590098,"TERMINAL",0,0,"53350",,terminal_output +3539,4591182,"TERMINAL",0,0,"6441",,terminal_output +3540,4592197,"TERMINAL",0,0,"7552",,terminal_output +3541,4593331,"TERMINAL",0,0,"8663",,terminal_output +3542,4594275,"TERMINAL",0,0,"9774",,terminal_output +3543,4595360,"TERMINAL",0,0,"10885",,terminal_output +3544,4596375,"TERMINAL",0,0,"1996",,terminal_output +3545,4597427,"TERMINAL",0,0,"230307",,terminal_output +3546,4598554,"TERMINAL",0,0,"3118",,terminal_output +3547,4599576,"TERMINAL",0,0,"4229",,terminal_output +3548,4600600,"TERMINAL",0,0,"5336:00",,terminal_output +3549,4601626,"TERMINAL",0,0,"6441",,terminal_output +3550,4602764,"TERMINAL",0,0,"7552",,terminal_output +3551,4603774,"TERMINAL",0,0,"8663",,terminal_output +3552,4605002,"TERMINAL",0,0,"9885",,terminal_output +3553,4606135,"TERMINAL",0,0,"21996",,terminal_output +3554,4607200,"TERMINAL",0,0,"240407",,terminal_output +3555,4608280,"TERMINAL",0,0,"3118",,terminal_output +3556,4609277,"TERMINAL",0,0,"4229",,terminal_output +3557,4610358,"TERMINAL",0,0,"53310",,terminal_output +3558,4611454,"TERMINAL",0,0,"6441",,terminal_output +3559,4612480,"TERMINAL",0,0,"7552",,terminal_output +3560,4613506,"TERMINAL",0,0,"8663",,terminal_output +3561,4614503,"TERMINAL",0,0,"9774",,terminal_output 
+3562,4615549,"TERMINAL",0,0,"30885",,terminal_output +3563,4616593,"TERMINAL",0,0,"1996",,terminal_output +3564,4617641,"TERMINAL",0,0,"250507",,terminal_output +3565,4618688,"TERMINAL",0,0,"3118",,terminal_output +3566,4619735,"TERMINAL",0,0,"43320",,terminal_output +3567,4620767,"TERMINAL",0,0,"6441",,terminal_output +3568,4621905,"TERMINAL",0,0,"7552",,terminal_output +3569,4622924,"TERMINAL",0,0,"8663",,terminal_output +3570,4623947,"TERMINAL",0,0,"9774",,terminal_output +3571,4624934,"TERMINAL",0,0,"40885",,terminal_output +3572,4625981,"TERMINAL",0,0,"1996",,terminal_output +3573,4627026,"TERMINAL",0,0,"26:006:007",,terminal_output +3574,4628146,"TERMINAL",0,0,"3118",,terminal_output +3575,4629170,"TERMINAL",0,0,"4229",,terminal_output +3576,4630144,"TERMINAL",0,0,"53330",,terminal_output +3577,4631219,"TERMINAL",0,0,"6441",,terminal_output +3578,4632237,"TERMINAL",0,0,"7552",,terminal_output +3579,4633292,"TERMINAL",0,0,"8663",,terminal_output +3580,4634330,"TERMINAL",0,0,"9774",,terminal_output +3581,4635379,"TERMINAL",0,0,"50885",,terminal_output +3582,4636421,"TERMINAL",0,0,"1996",,terminal_output +3583,4637785,"TERMINAL",0,0,"210107",,terminal_output +3584,4638589,"TERMINAL",0,0,"3118",,terminal_output +3585,4639616,"TERMINAL",0,0,"4229",,terminal_output +3586,4640638,"TERMINAL",0,0,"53340",,terminal_output +3587,4641661,"TERMINAL",0,0,"6441",,terminal_output +3588,4642790,"TERMINAL",0,0,"7552",,terminal_output +3589,4643812,"TERMINAL",0,0,"8774",,terminal_output +3590,4644798,"TERMINAL",0,0,"2:00885",,terminal_output +3591,4645878,"TERMINAL",0,0,"1996",,terminal_output +3592,4646890,"TERMINAL",0,0,"220207",,terminal_output +3593,4648019,"TERMINAL",0,0,"3118",,terminal_output +3594,4648980,"TERMINAL",0,0,"4229",,terminal_output +3595,4650027,"TERMINAL",0,0,"53350",,terminal_output +3596,4651091,"TERMINAL",0,0,"6441",,terminal_output +3597,4652126,"TERMINAL",0,0,"7552",,terminal_output +3598,4653236,"TERMINAL",0,0,"8663",,terminal_output +3599,4654260,"TERMINAL",0,0,"9774",,terminal_output +3600,4655264,"TERMINAL",0,0,"10885",,terminal_output +3601,4656410,"TERMINAL",0,0,"1996",,terminal_output +3602,4657356,"TERMINAL",0,0,"230307",,terminal_output +3603,4658620,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,0,"",shellscript,tab +3604,4658622,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",450,0,"",shellscript,selection_mouse +3605,4658657,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",450,22,"xla_gpu_autotune_level",shellscript,selection_mouse +3606,4658658,"TERMINAL",0,0,"3118",,terminal_output +3607,4659477,"TERMINAL",0,0,"4229",,terminal_output +3608,4659542,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",464,0,"",shellscript,selection_mouse +3609,4659748,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",450,22,"xla_gpu_autotune_level",shellscript,selection_mouse +3610,4659894,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",438,70,"XLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_tokenizer.py \\n",shellscript,selection_mouse +3611,4660475,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",464,0,"",shellscript,selection_mouse +3612,4660554,"TERMINAL",0,0,"5337:00",,terminal_output +3613,4660641,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",450,22,"xla_gpu_autotune_level",shellscript,selection_mouse +3614,4661524,"TERMINAL",0,0,"6441",,terminal_output +3615,4662593,"TERMINAL",0,0,"7552",,terminal_output +3616,4663679,"TERMINAL",0,0,"8663",,terminal_output +3617,4664804,"TERMINAL",0,0,"9774",,terminal_output 
+3618,4665830,"TERMINAL",0,0,"20996",,terminal_output +3619,4666802,"TERMINAL",0,0,"240407",,terminal_output +3620,4667949,"TERMINAL",0,0,"3118",,terminal_output +3621,4668996,"TERMINAL",0,0,"4229",,terminal_output +3622,4670047,"TERMINAL",0,0,"53310",,terminal_output +3623,4671125,"TERMINAL",0,0,"6441",,terminal_output +3624,4672178,"TERMINAL",0,0,"7552",,terminal_output +3625,4673201,"TERMINAL",0,0,"8663",,terminal_output +3626,4674241,"TERMINAL",0,0,"9774",,terminal_output +3627,4675279,"TERMINAL",0,0,"30885",,terminal_output +3628,4676375,"TERMINAL",0,0,"1996",,terminal_output +3629,4677405,"TERMINAL",0,0,"250507",,terminal_output +3630,4678425,"TERMINAL",0,0,"3118",,terminal_output +3631,4679463,"TERMINAL",0,0,"4229",,terminal_output +3632,4680573,"TERMINAL",0,0,"53320",,terminal_output +3633,4681600,"TERMINAL",0,0,"6441",,terminal_output +3634,4682650,"TERMINAL",0,0,"7552",,terminal_output +3635,4683480,"TERMINAL",0,0,"watch",,terminal_focus +3636,4683658,"TERMINAL",0,0,"8663",,terminal_output +3637,4684773,"TERMINAL",0,0,"9774",,terminal_output +3638,4685797,"TERMINAL",0,0,"40996",,terminal_output +3639,4686825,"TERMINAL",0,0,"27:007:007",,terminal_output +3640,4687847,"TERMINAL",0,0,"3118",,terminal_output +3641,4688875,"TERMINAL",0,0,"4229",,terminal_output +3642,4689996,"TERMINAL",0,0,"53330",,terminal_output +3643,4690968,"TERMINAL",0,0,"6441",,terminal_output +3644,4692009,"TERMINAL",0,0,"7552",,terminal_output +3645,4693051,"TERMINAL",0,0,"8663",,terminal_output +3646,4694100,"TERMINAL",0,0,"9774",,terminal_output +3647,4695153,"TERMINAL",0,0,"50885",,terminal_output +3648,4696246,"TERMINAL",0,0,"1996",,terminal_output +3649,4697260,"TERMINAL",0,0,"210107",,terminal_output +3650,4698309,"TERMINAL",0,0,"3118",,terminal_output +3651,4699360,"TERMINAL",0,0,"4229",,terminal_output +3652,4700405,"TERMINAL",0,0,"53340",,terminal_output +3653,4701458,"TERMINAL",0,0,"6441",,terminal_output +3654,4702520,"TERMINAL",0,0,"7552",,terminal_output +3655,4703567,"TERMINAL",0,0,"8663",,terminal_output +3656,4704616,"TERMINAL",0,0,"9774",,terminal_output +3657,4705765,"TERMINAL",0,0,"3:00885",,terminal_output +3658,4706790,"TERMINAL",0,0,"1996",,terminal_output +3659,4707746,"TERMINAL",0,0,"221218",,terminal_output +3660,4708993,"TERMINAL",0,0,"4229",,terminal_output +3661,4709609,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",0,0,"",plaintext,tab +3662,4710065,"TERMINAL",0,0,"53350",,terminal_output +3663,4710988,"TERMINAL",0,0,"6441",,terminal_output +3664,4712012,"TERMINAL",0,0,"7552",,terminal_output +3665,4713039,"TERMINAL",0,0,"8663",,terminal_output +3666,4714004,"TERMINAL",0,0,"9774",,terminal_output +3667,4715054,"TERMINAL",0,0,"10885",,terminal_output +3668,4716104,"TERMINAL",0,0,"1996",,terminal_output +3669,4717187,"TERMINAL",0,0,"230307",,terminal_output +3670,4718261,"TERMINAL",0,0,"3118",,terminal_output +3671,4719303,"TERMINAL",0,0,"4229",,terminal_output +3672,4720327,"TERMINAL",0,0,"5338:00",,terminal_output +3673,4721433,"TERMINAL",0,0,"6441",,terminal_output +3674,4721704,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",395,0,"",plaintext,selection_mouse +3675,4722253,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",211,0,"",plaintext,selection_mouse +3676,4722378,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",208,7,"scratch",plaintext,selection_mouse +3677,4722420,"TERMINAL",0,0,"7552",,terminal_output +3678,4723051,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",309,0,"",plaintext,selection_mouse 
+3679,4723193,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",306,7,"scratch",plaintext,selection_mouse +3680,4723482,"TERMINAL",0,0,"8663",,terminal_output +3681,4724504,"TERMINAL",0,0,"9774",,terminal_output +3682,4725529,"TERMINAL",0,0,"20885",,terminal_output +3683,4726655,"TERMINAL",0,0,"1996",,terminal_output +3684,4727681,"TERMINAL",0,0,"240407",,terminal_output +3685,4728710,"TERMINAL",0,0,"3118",,terminal_output +3686,4729728,"TERMINAL",0,0,"4229",,terminal_output +3687,4729953,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",874,0,"",plaintext,selection_mouse +3688,4730258,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",873,1,"0",plaintext,selection_mouse +3689,4730259,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",871,3,"l=0",plaintext,selection_mouse +3690,4730259,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",867,7,"level=0",plaintext,selection_mouse +3691,4730259,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",864,10,"ne_level=0",plaintext,selection_mouse +3692,4730260,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",861,13,"otune_level=0",plaintext,selection_mouse +3693,4730260,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",857,17,"_autotune_level=0",plaintext,selection_mouse +3694,4730296,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",855,19,"pu_autotune_level=0",plaintext,selection_mouse +3695,4730326,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",849,25,"-xla_gpu_autotune_level=0",plaintext,selection_mouse +3696,4730350,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",847,27,"=--xla_gpu_autotune_level=0",plaintext,selection_mouse +3697,4730371,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",845,29,"GS=--xla_gpu_autotune_level=0",plaintext,selection_mouse +3698,4730393,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",844,30,"AGS=--xla_gpu_autotune_level=0",plaintext,selection_mouse +3699,4730425,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",843,31,"LAGS=--xla_gpu_autotune_level=0",plaintext,selection_mouse +3700,4730460,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",842,32,"FLAGS=--xla_gpu_autotune_level=0",plaintext,selection_mouse +3701,4730490,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",840,34,"A_FLAGS=--xla_gpu_autotune_level=0",plaintext,selection_mouse +3702,4730525,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",838,36,"XLA_FLAGS=--xla_gpu_autotune_level=0",plaintext,selection_mouse +3703,4730757,"TERMINAL",0,0,"54411",,terminal_output +3704,4731532,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",838,36,"",plaintext,content +3705,4731878,"TERMINAL",0,0,"7552",,terminal_output +3706,4732087,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",838,1,"",plaintext,content +3707,4733003,"TERMINAL",0,0,"8663",,terminal_output +3708,4733341,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",864,0,"",plaintext,selection_mouse +3709,4733352,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",863,0,"",plaintext,selection_command +3710,4733983,"TERMINAL",0,0,"9774",,terminal_output +3711,4735032,"TERMINAL",0,0,"30885",,terminal_output +3712,4736091,"TERMINAL",0,0,"1996",,terminal_output +3713,4737123,"TERMINAL",0,0,"250507",,terminal_output +3714,4737952,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",538,0,"",plaintext,selection_mouse +3715,4738165,"TERMINAL",0,0,"3118",,terminal_output 
+3716,4738554,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",599,0,"",plaintext,selection_mouse +3717,4738704,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",595,14,"jafa_ws_shared",plaintext,selection_mouse +3718,4738837,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",539,103,"tf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\n",plaintext,selection_mouse +3719,4739213,"TERMINAL",0,0,"4229",,terminal_output +3720,4739457,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",599,0,"",plaintext,selection_mouse +3721,4739540,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",595,14,"jafa_ws_shared",plaintext,selection_mouse +3722,4739878,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",539,103,"tf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\n",plaintext,selection_mouse +3723,4740244,"TERMINAL",0,0,"53320",,terminal_output +3724,4740896,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",753,0,"",plaintext,selection_mouse +3725,4741065,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",749,8,"Projects",plaintext,selection_mouse +3726,4741280,"TERMINAL",0,0,"6441",,terminal_output +3727,4741403,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",696,98,"CHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\n",plaintext,selection_mouse +3728,4742424,"TERMINAL",0,0,"7552",,terminal_output +3729,4743448,"TERMINAL",0,0,"8663",,terminal_output +3730,4744475,"TERMINAL",0,0,"9774",,terminal_output +3731,4745468,"TERMINAL",0,0,"40885",,terminal_output +3732,4746622,"TERMINAL",0,0,"1996",,terminal_output +3733,4747579,"TERMINAL",0,0,"28:008:007",,terminal_output +3734,4748611,"TERMINAL",0,0,"3118",,terminal_output +3735,4749695,"TERMINAL",0,0,"4229",,terminal_output +3736,4750691,"TERMINAL",0,0,"53330",,terminal_output +3737,4751746,"TERMINAL",0,0,"6552",,terminal_output +3738,4752764,"TERMINAL",0,0,"8663",,terminal_output +3739,4753897,"TERMINAL",0,0,"9774",,terminal_output +3740,4754889,"TERMINAL",0,0,"50885",,terminal_output +3741,4755894,"TERMINAL",0,0,"1996",,terminal_output +3742,4756942,"TERMINAL",0,0,"210107",,terminal_output +3743,4761009,"TERMINAL",0,0,"3118422953340",,terminal_output +3744,4761123,"TERMINAL",0,0,"6441",,terminal_output +3745,4762223,"TERMINAL",0,0,"7552",,terminal_output +3746,4763228,"TERMINAL",0,0,"8663",,terminal_output +3747,4764341,"TERMINAL",0,0,"9774",,terminal_output +3748,4765318,"TERMINAL",0,0,"4:00885",,terminal_output +3749,4766377,"TERMINAL",0,0,"1996",,terminal_output +3750,4767513,"TERMINAL",0,0,"220207",,terminal_output +3751,4768454,"TERMINAL",0,0,"3118",,terminal_output +3752,4769545,"TERMINAL",0,0,"4229",,terminal_output +3753,4770539,"TERMINAL",0,0,"53350",,terminal_output +3754,4771583,"TERMINAL",0,0,"6441",,terminal_output +3755,4772633,"TERMINAL",0,0,"7552",,terminal_output +3756,4773760,"TERMINAL",0,0,"8663",,terminal_output +3757,4774729,"TERMINAL",0,0,"9885",,terminal_output +3758,4775774,"TERMINAL",0,0,"11996",,terminal_output +3759,4776833,"TERMINAL",0,0,"230307",,terminal_output +3760,4777874,"TERMINAL",0,0,"3118",,terminal_output +3761,4778902,"TERMINAL",0,0,"4229",,terminal_output +3762,4779956,"TERMINAL",0,0,"5339:00",,terminal_output +3763,4781020,"TERMINAL",0,0,"6441",,terminal_output +3764,4782070,"TERMINAL",0,0,"7552",,terminal_output +3765,4783123,"TERMINAL",0,0,"8663",,terminal_output 
+3766,4784188,"TERMINAL",0,0,"9774",,terminal_output +3767,4785199,"TERMINAL",0,0,"20885",,terminal_output +3768,4786235,"TERMINAL",0,0,"1996",,terminal_output +3769,4787256,"TERMINAL",0,0,"240407",,terminal_output +3770,4788354,"TERMINAL",0,0,"3118",,terminal_output +3771,4789332,"TERMINAL",0,0,"4229",,terminal_output +3772,4790370,"TERMINAL",0,0,"53310",,terminal_output +3773,4791452,"TERMINAL",0,0,"6441",,terminal_output +3774,4792433,"TERMINAL",0,0,"7552",,terminal_output +3775,4792787,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",1092,0,"",plaintext,selection_mouse +3776,4792805,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",1091,0,"",plaintext,selection_command +3777,4793524,"TERMINAL",0,0,"8663",,terminal_output +3778,4794625,"TERMINAL",0,0,"srun",,terminal_focus +3779,4794703,"TERMINAL",0,0,"9774",,terminal_output +3780,4794737,"TERMINAL",0,0,"\r[tum_cte0515@hkn0733 jafar_jobs]$ ",,terminal_output +3781,4795563,"TERMINAL",0,0,"30885",,terminal_output +3782,4796267,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +3783,4796331,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3784,4796437,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +3785,4796519,"TERMINAL",0,0,"frame-knoms.png generation_1751384259.6501038.gif generation_1751564417.9236383.gif requirements.txt\r\nframe.png generation_1751385872.8038208.gif generation_1751564628.3937318.gif sample.py\r\nframes generation_1751388988.2757964.gif genie.py scripts_cremers\r\ngenerate_dataset.py generation_1751390697.7517064.gif gifs scripts_horeka\r\ngeneration_1750863858.4915645.gif generation_1751556914.7367506.gif LICENSE slurm\r\ngeneration_1751035879.4153903.gif generation_1751557545.2100096.gif logs slurm-3309772.out\r\ngeneration_1751310068.977446.gif generation_1751558593.4171152.gif models tests\r\ngeneration_1751320204.666793.gif generation_1751560755.4123495.gif overfit_dir train_dynamics.py\r\ngeneration_1751320704.5755262.gif generation_1751561250.3749754.gif __pycache__ train_lam.py\r\ngeneration_1751321003.750392.gif generation_1751561495.0908976.gif README.md train_tokenizer.py\r\ngeneration_1751321516.115979.gif generation_1751563425.1792467.gif read_tf_record.py utils\r\ngeneration_1751373553.4811275.gif generation_1751563622.9616451.gif requirements-franz.txt wandb\r\n]0;tum_cte0515@hkn0733:~/Projects/jafar_jobs[?2004h[tum_cte0515@hkn0733 jafar_jobs]$ ",,terminal_output +3786,4796623,"TERMINAL",0,0,"1996",,terminal_output +3787,4797392,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +3788,4797485,"TERMINAL",0,0,"[?25ld[?25h[?25l [?25h",,terminal_output +3789,4797660,"TERMINAL",0,0,"250507",,terminal_output +3790,4797676,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +3791,4797816,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +3792,4797876,"TERMINAL",0,0,"gs/",,terminal_output +3793,4798162,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0733:~/Projects/jafar_jobs/logs[?2004h[tum_cte0515@hkn0733 logs]$ ",,terminal_output +3794,4798528,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +3795,4798684,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3796,4798693,"TERMINAL",0,0,"3118",,terminal_output +3797,4798792,"TERMINAL",0,0,"\r\n[?2004l\rlogs_training\r\n]0;tum_cte0515@hkn0733:~/Projects/jafar_jobs/logs[?2004h[tum_cte0515@hkn0733 logs]$ ",,terminal_output +3798,4799777,"TERMINAL",0,0,"43320",,terminal_output +3799,4799899,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +3800,4799960,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3801,4800172,"TERMINAL",0,0,"[?25l [?25h",,terminal_output 
+3802,4800278,"TERMINAL",0,0,"logs_training/",,terminal_output +3803,4800621,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0733:~/Projects/jafar_jobs/logs/logs_training[?2004h[tum_cte0515@hkn0733 logs_training]$ ",,terminal_output +3804,4800785,"TERMINAL",0,0,"6441",,terminal_output +3805,4800864,"TERMINAL",0,0,"[?25ll[?25h[?25ls[?25h",,terminal_output +3806,4801321,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +3807,4801442,"TERMINAL",0,0,"train_dynamics_minecraft_overfit_sample_110M_3296571.log train_dynamics_minecraft_overfit_sample_tiny_smol_lr_3301026.log\r\ntrain_dynamics_minecraft_overfit_sample_12M_3297577.log train_dynamics_minecraft_overfit_sample_tiny_smol_lr_3301031.log\r\ntrain_dynamics_minecraft_overfit_sample_12M_3299066.log train_lam_minecraft_overfit_sample_3299069.log\r\ntrain_dynamics_minecraft_overfit_sample_1.5M_3297569.log train_lam_minecraft_overfit_sample_3299259.log\r\ntrain_dynamics_minecraft_overfit_sample_1.5M_3299063.log train_lam_minecraft_overfit_sample_3309663.log\r\ntrain_dynamics_minecraft_overfit_sample_180M_3296573.log train_lam_minecraft_overfit_sample_3311672.log\r\ntrain_dynamics_minecraft_overfit_sample_18M_3297578.log train_tokenizer_batch_size_scaling_1_node_3294600.log\r\ntrain_dynamics_minecraft_overfit_sample_18M_3299062.log train_tokenizer_batch_size_scaling_1_node_3313570.log\r\ntrain_dynamics_minecraft_overfit_sample_270M_3296574.log train_tokenizer_batch_size_scaling_1_node_3316016.log\r\ntrain_dynamics_minecraft_overfit_sample_3.5M_3297575.log train_tokenizer_batch_size_scaling_2_node_3294601.log\r\ntrain_dynamics_minecraft_overfit_sample_36M_3296500.log train_tokenizer_batch_size_scaling_2_node_3313571.log\r\ntrain_dynamics_minecraft_overfit_sample_36M_3296502.log train_tokenizer_batch_size_scaling_2_node_3316017.log\r\ntrain_dynamics_minecraft_overfit_sample_36M_3296540.log train_tokenizer_batch_size_scaling_4_node_3294602.log\r\ntrain_dynamics_minecraft_overfit_sample_500M_3296575.log train_tokenizer_batch_size_scaling_4_node_3313572.log\r\ntrain_dynamics_minecraft_overfit_sample_6M_3297576.log train_tokenizer_batch_size_scaling_4_node_3316018.log\r\ntrain_dynamics_minecraft_overfit_sample_6M_3299065.log train_tokenizer_batch_size_scaling_8_node_3294603.log\r\ntrain_dynamics_minecraft_overfit_sample_tiny_3301025.log train_tokenizer_minecraft_overfit_batch_3311671.log\r\ntrain_dynamics_minecraft_overfit_sample_tiny_3301027.log train_tokenizer_minecraft_overfit_sample_3299016.log\r\ntrain_dynamics_minecraft_overfit_sample_tiny_3301029.log train_tokenizer_minecraft_overfit_sample_3299068.log\r\ntrain_dynamics_minecraft_overfit_sample_tiny_3301030.log train_tokenizer_minecraft_overfit_sample_3299258.log\r\ntrain_dynamics_minecraft_overfit_sample_tiny_3307618.log train_tokenizer_minecraft_overfit_sample_3299272.log\r\ntrain_dynamics_minecraft_overfit_sample_tiny_3307619.log train_tokenizer_minecraft_overfit_sample_3309662.log\r\ntrain_dynamics_minecraft_overfit_sample_tiny_3310436.log\r\n]0;tum_cte0515@hkn0733:~/Projects/jafar_jobs/logs/logs_training[?2004h[tum_cte0515@hkn0733 logs_training]$ ",,terminal_output +3808,4801944,"TERMINAL",0,0,"7552",,terminal_output +3809,4802944,"TERMINAL",0,0,"8663",,terminal_output +3810,4803893,"TERMINAL",0,0,"9774",,terminal_output +3811,4804097,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +3812,4804167,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3813,4804309,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +3814,4804936,"TERMINAL",0,0,"40885",,terminal_output 
+3815,4805762,"TERMINAL",0,0,"[?25l$[?25h",,terminal_output +3816,4806016,"TERMINAL",0,0,"1996",,terminal_output +3817,4806231,"TERMINAL",0,0,"[?25lw[?25h",,terminal_output +3818,4806421,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3819,4806886,"TERMINAL",0,0,"[?25l:[?25h",,terminal_output +3820,4807030,"TERMINAL",0,0,"29:009:007",,terminal_output +3821,4808079,"TERMINAL",0,0,"3118",,terminal_output +3822,4808142,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +3823,4808502,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3824,4808562,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +3825,4808713,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +3826,4808841,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0733:~[?2004h[tum_cte0515@hkn0733 ~]$ ",,terminal_output +3827,4809078,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +3828,4809125,"TERMINAL",0,0,"4229",,terminal_output +3829,4809146,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3830,4809340,"TERMINAL",0,0,"\r\n[?2004l\rProjects\r\n]0;tum_cte0515@hkn0733:~[?2004h[tum_cte0515@hkn0733 ~]$ ",,terminal_output +3831,4810217,"TERMINAL",0,0,"53330",,terminal_output +3832,4811048,"TERMINAL",0,0,"ls",,terminal_output +3833,4811233,"TERMINAL",0,0,"6441",,terminal_output +3834,4811914,"TERMINAL",0,0,"cd $wd_dir",,terminal_output +3835,4812252,"TERMINAL",0,0,"7552",,terminal_output +3836,4813266,"TERMINAL",0,0,"[?25ld_dir[?25h",,terminal_output +3837,4813326,"TERMINAL",0,0,"8663",,terminal_output +3838,4813388,"TERMINAL",0,0,"[?25ls_dir[?25h",,terminal_output +3839,4813517,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data[?2004h[tum_cte0515@hkn0733 data]$ ",,terminal_output +3840,4814333,"TERMINAL",0,0,"9774",,terminal_output +3841,4815377,"TERMINAL",0,0,"50885",,terminal_output +3842,4816468,"TERMINAL",0,0,"1996",,terminal_output +3843,4817468,"TERMINAL",0,0,"210107",,terminal_output +3844,4818509,"TERMINAL",0,0,"3118",,terminal_output +3845,4819608,"TERMINAL",0,0,"4229",,terminal_output +3846,4820367,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +3847,4820430,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3848,4820605,"TERMINAL",0,0,"53340",,terminal_output +3849,4820622,"TERMINAL",0,0,"[?25l [?25h[?25l.[?25h",,terminal_output +3850,4820839,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +3851,4820966,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared[?2004h[tum_cte0515@hkn0733 tum_ind3695-jafa_ws_shared]$ ",,terminal_output +3852,4821072,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +3853,4821470,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3854,4821615,"TERMINAL",0,0,"\r\n[?2004l\rcheckpoints count_items.sh data huggingface logs scripts\r\n]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared[?2004h[tum_cte0515@hkn0733 tum_ind3695-jafa_ws_shared]$ ",,terminal_output +3855,4821647,"TERMINAL",0,0,"6441",,terminal_output +3856,4821861,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +3857,4822381,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3858,4822487,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +3859,4822658,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +3860,4822718,"TERMINAL",0,0,"7552",,terminal_output +3861,4822779,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +3862,4822841,"TERMINAL",0,0,"gs/",,terminal_output +3863,4823096,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs[?2004h[tum_cte0515@hkn0733 logs]$ ",,terminal_output 
+3864,4823427,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3865,4823738,"TERMINAL",0,0,"8774",,terminal_output +3866,4823997,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +3867,4824131,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3868,4824203,"TERMINAL",0,0,"\r\n[?2004l\r3306965 logs_alfred logs_mihir\r\n]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs[?2004h[tum_cte0515@hkn0733 logs]$ ",,terminal_output +3869,4824769,"TERMINAL",0,0,"5:00885",,terminal_output +3870,4825006,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +3871,4825131,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3872,4825207,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +3873,4825291,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +3874,4825531,"TERMINAL",0,0,"[?25lo[?25hgs_",,terminal_output +3875,4825834,"TERMINAL",0,0,"\r\n[?2004l\rbash: cd: logs_: No such file or directory\r\n]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs[?2004h[tum_cte0515@hkn0733 logs]$ ",,terminal_output +3876,4825854,"TERMINAL",0,0,"1996",,terminal_output +3877,4825993,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +3878,4826146,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3879,4826228,"TERMINAL",0,0,"\r\n[?2004l\r3306965 logs_alfred logs_mihir\r\n]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs[?2004h[tum_cte0515@hkn0733 logs]$ ",,terminal_output +3880,4826905,"TERMINAL",0,0,"220207",,terminal_output +3881,4826968,"TERMINAL",0,0,"ls",,terminal_output +3882,4827498,"TERMINAL",0,0,"cd logs_",,terminal_output +3883,4827930,"TERMINAL",0,0,"3118",,terminal_output +3884,4828378,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +3885,4828439,"TERMINAL",0,0,"ihir/",,terminal_output +3886,4828670,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir[?2004h[tum_cte0515@hkn0733 logs_mihir]$ ",,terminal_output +3887,4828939,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +3888,4828939,"TERMINAL",0,0,"4229",,terminal_output +3889,4829001,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3890,4829099,"TERMINAL",0,0,"\r\n[?2004l\rtrain_lam_minecraft_overfit_sample_3309655.log train_tokenizer_model_size_scaling_200M_3313563.log\r\ntrain_lam_model_size_scaling_38M_3317098.log train_tokenizer_model_size_scaling_200M_3316020.log\r\ntrain_lam_model_size_scaling_38M_3317115.log train_tokenizer_model_size_scaling_37M_3313565.log\r\ntrain_tokenizer_minecraft_overfit_sample_3309656.log train_tokenizer_model_size_scaling_37M_3316022.log\r\ntrain_tokenizer_model_size_scaling_140M_3313562.log train_tokenizer_model_size_scaling_80M_3313564.log\r\ntrain_tokenizer_model_size_scaling_140M_3316019.log train_tokenizer_model_size_scaling_80M_3316026.log\r\n]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir[?2004h[tum_cte0515@hkn0733 logs_mihir]$ ",,terminal_output +3891,4829987,"TERMINAL",0,0,"53350",,terminal_output +3892,4831013,"TERMINAL",0,0,"6441",,terminal_output +3893,4832052,"TERMINAL",0,0,"7552",,terminal_output +3894,4833099,"TERMINAL",0,0,"8663",,terminal_output +3895,4834141,"TERMINAL",0,0,"9774",,terminal_output +3896,4835200,"TERMINAL",0,0,"10885",,terminal_output +3897,4836226,"TERMINAL",0,0,"1996",,terminal_output +3898,4837245,"TERMINAL",0,0,"230307",,terminal_output +3899,4838374,"TERMINAL",0,0,"3118",,terminal_output +3900,4839324,"TERMINAL",0,0,"4229",,terminal_output +3901,4839822,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output 
+3902,4839936,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3903,4840030,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +3904,4840381,"TERMINAL",0,0,"train_lam_model_size_scaling_38M_3317098.log",,terminal_output +3905,4840382,"TERMINAL",0,0,"53320:00",,terminal_output +3906,4840696,"TERMINAL",0,0,"\rtrain_lam_model_size_scaling_38M_3317098.log\r\n[?2004l\rtrain_lam_model_size_scaling_38M_3317098.log\r\n]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir[?2004h[tum_cte0515@hkn0733 logs_mihir]$ ",,terminal_output +3907,4841401,"TERMINAL",0,0,"6441",,terminal_output +3908,4842472,"TERMINAL",0,0,"7552",,terminal_output +3909,4843597,"TERMINAL",0,0,"8663",,terminal_output +3910,4844097,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +3911,4844271,"TERMINAL",0,0,"[?25lw[?25h",,terminal_output +3912,4844334,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3913,4844546,"TERMINAL",0,0,"9774",,terminal_output +3914,4845035,"TERMINAL",0,0,"\r\n[?2004l\r/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir\r\n]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir[?2004h[tum_cte0515@hkn0733 logs_mihir]$ ",,terminal_output +3915,4845592,"TERMINAL",0,0,"20885",,terminal_output +3916,4846669,"TERMINAL",0,0,"1996",,terminal_output +3917,4847693,"TERMINAL",0,0,"240407",,terminal_output +3918,4848714,"TERMINAL",0,0,"3118",,terminal_output +3919,4849780,"TERMINAL",0,0,"43310",,terminal_output +3920,4850799,"TERMINAL",0,0,"6441",,terminal_output +3921,4851835,"TERMINAL",0,0,"7552",,terminal_output +3922,4852914,"TERMINAL",0,0,"8663",,terminal_output +3923,4853900,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +3924,4853952,"TERMINAL",0,0,"9774",,terminal_output +3925,4854064,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3926,4854128,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +3927,4854484,"TERMINAL",0,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir",,terminal_output +3928,4854964,"TERMINAL",0,0,"30885",,terminal_output +3929,4856122,"TERMINAL",0,0,"1996",,terminal_output +3930,4856555,"TERMINAL",0,0,"[?25l\r/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/[?25h",,terminal_output +3931,4857067,"TERMINAL",0,0,"250507",,terminal_output +3932,4858100,"TERMINAL",0,0,"3118",,terminal_output +3933,4858764,"TERMINAL",0,0,"train_lam_model_size_scaling_38M_3317098.log",,terminal_output +3934,4859137,"TERMINAL",0,0,"4229",,terminal_output +3935,4859537,"TERMINAL",0,0,"train_lam_model_size_scaling_38M_3317098.log\r\n[?2004l\r/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log\r\n]0;tum_cte0515@hkn0733:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir[?2004h[tum_cte0515@hkn0733 logs_mihir]$ ",,terminal_output +3936,4860196,"TERMINAL",0,0,"53320",,terminal_output +3937,4861295,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_lam_model_size_scaling_38M\n\n# Log the sbatch script\ncat $0\n\nmodule unload 
mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/home/hk-project-p0023960/tum_cte0515/Projects/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=lam-model-size-scaling-38M-$slurm_job_id \\n --tags lam model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n/var/spool/slurmd/job3317098/slurm_script: line 18: .venv/bin/activate: No such file or directory\nSLURM_JOB_USER=tum_cte0515\nSLURM_TASKS_PER_NODE=4(x2)\nSLURM_JOB_UID=999226\nSLURM_TASK_PID=2460540\nSLURM_JOB_GPUS=0,1,2,3\nSLURM_LOCALID=0\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs\nSLURMD_NODENAME=hkn0629\nSLURM_JOB_START_TIME=1751620187\nSLURM_CLUSTER_NAME=hk\nSLURM_JOB_END_TIME=1751674187\nSLURM_CPUS_ON_NODE=24\nSLURM_JOB_CPUS_PER_NODE=24(x2)\nSLURM_GPUS_ON_NODE=4\nSLURM_GTIDS=0\nSLURM_JOB_PARTITION=accelerated\nSLURM_TRES_PER_TASK=cpu=5\nSLURM_OOM_KILL_STEP=0\nSLURM_JOB_NUM_NODES=2\nSLURM_JOBID=3317098\nSLURM_JOB_QOS=normal\nSLURM_PROCID=0\nSLURM_CPUS_PER_TASK=5\nSLURM_NTASKS=8\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e30.hkn0629\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\nSLURM_SCRIPT_CONTEXT=prolog_task\nSLURM_NODELIST=hkn[0629,0631]\nSLURM_JOB_ACCOUNT=hk-project-p0023960\nSLURM_PRIO_PROCESS=0\nSLURM_NPROCS=8\nSLURM_NNODES=2\nSLURM_SUBMIT_HOST=hkn1993.localdomain\nSLURM_JOB_ID=3317098\nSLURM_NODEID=0\nSLURM_CONF=/etc/slurm/slurm.conf\nSLURM_JOB_NAME=train_lam_model_size_scaling_38M\nSLURM_NTASKS_PER_NODE=4\nSLURM_JOB_GID=502226\nSLURM_JOB_NODELIST=hkn[0629,0631]\nGpuFreq=control_disabled\nGpuFreq=control_disabled\n2025-07-04 11:10:43.637694: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n2025-07-04 11:10:43.637700: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n2025-07-04 11:10:43.637693: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n2025-07-04 11:10:43.637818: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n2025-07-04 11:10:43.641547: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n2025-07-04 11:10:43.641552: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n2025-07-04 11:10:43.641547: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been 
registered\n2025-07-04 11:10:43.641547: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\nE0000 00:00:1751620243.708739 2460599 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\nE0000 00:00:1751620243.708821 2655479 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\nE0000 00:00:1751620243.708963 2460600 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\nE0000 00:00:1751620243.708875 2655480 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\nE0000 00:00:1751620243.708979 2460601 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\nE0000 00:00:1751620243.708977 2655481 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\nE0000 00:00:1751620243.708989 2655482 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\nE0000 00:00:1751620243.709180 2460602 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\nE0000 00:00:1751620243.714162 2460600 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\nE0000 00:00:1751620243.714164 2460601 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\nE0000 00:00:1751620243.714163 2460602 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\nE0000 00:00:1751620243.714347 2460599 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\nE0000 00:00:1751620243.714919 2655479 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\nE0000 00:00:1751620243.714913 2655480 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\nE0000 00:00:1751620243.714921 2655481 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when 
one has already been registered\nE0000 00:00:1751620243.714918 2655482 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\nW0000 00:00:1751620244.086468 2460599 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.086509 2460599 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.086511 2460599 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.086513 2460599 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.086469 2460600 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.086496 2460600 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.086498 2460600 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.086500 2460600 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.086466 2460601 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.086501 2460601 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.086503 2460601 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.086505 2460601 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.086470 2460602 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.086505 2460602 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.086508 2460602 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.086510 2460602 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.087849 2655479 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.087880 2655479 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.087882 2655479 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.087884 2655479 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.087852 2655480 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.087881 2655480 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.087884 2655480 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.087886 2655480 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.087845 2655481 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.087877 2655481 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.087879 2655481 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.087881 2655481 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.087846 2655482 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.087881 2655482 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.087883 2655482 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620244.087885 2655482 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1751620278.917210 2460599 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\nSkipping registering GPU devices...\nW0000 00:00:1751620278.917252 2460601 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\nSkipping registering GPU devices...\nW0000 00:00:1751620278.917346 2460600 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. 
Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\nSkipping registering GPU devices...\nW0000 00:00:1751620278.917337 2460602 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\nSkipping registering GPU devices...\nW0000 00:00:1751620278.917355 2655481 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\nSkipping registering GPU devices...\nW0000 00:00:1751620278.918175 2655479 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\nSkipping registering GPU devices...\nW0000 00:00:1751620278.918766 2655480 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\nSkipping registering GPU devices...\nW0000 00:00:1751620278.918653 2655482 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\nSkipping registering GPU devices...\n2025-07-04 11:11:30.835814: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:11:30.860530: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:11:30.883671: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:11:30.930952: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:11:30.961207: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:11:30.967069: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:11:30.967089: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:11:30.974957: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\nwandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\nwandb: Tracking run with wandb version 0.19.11\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/wandb/run-20250704_111137-eds49a9r\nwandb: Run `wandb offline` to turn off syncing.\nwandb: Syncing run lam-model-size-scaling-38M-3317098\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/eds49a9r\n2025-07-04 11:12:04.950181: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:04.951307: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:04.951325: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:04.951942: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:04.967874: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:04.968990: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:04.969008: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:04.969622: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:05.017246: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:05.018377: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:05.018396: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:05.019016: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:05.051636: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:05.052756: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:05.052774: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:05.053390: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:05.828171: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:05.829319: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:05.829338: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:05.829967: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:06.294816: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:06.295960: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:06.295980: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:06.296604: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:06.743338: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:06.744497: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:06.744516: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:06.745131: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:07.018137: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:07.019274: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:07.019293: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-07-04 11:12:07.019911: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\nRunning on 8 devices.\nCounting all components: ['action_in', 'encoder', 'vq', 'action_up', 'patch_up', 'decoder']\nParameter counts:\n{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\nStarting training from step 0...\nStep 0, loss: 0.3309202492237091\nStep 1, loss: 0.27616071701049805\nStep 2, loss: 0.23833505809307098\nStep 3, loss: 0.20353123545646667\nStep 4, loss: 0.18312352895736694\nStep 5, loss: 0.16567449271678925\nStep 6, loss: 0.1496116816997528\nStep 7, loss: 0.1397426575422287\nStep 8, loss: 0.1301795393228531\nStep 9, loss: 0.12435252219438553\nStep 10, loss: 0.11484285444021225\nStep 11, loss: 0.11342452466487885\nStep 12, loss: 0.10503888875246048\nStep 13, loss: 0.09828095138072968\nStep 14, loss: 0.10296234488487244\nStep 15, loss: 0.09688253700733185\nStep 16, loss: 0.09001464396715164\nStep 17, loss: 0.08598079532384872\nStep 18, loss: 0.08745668828487396\nStep 19, loss: 0.0775802731513977\nStep 20, loss: 0.08166162669658661\nStep 21, loss: 0.07300376892089844\nStep 22, loss: 0.07714924216270447\nStep 23, loss: 0.07810085266828537\nStep 24, loss: 0.0794411152601242\nStep 25, loss: 0.07307913899421692\nStep 26, loss: 0.07097838819026947\nStep 27, loss: 0.06554681807756424\nStep 28, loss: 0.065010204911232\nStep 29, loss: 0.06810400635004044\nStep 30, loss: 0.06351493299007416\nStep 31, loss: 0.057667750865221024\nStep 32, loss: 0.062011126428842545\nStep 33, loss: 0.0609549917280674\nStep 34, loss: 0.06150144338607788\nStep 35, loss: 0.057500164955854416\nStep 36, loss: 0.05634911358356476\nStep 37, loss: 0.054869506508111954\nStep 38, loss: 0.05245821550488472\nStep 39, loss: 0.05741355940699577\nStep 40, loss: 0.05354347452521324\nStep 41, loss: 0.05392030254006386\nStep 42, loss: 0.05377781391143799\nStep 43, loss: 0.04958712309598923\nStep 44, loss: 0.05107380822300911\nStep 45, loss: 0.05087074637413025\nStep 46, loss: 0.0468355156481266\nStep 47, loss: 0.051542893052101135\nStep 48, loss: 0.0509573332965374\nStep 49, loss: 0.0446152463555336\nRunning on 8 devices.\nCounting all components: ['action_in', 'encoder', 
'vq', 'action_up', 'patch_up', 'decoder']\nParameter counts:\n{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\nStarting training from step 0...\nStep 0, loss: 0.3309202492237091\nStep 1, loss: 0.27616071701049805\nStep 2, loss: 0.23833505809307098\nStep 3, loss: 0.20353123545646667\nStep 4, loss: 0.18312352895736694\nStep 5, loss: 0.16567449271678925\nStep 6, loss: 0.1496116816997528\nStep 7, loss: 0.1397426575422287\nStep 8, loss: 0.1301795393228531\nStep 9, loss: 0.12435252219438553\nStep 10, loss: 0.11484285444021225\nStep 11, loss: 0.11342452466487885\nStep 12, loss: 0.10503888875246048\nStep 13, loss: 0.09828095138072968\nStep 14, loss: 0.10296234488487244\nStep 15, loss: 0.09688253700733185\nStep 16, loss: 0.09001464396715164\nStep 17, loss: 0.08598079532384872\nStep 18, loss: 0.08745668828487396\nStep 19, loss: 0.0775802731513977\nStep 20, loss: 0.08166162669658661\nStep 50, loss: 0.041748303920030594\nStep 51, loss: 0.04902645945549011\nStep 52, loss: 0.047454170882701874\nStep 53, loss: 0.040505826473236084\nStep 54, loss: 0.03718667849898338\nStep 55, loss: 0.03961941972374916\nStep 56, loss: 0.03947916626930237\nStep 57, loss: 0.03693949803709984\nStep 58, loss: 0.03782138228416443\nStep 59, loss: 0.03795648366212845\nStep 60, loss: 0.03371633216738701\nStep 61, loss: 0.044886138290166855\nStep 62, loss: 0.038592565804719925\nStep 63, loss: 0.0360867939889431\nStep 64, loss: 0.036005713045597076\nStep 65, loss: 0.034621596336364746\nStep 66, loss: 0.03188462182879448\nStep 67, loss: 0.029557932168245316\nStep 68, loss: 0.0318872295320034\nStep 69, loss: 0.030645810067653656\nStep 70, loss: 0.028607860207557678\nStep 71, loss: 0.03495610132813454\nStep 72, loss: 0.029537387192249298\nStep 73, loss: 0.033071547746658325\nStep 74, loss: 0.03534964472055435\nStep 75, loss: 0.03360242769122124\nStep 76, loss: 0.028890511021018028\nStep 77, loss: 0.029018152505159378\nStep 21, loss: 0.07300376892089844\nStep 22, loss: 0.07714924216270447\nStep 23, loss: 0.07810085266828537\nStep 24, loss: 0.0794411152601242\nStep 25, loss: 0.07307913899421692\nStep 26, loss: 0.07097838819026947\nStep 27, loss: 0.06554681807756424\nStep 28, loss: 0.065010204911232\nStep 29, loss: 0.06810400635004044\nStep 30, loss: 0.06351493299007416\nStep 31, loss: 0.057667750865221024\nStep 32, loss: 0.062011126428842545\nStep 33, loss: 0.0609549917280674\nStep 34, loss: 0.06150144338607788\nStep 35, loss: 0.057500164955854416\nStep 36, loss: 0.05634911358356476\nStep 37, loss: 0.054869506508111954\nStep 38, loss: 0.05245821550488472\nStep 39, loss: 0.05741355940699577\nStep 40, loss: 0.05354347452521324\nStep 41, loss: 0.05392030254006386\nStep 42, loss: 0.05377781391143799\nStep 43, loss: 0.04958712309598923\nStep 44, loss: 0.05107380822300911\nStep 45, loss: 0.05087074637413025\nStep 46, loss: 0.0468355156481266\nStep 47, loss: 0.051542893052101135\nStep 48, loss: 0.0509573332965374\nStep 49, loss: 0.0446152463555336\nStep 78, loss: 0.029858632013201714\nStep 79, loss: 0.02790442295372486\nStep 80, loss: 0.030148373916745186\nStep 81, loss: 0.02741095796227455\nStep 82, loss: 0.02769358456134796\nStep 83, loss: 0.03266967460513115\nStep 84, loss: 0.030720101669430733\nStep 85, loss: 0.02795012854039669\nStep 86, loss: 0.027471110224723816\nStep 87, loss: 0.028233686462044716\nStep 88, loss: 0.026210511103272438\nStep 89, loss: 0.027847111225128174\nStep 90, loss: 0.02239195443689823\nStep 91, loss: 0.02604525536298752\nStep 92, 
loss: 0.022576235234737396\nStep 93, loss: 0.025205545127391815\nStep 94, loss: 0.022538412362337112\nStep 95, loss: 0.023945016786456108\nStep 96, loss: 0.02386571280658245\nStep 97, loss: 0.02088065817952156\nStep 98, loss: 0.02455727569758892\nStep 99, loss: 0.024158356711268425\nStep 100, loss: 0.02200816199183464\nStep 101, loss: 0.023882700130343437\nStep 102, loss: 0.023078344762325287\nStep 103, loss: 0.021429037675261497\nStep 104, loss: 0.02174082212150097\nStep 105, loss: 0.023439496755599976\nStep 50, loss: 0.041748303920030594\nStep 51, loss: 0.04902645945549011\nStep 52, loss: 0.047454170882701874\nStep 53, loss: 0.040505826473236084\nStep 54, loss: 0.03718667849898338\nStep 55, loss: 0.03961941972374916\nStep 56, loss: 0.03947916626930237\nStep 57, loss: 0.03693949803709984\nStep 58, loss: 0.03782138228416443\nStep 59, loss: 0.03795648366212845\nStep 60, loss: 0.03371633216738701\nStep 61, loss: 0.044886138290166855\nStep 62, loss: 0.038592565804719925\nStep 63, loss: 0.0360867939889431\nStep 64, loss: 0.036005713045597076\nStep 65, loss: 0.034621596336364746\nStep 66, loss: 0.03188462182879448\nStep 67, loss: 0.029557932168245316\nStep 68, loss: 0.0318872295320034\nStep 69, loss: 0.030645810067653656\nStep 70, loss: 0.028607860207557678\nStep 71, loss: 0.03495610132813454\nStep 72, loss: 0.029537387192249298\nStep 73, loss: 0.033071547746658325\nStep 74, loss: 0.03534964472055435\nStep 75, loss: 0.03360242769122124\nStep 76, loss: 0.028890511021018028\nStep 77, loss: 0.029018152505159378\nStep 106, loss: 0.02335542067885399\nStep 107, loss: 0.020467158406972885\nStep 78, loss: 0.029858632013201714\nStep 79, loss: 0.02790442295372486\nStep 80, loss: 0.030148373916745186\nStep 81, loss: 0.02741095796227455\nStep 82, loss: 0.02769358456134796\nStep 83, loss: 0.03266967460513115\nStep 84, loss: 0.030720101669430733\nStep 85, loss: 0.02795012854039669\nStep 86, loss: 0.027471110224723816\nStep 87, loss: 0.028233686462044716\nStep 88, loss: 0.026210511103272438\nStep 89, loss: 0.027847111225128174\nStep 90, loss: 0.02239195443689823\nStep 91, loss: 0.02604525536298752\nStep 92, loss: 0.022576235234737396\nStep 93, loss: 0.025205545127391815\nStep 94, loss: 0.022538412362337112\nStep 95, loss: 0.023945016786456108\nStep 96, loss: 0.02386571280658245\nStep 97, loss: 0.02088065817952156\nStep 98, loss: 0.02455727569758892\nStep 99, loss: 0.024158356711268425\nStep 100, loss: 0.02200816199183464\nStep 101, loss: 0.023882700130343437\nStep 102, loss: 0.023078344762325287\nStep 103, loss: 0.021429037675261497\nStep 104, loss: 0.02174082212150097\nStep 105, loss: 0.023439496755599976\nStep 108, loss: 0.023992128670215607\nStep 109, loss: 0.02077864110469818\nStep 110, loss: 0.022285621613264084\nStep 111, loss: 0.021920684725046158\nStep 112, loss: 0.020692449063062668\nStep 113, loss: 0.018958691507577896\nStep 114, loss: 0.02061193436384201\nStep 115, loss: 0.020444221794605255\nStep 116, loss: 0.02078094705939293\nStep 117, loss: 0.019729148596525192\nStep 118, loss: 0.020966283977031708\nStep 119, loss: 0.017715569585561752\nStep 120, loss: 0.018365614116191864\nStep 121, loss: 0.01908261515200138\nStep 122, loss: 0.019502971321344376\nStep 123, loss: 0.017858631908893585\nStep 124, loss: 0.015927297994494438\nStep 125, loss: 0.019642645493149757\nStep 126, loss: 0.018544591963291168\nStep 127, loss: 0.015933632850646973\nStep 128, loss: 0.01618993654847145\nStep 129, loss: 0.016362672671675682\nStep 130, loss: 0.015333064831793308\nStep 131, loss: 0.016816189512610435\nStep 132, 
loss: 0.0164976604282856\nStep 133, loss: 0.016223587095737457\nStep 134, loss: 0.017775867134332657\nStep 106, loss: 0.02335542067885399\nStep 107, loss: 0.020467158406972885\nStep 135, loss: 0.015498277731239796\nStep 136, loss: 0.015683045610785484\nStep 137, loss: 0.01383606344461441\nStep 138, loss: 0.01694605126976967\nStep 139, loss: 0.017274845391511917\nStep 140, loss: 0.015150913037359715\nStep 141, loss: 0.015680069103837013\nStep 142, loss: 0.014308595098555088\nStep 143, loss: 0.015988625586032867\nStep 144, loss: 0.01708797924220562\nStep 145, loss: 0.015064721927046776\nStep 146, loss: 0.015196548774838448\nStep 147, loss: 0.015233844518661499\nStep 148, loss: 0.015244445763528347\nStep 149, loss: 0.013996983878314495\nStep 150, loss: 0.013958653435111046\nStep 151, loss: 0.01511000283062458\nStep 152, loss: 0.013374081812798977\nStep 153, loss: 0.014145979657769203\nStep 154, loss: 0.015165048651397228\nStep 155, loss: 0.014963418245315552\nStep 156, loss: 0.015931718051433563\nStep 157, loss: 0.015752702951431274\nStep 158, loss: 0.01360904797911644\nStep 159, loss: 0.015732955187559128\nStep 160, loss: 0.016268953680992126\nStep 161, loss: 0.012776114046573639\nStep 108, loss: 0.023992128670215607\nStep 109, loss: 0.02077864110469818\nStep 110, loss: 0.022285621613264084\nStep 111, loss: 0.021920684725046158\nStep 112, loss: 0.020692449063062668\nStep 113, loss: 0.018958691507577896\nStep 114, loss: 0.02061193436384201\nStep 115, loss: 0.020444221794605255\nStep 116, loss: 0.02078094705939293\nStep 117, loss: 0.019729148596525192\nStep 118, loss: 0.020966283977031708\nStep 119, loss: 0.017715569585561752\nStep 120, loss: 0.018365614116191864\nStep 121, loss: 0.01908261515200138\nStep 122, loss: 0.019502971321344376\nStep 123, loss: 0.017858631908893585\nStep 124, loss: 0.015927297994494438\nStep 125, loss: 0.019642645493149757\nStep 126, loss: 0.018544591963291168\nStep 127, loss: 0.015933632850646973\nStep 128, loss: 0.01618993654847145\nStep 129, loss: 0.016362672671675682\nStep 130, loss: 0.015333064831793308\nStep 131, loss: 0.016816189512610435\nStep 132, loss: 0.0164976604282856\nStep 133, loss: 0.016223587095737457\nStep 134, loss: 0.017775867134332657\nStep 162, loss: 0.014002346433699131\nStep 163, loss: 0.016474682837724686\nStep 164, loss: 0.014050696976482868\nStep 165, loss: 0.015429678373038769\nStep 166, loss: 0.014365949667990208\nStep 167, loss: 0.014085184782743454\nStep 168, loss: 0.015173561871051788\nStep 169, loss: 0.014162395149469376\nStep 170, loss: 0.014322423376142979\nStep 171, loss: 0.013332856819033623\nStep 172, loss: 0.012761498801410198\nStep 173, loss: 0.01492702029645443\nStep 174, loss: 0.013961599208414555\nStep 175, loss: 0.012958317995071411\nStep 176, loss: 0.015379429794847965\nStep 177, loss: 0.014328060671687126\nStep 178, loss: 0.014507713727653027\nStep 179, loss: 0.013650377281010151\nStep 180, loss: 0.013451213017106056\nStep 181, loss: 0.012091542594134808\nStep 182, loss: 0.013077723793685436\nStep 183, loss: 0.01345899235457182\nStep 184, loss: 0.01339371595531702\nStep 185, loss: 0.011480382643640041\nStep 186, loss: 0.01359980646520853\nStep 187, loss: 0.01263586338609457\nStep 188, loss: 0.012540409341454506\nStep 135, loss: 0.015498277731239796\nStep 136, loss: 0.015683045610785484\nStep 137, loss: 0.01383606344461441\nStep 138, loss: 0.01694605126976967\nStep 139, loss: 0.017274845391511917\nStep 140, loss: 0.015150913037359715\nStep 141, loss: 0.015680069103837013\nStep 142, loss: 0.014308595098555088\nStep 143, 
loss: 0.015988625586032867\nStep 144, loss: 0.01708797924220562\nStep 145, loss: 0.015064721927046776\nStep 146, loss: 0.015196548774838448\nStep 147, loss: 0.015233844518661499\nStep 148, loss: 0.015244445763528347\nStep 149, loss: 0.013996983878314495\nStep 150, loss: 0.013958653435111046\nStep 151, loss: 0.01511000283062458\nStep 152, loss: 0.013374081812798977\nStep 153, loss: 0.014145979657769203\nStep 154, loss: 0.015165048651397228\nStep 155, loss: 0.014963418245315552\nStep 156, loss: 0.015931718051433563\nStep 157, loss: 0.015752702951431274\nStep 158, loss: 0.01360904797911644\nStep 159, loss: 0.015732955187559128\nStep 160, loss: 0.016268953680992126\nStep 161, loss: 0.012776114046573639\nStep 189, loss: 0.013751196675002575\nStep 190, loss: 0.012528365477919579\nStep 191, loss: 0.01227360125631094\nStep 192, loss: 0.012847634963691235\nStep 193, loss: 0.011203471571207047\nStep 194, loss: 0.013195750303566456\nStep 195, loss: 0.012723291292786598\nStep 196, loss: 0.010990786366164684\nStep 197, loss: 0.009713550098240376\nStep 198, loss: 0.011404153890907764\nStep 199, loss: 0.010602226480841637\nStep 200, loss: 0.010594911873340607\nStep 201, loss: 0.0118786059319973\nStep 202, loss: 0.012351179495453835\nStep 203, loss: 0.012053634971380234\nStep 204, loss: 0.01243047509342432\nStep 205, loss: 0.012431791052222252\nStep 206, loss: 0.012530690059065819\nStep 207, loss: 0.009664309211075306\nStep 208, loss: 0.01006846223026514\nStep 209, loss: 0.010861100628972054\nStep 210, loss: 0.011857938952744007\nStep 211, loss: 0.01253808755427599\nStep 212, loss: 0.011201965622603893\nStep 213, loss: 0.012819544412195683\nStep 214, loss: 0.010710623115301132\nStep 215, loss: 0.011954248882830143\nStep 162, loss: 0.014002346433699131\nStep 163, loss: 0.016474682837724686\nStep 164, loss: 0.014050696976482868\nStep 165, loss: 0.015429678373038769\nStep 166, loss: 0.014365949667990208\nStep 167, loss: 0.014085184782743454\nStep 168, loss: 0.015173561871051788\nStep 169, loss: 0.014162395149469376\nStep 170, loss: 0.014322423376142979\nStep 171, loss: 0.013332856819033623\nStep 172, loss: 0.012761498801410198\nStep 173, loss: 0.01492702029645443\nStep 174, loss: 0.013961599208414555\nStep 175, loss: 0.012958317995071411\nStep 176, loss: 0.015379429794847965\nStep 177, loss: 0.014328060671687126\nStep 178, loss: 0.014507713727653027\nStep 179, loss: 0.013650377281010151\nStep 180, loss: 0.013451213017106056\nStep 181, loss: 0.012091542594134808\nStep 182, loss: 0.013077723793685436\nStep 183, loss: 0.01345899235457182\nStep 184, loss: 0.01339371595531702\nStep 185, loss: 0.011480382643640041\nStep 186, loss: 0.01359980646520853\nStep 187, loss: 0.01263586338609457\nStep 188, loss: 0.012540409341454506\nStep 216, loss: 0.012150179594755173\nStep 217, loss: 0.012084055691957474\nStep 218, loss: 0.013123934157192707\nStep 189, loss: 0.013751196675002575\nStep 190, loss: 0.012528365477919579\nStep 191, loss: 0.01227360125631094\nStep 192, loss: 0.012847634963691235\nStep 193, loss: 0.011203471571207047\nStep 194, loss: 0.013195750303566456\nStep 195, loss: 0.012723291292786598\nStep 196, loss: 0.010990786366164684\nStep 197, loss: 0.009713550098240376\nStep 198, loss: 0.011404153890907764\nStep 199, loss: 0.010602226480841637\nStep 200, loss: 0.010594911873340607\nStep 201, loss: 0.0118786059319973\nStep 202, loss: 0.012351179495453835\nStep 203, loss: 0.012053634971380234\nStep 204, loss: 0.01243047509342432\nStep 205, loss: 0.012431791052222252\nStep 206, loss: 0.012530690059065819\nStep 207, 
loss: 0.009664309211075306\nStep 208, loss: 0.01006846223026514\nStep 209, loss: 0.010861100628972054\nStep 210, loss: 0.011857938952744007\nStep 211, loss: 0.01253808755427599\nStep 212, loss: 0.011201965622603893\nStep 213, loss: 0.012819544412195683\nStep 214, loss: 0.010710623115301132\nStep 215, loss: 0.011954248882830143\nRunning on 8 devices.\nCounting all components: ['action_in', 'encoder', 'vq', 'action_up', 'patch_up', 'decoder']\nParameter counts:\n{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\nStarting training from step 0...\nStep 0, loss: 0.3309202492237091\nStep 1, loss: 0.27616071701049805\nStep 2, loss: 0.23833505809307098\nStep 3, loss: 0.20353123545646667\nStep 4, loss: 0.18312352895736694\nStep 5, loss: 0.16567449271678925\nStep 6, loss: 0.1496116816997528\nStep 7, loss: 0.1397426575422287\nStep 8, loss: 0.1301795393228531\nStep 9, loss: 0.12435252219438553\nStep 10, loss: 0.11484285444021225\nStep 11, loss: 0.11342452466487885\nStep 12, loss: 0.10503888875246048\nStep 13, loss: 0.09828095138072968\nStep 14, loss: 0.10296234488487244\nStep 15, loss: 0.09688253700733185\nStep 16, loss: 0.09001464396715164\nStep 17, loss: 0.08598079532384872\nStep 18, loss: 0.08745668828487396\nStep 19, loss: 0.0775802731513977\nStep 20, loss: 0.08166162669658661\nStep 216, loss: 0.012150179594755173\nStep 217, loss: 0.012084055691957474\nStep 218, loss: 0.013123934157192707\nStep 21, loss: 0.07300376892089844\nStep 22, loss: 0.07714924216270447\nStep 23, loss: 0.07810085266828537\nStep 24, loss: 0.0794411152601242\nStep 25, loss: 0.07307913899421692\nStep 26, loss: 0.07097838819026947\nStep 27, loss: 0.06554681807756424\nStep 28, loss: 0.065010204911232\nStep 29, loss: 0.06810400635004044\nStep 30, loss: 0.06351493299007416\nStep 31, loss: 0.057667750865221024\nStep 32, loss: 0.062011126428842545\nStep 33, loss: 0.0609549917280674\nStep 34, loss: 0.06150144338607788\nStep 35, loss: 0.057500164955854416\nStep 36, loss: 0.05634911358356476\nStep 37, loss: 0.054869506508111954\nStep 38, loss: 0.05245821550488472\nStep 39, loss: 0.05741355940699577\nStep 40, loss: 0.05354347452521324\nStep 41, loss: 0.05392030254006386\nStep 42, loss: 0.05377781391143799\nStep 43, loss: 0.04958712309598923\nStep 44, loss: 0.05107380822300911\nStep 45, loss: 0.05087074637413025\nStep 46, loss: 0.0468355156481266\nStep 47, loss: 0.051542893052101135\nStep 48, loss: 0.0509573332965374\nStep 49, loss: 0.0446152463555336\nRunning on 8 devices.\nCounting all components: ['action_in', 'encoder', 'vq', 'action_up', 'patch_up', 'decoder']\nParameter counts:\n{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\nStarting training from step 0...\nStep 0, loss: 0.3309202492237091\nStep 1, loss: 0.27616071701049805\nStep 2, loss: 0.23833505809307098\nStep 3, loss: 0.20353123545646667\nStep 4, loss: 0.18312352895736694\nStep 5, loss: 0.16567449271678925\nStep 6, loss: 0.1496116816997528\nStep 7, loss: 0.1397426575422287\nStep 8, loss: 0.1301795393228531\nStep 9, loss: 0.12435252219438553\nStep 10, loss: 0.11484285444021225\nStep 11, loss: 0.11342452466487885\nStep 12, loss: 0.10503888875246048\nStep 13, loss: 0.09828095138072968\nStep 14, loss: 0.10296234488487244\nStep 15, loss: 0.09688253700733185\nStep 16, loss: 0.09001464396715164\nStep 17, loss: 0.08598079532384872\nStep 18, loss: 0.08745668828487396\nStep 19, loss: 0.0775802731513977\nStep 20, loss: 
0.08166162669658661\nStep 50, loss: 0.041748303920030594\nStep 51, loss: 0.04902645945549011\nStep 52, loss: 0.047454170882701874\nStep 53, loss: 0.040505826473236084\nStep 54, loss: 0.03718667849898338\nStep 55, loss: 0.03961941972374916\nStep 56, loss: 0.03947916626930237\nStep 57, loss: 0.03693949803709984\nStep 58, loss: 0.03782138228416443\nStep 59, loss: 0.03795648366212845\nStep 60, loss: 0.03371633216738701\nStep 61, loss: 0.044886138290166855\nStep 62, loss: 0.038592565804719925\nStep 63, loss: 0.0360867939889431\nStep 64, loss: 0.036005713045597076\nStep 65, loss: 0.034621596336364746\nStep 66, loss: 0.03188462182879448\nStep 67, loss: 0.029557932168245316\nStep 68, loss: 0.0318872295320034\nStep 69, loss: 0.030645810067653656\nStep 70, loss: 0.028607860207557678\nStep 71, loss: 0.03495610132813454\nStep 72, loss: 0.029537387192249298\nStep 73, loss: 0.033071547746658325\nStep 74, loss: 0.03534964472055435\nStep 75, loss: 0.03360242769122124\nStep 76, loss: 0.028890511021018028\nStep 77, loss: 0.029018152505159378\nStep 21, loss: 0.07300376892089844\nStep 22, loss: 0.07714924216270447\nStep 23, loss: 0.07810085266828537\nStep 24, loss: 0.0794411152601242\nStep 25, loss: 0.07307913899421692\nStep 26, loss: 0.07097838819026947\nStep 27, loss: 0.06554681807756424\nStep 28, loss: 0.065010204911232\nStep 29, loss: 0.06810400635004044\nStep 30, loss: 0.06351493299007416\nStep 31, loss: 0.057667750865221024\nStep 32, loss: 0.062011126428842545\nStep 33, loss: 0.0609549917280674\nStep 34, loss: 0.06150144338607788\nStep 35, loss: 0.057500164955854416\nStep 36, loss: 0.05634911358356476\nStep 37, loss: 0.054869506508111954\nStep 38, loss: 0.05245821550488472\nStep 39, loss: 0.05741355940699577\nStep 40, loss: 0.05354347452521324\nStep 41, loss: 0.05392030254006386\nStep 42, loss: 0.05377781391143799\nStep 43, loss: 0.04958712309598923\nStep 44, loss: 0.05107380822300911\nStep 45, loss: 0.05087074637413025\nStep 46, loss: 0.0468355156481266\nStep 47, loss: 0.051542893052101135\nStep 48, loss: 0.0509573332965374\nStep 49, loss: 0.0446152463555336\nStep 78, loss: 0.029858632013201714\nStep 79, loss: 0.02790442295372486\nStep 80, loss: 0.030148373916745186\nStep 81, loss: 0.02741095796227455\nStep 82, loss: 0.02769358456134796\nStep 83, loss: 0.03266967460513115\nStep 84, loss: 0.030720101669430733\nStep 85, loss: 0.02795012854039669\nStep 86, loss: 0.027471110224723816\nStep 87, loss: 0.028233686462044716\nStep 88, loss: 0.026210511103272438\nStep 89, loss: 0.027847111225128174\nStep 90, loss: 0.02239195443689823\nStep 91, loss: 0.02604525536298752\nStep 92, loss: 0.022576235234737396\nStep 93, loss: 0.025205545127391815\nStep 94, loss: 0.022538412362337112\nStep 95, loss: 0.023945016786456108\nStep 96, loss: 0.02386571280658245\nStep 97, loss: 0.02088065817952156\nStep 98, loss: 0.02455727569758892\nStep 99, loss: 0.024158356711268425\nStep 100, loss: 0.02200816199183464\nStep 101, loss: 0.023882700130343437\nStep 102, loss: 0.023078344762325287\nStep 103, loss: 0.021429037675261497\nStep 104, loss: 0.02174082212150097\nStep 105, loss: 0.023439496755599976\nStep 50, loss: 0.041748303920030594\nStep 51, loss: 0.04902645945549011\nStep 52, loss: 0.047454170882701874\nStep 53, loss: 0.040505826473236084\nStep 54, loss: 0.03718667849898338\nStep 55, loss: 0.03961941972374916\nStep 56, loss: 0.03947916626930237\nStep 57, loss: 0.03693949803709984\nStep 58, loss: 0.03782138228416443\nStep 59, loss: 0.03795648366212845\nStep 60, loss: 0.03371633216738701\nStep 61, loss: 0.044886138290166855\nStep 
62, loss: 0.038592565804719925\nStep 63, loss: 0.0360867939889431\nStep 64, loss: 0.036005713045597076\nStep 65, loss: 0.034621596336364746\nStep 66, loss: 0.03188462182879448\nStep 67, loss: 0.029557932168245316\nStep 68, loss: 0.0318872295320034\nStep 69, loss: 0.030645810067653656\nStep 70, loss: 0.028607860207557678\nStep 71, loss: 0.03495610132813454\nStep 72, loss: 0.029537387192249298\nStep 73, loss: 0.033071547746658325\nStep 74, loss: 0.03534964472055435\nStep 75, loss: 0.03360242769122124\nStep 76, loss: 0.028890511021018028\nStep 77, loss: 0.029018152505159378\nStep 106, loss: 0.02335542067885399\nStep 107, loss: 0.020467158406972885\nStep 78, loss: 0.029858632013201714\nStep 79, loss: 0.02790442295372486\nStep 80, loss: 0.030148373916745186\nStep 81, loss: 0.02741095796227455\nStep 82, loss: 0.02769358456134796\nStep 83, loss: 0.03266967460513115\nStep 84, loss: 0.030720101669430733\nStep 85, loss: 0.02795012854039669\nStep 86, loss: 0.027471110224723816\nStep 87, loss: 0.028233686462044716\nStep 88, loss: 0.026210511103272438\nStep 89, loss: 0.027847111225128174\nStep 90, loss: 0.02239195443689823\nStep 91, loss: 0.02604525536298752\nStep 92, loss: 0.022576235234737396\nStep 93, loss: 0.025205545127391815\nStep 94, loss: 0.022538412362337112\nStep 95, loss: 0.023945016786456108\nStep 96, loss: 0.02386571280658245\nStep 97, loss: 0.02088065817952156\nStep 98, loss: 0.02455727569758892\nStep 99, loss: 0.024158356711268425\nStep 100, loss: 0.02200816199183464\nStep 101, loss: 0.023882700130343437\nStep 102, loss: 0.023078344762325287\nStep 103, loss: 0.021429037675261497\nStep 104, loss: 0.02174082212150097\nStep 105, loss: 0.023439496755599976\nStep 108, loss: 0.023992128670215607\nStep 109, loss: 0.02077864110469818\nStep 110, loss: 0.022285621613264084\nStep 111, loss: 0.021920684725046158\nStep 112, loss: 0.020692449063062668\nStep 113, loss: 0.018958691507577896\nStep 114, loss: 0.02061193436384201\nStep 115, loss: 0.020444221794605255\nStep 116, loss: 0.02078094705939293\nStep 117, loss: 0.019729148596525192\nStep 118, loss: 0.020966283977031708\nStep 119, loss: 0.017715569585561752\nStep 120, loss: 0.018365614116191864\nStep 121, loss: 0.01908261515200138\nStep 122, loss: 0.019502971321344376\nStep 123, loss: 0.017858631908893585\nStep 124, loss: 0.015927297994494438\nStep 125, loss: 0.019642645493149757\nStep 126, loss: 0.018544591963291168\nStep 127, loss: 0.015933632850646973\nStep 128, loss: 0.01618993654847145\nStep 129, loss: 0.016362672671675682\nStep 130, loss: 0.015333064831793308\nStep 131, loss: 0.016816189512610435\nStep 132, loss: 0.0164976604282856\nStep 133, loss: 0.016223587095737457\nStep 134, loss: 0.017775867134332657\nStep 106, loss: 0.02335542067885399\nStep 107, loss: 0.020467158406972885\nStep 135, loss: 0.015498277731239796\nStep 136, loss: 0.015683045610785484\nStep 137, loss: 0.01383606344461441\nStep 138, loss: 0.01694605126976967\nStep 139, loss: 0.017274845391511917\nStep 140, loss: 0.015150913037359715\nStep 141, loss: 0.015680069103837013\nStep 142, loss: 0.014308595098555088\nStep 143, loss: 0.015988625586032867\nStep 144, loss: 0.01708797924220562\nStep 145, loss: 0.015064721927046776\nStep 146, loss: 0.015196548774838448\nStep 147, loss: 0.015233844518661499\nStep 148, loss: 0.015244445763528347\nStep 149, loss: 0.013996983878314495\nStep 150, loss: 0.013958653435111046\nStep 151, loss: 0.01511000283062458\nStep 152, loss: 0.013374081812798977\nStep 153, loss: 0.014145979657769203\nStep 154, loss: 0.015165048651397228\nStep 155, loss: 
0.014963418245315552\nStep 156, loss: 0.015931718051433563\nStep 157, loss: 0.015752702951431274\nStep 158, loss: 0.01360904797911644\nStep 159, loss: 0.015732955187559128\nStep 160, loss: 0.016268953680992126\nStep 161, loss: 0.012776114046573639\nStep 108, loss: 0.023992128670215607\nStep 109, loss: 0.02077864110469818\nStep 110, loss: 0.022285621613264084\nStep 111, loss: 0.021920684725046158\nStep 112, loss: 0.020692449063062668\nStep 113, loss: 0.018958691507577896\nStep 114, loss: 0.02061193436384201\nStep 115, loss: 0.020444221794605255\nStep 116, loss: 0.02078094705939293\nStep 117, loss: 0.019729148596525192\nStep 118, loss: 0.020966283977031708\nStep 119, loss: 0.017715569585561752\nStep 120, loss: 0.018365614116191864\nStep 121, loss: 0.01908261515200138\nStep 122, loss: 0.019502971321344376\nStep 123, loss: 0.017858631908893585\nStep 124, loss: 0.015927297994494438\nStep 125, loss: 0.019642645493149757\nStep 126, loss: 0.018544591963291168\nStep 127, loss: 0.015933632850646973\nStep 128, loss: 0.01618993654847145\nStep 129, loss: 0.016362672671675682\nStep 130, loss: 0.015333064831793308\nStep 131, loss: 0.016816189512610435\nStep 132, loss: 0.0164976604282856\nStep 133, loss: 0.016223587095737457\nStep 134, loss: 0.017775867134332657\nStep 162, loss: 0.014002346433699131\nStep 163, loss: 0.016474682837724686\nStep 164, loss: 0.014050696976482868\nStep 165, loss: 0.015429678373038769\nStep 166, loss: 0.014365949667990208\nStep 167, loss: 0.014085184782743454\nStep 168, loss: 0.015173561871051788\nStep 169, loss: 0.014162395149469376\nStep 170, loss: 0.014322423376142979\nStep 171, loss: 0.013332856819033623\nStep 172, loss: 0.012761498801410198\nStep 173, loss: 0.01492702029645443\nStep 174, loss: 0.013961599208414555\nStep 175, loss: 0.012958317995071411\nStep 176, loss: 0.015379429794847965\nStep 177, loss: 0.014328060671687126\nStep 178, loss: 0.014507713727653027\nStep 179, loss: 0.013650377281010151\nStep 180, loss: 0.013451213017106056\nStep 181, loss: 0.012091542594134808\nStep 182, loss: 0.013077723793685436\nStep 183, loss: 0.01345899235457182\nStep 184, loss: 0.01339371595531702\nStep 185, loss: 0.011480382643640041\nStep 186, loss: 0.01359980646520853\nStep 187, loss: 0.01263586338609457\nStep 188, loss: 0.012540409341454506\nStep 135, loss: 0.015498277731239796\nStep 136, loss: 0.015683045610785484\nStep 137, loss: 0.01383606344461441\nStep 138, loss: 0.01694605126976967\nStep 139, loss: 0.017274845391511917\nStep 140, loss: 0.015150913037359715\nStep 141, loss: 0.015680069103837013\nStep 142, loss: 0.014308595098555088\nStep 143, loss: 0.015988625586032867\nStep 144, loss: 0.01708797924220562\nStep 145, loss: 0.015064721927046776\nStep 146, loss: 0.015196548774838448\nStep 147, loss: 0.015233844518661499\nStep 148, loss: 0.015244445763528347\nStep 149, loss: 0.013996983878314495\nStep 150, loss: 0.013958653435111046\nStep 151, loss: 0.01511000283062458\nStep 152, loss: 0.013374081812798977\nStep 153, loss: 0.014145979657769203\nStep 154, loss: 0.015165048651397228\nStep 155, loss: 0.014963418245315552\nStep 156, loss: 0.015931718051433563\nStep 157, loss: 0.015752702951431274\nStep 158, loss: 0.01360904797911644\nStep 159, loss: 0.015732955187559128\nStep 160, loss: 0.016268953680992126\nStep 161, loss: 0.012776114046573639\nStep 189, loss: 0.013751196675002575\nStep 190, loss: 0.012528365477919579\nStep 191, loss: 0.01227360125631094\nStep 192, loss: 0.012847634963691235\nStep 193, loss: 0.011203471571207047\nStep 194, loss: 0.013195750303566456\nStep 195, loss: 
0.012723291292786598\nStep 196, loss: 0.010990786366164684\nStep 197, loss: 0.009713550098240376\nStep 198, loss: 0.011404153890907764\nStep 199, loss: 0.010602226480841637\nStep 200, loss: 0.010594911873340607\nStep 201, loss: 0.0118786059319973\nStep 202, loss: 0.012351179495453835\nStep 203, loss: 0.012053634971380234\nStep 204, loss: 0.01243047509342432\nStep 205, loss: 0.012431791052222252\nStep 206, loss: 0.012530690059065819\nStep 207, loss: 0.009664309211075306\nStep 208, loss: 0.01006846223026514\nStep 209, loss: 0.010861100628972054\nStep 210, loss: 0.011857938952744007\nStep 211, loss: 0.01253808755427599\nStep 212, loss: 0.011201965622603893\nStep 213, loss: 0.012819544412195683\nStep 214, loss: 0.010710623115301132\nStep 215, loss: 0.011954248882830143\nStep 162, loss: 0.014002346433699131\nStep 163, loss: 0.016474682837724686\nStep 164, loss: 0.014050696976482868\nStep 165, loss: 0.015429678373038769\nStep 166, loss: 0.014365949667990208\nStep 167, loss: 0.014085184782743454\nStep 168, loss: 0.015173561871051788\nStep 169, loss: 0.014162395149469376\nStep 170, loss: 0.014322423376142979\nStep 171, loss: 0.013332856819033623\nStep 172, loss: 0.012761498801410198\nStep 173, loss: 0.01492702029645443\nStep 174, loss: 0.013961599208414555\nStep 175, loss: 0.012958317995071411\nStep 176, loss: 0.015379429794847965\nStep 177, loss: 0.014328060671687126\nStep 178, loss: 0.014507713727653027\nStep 179, loss: 0.013650377281010151\nStep 180, loss: 0.013451213017106056\nStep 181, loss: 0.012091542594134808\nStep 182, loss: 0.013077723793685436\nStep 183, loss: 0.01345899235457182\nStep 184, loss: 0.01339371595531702\nStep 185, loss: 0.011480382643640041\nStep 186, loss: 0.01359980646520853\nStep 187, loss: 0.01263586338609457\nStep 188, loss: 0.012540409341454506\nStep 216, loss: 0.012150179594755173\nStep 217, loss: 0.012084055691957474\nStep 218, loss: 0.013123934157192707\nStep 189, loss: 0.013751196675002575\nStep 190, loss: 0.012528365477919579\nStep 191, loss: 0.01227360125631094\nStep 192, loss: 0.012847634963691235\nStep 193, loss: 0.011203471571207047\nStep 194, loss: 0.013195750303566456\nStep 195, loss: 0.012723291292786598\nStep 196, loss: 0.010990786366164684\nStep 197, loss: 0.009713550098240376\nStep 198, loss: 0.011404153890907764\nStep 199, loss: 0.010602226480841637\nStep 200, loss: 0.010594911873340607\nStep 201, loss: 0.0118786059319973\nStep 202, loss: 0.012351179495453835\nStep 203, loss: 0.012053634971380234\nStep 204, loss: 0.01243047509342432\nStep 205, loss: 0.012431791052222252\nStep 206, loss: 0.012530690059065819\nStep 207, loss: 0.009664309211075306\nStep 208, loss: 0.01006846223026514\nStep 209, loss: 0.010861100628972054\nStep 210, loss: 0.011857938952744007\nStep 211, loss: 0.01253808755427599\nStep 212, loss: 0.011201965622603893\nStep 213, loss: 0.012819544412195683\nStep 214, loss: 0.010710623115301132\nStep 215, loss: 0.011954248882830143\nRunning on 8 devices.\nCounting all components: ['action_in', 'encoder', 'vq', 'action_up', 'patch_up', 'decoder']\nParameter counts:\n{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\nStarting training from step 0...\nStep 0, loss: 0.3309202492237091\nStep 1, loss: 0.27616071701049805\nStep 2, loss: 0.23833505809307098\nStep 3, loss: 0.20353123545646667\nStep 4, loss: 0.18312352895736694\nStep 5, loss: 0.16567449271678925\nStep 6, loss: 0.1496116816997528\nStep 7, loss: 0.1397426575422287\nStep 8, loss: 0.1301795393228531\nStep 9, 
loss: 0.12435252219438553\nStep 10, loss: 0.11484285444021225\nStep 11, loss: 0.11342452466487885\nStep 12, loss: 0.10503888875246048\nStep 13, loss: 0.09828095138072968\nStep 14, loss: 0.10296234488487244\nStep 15, loss: 0.09688253700733185\nStep 16, loss: 0.09001464396715164\nStep 17, loss: 0.08598079532384872\nStep 18, loss: 0.08745668828487396\nStep 19, loss: 0.0775802731513977\nStep 20, loss: 0.08166162669658661\nStep 216, loss: 0.012150179594755173\nStep 217, loss: 0.012084055691957474\nStep 218, loss: 0.013123934157192707\nStep 21, loss: 0.07300376892089844\nStep 22, loss: 0.07714924216270447\nStep 23, loss: 0.07810085266828537\nStep 24, loss: 0.0794411152601242\nStep 25, loss: 0.07307913899421692\nStep 26, loss: 0.07097838819026947\nStep 27, loss: 0.06554681807756424\nStep 28, loss: 0.065010204911232\nStep 29, loss: 0.06810400635004044\nStep 30, loss: 0.06351493299007416\nStep 31, loss: 0.057667750865221024\nStep 32, loss: 0.062011126428842545\nStep 33, loss: 0.0609549917280674\nStep 34, loss: 0.06150144338607788\nStep 35, loss: 0.057500164955854416\nStep 36, loss: 0.05634911358356476\nStep 37, loss: 0.054869506508111954\nStep 38, loss: 0.05245821550488472\nStep 39, loss: 0.05741355940699577\nStep 40, loss: 0.05354347452521324\nStep 41, loss: 0.05392030254006386\nStep 42, loss: 0.05377781391143799\nStep 43, loss: 0.04958712309598923\nStep 44, loss: 0.05107380822300911\nStep 45, loss: 0.05087074637413025\nStep 46, loss: 0.0468355156481266\nStep 47, loss: 0.051542893052101135\nStep 48, loss: 0.0509573332965374\nStep 49, loss: 0.0446152463555336\nRunning on 8 devices.\nCounting all components: ['action_in', 'encoder', 'vq', 'action_up', 'patch_up', 'decoder']\nParameter counts:\n{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\nStarting training from step 0...\nStep 0, loss: 0.3309202492237091\nStep 1, loss: 0.27616071701049805\nStep 2, loss: 0.23833505809307098\nStep 3, loss: 0.20353123545646667\nStep 4, loss: 0.18312352895736694\nStep 5, loss: 0.16567449271678925\nStep 6, loss: 0.1496116816997528\nStep 7, loss: 0.1397426575422287\nStep 8, loss: 0.1301795393228531\nStep 9, loss: 0.12435252219438553\nStep 10, loss: 0.11484285444021225\nStep 11, loss: 0.11342452466487885\nStep 12, loss: 0.10503888875246048\nStep 13, loss: 0.09828095138072968\nStep 14, loss: 0.10296234488487244\nStep 15, loss: 0.09688253700733185\nStep 16, loss: 0.09001464396715164\nStep 17, loss: 0.08598079532384872\nStep 18, loss: 0.08745668828487396\nStep 19, loss: 0.0775802731513977\nStep 20, loss: 0.08166162669658661\nStep 50, loss: 0.041748303920030594\nStep 51, loss: 0.04902645945549011\nStep 52, loss: 0.047454170882701874\nStep 53, loss: 0.040505826473236084\nStep 54, loss: 0.03718667849898338\nStep 55, loss: 0.03961941972374916\nStep 56, loss: 0.03947916626930237\nStep 57, loss: 0.03693949803709984\nStep 58, loss: 0.03782138228416443\nStep 59, loss: 0.03795648366212845\nStep 60, loss: 0.03371633216738701\nStep 61, loss: 0.044886138290166855\nStep 62, loss: 0.038592565804719925\nStep 63, loss: 0.0360867939889431\nStep 64, loss: 0.036005713045597076\nStep 65, loss: 0.034621596336364746\nStep 66, loss: 0.03188462182879448\nStep 67, loss: 0.029557932168245316\nStep 68, loss: 0.0318872295320034\nStep 69, loss: 0.030645810067653656\nStep 70, loss: 0.028607860207557678\nStep 71, loss: 0.03495610132813454\nStep 72, loss: 0.029537387192249298\nStep 73, loss: 0.033071547746658325\nStep 74, loss: 0.03534964472055435\nStep 75, loss: 
0.03360242769122124\nStep 76, loss: 0.028890511021018028\nStep 77, loss: 0.029018152505159378\nStep 21, loss: 0.07300376892089844\nStep 22, loss: 0.07714924216270447\nStep 23, loss: 0.07810085266828537\nStep 24, loss: 0.0794411152601242\nStep 25, loss: 0.07307913899421692\nStep 26, loss: 0.07097838819026947\nStep 27, loss: 0.06554681807756424\nStep 28, loss: 0.065010204911232\nStep 29, loss: 0.06810400635004044\nStep 30, loss: 0.06351493299007416\nStep 31, loss: 0.057667750865221024\nStep 32, loss: 0.062011126428842545\nStep 33, loss: 0.0609549917280674\nStep 34, loss: 0.06150144338607788\nStep 35, loss: 0.057500164955854416\nStep 36, loss: 0.05634911358356476\nStep 37, loss: 0.054869506508111954\nStep 38, loss: 0.05245821550488472\nStep 39, loss: 0.05741355940699577\nStep 40, loss: 0.05354347452521324\nStep 41, loss: 0.05392030254006386\nStep 42, loss: 0.05377781391143799\nStep 43, loss: 0.04958712309598923\nStep 44, loss: 0.05107380822300911\nStep 45, loss: 0.05087074637413025\nStep 46, loss: 0.0468355156481266\nStep 47, loss: 0.051542893052101135\nStep 48, loss: 0.0509573332965374\nStep 49, loss: 0.0446152463555336\nStep 78, loss: 0.029858632013201714\nStep 79, loss: 0.02790442295372486\nStep 80, loss: 0.030148373916745186\nStep 81, loss: 0.02741095796227455\nStep 82, loss: 0.02769358456134796\nStep 83, loss: 0.03266967460513115\nStep 84, loss: 0.030720101669430733\nStep 85, loss: 0.02795012854039669\nStep 86, loss: 0.027471110224723816\nStep 87, loss: 0.028233686462044716\nStep 88, loss: 0.026210511103272438\nStep 89, loss: 0.027847111225128174\nStep 90, loss: 0.02239195443689823\nStep 91, loss: 0.02604525536298752\nStep 92, loss: 0.022576235234737396\nStep 93, loss: 0.025205545127391815\nStep 94, loss: 0.022538412362337112\nStep 95, loss: 0.023945016786456108\nStep 96, loss: 0.02386571280658245\nStep 97, loss: 0.02088065817952156\nStep 98, loss: 0.02455727569758892\nStep 99, loss: 0.024158356711268425\nStep 100, loss: 0.02200816199183464\nStep 101, loss: 0.023882700130343437\nStep 102, loss: 0.023078344762325287\nStep 103, loss: 0.021429037675261497\nStep 104, loss: 0.02174082212150097\nStep 105, loss: 0.023439496755599976\nStep 50, loss: 0.041748303920030594\nStep 51, loss: 0.04902645945549011\nStep 52, loss: 0.047454170882701874\nStep 53, loss: 0.040505826473236084\nStep 54, loss: 0.03718667849898338\nStep 55, loss: 0.03961941972374916\nStep 56, loss: 0.03947916626930237\nStep 57, loss: 0.03693949803709984\nStep 58, loss: 0.03782138228416443\nStep 59, loss: 0.03795648366212845\nStep 60, loss: 0.03371633216738701\nStep 61, loss: 0.044886138290166855\nStep 62, loss: 0.038592565804719925\nStep 63, loss: 0.0360867939889431\nStep 64, loss: 0.036005713045597076\nStep 65, loss: 0.034621596336364746\nStep 66, loss: 0.03188462182879448\nStep 67, loss: 0.029557932168245316\nStep 68, loss: 0.0318872295320034\nStep 69, loss: 0.030645810067653656\nStep 70, loss: 0.028607860207557678\nStep 71, loss: 0.03495610132813454\nStep 72, loss: 0.029537387192249298\nStep 73, loss: 0.033071547746658325\nStep 74, loss: 0.03534964472055435\nStep 75, loss: 0.03360242769122124\nStep 76, loss: 0.028890511021018028\nStep 77, loss: 0.029018152505159378\nStep 106, loss: 0.02335542067885399\nStep 107, loss: 0.020467158406972885\nStep 78, loss: 0.029858632013201714\nStep 79, loss: 0.02790442295372486\nStep 80, loss: 0.030148373916745186\nStep 81, loss: 0.02741095796227455\nStep 82, loss: 0.02769358456134796\nStep 83, loss: 0.03266967460513115\nStep 84, loss: 0.030720101669430733\nStep 85, loss: 
0.02795012854039669\nStep 86, loss: 0.027471110224723816\nStep 87, loss: 0.028233686462044716\nStep 88, loss: 0.026210511103272438\nStep 89, loss: 0.027847111225128174\nStep 90, loss: 0.02239195443689823\nStep 91, loss: 0.02604525536298752\nStep 92, loss: 0.022576235234737396\nStep 93, loss: 0.025205545127391815\nStep 94, loss: 0.022538412362337112\nStep 95, loss: 0.023945016786456108\nStep 96, loss: 0.02386571280658245\nStep 97, loss: 0.02088065817952156\nStep 98, loss: 0.02455727569758892\nStep 99, loss: 0.024158356711268425\nStep 100, loss: 0.02200816199183464\nStep 101, loss: 0.023882700130343437\nStep 102, loss: 0.023078344762325287\nStep 103, loss: 0.021429037675261497\nStep 104, loss: 0.02174082212150097\nStep 105, loss: 0.023439496755599976\nStep 108, loss: 0.023992128670215607\nStep 109, loss: 0.02077864110469818\nStep 110, loss: 0.022285621613264084\nStep 111, loss: 0.021920684725046158\nStep 112, loss: 0.020692449063062668\nStep 113, loss: 0.018958691507577896\nStep 114, loss: 0.02061193436384201\nStep 115, loss: 0.020444221794605255\nStep 116, loss: 0.02078094705939293\nStep 117, loss: 0.019729148596525192\nStep 118, loss: 0.020966283977031708\nStep 119, loss: 0.017715569585561752\nStep 120, loss: 0.018365614116191864\nStep 121, loss: 0.01908261515200138\nStep 122, loss: 0.019502971321344376\nStep 123, loss: 0.017858631908893585\nStep 124, loss: 0.015927297994494438\nStep 125, loss: 0.019642645493149757\nStep 126, loss: 0.018544591963291168\nStep 127, loss: 0.015933632850646973\nStep 128, loss: 0.01618993654847145\nStep 129, loss: 0.016362672671675682\nStep 130, loss: 0.015333064831793308\nStep 131, loss: 0.016816189512610435\nStep 132, loss: 0.0164976604282856\nStep 133, loss: 0.016223587095737457\nStep 134, loss: 0.017775867134332657\nStep 106, loss: 0.02335542067885399\nStep 107, loss: 0.020467158406972885\nStep 135, loss: 0.015498277731239796\nStep 136, loss: 0.015683045610785484\nStep 137, loss: 0.01383606344461441\nStep 138, loss: 0.01694605126976967\nStep 139, loss: 0.017274845391511917\nStep 140, loss: 0.015150913037359715\nStep 141, loss: 0.015680069103837013\nStep 142, loss: 0.014308595098555088\nStep 143, loss: 0.015988625586032867\nStep 144, loss: 0.01708797924220562\nStep 145, loss: 0.015064721927046776\nStep 146, loss: 0.015196548774838448\nStep 147, loss: 0.015233844518661499\nStep 148, loss: 0.015244445763528347\nStep 149, loss: 0.013996983878314495\nStep 150, loss: 0.013958653435111046\nStep 151, loss: 0.01511000283062458\nStep 152, loss: 0.013374081812798977\nStep 153, loss: 0.014145979657769203\nStep 154, loss: 0.015165048651397228\nStep 155, loss: 0.014963418245315552\nStep 156, loss: 0.015931718051433563\nStep 157, loss: 0.015752702951431274\nStep 158, loss: 0.01360904797911644\nStep 159, loss: 0.015732955187559128\nStep 160, loss: 0.016268953680992126\nStep 161, loss: 0.012776114046573639\nStep 108, loss: 0.023992128670215607\nStep 109, loss: 0.02077864110469818\nStep 110, loss: 0.022285621613264084\nStep 111, loss: 0.021920684725046158\nStep 112, loss: 0.020692449063062668\nStep 113, loss: 0.018958691507577896\nStep 114, loss: 0.02061193436384201\nStep 115, loss: 0.020444221794605255\nStep 116, loss: 0.02078094705939293\nStep 117, loss: 0.019729148596525192\nStep 118, loss: 0.020966283977031708\nStep 119, loss: 0.017715569585561752\nStep 120, loss: 0.018365614116191864\nStep 121, loss: 0.01908261515200138\nStep 122, loss: 0.019502971321344376\nStep 123, loss: 0.017858631908893585\nStep 124, loss: 0.015927297994494438\nStep 125, loss: 
0.019642645493149757\nStep 126, loss: 0.018544591963291168\nStep 127, loss: 0.015933632850646973\nStep 128, loss: 0.01618993654847145\nStep 129, loss: 0.016362672671675682\nStep 130, loss: 0.015333064831793308\nStep 131, loss: 0.016816189512610435\nStep 132, loss: 0.0164976604282856\nStep 133, loss: 0.016223587095737457\nStep 134, loss: 0.017775867134332657\nStep 162, loss: 0.014002346433699131\nStep 163, loss: 0.016474682837724686\nStep 164, loss: 0.014050696976482868\nStep 165, loss: 0.015429678373038769\nStep 166, loss: 0.014365949667990208\nStep 167, loss: 0.014085184782743454\nStep 168, loss: 0.015173561871051788\nStep 169, loss: 0.014162395149469376\nStep 170, loss: 0.014322423376142979\nStep 171, loss: 0.013332856819033623\nStep 172, loss: 0.012761498801410198\nStep 173, loss: 0.01492702029645443\nStep 174, loss: 0.013961599208414555\nStep 175, loss: 0.012958317995071411\nStep 176, loss: 0.015379429794847965\nStep 177, loss: 0.014328060671687126\nStep 178, loss: 0.014507713727653027\nStep 179, loss: 0.013650377281010151\nStep 180, loss: 0.013451213017106056\nStep 181, loss: 0.012091542594134808\nStep 182, loss: 0.013077723793685436\nStep 183, loss: 0.01345899235457182\nStep 184, loss: 0.01339371595531702\nStep 185, loss: 0.011480382643640041\nStep 186, loss: 0.01359980646520853\nStep 187, loss: 0.01263586338609457\nStep 188, loss: 0.012540409341454506\nStep 135, loss: 0.015498277731239796\nStep 136, loss: 0.015683045610785484\nStep 137, loss: 0.01383606344461441\nStep 138, loss: 0.01694605126976967\nStep 139, loss: 0.017274845391511917\nStep 140, loss: 0.015150913037359715\nStep 141, loss: 0.015680069103837013\nStep 142, loss: 0.014308595098555088\nStep 143, loss: 0.015988625586032867\nStep 144, loss: 0.01708797924220562\nStep 145, loss: 0.015064721927046776\nStep 146, loss: 0.015196548774838448\nStep 147, loss: 0.015233844518661499\nStep 148, loss: 0.015244445763528347\nStep 149, loss: 0.013996983878314495\nStep 150, loss: 0.013958653435111046\nStep 151, loss: 0.01511000283062458\nStep 152, loss: 0.013374081812798977\nStep 153, loss: 0.014145979657769203\nStep 154, loss: 0.015165048651397228\nStep 155, loss: 0.014963418245315552\nStep 156, loss: 0.015931718051433563\nStep 157, loss: 0.015752702951431274\nStep 158, loss: 0.01360904797911644\nStep 159, loss: 0.015732955187559128\nStep 160, loss: 0.016268953680992126\nStep 161, loss: 0.012776114046573639\nStep 189, loss: 0.013751196675002575\nStep 190, loss: 0.012528365477919579\nStep 191, loss: 0.01227360125631094\nStep 192, loss: 0.012847634963691235\nStep 193, loss: 0.011203471571207047\nStep 194, loss: 0.013195750303566456\nStep 195, loss: 0.012723291292786598\nStep 196, loss: 0.010990786366164684\nStep 197, loss: 0.009713550098240376\nStep 198, loss: 0.011404153890907764\nStep 199, loss: 0.010602226480841637\nStep 200, loss: 0.010594911873340607\nStep 201, loss: 0.0118786059319973\nStep 202, loss: 0.012351179495453835\nStep 203, loss: 0.012053634971380234\nStep 204, loss: 0.01243047509342432\nStep 205, loss: 0.012431791052222252\nStep 206, loss: 0.012530690059065819\nStep 207, loss: 0.009664309211075306\nStep 208, loss: 0.01006846223026514\nStep 209, loss: 0.010861100628972054\nStep 210, loss: 0.011857938952744007\nStep 211, loss: 0.01253808755427599\nStep 212, loss: 0.011201965622603893\nStep 213, loss: 0.012819544412195683\nStep 214, loss: 0.010710623115301132\nStep 215, loss: 0.011954248882830143\nStep 162, loss: 0.014002346433699131\nStep 163, loss: 0.016474682837724686\nStep 164, loss: 0.014050696976482868\nStep 165, loss: 
0.015429678373038769\nStep 166, loss: 0.014365949667990208\nStep 167, loss: 0.014085184782743454\nStep 168, loss: 0.015173561871051788\nStep 169, loss: 0.014162395149469376\nStep 170, loss: 0.014322423376142979\nStep 171, loss: 0.013332856819033623\nStep 172, loss: 0.012761498801410198\nStep 173, loss: 0.01492702029645443\nStep 174, loss: 0.013961599208414555\nStep 175, loss: 0.012958317995071411\nStep 176, loss: 0.015379429794847965\nStep 177, loss: 0.014328060671687126\nStep 178, loss: 0.014507713727653027\nStep 179, loss: 0.013650377281010151\nStep 180, loss: 0.013451213017106056\nStep 181, loss: 0.012091542594134808\nStep 182, loss: 0.013077723793685436\nStep 183, loss: 0.01345899235457182\nStep 184, loss: 0.01339371595531702\nStep 185, loss: 0.011480382643640041\nStep 186, loss: 0.01359980646520853\nStep 187, loss: 0.01263586338609457\nStep 188, loss: 0.012540409341454506\nStep 216, loss: 0.012150179594755173\nStep 217, loss: 0.012084055691957474\nStep 218, loss: 0.013123934157192707\nStep 189, loss: 0.013751196675002575\nStep 190, loss: 0.012528365477919579\nStep 191, loss: 0.01227360125631094\nStep 192, loss: 0.012847634963691235\nStep 193, loss: 0.011203471571207047\nStep 194, loss: 0.013195750303566456\nStep 195, loss: 0.012723291292786598\nStep 196, loss: 0.010990786366164684\nStep 197, loss: 0.009713550098240376\nStep 198, loss: 0.011404153890907764\nStep 199, loss: 0.010602226480841637\nStep 200, loss: 0.010594911873340607\nStep 201, loss: 0.0118786059319973\nStep 202, loss: 0.012351179495453835\nStep 203, loss: 0.012053634971380234\nStep 204, loss: 0.01243047509342432\nStep 205, loss: 0.012431791052222252\nStep 206, loss: 0.012530690059065819\nStep 207, loss: 0.009664309211075306\nStep 208, loss: 0.01006846223026514\nStep 209, loss: 0.010861100628972054\nStep 210, loss: 0.011857938952744007\nStep 211, loss: 0.01253808755427599\nStep 212, loss: 0.011201965622603893\nStep 213, loss: 0.012819544412195683\nStep 214, loss: 0.010710623115301132\nStep 215, loss: 0.011954248882830143\nStep 216, loss: 0.012150179594755173\nStep 217, loss: 0.012084055691957474\nStep 218, loss: 0.013123934157192707\nRunning on 8 devices.\nCounting all components: ['action_in', 'encoder', 'vq', 'action_up', 'patch_up', 'decoder']\nParameter counts:\n{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\nStarting training from step 0...\nStep 0, loss: 0.3309202492237091\nStep 1, loss: 0.27616071701049805\nStep 2, loss: 0.23833505809307098\nStep 3, loss: 0.20353123545646667\nStep 4, loss: 0.18312352895736694\nStep 5, loss: 0.16567449271678925\nStep 6, loss: 0.1496116816997528\nStep 7, loss: 0.1397426575422287\nStep 8, loss: 0.1301795393228531\nStep 9, loss: 0.12435252219438553\nStep 10, loss: 0.11484285444021225\nStep 11, loss: 0.11342452466487885\nStep 12, loss: 0.10503888875246048\nStep 13, loss: 0.09828095138072968\nStep 14, loss: 0.10296234488487244\nStep 15, loss: 0.09688253700733185\nStep 16, loss: 0.09001464396715164\nStep 17, loss: 0.08598079532384872\nStep 18, loss: 0.08745668828487396\nStep 19, loss: 0.0775802731513977\nStep 20, loss: 0.08166162669658661\nStep 21, loss: 0.07300376892089844\nStep 22, loss: 0.07714924216270447\nStep 23, loss: 0.07810085266828537\nStep 24, loss: 0.0794411152601242\nStep 25, loss: 0.07307913899421692\nStep 26, loss: 0.07097838819026947\nStep 27, loss: 0.06554681807756424\nStep 28, loss: 0.065010204911232\nStep 29, loss: 0.06810400635004044\nStep 30, loss: 0.06351493299007416\nStep 31, loss: 
0.057667750865221024\nStep 32, loss: 0.062011126428842545\nStep 33, loss: 0.0609549917280674\nStep 34, loss: 0.06150144338607788\nStep 35, loss: 0.057500164955854416\nStep 36, loss: 0.05634911358356476\nStep 37, loss: 0.054869506508111954\nStep 38, loss: 0.05245821550488472\nStep 39, loss: 0.05741355940699577\nStep 40, loss: 0.05354347452521324\nStep 41, loss: 0.05392030254006386\nStep 42, loss: 0.05377781391143799\nStep 43, loss: 0.04958712309598923\nStep 44, loss: 0.05107380822300911\nStep 45, loss: 0.05087074637413025\nStep 46, loss: 0.0468355156481266\nStep 47, loss: 0.051542893052101135\nStep 48, loss: 0.0509573332965374\nStep 49, loss: 0.0446152463555336\nStep 50, loss: 0.041748303920030594\nStep 51, loss: 0.04902645945549011\nStep 52, loss: 0.047454170882701874\nStep 53, loss: 0.040505826473236084\nStep 54, loss: 0.03718667849898338\nStep 55, loss: 0.03961941972374916\nStep 56, loss: 0.03947916626930237\nStep 57, loss: 0.03693949803709984\nStep 58, loss: 0.03782138228416443\nStep 59, loss: 0.03795648366212845\nStep 60, loss: 0.03371633216738701\nStep 61, loss: 0.044886138290166855\nStep 62, loss: 0.038592565804719925\nStep 63, loss: 0.0360867939889431\nStep 64, loss: 0.036005713045597076\nStep 65, loss: 0.034621596336364746\nStep 66, loss: 0.03188462182879448\nStep 67, loss: 0.029557932168245316\nStep 68, loss: 0.0318872295320034\nStep 69, loss: 0.030645810067653656\nStep 70, loss: 0.028607860207557678\nStep 71, loss: 0.03495610132813454\nStep 72, loss: 0.029537387192249298\nStep 73, loss: 0.033071547746658325\nStep 74, loss: 0.03534964472055435\nStep 75, loss: 0.03360242769122124\nStep 76, loss: 0.028890511021018028\nStep 77, loss: 0.029018152505159378\nStep 78, loss: 0.029858632013201714\nStep 79, loss: 0.02790442295372486\nStep 80, loss: 0.030148373916745186\nStep 81, loss: 0.02741095796227455\nStep 82, loss: 0.02769358456134796\nStep 83, loss: 0.03266967460513115\nStep 84, loss: 0.030720101669430733\nStep 85, loss: 0.02795012854039669\nStep 86, loss: 0.027471110224723816\nStep 87, loss: 0.028233686462044716\nStep 88, loss: 0.026210511103272438\nStep 89, loss: 0.027847111225128174\nStep 90, loss: 0.02239195443689823\nStep 91, loss: 0.02604525536298752\nStep 92, loss: 0.022576235234737396\nStep 93, loss: 0.025205545127391815\nStep 94, loss: 0.022538412362337112\nStep 95, loss: 0.023945016786456108\nStep 96, loss: 0.02386571280658245\nStep 97, loss: 0.02088065817952156\nStep 98, loss: 0.02455727569758892\nStep 99, loss: 0.024158356711268425\nStep 100, loss: 0.02200816199183464\nStep 101, loss: 0.023882700130343437\nStep 102, loss: 0.023078344762325287\nStep 103, loss: 0.021429037675261497\nStep 104, loss: 0.02174082212150097\nStep 105, loss: 0.023439496755599976\nStep 106, loss: 0.02335542067885399\nStep 107, loss: 0.020467158406972885\nStep 108, loss: 0.023992128670215607\nStep 109, loss: 0.02077864110469818\nStep 110, loss: 0.022285621613264084\nStep 111, loss: 0.021920684725046158\nStep 112, loss: 0.020692449063062668\nStep 113, loss: 0.018958691507577896\nStep 114, loss: 0.02061193436384201\nStep 115, loss: 0.020444221794605255\nStep 116, loss: 0.02078094705939293\nStep 117, loss: 0.019729148596525192\nStep 118, loss: 0.020966283977031708\nStep 119, loss: 0.017715569585561752\nStep 120, loss: 0.018365614116191864\nStep 121, loss: 0.01908261515200138\nStep 122, loss: 0.019502971321344376\nStep 123, loss: 0.017858631908893585\nStep 124, loss: 0.015927297994494438\nStep 125, loss: 0.019642645493149757\nStep 126, loss: 0.018544591963291168\nStep 127, loss: 
0.015933632850646973\nStep 128, loss: 0.01618993654847145\nStep 129, loss: 0.016362672671675682\nStep 130, loss: 0.015333064831793308\nStep 131, loss: 0.016816189512610435\nStep 132, loss: 0.0164976604282856\nStep 133, loss: 0.016223587095737457\nStep 134, loss: 0.017775867134332657\nStep 135, loss: 0.015498277731239796\nStep 136, loss: 0.015683045610785484\nStep 137, loss: 0.01383606344461441\nStep 138, loss: 0.01694605126976967\nStep 139, loss: 0.017274845391511917\nStep 140, loss: 0.015150913037359715\nStep 141, loss: 0.015680069103837013\nStep 142, loss: 0.014308595098555088\nStep 143, loss: 0.015988625586032867\nStep 144, loss: 0.01708797924220562\nStep 145, loss: 0.015064721927046776\nStep 146, loss: 0.015196548774838448\nStep 147, loss: 0.015233844518661499\nStep 148, loss: 0.015244445763528347\nStep 149, loss: 0.013996983878314495\nStep 150, loss: 0.013958653435111046\nStep 151, loss: 0.01511000283062458\nStep 152, loss: 0.013374081812798977\nStep 153, loss: 0.014145979657769203\nStep 154, loss: 0.015165048651397228\nStep 155, loss: 0.014963418245315552\nStep 156, loss: 0.015931718051433563\nStep 157, loss: 0.015752702951431274\nStep 158, loss: 0.01360904797911644\nStep 159, loss: 0.015732955187559128\nStep 160, loss: 0.016268953680992126\nStep 161, loss: 0.012776114046573639\nStep 162, loss: 0.014002346433699131\nStep 163, loss: 0.016474682837724686\nStep 164, loss: 0.014050696976482868\nStep 165, loss: 0.015429678373038769\nStep 166, loss: 0.014365949667990208\nStep 167, loss: 0.014085184782743454\nStep 168, loss: 0.015173561871051788\nStep 169, loss: 0.014162395149469376\nStep 170, loss: 0.014322423376142979\nStep 171, loss: 0.013332856819033623\nStep 172, loss: 0.012761498801410198\nStep 173, loss: 0.01492702029645443\nStep 174, loss: 0.013961599208414555\nStep 175, loss: 0.012958317995071411\nStep 176, loss: 0.015379429794847965\nStep 177, loss: 0.014328060671687126\nStep 178, loss: 0.014507713727653027\nStep 179, loss: 0.013650377281010151\nStep 180, loss: 0.013451213017106056\nStep 181, loss: 0.012091542594134808\nStep 182, loss: 0.013077723793685436\nStep 183, loss: 0.01345899235457182\nStep 184, loss: 0.01339371595531702\nStep 185, loss: 0.011480382643640041\nStep 186, loss: 0.01359980646520853\nStep 187, loss: 0.01263586338609457\nStep 188, loss: 0.012540409341454506\nStep 189, loss: 0.013751196675002575\nStep 190, loss: 0.012528365477919579\nStep 191, loss: 0.01227360125631094\nStep 192, loss: 0.012847634963691235\nStep 193, loss: 0.011203471571207047\nStep 194, loss: 0.013195750303566456\nStep 195, loss: 0.012723291292786598\nStep 196, loss: 0.010990786366164684\nStep 197, loss: 0.009713550098240376\nStep 198, loss: 0.011404153890907764\nStep 199, loss: 0.010602226480841637\nStep 200, loss: 0.010594911873340607\nStep 201, loss: 0.0118786059319973\nStep 202, loss: 0.012351179495453835\nStep 203, loss: 0.012053634971380234\nStep 204, loss: 0.01243047509342432\nStep 205, loss: 0.012431791052222252\nStep 206, loss: 0.012530690059065819\nStep 207, loss: 0.009664309211075306\nStep 208, loss: 0.01006846223026514\nStep 209, loss: 0.010861100628972054\nStep 210, loss: 0.011857938952744007\nStep 211, loss: 0.01253808755427599\nStep 212, loss: 0.011201965622603893\nStep 213, loss: 0.012819544412195683\nStep 214, loss: 0.010710623115301132\nStep 215, loss: 0.011954248882830143\nStep 216, loss: 0.012150179594755173\nStep 217, loss: 0.012084055691957474\nStep 218, loss: 0.013123934157192707\nRunning on 8 devices.\nCounting all components: ['action_in', 'encoder', 'vq', 'action_up', 
'patch_up', 'decoder']\nParameter counts:\n{'action_in': 768, 'encoder': 19348512, 'vq': 192, 'action_up': 16896, 'patch_up': 393728, 'decoder': 19594496, 'total': 39354592}\nStarting training from step 0...\nStep 0, loss: 0.3309202492237091\nStep 1, loss: 0.27616071701049805\nStep 2, loss: 0.23833505809307098\nStep 3, loss: 0.20353123545646667\nStep 4, loss: 0.18312352895736694\nStep 5, loss: 0.16567449271678925\nStep 6, loss: 0.1496116816997528\nStep 7, loss: 0.1397426575422287\nStep 8, loss: 0.1301795393228531\nStep 9, loss: 0.12435252219438553\nStep 10, loss: 0.11484285444021225\nStep 11, loss: 0.11342452466487885\nStep 12, loss: 0.10503888875246048\nStep 13, loss: 0.09828095138072968\nStep 14, loss: 0.10296234488487244\nStep 15, loss: 0.09688253700733185\nStep 16, loss: 0.09001464396715164\nStep 17, loss: 0.08598079532384872\nStep 18, loss: 0.08745668828487396\nStep 19, loss: 0.0775802731513977\nStep 20, loss: 0.08166162669658661\nStep 21, loss: 0.07300376892089844\nStep 22, loss: 0.07714924216270447\nStep 23, loss: 0.07810085266828537\nStep 24, loss: 0.0794411152601242\nStep 25, loss: 0.07307913899421692\nStep 26, loss: 0.07097838819026947\nStep 27, loss: 0.06554681807756424\nStep 28, loss: 0.065010204911232\nStep 29, loss: 0.06810400635004044\nStep 30, loss: 0.06351493299007416\nStep 31, loss: 0.057667750865221024\nStep 32, loss: 0.062011126428842545\nStep 33, loss: 0.0609549917280674\nStep 34, loss: 0.06150144338607788\nStep 35, loss: 0.057500164955854416\nStep 36, loss: 0.05634911358356476\nStep 37, loss: 0.054869506508111954\nStep 38, loss: 0.05245821550488472\nStep 39, loss: 0.05741355940699577\nStep 40, loss: 0.05354347452521324\nStep 41, loss: 0.05392030254006386\nStep 42, loss: 0.05377781391143799\nStep 43, loss: 0.04958712309598923\nStep 44, loss: 0.05107380822300911\nStep 45, loss: 0.05087074637413025\nStep 46, loss: 0.0468355156481266\nStep 47, loss: 0.051542893052101135\nStep 48, loss: 0.0509573332965374\nStep 49, loss: 0.0446152463555336\nStep 50, loss: 0.041748303920030594\nStep 51, loss: 0.04902645945549011\nStep 52, loss: 0.047454170882701874\nStep 53, loss: 0.040505826473236084\nStep 54, loss: 0.03718667849898338\nStep 55, loss: 0.03961941972374916\nStep 56, loss: 0.03947916626930237\nStep 57, loss: 0.03693949803709984\nStep 58, loss: 0.03782138228416443\nStep 59, loss: 0.03795648366212845\nStep 60, loss: 0.03371633216738701\nStep 61, loss: 0.044886138290166855\nStep 62, loss: 0.038592565804719925\nStep 63, loss: 0.0360867939889431\nStep 64, loss: 0.036005713045597076\nStep 65, loss: 0.034621596336364746\nStep 66, loss: 0.03188462182879448\nStep 67, loss: 0.029557932168245316\nStep 68, loss: 0.0318872295320034\nStep 69, loss: 0.030645810067653656\nStep 70, loss: 0.028607860207557678\nStep 71, loss: 0.03495610132813454\nStep 72, loss: 0.029537387192249298\nStep 73, loss: 0.033071547746658325\nStep 74, loss: 0.03534964472055435\nStep 75, loss: 0.03360242769122124\nStep 76, loss: 0.028890511021018028\nStep 77, loss: 0.029018152505159378\nStep 78, loss: 0.029858632013201714\nStep 79, loss: 0.02790442295372486\nStep 80, loss: 0.030148373916745186\nStep 81, loss: 0.02741095796227455\nStep 82, loss: 0.02769358456134796\nStep 83, loss: 0.03266967460513115\nStep 84, loss: 0.030720101669430733\nStep 85, loss: 0.02795012854039669\nStep 86, loss: 0.027471110224723816\nStep 87, loss: 0.028233686462044716\nStep 88, loss: 0.026210511103272438\nStep 89, loss: 0.027847111225128174\nStep 90, loss: 0.02239195443689823\nStep 91, loss: 0.02604525536298752\nStep 92, loss: 
0.022576235234737396\nStep 93, loss: 0.025205545127391815\nStep 94, loss: 0.022538412362337112\nStep 95, loss: 0.023945016786456108\nStep 96, loss: 0.02386571280658245\nStep 97, loss: 0.02088065817952156\nStep 98, loss: 0.02455727569758892\nStep 99, loss: 0.024158356711268425\nStep 100, loss: 0.02200816199183464\nStep 101, loss: 0.023882700130343437\nStep 102, loss: 0.023078344762325287\nStep 103, loss: 0.021429037675261497\nStep 104, loss: 0.02174082212150097\nStep 105, loss: 0.023439496755599976\nStep 106, loss: 0.02335542067885399\nStep 107, loss: 0.020467158406972885\nStep 108, loss: 0.023992128670215607\nStep 109, loss: 0.02077864110469818\nStep 110, loss: 0.022285621613264084\nStep 111, loss: 0.021920684725046158\nStep 112, loss: 0.020692449063062668\nStep 113, loss: 0.018958691507577896\nStep 114, loss: 0.02061193436384201\nStep 115, loss: 0.020444221794605255\nStep 116, loss: 0.02078094705939293\nStep 117, loss: 0.019729148596525192\nStep 118, loss: 0.020966283977031708\nStep 119, loss: 0.017715569585561752\nStep 120, loss: 0.018365614116191864\nStep 121, loss: 0.01908261515200138\nStep 122, loss: 0.019502971321344376\nStep 123, loss: 0.017858631908893585\nStep 124, loss: 0.015927297994494438\nStep 125, loss: 0.019642645493149757\nStep 126, loss: 0.018544591963291168\nStep 127, loss: 0.015933632850646973\nStep 128, loss: 0.01618993654847145\nStep 129, loss: 0.016362672671675682\nStep 130, loss: 0.015333064831793308\nStep 131, loss: 0.016816189512610435\nStep 132, loss: 0.0164976604282856\nStep 133, loss: 0.016223587095737457\nStep 134, loss: 0.017775867134332657\nStep 135, loss: 0.015498277731239796\nStep 136, loss: 0.015683045610785484\nStep 137, loss: 0.01383606344461441\nStep 138, loss: 0.01694605126976967\nStep 139, loss: 0.017274845391511917\nStep 140, loss: 0.015150913037359715\nStep 141, loss: 0.015680069103837013\nStep 142, loss: 0.014308595098555088\nStep 143, loss: 0.015988625586032867\nStep 144, loss: 0.01708797924220562\nStep 145, loss: 0.015064721927046776\nStep 146, loss: 0.015196548774838448\nStep 147, loss: 0.015233844518661499\nStep 148, loss: 0.015244445763528347\nStep 149, loss: 0.013996983878314495\nStep 150, loss: 0.013958653435111046\nStep 151, loss: 0.01511000283062458\nStep 152, loss: 0.013374081812798977\nStep 153, loss: 0.014145979657769203\nStep 154, loss: 0.015165048651397228\nStep 155, loss: 0.014963418245315552\nStep 156, loss: 0.015931718051433563\nStep 157, loss: 0.015752702951431274\nStep 158, loss: 0.01360904797911644\nStep 159, loss: 0.015732955187559128\nStep 160, loss: 0.016268953680992126\nStep 161, loss: 0.012776114046573639\nStep 162, loss: 0.014002346433699131\nStep 163, loss: 0.016474682837724686\nStep 164, loss: 0.014050696976482868\nStep 165, loss: 0.015429678373038769\nStep 166, loss: 0.014365949667990208\nStep 167, loss: 0.014085184782743454\nStep 168, loss: 0.015173561871051788\nStep 169, loss: 0.014162395149469376\nStep 170, loss: 0.014322423376142979\nStep 171, loss: 0.013332856819033623\nStep 172, loss: 0.012761498801410198\nStep 173, loss: 0.01492702029645443\nStep 174, loss: 0.013961599208414555\nStep 175, loss: 0.012958317995071411\nStep 176, loss: 0.015379429794847965\nStep 177, loss: 0.014328060671687126\nStep 178, loss: 0.014507713727653027\nStep 179, loss: 0.013650377281010151\nStep 180, loss: 0.013451213017106056\nStep 181, loss: 0.012091542594134808\nStep 182, loss: 0.013077723793685436\nStep 183, loss: 0.01345899235457182\nStep 184, loss: 0.01339371595531702\nStep 185, loss: 0.011480382643640041\nStep 186, loss: 
0.01359980646520853\nStep 187, loss: 0.01263586338609457\nStep 188, loss: 0.012540409341454506\nStep 189, loss: 0.013751196675002575\nStep 190, loss: 0.012528365477919579\nStep 191, loss: 0.01227360125631094\nStep 192, loss: 0.012847634963691235\nStep 193, loss: 0.011203471571207047\nStep 194, loss: 0.013195750303566456\nStep 195, loss: 0.012723291292786598\nStep 196, loss: 0.010990786366164684\nStep 197, loss: 0.009713550098240376\nStep 198, loss: 0.011404153890907764\nStep 199, loss: 0.010602226480841637\nStep 200, loss: 0.010594911873340607\nStep 201, loss: 0.0118786059319973\nStep 202, loss: 0.012351179495453835\nStep 203, loss: 0.012053634971380234\nStep 204, loss: 0.01243047509342432\nStep 205, loss: 0.012431791052222252\nStep 206, loss: 0.012530690059065819\nStep 207, loss: 0.009664309211075306\nStep 208, loss: 0.01006846223026514\nStep 209, loss: 0.010861100628972054\nStep 210, loss: 0.011857938952744007\nStep 211, loss: 0.01253808755427599\nStep 212, loss: 0.011201965622603893\nStep 213, loss: 0.012819544412195683\nStep 214, loss: 0.010710623115301132\nStep 215, loss: 0.011954248882830143\nStep 216, loss: 0.012150179594755173\nStep 217, loss: 0.012084055691957474\nStep 218, loss: 0.013123934157192707\nStep 219, loss: 0.012402343563735485\nStep 220, loss: 0.011631792411208153\nStep 221, loss: 0.011584474705159664\nStep 222, loss: 0.01097952201962471\nStep 223, loss: 0.012712574563920498\nStep 224, loss: 0.010229759849607944\nStep 225, loss: 0.011558798141777515\nStep 226, loss: 0.010583264753222466\nStep 227, loss: 0.010705210268497467\nStep 228, loss: 0.0116357933729887\nStep 229, loss: 0.010814939625561237\nStep 230, loss: 0.010558605194091797\nStep 231, loss: 0.009908916428685188\nStep 232, loss: 0.011581497266888618\nStep 233, loss: 0.011802499182522297\nStep 234, loss: 0.011465274728834629\nStep 235, loss: 0.012010223232209682\nStep 236, loss: 0.010722370818257332\nStep 237, loss: 0.010274405591189861\nStep 238, loss: 0.011278481222689152\nStep 239, loss: 0.010846936143934727\nStep 240, loss: 0.01259419322013855\nStep 241, loss: 0.012422946281731129\nStep 242, loss: 0.013480153866112232\nStep 243, loss: 0.01149264071136713\nStep 244, loss: 0.012327280826866627\nStep 245, loss: 0.011576535180211067\nStep 246, loss: 0.011629262007772923\nStep 247, loss: 0.012494875118136406\nStep 248, loss: 0.011843645945191383\nStep 249, loss: 0.012646166607737541\nStep 250, loss: 0.011376439593732357\nStep 251, loss: 0.011232168413698673\nStep 252, loss: 0.011057781055569649\nStep 253, loss: 
0.010992903262376785\nStep 254, loss: 0.011301021091639996\nStep 255, loss: 0.01089110691100359\nStep 256, loss: 0.0103179682046175\nStep 257, loss: 0.010743721388280392\nStep 258, loss: 0.010402617044746876\nStep 259, loss: 0.010314985178411007\nStep 260, loss: 0.010662691667675972\nStep 261, loss: 0.011964893899857998\nStep 262, loss: 0.011444470845162868\nStep 263, loss: 0.010359481908380985\nStep 264, loss: 0.011071950197219849\nStep 265, loss: 0.010385474190115929\nStep 266, loss: 0.010786756873130798\nStep 267, loss: 0.009867917746305466\nStep 268, loss: 0.011203785426914692\nStep 269, loss: 0.011108278296887875\nStep 270, loss: 0.011300038546323776\nStep 271, loss: 0.010646146722137928\nStep 272, loss: 0.010200191289186478\nStep 273, loss: 0.010808263905346394\nStep 274, loss: 0.011265968903899193\nStep 275, loss: 0.010250569321215153\nStep 276, loss: 0.008533592335879803\nStep 277, loss: 0.009490706957876682\nStep 278, loss: 0.009791299700737\nStep 279, loss: 0.01029213983565569\nStep 280, loss: 0.011753804981708527\nStep 281, loss: 0.00971137173473835\nStep 282, loss: 0.012008721940219402\nStep 283, loss: 0.011298278346657753\nStep 284, loss: 0.011328218504786491\nStep 285, loss: 0.011197622865438461\nStep 286, loss: 0.010864348150789738\nStep 287, loss: 0.010775812901556492\nStep 288, loss: 0.010691273957490921\nStep 289, loss: 0.009966089390218258\nStep 290, loss: 0.010224885307252407\nStep 291, loss: 0.009930402971804142\nStep 292, loss: 0.010686405934393406\nStep 293, loss: 0.011255526915192604\nStep 294, loss: 0.010371316224336624\nStep 295, loss: 0.009607769548892975\nStep 296, loss: 0.010344200767576694\nStep 297, loss: 0.010129978880286217\nStep 298, loss: 0.010164660401642323\nStep 299, loss: 0.010222935117781162\n
Step 300, loss: 0.01120209414511919\nStep 301, loss: 0.009847558103501797\nStep 302, loss: 0.011294144205749035\nStep 303, loss: 0.010222863405942917\nStep 304, loss: 0.009446301497519016\nStep 305, loss: 0.008409097790718079\nStep 306, loss: 0.00865502841770649\nStep 307, loss: 0.009920651093125343\nStep 308, loss: 0.009057370945811272\nStep 309, loss: 0.010928399860858917\nStep 310, loss: 0.010560085996985435\nStep 311, loss: 0.009174142964184284\nStep 312, loss: 0.008631434291601181\nStep 313, loss: 0.010008814744651318\nStep 314, loss: 0.00838238000869751\nStep 315, loss: 0.008831451646983624\nStep 316, loss: 0.00959621462970972\nStep 317, loss: 0.009931536391377449\nStep 318, loss: 0.009141447022557259\nStep 319, loss: 0.009971554391086102\nStep 320, loss: 0.009367017075419426\nStep 321, loss: 0.009710235521197319\nStep 322, loss: 0.010341423563659191\nStep 323, loss: 0.01046605221927166\nStep 324, loss: 0.010934630408883095\nStep 325, loss: 0.009343990124762058\nStep 326, loss: 0.010263442061841488\nStep 327, loss: 0.00978117436170578\nStep 328, loss: 0.011806083843111992\nStep 329, loss: 0.010895607993006706\nStep 330, loss: 
0.008183012716472149\nStep 331, loss: 0.009584175422787666\nStep 332, loss: 0.009803099557757378\nStep 333, loss: 0.008213256485760212\nStep 334, loss: 0.00965780857950449\nStep 335, loss: 0.008655296638607979\nStep 336, loss: 0.01056174747645855\nStep 337, loss: 0.008038876578211784\nStep 338, loss: 0.01079742144793272\nStep 339, loss: 0.008959968574345112\nStep 340, loss: 0.00852648913860321\nStep 341, loss: 0.008913596160709858\nStep 342, loss: 0.008657476864755154\nStep 343, loss: 0.008968519978225231\nStep 344, loss: 0.009772698394954205\nStep 345, loss: 0.009810158982872963\nStep 346, loss: 0.010945646092295647\nStep 347, loss: 0.010864058509469032\nStep 348, loss: 0.00884501077234745\nStep 349, loss: 0.010526054538786411\nStep 350, loss: 0.0088763777166605\nStep 351, loss: 0.009007164277136326\nStep 352, loss: 0.008891397155821323\nStep 353, loss: 0.0092351408675313\nStep 354, loss: 0.01059066690504551\nStep 355, loss: 0.010139535181224346\nStep 356, loss: 0.010005930438637733\nStep 357, loss: 0.008771003223955631\nStep 358, loss: 0.008067444898188114\nStep 359, loss: 0.008601492270827293\nStep 360, loss: 0.009435067884624004\nStep 361, loss: 0.009560937993228436\nStep 362, loss: 0.008613449521362782\nStep 363, loss: 0.008572123013436794\nStep 364, loss: 0.009219275787472725\nStep 365, loss: 0.01010737195611\nStep 366, loss: 0.008756442926824093\nStep 367, loss: 0.010213833302259445\nStep 368, loss: 0.00893139187246561\nStep 369, loss: 0.009876120835542679\nStep 370, loss: 0.009222878143191338\nStep 371, loss: 0.009084029123187065\nStep 372, loss: 0.009581203572452068\nStep 373, loss: 0.0099358931183815\nStep 374, loss: 0.008505059406161308\nStep 375, loss: 0.009633081965148449\nStep 376, loss: 0.00966871902346611\nStep 377, loss: 0.008310618810355663\nStep 378, loss: 0.008672129362821579\nStep 379, loss: 0.007983251474797726\nStep 380, loss: 0.009981289505958557\nStep 381, loss: 0.008883092552423477\nStep 382, loss: 0.009355833753943443\nStep 383, loss: 0.008394931443035603\n
Step 384, loss: 0.011625821702182293\nStep 385, loss: 0.008715483359992504\nStep 386, loss: 0.00883227027952671\nStep 387, loss: 0.009458016604185104\nStep 388, loss: 0.009332780726253986\nStep 389, loss: 0.009258671663701534\nStep 390, loss: 0.009851478971540928\nStep 391, loss: 0.009628290310502052\nStep 392, loss: 0.007471262477338314\nStep 393, loss: 0.009414088912308216\nStep 394, loss: 0.009029863402247429\nStep 395, loss: 0.008434828370809555\nStep 396, loss: 0.009456830099225044\nStep 397, loss: 0.008712932467460632\nStep 398, loss: 0.009513208642601967\nStep 399, loss: 0.008333476260304451\nStep 400, loss: 0.007227268535643816\nStep 401, loss: 0.008709742687642574\nStep 402, loss: 0.008780714124441147\nStep 403, loss: 0.009415601380169392\nStep 404, loss: 0.009301510639488697\nStep 405, loss: 0.008122518658638\nStep 406, loss: 0.008958345279097557\nStep 407, loss: 0.01046680472791195\nStep 408, loss: 0.0080665722489357\nStep 409, loss: 0.009830777533352375\nStep 410, loss: 0.009133830666542053\nStep 411, loss: 0.008492544293403625\nStep 412, loss: 0.007803461514413357\nStep 413, loss: 0.009163353592157364\nStep 414, loss: 0.010654965415596962\nStep 415, loss: 0.010023774579167366\nStep 416, loss: 0.008814546279609203\nStep 417, loss: 0.00868968479335308\nStep 418, loss: 0.009255973622202873\nStep 419, loss: 0.008926976472139359\nStep 420, loss: 0.009140003472566605\nStep 421, loss: 0.010159704834222794\nStep 422, loss: 0.008238519541919231\nStep 423, loss: 0.008558676578104496\nStep 424, loss: 0.008526754565536976\nStep 425, loss: 0.010752619244158268\nStep 426, loss: 0.009448438882827759\nStep 427, loss: 0.008833286352455616\nStep 428, loss: 0.009218545630574226\nStep 429, loss: 0.009204461239278316\nStep 430, loss: 0.008507666178047657\nStep 431, loss: 0.008917532861232758\nStep 432, loss: 0.008146978914737701\nStep 433, loss: 0.008149527944624424\nStep 434, loss: 0.008935561403632164\nStep 435, loss: 0.009877092204988003\nStep 436, loss: 0.0077535370364785194\nStep 437, loss: 
0.009797759354114532\nStep 438, loss: 0.008172751404345036\nStep 439, loss: 0.008792747743427753\nStep 440, loss: 0.008777088485658169\n
0.007983251474797726\nStep 380, loss: 0.009981289505958557\nStep 381, loss: 0.008883092552423477\nStep 382, loss: 0.009355833753943443\nStep 383, loss: 0.008394931443035603\nStep 384, loss: 0.011625821702182293\nStep 385, loss: 0.008715483359992504\nStep 386, loss: 0.00883227027952671\nStep 387, loss: 0.009458016604185104\nStep 388, loss: 0.009332780726253986\nStep 389, loss: 0.009258671663701534\nStep 390, loss: 0.009851478971540928\nStep 391, loss: 0.009628290310502052\nStep 392, loss: 0.007471262477338314\nStep 393, loss: 0.009414088912308216\nStep 394, loss: 0.009029863402247429\nStep 395, loss: 0.008434828370809555\nStep 396, loss: 0.009456830099225044\nStep 397, loss: 0.008712932467460632\nStep 398, loss: 0.009513208642601967\nStep 399, loss: 0.008333476260304451\nStep 400, loss: 0.007227268535643816\nStep 401, loss: 0.008709742687642574\nStep 402, loss: 0.008780714124441147\nStep 403, loss: 0.009415601380169392\nStep 404, loss: 0.009301510639488697\nStep 405, loss: 0.008122518658638\nStep 406, loss: 0.008958345279097557\nStep 407, loss: 0.01046680472791195\nStep 408, loss: 0.0080665722489357\nStep 409, loss: 0.009830777533352375\nStep 410, loss: 0.009133830666542053\nStep 384, loss: 0.011625821702182293\nStep 385, loss: 0.008715483359992504\nStep 386, loss: 0.00883227027952671\nStep 387, loss: 0.009458016604185104\nStep 388, loss: 0.009332780726253986\nStep 389, loss: 0.009258671663701534\nStep 390, loss: 0.009851478971540928\nStep 391, loss: 0.009628290310502052\nStep 392, loss: 0.007471262477338314\nStep 393, loss: 0.009414088912308216\nStep 394, loss: 0.009029863402247429\nStep 395, loss: 0.008434828370809555\nStep 396, loss: 0.009456830099225044\nStep 397, loss: 0.008712932467460632\nStep 398, loss: 0.009513208642601967\nStep 399, loss: 0.008333476260304451\nStep 400, loss: 0.007227268535643816\nStep 401, loss: 0.008709742687642574\nStep 402, loss: 0.008780714124441147\nStep 403, loss: 0.009415601380169392\nStep 404, loss: 0.009301510639488697\nStep 405, loss: 0.008122518658638\nStep 406, loss: 0.008958345279097557\nStep 407, loss: 0.01046680472791195\nStep 408, loss: 0.0080665722489357\nStep 409, loss: 0.009830777533352375\nStep 410, loss: 0.009133830666542053\nStep 411, loss: 0.008492544293403625\nStep 412, loss: 0.007803461514413357\nStep 413, loss: 0.009163353592157364\nStep 414, loss: 0.010654965415596962\nStep 415, loss: 0.010023774579167366\nStep 416, loss: 0.008814546279609203\nStep 417, loss: 0.00868968479335308\nStep 418, loss: 0.009255973622202873\nStep 419, loss: 0.008926976472139359\nStep 420, loss: 0.009140003472566605\nStep 421, loss: 0.010159704834222794\nStep 422, loss: 0.008238519541919231\nStep 423, loss: 0.008558676578104496\nStep 424, loss: 0.008526754565536976\nStep 425, loss: 0.010752619244158268\nStep 426, loss: 0.009448438882827759\nStep 427, loss: 0.008833286352455616\nStep 428, loss: 0.009218545630574226\nStep 429, loss: 0.009204461239278316\nStep 430, loss: 0.008507666178047657\nStep 431, loss: 0.008917532861232758\nStep 432, loss: 0.008146978914737701\nStep 433, loss: 0.008149527944624424\nStep 434, loss: 0.008935561403632164\nStep 435, loss: 0.009877092204988003\nStep 436, loss: 0.0077535370364785194\nStep 437, loss: 0.009797759354114532\nStep 411, loss: 0.008492544293403625\nStep 412, loss: 0.007803461514413357\nStep 413, loss: 0.009163353592157364\nStep 414, loss: 0.010654965415596962\nStep 415, loss: 0.010023774579167366\nStep 416, loss: 0.008814546279609203\nStep 417, loss: 0.00868968479335308\nStep 418, loss: 0.009255973622202873\nStep 419, 
loss: 0.008926976472139359\nStep 420, loss: 0.009140003472566605\nStep 421, loss: 0.010159704834222794\nStep 422, loss: 0.008238519541919231\nStep 423, loss: 0.008558676578104496\nStep 424, loss: 0.008526754565536976\nStep 425, loss: 0.010752619244158268\nStep 426, loss: 0.009448438882827759\nStep 427, loss: 0.008833286352455616\nStep 428, loss: 0.009218545630574226\nStep 429, loss: 0.009204461239278316\nStep 430, loss: 0.008507666178047657\nStep 431, loss: 0.008917532861232758\nStep 432, loss: 0.008146978914737701\nStep 433, loss: 0.008149527944624424\nStep 434, loss: 0.008935561403632164\nStep 435, loss: 0.009877092204988003\nStep 436, loss: 0.0077535370364785194\nStep 437, loss: 0.009797759354114532\nStep 438, loss: 0.008172751404345036\nStep 439, loss: 0.008792747743427753\nStep 440, loss: 0.008777088485658169\nStep 438, loss: 0.008172751404345036\nStep 439, loss: 0.008792747743427753\nStep 440, loss: 0.008777088485658169\nStep 441, loss: 0.008795896545052528\nStep 442, loss: 0.009172515943646431\nStep 443, loss: 0.008903468027710915\nStep 444, loss: 0.010192408226430416\nStep 445, loss: 0.008738502860069275\nStep 446, loss: 0.009220161475241184\nStep 447, loss: 0.00937176775187254\nStep 448, loss: 0.009126019664108753\nStep 449, loss: 0.008642616681754589\nStep 450, loss: 0.008268099278211594\nStep 451, loss: 0.009160791523754597\nStep 452, loss: 0.009702763520181179\nStep 453, loss: 0.009337768889963627\nStep 454, loss: 0.008436117321252823\nStep 455, loss: 0.00928843766450882\nStep 456, loss: 0.008119070902466774\nStep 457, loss: 0.007788764778524637\nStep 458, loss: 0.008930844254791737\nStep 459, loss: 0.010214053094387054\nStep 460, loss: 0.00870064552873373\nStep 461, loss: 0.009047522209584713\nStep 462, loss: 0.009532912634313107\nStep 463, loss: 0.007899757474660873\nStep 464, loss: 0.009032838977873325\nStep 465, loss: 0.008456055074930191\nStep 466, loss: 0.00895161833614111\nStep 467, loss: 0.00792618002742529\nStep 468, loss: 0.007693788968026638\nStep 469, loss: 0.009316098876297474\nStep 470, loss: 0.009146071039140224\nStep 471, loss: 0.008873512037098408\nStep 472, loss: 0.009403959847986698\nStep 473, loss: 0.009459185414016247\nStep 474, loss: 0.008951995521783829\nStep 475, loss: 0.0092973243445158\nStep 476, loss: 0.008307360112667084\nStep 477, loss: 0.00865758117288351\nStep 478, loss: 0.00900979619473219\nStep 479, loss: 0.007995071820914745\nStep 480, loss: 0.008432473056018353\nStep 481, loss: 0.007788315415382385\nStep 482, loss: 0.00980622973293066\nStep 483, loss: 0.010403353720903397\nStep 484, loss: 0.009630637243390083\nStep 485, loss: 0.009670045226812363\nStep 486, loss: 0.0090246656909585\nStep 487, loss: 0.00780887296423316\nStep 488, loss: 0.008700089529156685\nStep 489, loss: 0.00829536933451891\nStep 490, loss: 0.01011315081268549\nStep 491, loss: 0.009101804345846176\nStep 492, loss: 0.00783759355545044\nStep 493, loss: 0.007564619183540344\nStep 494, loss: 0.008335075341165066\nStep 441, loss: 0.008795896545052528\nStep 442, loss: 0.009172515943646431\nStep 443, loss: 0.008903468027710915\nStep 444, loss: 0.010192408226430416\nStep 445, loss: 0.008738502860069275\nStep 446, loss: 0.009220161475241184\nStep 447, loss: 0.00937176775187254\nStep 448, loss: 0.009126019664108753\nStep 449, loss: 0.008642616681754589\nStep 450, loss: 0.008268099278211594\nStep 451, loss: 0.009160791523754597\nStep 452, loss: 0.009702763520181179\nStep 453, loss: 0.009337768889963627\nStep 454, loss: 0.008436117321252823\nStep 455, loss: 0.00928843766450882\nStep 456, 
loss: 0.008119070902466774\nStep 457, loss: 0.007788764778524637\nStep 458, loss: 0.008930844254791737\nStep 459, loss: 0.010214053094387054\nStep 460, loss: 0.00870064552873373\nStep 461, loss: 0.009047522209584713\nStep 462, loss: 0.009532912634313107\nStep 463, loss: 0.007899757474660873\nStep 464, loss: 0.009032838977873325\nStep 465, loss: 0.008456055074930191\nStep 466, loss: 0.00895161833614111\nStep 467, loss: 0.00792618002742529\nStep 495, loss: 0.008907189592719078\nStep 496, loss: 0.009713673032820225\nStep 497, loss: 0.009148812852799892\nStep 498, loss: 0.008763691410422325\nStep 499, loss: 0.008145195432007313\nStep 500, loss: 0.008057684637606144\nStep 501, loss: 0.008521325886249542\nStep 502, loss: 0.009541303850710392\nStep 503, loss: 0.008878038264811039\nStep 504, loss: 0.008646117523312569\nStep 505, loss: 0.008850847370922565\nStep 506, loss: 0.008876285515725613\nStep 507, loss: 0.009227375499904156\nStep 508, loss: 0.010148166678845882\nStep 509, loss: 0.00796295516192913\nStep 510, loss: 0.008789869025349617\nStep 511, loss: 0.008849036879837513\nStep 512, loss: 0.007304091937839985\nStep 513, loss: 0.00913157407194376\nStep 514, loss: 0.010260028764605522\nStep 515, loss: 0.007284593302756548\nStep 516, loss: 0.009409734979271889\nStep 517, loss: 0.009817644022405148\nStep 518, loss: 0.009014584124088287\nStep 519, loss: 0.008146864362061024\nStep 520, loss: 0.007268956862390041\nStep 521, loss: 0.009374018758535385\nStep 468, loss: 0.007693788968026638\nStep 469, loss: 0.009316098876297474\nStep 470, loss: 0.009146071039140224\nStep 471, loss: 0.008873512037098408\nStep 472, loss: 0.009403959847986698\nStep 473, loss: 0.009459185414016247\nStep 474, loss: 0.008951995521783829\nStep 475, loss: 0.0092973243445158\nStep 476, loss: 0.008307360112667084\nStep 477, loss: 0.00865758117288351\nStep 478, loss: 0.00900979619473219\nStep 479, loss: 0.007995071820914745\nStep 480, loss: 0.008432473056018353\nStep 481, loss: 0.007788315415382385\nStep 482, loss: 0.00980622973293066\nStep 483, loss: 0.010403353720903397\nStep 484, loss: 0.009630637243390083\nStep 485, loss: 0.009670045226812363\nStep 486, loss: 0.0090246656909585\nStep 487, loss: 0.00780887296423316\nStep 488, loss: 0.008700089529156685\nStep 489, loss: 0.00829536933451891\nStep 490, loss: 0.01011315081268549\nStep 491, loss: 0.009101804345846176\nStep 492, loss: 0.00783759355545044\nStep 493, loss: 0.007564619183540344\nStep 494, loss: 0.008335075341165066\nStep 522, loss: 0.00889340229332447\nStep 523, loss: 0.00863998755812645\nStep 524, loss: 0.007970698177814484\nStep 525, loss: 0.009422356262803078\nStep 526, loss: 0.007659026421606541\nStep 527, loss: 0.009303899481892586\nStep 528, loss: 0.008360748179256916\nStep 529, loss: 0.009176895022392273\nStep 530, loss: 0.00965078640729189\nStep 531, loss: 0.009549814276397228\nStep 532, loss: 0.008250241167843342\nStep 533, loss: 0.008716493844985962\nStep 534, loss: 0.009742394089698792\nStep 535, loss: 0.008888865821063519\nStep 536, loss: 0.008021022193133831\nStep 537, loss: 0.008457610383629799\nStep 538, loss: 0.008732296526432037\nStep 539, loss: 0.008125531487166882\nStep 540, loss: 0.007404815871268511\nStep 541, loss: 0.008951316587626934\nStep 542, loss: 0.009075532667338848\nStep 543, loss: 0.008221290074288845\nStep 544, loss: 0.009991692379117012\nStep 545, loss: 0.010783315636217594\nStep 546, loss: 0.009887837804853916\nStep 547, loss: 0.009044837206602097\nStep 548, loss: 0.008554411120712757\nStep 495, loss: 0.008907189592719078\nStep 496, 
loss: 0.009713673032820225\nStep 497, loss: 0.009148812852799892\nStep 498, loss: 0.008763691410422325\nStep 499, loss: 0.008145195432007313\nStep 500, loss: 0.008057684637606144\nStep 501, loss: 0.008521325886249542\nStep 502, loss: 0.009541303850710392\nStep 503, loss: 0.008878038264811039\nStep 504, loss: 0.008646117523312569\nStep 505, loss: 0.008850847370922565\nStep 506, loss: 0.008876285515725613\nStep 507, loss: 0.009227375499904156\nStep 508, loss: 0.010148166678845882\nStep 509, loss: 0.00796295516192913\nStep 510, loss: 0.008789869025349617\nStep 511, loss: 0.008849036879837513\nStep 512, loss: 0.007304091937839985\nStep 513, loss: 0.00913157407194376\nStep 514, loss: 0.010260028764605522\nStep 515, loss: 0.007284593302756548\nStep 516, loss: 0.009409734979271889\nStep 517, loss: 0.009817644022405148\nStep 518, loss: 0.009014584124088287\nStep 519, loss: 0.008146864362061024\nStep 520, loss: 0.007268956862390041\nStep 521, loss: 0.009374018758535385\nStep 549, loss: 0.008303544484078884\nStep 550, loss: 0.009198504500091076\nStep 551, loss: 0.008177240379154682\nStep 522, loss: 0.00889340229332447\nStep 523, loss: 0.00863998755812645\nStep 524, loss: 0.007970698177814484\nStep 525, loss: 0.009422356262803078\nStep 526, loss: 0.007659026421606541\nStep 527, loss: 0.009303899481892586\nStep 528, loss: 0.008360748179256916\nStep 529, loss: 0.009176895022392273\nStep 530, loss: 0.00965078640729189\nStep 531, loss: 0.009549814276397228\nStep 532, loss: 0.008250241167843342\nStep 533, loss: 0.008716493844985962\nStep 534, loss: 0.009742394089698792\nStep 535, loss: 0.008888865821063519\nStep 536, loss: 0.008021022193133831\nStep 537, loss: 0.008457610383629799\nStep 538, loss: 0.008732296526432037\nStep 539, loss: 0.008125531487166882\nStep 540, loss: 0.007404815871268511\nStep 541, loss: 0.008951316587626934\nStep 542, loss: 0.009075532667338848\nStep 543, loss: 0.008221290074288845\nStep 544, loss: 0.009991692379117012\nStep 545, loss: 0.010783315636217594\nStep 546, loss: 0.009887837804853916\nStep 547, loss: 0.009044837206602097\nStep 548, loss: 0.008554411120712757\nStep 552, loss: 0.008611418306827545\nStep 553, loss: 0.008411903865635395\nStep 554, loss: 0.010022832080721855\nStep 555, loss: 0.008083075284957886\nStep 556, loss: 0.008184091188013554\nStep 557, loss: 0.009950775653123856\nStep 558, loss: 0.007724710740149021\nStep 559, loss: 0.010604195296764374\nStep 560, loss: 0.010046249255537987\nStep 561, loss: 0.008444340899586678\nStep 562, loss: 0.00951379630714655\nStep 563, loss: 0.009500275366008282\nStep 564, loss: 0.0098471874371171\nStep 565, loss: 0.009236135520040989\nStep 566, loss: 0.008145434781908989\nStep 567, loss: 0.008019921369850636\nStep 568, loss: 0.008847599849104881\nStep 569, loss: 0.009217997081577778\nStep 570, loss: 0.008589738979935646\nStep 571, loss: 0.008038138970732689\nStep 572, loss: 0.008367457427084446\nStep 573, loss: 0.008337149396538734\nStep 574, loss: 0.007989796809852123\nStep 575, loss: 0.010115966200828552\nStep 576, loss: 0.008840972557663918\nStep 577, loss: 0.007866596803069115\nStep 578, loss: 0.008375775068998337\nStep 549, loss: 0.008303544484078884\nStep 550, loss: 0.009198504500091076\nStep 551, loss: 0.008177240379154682\nStep 579, loss: 0.008921980857849121\nStep 580, loss: 0.008208159357309341\nStep 581, loss: 0.008356332778930664\nStep 582, loss: 0.01047302596271038\nStep 583, loss: 0.010049899108707905\nStep 584, loss: 0.008961194194853306\nStep 585, loss: 0.009844383224844933\nStep 586, loss: 
0.008554906584322453\nStep 587, loss: 0.010228397324681282\nStep 588, loss: 0.00855023879557848\nStep 589, loss: 0.008921374566853046\nStep 590, loss: 0.00938184279948473\nStep 591, loss: 0.007259429432451725\nStep 592, loss: 0.008099344559013844\nStep 593, loss: 0.00787374097853899\nStep 594, loss: 0.009231695905327797\nStep 595, loss: 0.008884775452315807\nStep 596, loss: 0.008983580395579338\nStep 597, loss: 0.008393236435949802\nStep 598, loss: 0.00823807530105114\nStep 599, loss: 0.00898067932575941\nStep 600, loss: 0.008824114687740803\nStep 601, loss: 0.00879320502281189\nStep 602, loss: 0.007165095303207636\nStep 603, loss: 0.00796660128980875\nStep 604, loss: 0.0073905885219573975\nStep 605, loss: 0.008228345774114132\nStep 552, loss: 0.008611418306827545\nStep 553, loss: 0.008411903865635395\nStep 554, loss: 0.010022832080721855\nStep 555, loss: 0.008083075284957886\nStep 556, loss: 0.008184091188013554\nStep 557, loss: 0.009950775653123856\nStep 558, loss: 0.007724710740149021\nStep 559, loss: 0.010604195296764374\nStep 560, loss: 0.010046249255537987\nStep 561, loss: 0.008444340899586678\nStep 562, loss: 0.00951379630714655\nStep 563, loss: 0.009500275366008282\nStep 564, loss: 0.0098471874371171\nStep 565, loss: 0.009236135520040989\nStep 566, loss: 0.008145434781908989\nStep 567, loss: 0.008019921369850636\nStep 568, loss: 0.008847599849104881\nStep 569, loss: 0.009217997081577778\nStep 570, loss: 0.008589738979935646\nStep 571, loss: 0.008038138970732689\nStep 572, loss: 0.008367457427084446\nStep 573, loss: 0.008337149396538734\nStep 574, loss: 0.007989796809852123\nStep 575, loss: 0.010115966200828552\nStep 576, loss: 0.008840972557663918\nStep 577, loss: 0.007866596803069115\nStep 578, loss: 0.008375775068998337\nStep 606, loss: 0.008373823948204517\nStep 607, loss: 0.008092723786830902\nStep 608, loss: 0.009472115896642208\nStep 609, loss: 0.009028247557580471\nStep 610, loss: 0.009447307325899601\nStep 611, loss: 0.006945042870938778\nStep 612, loss: 0.008965948596596718\nStep 613, loss: 0.009417682886123657\nStep 614, loss: 0.01058974489569664\nStep 615, loss: 0.009123829193413258\nStep 616, loss: 0.008527892641723156\nStep 617, loss: 0.006956911645829678\nStep 618, loss: 0.008063074201345444\nStep 619, loss: 0.008881963789463043\nStep 620, loss: 0.009354100562632084\nStep 621, loss: 0.007262085098773241\nStep 622, loss: 0.009552895091474056\nStep 623, loss: 0.007794888224452734\nStep 624, loss: 0.00895579531788826\nStep 625, loss: 0.007704522460699081\nStep 626, loss: 0.00867677852511406\nStep 627, loss: 0.008672015741467476\nStep 628, loss: 0.00815113540738821\nStep 629, loss: 0.009535595774650574\nStep 630, loss: 0.007677331566810608\nStep 631, loss: 0.009011753834784031\nStep 632, loss: 0.009445476345717907\nStep 579, loss: 0.008921980857849121\nStep 580, loss: 0.008208159357309341\nStep 581, loss: 0.008356332778930664\nStep 582, loss: 0.01047302596271038\nStep 583, loss: 0.010049899108707905\nStep 584, loss: 0.008961194194853306\nStep 585, loss: 0.009844383224844933\nStep 586, loss: 0.008554906584322453\nStep 587, loss: 0.010228397324681282\nStep 588, loss: 0.00855023879557848\nStep 589, loss: 0.008921374566853046\nStep 590, loss: 0.00938184279948473\nStep 591, loss: 0.007259429432451725\nStep 592, loss: 0.008099344559013844\nStep 593, loss: 0.00787374097853899\nStep 594, loss: 0.009231695905327797\nStep 595, loss: 0.008884775452315807\nStep 596, loss: 0.008983580395579338\nStep 597, loss: 0.008393236435949802\nStep 598, loss: 0.00823807530105114\nStep 599, loss: 
0.00898067932575941\nStep 600, loss: 0.008824114687740803\nStep 601, loss: 0.00879320502281189\nStep 602, loss: 0.007165095303207636\nStep 603, loss: 0.00796660128980875\nStep 604, loss: 0.0073905885219573975\nStep 605, loss: 0.008228345774114132\nStep 633, loss: 0.010466964915394783\nStep 634, loss: 0.009875749237835407\nStep 635, loss: 0.008144666440784931\nStep 636, loss: 0.009463016875088215\nStep 637, loss: 0.011229046620428562\nStep 638, loss: 0.009615120477974415\nStep 639, loss: 0.007913789711892605\nStep 640, loss: 0.00920387078076601\nStep 641, loss: 0.009785228408873081\nStep 642, loss: 0.008528223261237144\nStep 643, loss: 0.009180537424981594\nStep 644, loss: 0.007542653940618038\nStep 645, loss: 0.00836759153753519\nStep 646, loss: 0.010244078934192657\nStep 647, loss: 0.00858389399945736\nStep 648, loss: 0.008339446038007736\nStep 649, loss: 0.008229510858654976\nStep 650, loss: 0.009147602133452892\nStep 651, loss: 0.007746502757072449\nStep 652, loss: 0.008200840093195438\nStep 653, loss: 0.007780694402754307\nStep 654, loss: 0.008361638523638248\nStep 655, loss: 0.008371165953576565\nStep 656, loss: 0.007468099240213633\nStep 657, loss: 0.009446601383388042\nStep 658, loss: 0.009341231547296047\nStep 659, loss: 0.009194646961987019\nStep 606, loss: 0.008373823948204517\nStep 607, loss: 0.008092723786830902\nStep 608, loss: 0.009472115896642208\nStep 609, loss: 0.009028247557580471\nStep 610, loss: 0.009447307325899601\nStep 611, loss: 0.006945042870938778\nStep 612, loss: 0.008965948596596718\nStep 613, loss: 0.009417682886123657\nStep 614, loss: 0.01058974489569664\nStep 615, loss: 0.009123829193413258\nStep 616, loss: 0.008527892641723156\nStep 617, loss: 0.006956911645829678\nStep 618, loss: 0.008063074201345444\nStep 619, loss: 0.008881963789463043\nStep 620, loss: 0.009354100562632084\nStep 621, loss: 0.007262085098773241\nStep 622, loss: 0.009552895091474056\nStep 623, loss: 0.007794888224452734\nStep 624, loss: 0.00895579531788826\nStep 625, loss: 0.007704522460699081\nStep 626, loss: 0.00867677852511406\nStep 627, loss: 0.008672015741467476\nStep 628, loss: 0.00815113540738821\nStep 629, loss: 0.009535595774650574\nStep 630, loss: 0.007677331566810608\nStep 631, loss: 0.009011753834784031\nStep 632, loss: 0.009445476345717907\nStep 660, loss: 0.009197157807648182\nStep 661, loss: 0.006940855644643307\nStep 662, loss: 0.007554870098829269\nStep 633, loss: 0.010466964915394783\nStep 634, loss: 0.009875749237835407\nStep 635, loss: 0.008144666440784931\nStep 636, loss: 0.009463016875088215\nStep 637, loss: 0.011229046620428562\nStep 638, loss: 0.009615120477974415\nStep 639, loss: 0.007913789711892605\nStep 640, loss: 0.00920387078076601\nStep 641, loss: 0.009785228408873081\nStep 642, loss: 0.008528223261237144\nStep 643, loss: 0.009180537424981594\nStep 644, loss: 0.007542653940618038\nStep 645, loss: 0.00836759153753519\nStep 646, loss: 0.010244078934192657\nStep 647, loss: 0.00858389399945736\nStep 648, loss: 0.008339446038007736\nStep 649, loss: 0.008229510858654976\nStep 650, loss: 0.009147602133452892\nStep 651, loss: 0.007746502757072449\nStep 652, loss: 0.008200840093195438\nStep 653, loss: 0.007780694402754307\nStep 654, loss: 0.008361638523638248\nStep 655, loss: 0.008371165953576565\nStep 656, loss: 0.007468099240213633\nStep 657, loss: 0.009446601383388042\nStep 658, loss: 0.009341231547296047\nStep 659, loss: 0.009194646961987019\nStep 441, loss: 0.008795896545052528\nStep 442, loss: 0.009172515943646431\nStep 443, loss: 0.008903468027710915\nStep 444, 
loss: 0.010192408226430416\nStep 445, loss: 0.008738502860069275\nStep 446, loss: 0.009220161475241184\nStep 447, loss: 0.00937176775187254\nStep 448, loss: 0.009126019664108753\nStep 449, loss: 0.008642616681754589\nStep 450, loss: 0.008268099278211594\nStep 451, loss: 0.009160791523754597\nStep 452, loss: 0.009702763520181179\nStep 453, loss: 0.009337768889963627\nStep 454, loss: 0.008436117321252823\nStep 455, loss: 0.00928843766450882\nStep 456, loss: 0.008119070902466774\nStep 457, loss: 0.007788764778524637\nStep 458, loss: 0.008930844254791737\nStep 459, loss: 0.010214053094387054\nStep 460, loss: 0.00870064552873373\nStep 461, loss: 0.009047522209584713\nStep 462, loss: 0.009532912634313107\nStep 463, loss: 0.007899757474660873\nStep 464, loss: 0.009032838977873325\nStep 465, loss: 0.008456055074930191\nStep 466, loss: 0.00895161833614111\nStep 467, loss: 0.00792618002742529\nStep 660, loss: 0.009197157807648182\nStep 661, loss: 0.006940855644643307\nStep 662, loss: 0.007554870098829269\nStep 468, loss: 0.007693788968026638\nStep 469, loss: 0.009316098876297474\nStep 470, loss: 0.009146071039140224\nStep 471, loss: 0.008873512037098408\nStep 472, loss: 0.009403959847986698\nStep 473, loss: 0.009459185414016247\nStep 474, loss: 0.008951995521783829\nStep 475, loss: 0.0092973243445158\nStep 476, loss: 0.008307360112667084\nStep 477, loss: 0.00865758117288351\nStep 478, loss: 0.00900979619473219\nStep 479, loss: 0.007995071820914745\nStep 480, loss: 0.008432473056018353\nStep 481, loss: 0.007788315415382385\nStep 482, loss: 0.00980622973293066\nStep 483, loss: 0.010403353720903397\nStep 484, loss: 0.009630637243390083\nStep 485, loss: 0.009670045226812363\nStep 486, loss: 0.0090246656909585\nStep 487, loss: 0.00780887296423316\nStep 488, loss: 0.008700089529156685\nStep 489, loss: 0.00829536933451891\nStep 490, loss: 0.01011315081268549\nStep 491, loss: 0.009101804345846176\nStep 492, loss: 0.00783759355545044\nStep 493, loss: 0.007564619183540344\nStep 494, loss: 0.008335075341165066\nStep 441, loss: 0.008795896545052528\nStep 442, loss: 0.009172515943646431\nStep 443, loss: 0.008903468027710915\nStep 444, loss: 0.010192408226430416\nStep 445, loss: 0.008738502860069275\nStep 446, loss: 0.009220161475241184\nStep 447, loss: 0.00937176775187254\nStep 448, loss: 0.009126019664108753\nStep 449, loss: 0.008642616681754589\nStep 450, loss: 0.008268099278211594\nStep 451, loss: 0.009160791523754597\nStep 452, loss: 0.009702763520181179\nStep 453, loss: 0.009337768889963627\nStep 454, loss: 0.008436117321252823\nStep 455, loss: 0.00928843766450882\nStep 456, loss: 0.008119070902466774\nStep 457, loss: 0.007788764778524637\nStep 458, loss: 0.008930844254791737\nStep 459, loss: 0.010214053094387054\nStep 460, loss: 0.00870064552873373\nStep 461, loss: 0.009047522209584713\nStep 462, loss: 0.009532912634313107\nStep 463, loss: 0.007899757474660873\nStep 464, loss: 0.009032838977873325\nStep 465, loss: 0.008456055074930191\nStep 466, loss: 0.00895161833614111\nStep 467, loss: 0.00792618002742529\nStep 495, loss: 0.008907189592719078\nStep 496, loss: 0.009713673032820225\nStep 497, loss: 0.009148812852799892\nStep 498, loss: 0.008763691410422325\nStep 499, loss: 0.008145195432007313\nStep 500, loss: 0.008057684637606144\nStep 501, loss: 0.008521325886249542\nStep 502, loss: 0.009541303850710392\nStep 503, loss: 0.008878038264811039\nStep 504, loss: 0.008646117523312569\nStep 505, loss: 0.008850847370922565\nStep 506, loss: 0.008876285515725613\nStep 507, loss: 0.009227375499904156\nStep 508, 
loss: 0.010148166678845882\nStep 509, loss: 0.00796295516192913\nStep 510, loss: 0.008789869025349617\nStep 511, loss: 0.008849036879837513\nStep 512, loss: 0.007304091937839985\nStep 513, loss: 0.00913157407194376\nStep 514, loss: 0.010260028764605522\nStep 515, loss: 0.007284593302756548\nStep 516, loss: 0.009409734979271889\nStep 517, loss: 0.009817644022405148\nStep 518, loss: 0.009014584124088287\nStep 519, loss: 0.008146864362061024\nStep 520, loss: 0.007268956862390041\nStep 521, loss: 0.009374018758535385\nStep 468, loss: 0.007693788968026638\nStep 469, loss: 0.009316098876297474\nStep 470, loss: 0.009146071039140224\nStep 471, loss: 0.008873512037098408\nStep 472, loss: 0.009403959847986698\nStep 473, loss: 0.009459185414016247\nStep 474, loss: 0.008951995521783829\nStep 475, loss: 0.0092973243445158\nStep 476, loss: 0.008307360112667084\nStep 477, loss: 0.00865758117288351\nStep 478, loss: 0.00900979619473219\nStep 479, loss: 0.007995071820914745\nStep 480, loss: 0.008432473056018353\nStep 481, loss: 0.007788315415382385\nStep 482, loss: 0.00980622973293066\nStep 483, loss: 0.010403353720903397\nStep 484, loss: 0.009630637243390083\nStep 485, loss: 0.009670045226812363\nStep 486, loss: 0.0090246656909585\nStep 487, loss: 0.00780887296423316\nStep 488, loss: 0.008700089529156685\nStep 489, loss: 0.00829536933451891\nStep 490, loss: 0.01011315081268549\nStep 491, loss: 0.009101804345846176\nStep 492, loss: 0.00783759355545044\nStep 493, loss: 0.007564619183540344\nStep 494, loss: 0.008335075341165066\nStep 522, loss: 0.00889340229332447\nStep 523, loss: 0.00863998755812645\nStep 524, loss: 0.007970698177814484\nStep 525, loss: 0.009422356262803078\nStep 526, loss: 0.007659026421606541\nStep 527, loss: 0.009303899481892586\nStep 528, loss: 0.008360748179256916\nStep 529, loss: 0.009176895022392273\nStep 530, loss: 0.00965078640729189\nStep 531, loss: 0.009549814276397228\nStep 532, loss: 0.008250241167843342\nStep 533, loss: 0.008716493844985962\nStep 534, loss: 0.009742394089698792\nStep 535, loss: 0.008888865821063519\nStep 536, loss: 0.008021022193133831\nStep 537, loss: 0.008457610383629799\nStep 538, loss: 0.008732296526432037\nStep 539, loss: 0.008125531487166882\nStep 540, loss: 0.007404815871268511\nStep 541, loss: 0.008951316587626934\nStep 542, loss: 0.009075532667338848\nStep 543, loss: 0.008221290074288845\nStep 544, loss: 0.009991692379117012\nStep 545, loss: 0.010783315636217594\nStep 546, loss: 0.009887837804853916\nStep 547, loss: 0.009044837206602097\nStep 548, loss: 0.008554411120712757\nStep 495, loss: 0.008907189592719078\nStep 496, loss: 0.009713673032820225\nStep 497, loss: 0.009148812852799892\nStep 498, loss: 0.008763691410422325\nStep 499, loss: 0.008145195432007313\nStep 500, loss: 0.008057684637606144\nStep 501, loss: 0.008521325886249542\nStep 502, loss: 0.009541303850710392\nStep 503, loss: 0.008878038264811039\nStep 504, loss: 0.008646117523312569\nStep 505, loss: 0.008850847370922565\nStep 506, loss: 0.008876285515725613\nStep 507, loss: 0.009227375499904156\nStep 508, loss: 0.010148166678845882\nStep 509, loss: 0.00796295516192913\nStep 510, loss: 0.008789869025349617\nStep 511, loss: 0.008849036879837513\nStep 512, loss: 0.007304091937839985\nStep 513, loss: 0.00913157407194376\nStep 514, loss: 0.010260028764605522\nStep 515, loss: 0.007284593302756548\nStep 516, loss: 0.009409734979271889\nStep 517, loss: 0.009817644022405148\nStep 518, loss: 0.009014584124088287\nStep 519, loss: 0.008146864362061024\nStep 520, loss: 0.007268956862390041\nStep 521, 
loss: 0.009374018758535385\nStep 549, loss: 0.008303544484078884\nStep 550, loss: 0.009198504500091076\nStep 551, loss: 0.008177240379154682\nStep 522, loss: 0.00889340229332447\nStep 523, loss: 0.00863998755812645\nStep 524, loss: 0.007970698177814484\nStep 525, loss: 0.009422356262803078\nStep 526, loss: 0.007659026421606541\nStep 527, loss: 0.009303899481892586\nStep 528, loss: 0.008360748179256916\nStep 529, loss: 0.009176895022392273\nStep 530, loss: 0.00965078640729189\nStep 531, loss: 0.009549814276397228\nStep 532, loss: 0.008250241167843342\nStep 533, loss: 0.008716493844985962\nStep 534, loss: 0.009742394089698792\nStep 535, loss: 0.008888865821063519\nStep 536, loss: 0.008021022193133831\nStep 537, loss: 0.008457610383629799\nStep 538, loss: 0.008732296526432037\nStep 539, loss: 0.008125531487166882\nStep 540, loss: 0.007404815871268511\nStep 541, loss: 0.008951316587626934\nStep 542, loss: 0.009075532667338848\nStep 543, loss: 0.008221290074288845\nStep 544, loss: 0.009991692379117012\nStep 545, loss: 0.010783315636217594\nStep 546, loss: 0.009887837804853916\nStep 547, loss: 0.009044837206602097\nStep 548, loss: 0.008554411120712757\nStep 552, loss: 0.008611418306827545\nStep 553, loss: 0.008411903865635395\nStep 554, loss: 0.010022832080721855\nStep 555, loss: 0.008083075284957886\nStep 556, loss: 0.008184091188013554\nStep 557, loss: 0.009950775653123856\nStep 558, loss: 0.007724710740149021\nStep 559, loss: 0.010604195296764374\nStep 560, loss: 0.010046249255537987\nStep 561, loss: 0.008444340899586678\nStep 562, loss: 0.00951379630714655\nStep 563, loss: 0.009500275366008282\nStep 564, loss: 0.0098471874371171\nStep 565, loss: 0.009236135520040989\nStep 566, loss: 0.008145434781908989\nStep 567, loss: 0.008019921369850636\nStep 568, loss: 0.008847599849104881\nStep 569, loss: 0.009217997081577778\nStep 570, loss: 0.008589738979935646\nStep 571, loss: 0.008038138970732689\nStep 572, loss: 0.008367457427084446\nStep 573, loss: 0.008337149396538734\nStep 574, loss: 0.007989796809852123\nStep 575, loss: 0.010115966200828552\nStep 576, loss: 0.008840972557663918\nStep 577, loss: 0.007866596803069115\nStep 578, loss: 0.008375775068998337\nStep 549, loss: 0.008303544484078884\nStep 550, loss: 0.009198504500091076\nStep 551, loss: 0.008177240379154682\nStep 579, loss: 0.008921980857849121\nStep 580, loss: 0.008208159357309341\nStep 581, loss: 0.008356332778930664\nStep 582, loss: 0.01047302596271038\nStep 583, loss: 0.010049899108707905\nStep 584, loss: 0.008961194194853306\nStep 585, loss: 0.009844383224844933\nStep 586, loss: 0.008554906584322453\nStep 587, loss: 0.010228397324681282\nStep 588, loss: 0.00855023879557848\nStep 589, loss: 0.008921374566853046\nStep 590, loss: 0.00938184279948473\nStep 591, loss: 0.007259429432451725\nStep 592, loss: 0.008099344559013844\nStep 593, loss: 0.00787374097853899\nStep 594, loss: 0.009231695905327797\nStep 595, loss: 0.008884775452315807\nStep 596, loss: 0.008983580395579338\nStep 597, loss: 0.008393236435949802\nStep 598, loss: 0.00823807530105114\nStep 599, loss: 0.00898067932575941\nStep 600, loss: 0.008824114687740803\nStep 601, loss: 0.00879320502281189\nStep 602, loss: 0.007165095303207636\nStep 603, loss: 0.00796660128980875\nStep 604, loss: 0.0073905885219573975\nStep 605, loss: 0.008228345774114132\nStep 441, loss: 0.008795896545052528\nStep 442, loss: 0.009172515943646431\nStep 443, loss: 0.008903468027710915\nStep 444, loss: 0.010192408226430416\nStep 445, loss: 0.008738502860069275\nStep 446, loss: 0.009220161475241184\nStep 
447, loss: 0.00937176775187254\nStep 448, loss: 0.009126019664108753\nStep 449, loss: 0.008642616681754589\nStep 450, loss: 0.008268099278211594\nStep 451, loss: 0.009160791523754597\nStep 452, loss: 0.009702763520181179\nStep 453, loss: 0.009337768889963627\nStep 454, loss: 0.008436117321252823\nStep 455, loss: 0.00928843766450882\nStep 456, loss: 0.008119070902466774\nStep 457, loss: 0.007788764778524637\nStep 458, loss: 0.008930844254791737\nStep 459, loss: 0.010214053094387054\nStep 460, loss: 0.00870064552873373\nStep 461, loss: 0.009047522209584713\nStep 462, loss: 0.009532912634313107\nStep 463, loss: 0.007899757474660873\nStep 464, loss: 0.009032838977873325\nStep 465, loss: 0.008456055074930191\nStep 466, loss: 0.00895161833614111\nStep 467, loss: 0.00792618002742529\nStep 606, loss: 0.008373823948204517\nStep 607, loss: 0.008092723786830902\nStep 608, loss: 0.009472115896642208\nStep 609, loss: 0.009028247557580471\nStep 610, loss: 0.009447307325899601\nStep 611, loss: 0.006945042870938778\nStep 612, loss: 0.008965948596596718\nStep 613, loss: 0.009417682886123657\nStep 614, loss: 0.01058974489569664\nStep 615, loss: 0.009123829193413258\nStep 616, loss: 0.008527892641723156\nStep 617, loss: 0.006956911645829678\nStep 618, loss: 0.008063074201345444\nStep 619, loss: 0.008881963789463043\nStep 620, loss: 0.009354100562632084\nStep 621, loss: 0.007262085098773241\nStep 622, loss: 0.009552895091474056\nStep 623, loss: 0.007794888224452734\nStep 624, loss: 0.00895579531788826\nStep 625, loss: 0.007704522460699081\nStep 626, loss: 0.00867677852511406\nStep 627, loss: 0.008672015741467476\nStep 628, loss: 0.00815113540738821\nStep 629, loss: 0.009535595774650574\nStep 630, loss: 0.007677331566810608\nStep 631, loss: 0.009011753834784031\nStep 632, loss: 0.009445476345717907\nStep 468, loss: 0.007693788968026638\nStep 469, loss: 0.009316098876297474\nStep 470, loss: 0.009146071039140224\nStep 471, loss: 0.008873512037098408\nStep 472, loss: 0.009403959847986698\nStep 473, loss: 0.009459185414016247\nStep 474, loss: 0.008951995521783829\nStep 475, loss: 0.0092973243445158\nStep 476, loss: 0.008307360112667084\nStep 477, loss: 0.00865758117288351\nStep 478, loss: 0.00900979619473219\nStep 479, loss: 0.007995071820914745\nStep 480, loss: 0.008432473056018353\nStep 481, loss: 0.007788315415382385\nStep 482, loss: 0.00980622973293066\nStep 483, loss: 0.010403353720903397\nStep 484, loss: 0.009630637243390083\nStep 485, loss: 0.009670045226812363\nStep 486, loss: 0.0090246656909585\nStep 487, loss: 0.00780887296423316\nStep 488, loss: 0.008700089529156685\nStep 489, loss: 0.00829536933451891\nStep 490, loss: 0.01011315081268549\nStep 491, loss: 0.009101804345846176\nStep 492, loss: 0.00783759355545044\nStep 493, loss: 0.007564619183540344\nStep 494, loss: 0.008335075341165066\nStep 633, loss: 0.010466964915394783\nStep 634, loss: 0.009875749237835407\nStep 635, loss: 0.008144666440784931\nStep 636, loss: 0.009463016875088215\nStep 637, loss: 0.011229046620428562\nStep 638, loss: 0.009615120477974415\nStep 639, loss: 0.007913789711892605\nStep 640, loss: 0.00920387078076601\nStep 641, loss: 0.009785228408873081\nStep 642, loss: 0.008528223261237144\nStep 643, loss: 0.009180537424981594\nStep 644, loss: 0.007542653940618038\nStep 645, loss: 0.00836759153753519\nStep 646, loss: 0.010244078934192657\nStep 647, loss: 0.00858389399945736\nStep 648, loss: 0.008339446038007736\nStep 649, loss: 0.008229510858654976\nStep 650, loss: 0.009147602133452892\nStep 651, loss: 0.007746502757072449\nStep 652, 
loss: 0.008200840093195438\nStep 653, loss: 0.007780694402754307\nStep 654, loss: 0.008361638523638248\nStep 655, loss: 0.008371165953576565\nStep 656, loss: 0.007468099240213633\nStep 657, loss: 0.009446601383388042\nStep 658, loss: 0.009341231547296047\nStep 659, loss: 0.009194646961987019\nStep 495, loss: 0.008907189592719078\nStep 496, loss: 0.009713673032820225\nStep 497, loss: 0.009148812852799892\nStep 498, loss: 0.008763691410422325\nStep 499, loss: 0.008145195432007313\nStep 500, loss: 0.008057684637606144\nStep 501, loss: 0.008521325886249542\nStep 502, loss: 0.009541303850710392\nStep 503, loss: 0.008878038264811039\nStep 504, loss: 0.008646117523312569\nStep 505, loss: 0.008850847370922565\nStep 506, loss: 0.008876285515725613\nStep 507, loss: 0.009227375499904156\nStep 508, loss: 0.010148166678845882\nStep 509, loss: 0.00796295516192913\nStep 510, loss: 0.008789869025349617\nStep 511, loss: 0.008849036879837513\nStep 512, loss: 0.007304091937839985\nStep 513, loss: 0.00913157407194376\nStep 514, loss: 0.010260028764605522\nStep 515, loss: 0.007284593302756548\nStep 516, loss: 0.009409734979271889\nStep 517, loss: 0.009817644022405148\nStep 518, loss: 0.009014584124088287\nStep 519, loss: 0.008146864362061024\nStep 520, loss: 0.007268956862390041\nStep 521, loss: 0.009374018758535385\nStep 660, loss: 0.009197157807648182\nStep 661, loss: 0.006940855644643307\nStep 662, loss: 0.007554870098829269\nStep 522, loss: 0.00889340229332447\nStep 523, loss: 0.00863998755812645\nStep 524, loss: 0.007970698177814484\nStep 525, loss: 0.009422356262803078\nStep 526, loss: 0.007659026421606541\nStep 527, loss: 0.009303899481892586\nStep 528, loss: 0.008360748179256916\nStep 529, loss: 0.009176895022392273\nStep 530, loss: 0.00965078640729189\nStep 531, loss: 0.009549814276397228\nStep 532, loss: 0.008250241167843342\nStep 533, loss: 0.008716493844985962\nStep 534, loss: 0.009742394089698792\nStep 535, loss: 0.008888865821063519\nStep 536, loss: 0.008021022193133831\nStep 537, loss: 0.008457610383629799\nStep 538, loss: 0.008732296526432037\nStep 539, loss: 0.008125531487166882\nStep 540, loss: 0.007404815871268511\nStep 541, loss: 0.008951316587626934\nStep 542, loss: 0.009075532667338848\nStep 543, loss: 0.008221290074288845\nStep 544, loss: 0.009991692379117012\nStep 545, loss: 0.010783315636217594\nStep 546, loss: 0.009887837804853916\nStep 547, loss: 0.009044837206602097\nStep 548, loss: 0.008554411120712757\nStep 441, loss: 0.008795896545052528\nStep 442, loss: 0.009172515943646431\nStep 443, loss: 0.008903468027710915\nStep 444, loss: 0.010192408226430416\nStep 445, loss: 0.008738502860069275\nStep 446, loss: 0.009220161475241184\nStep 447, loss: 0.00937176775187254\nStep 448, loss: 0.009126019664108753\nStep 449, loss: 0.008642616681754589\nStep 450, loss: 0.008268099278211594\nStep 451, loss: 0.009160791523754597\nStep 452, loss: 0.009702763520181179\nStep 453, loss: 0.009337768889963627\nStep 454, loss: 0.008436117321252823\nStep 455, loss: 0.00928843766450882\nStep 456, loss: 0.008119070902466774\nStep 457, loss: 0.007788764778524637\nStep 458, loss: 0.008930844254791737\nStep 459, loss: 0.010214053094387054\nStep 460, loss: 0.00870064552873373\nStep 461, loss: 0.009047522209584713\nStep 462, loss: 0.009532912634313107\nStep 463, loss: 0.007899757474660873\nStep 464, loss: 0.009032838977873325\nStep 465, loss: 0.008456055074930191\nStep 466, loss: 0.00895161833614111\nStep 467, loss: 0.00792618002742529\nStep 549, loss: 0.008303544484078884\nStep 550, loss: 
0.009198504500091076\nStep 551, loss: 0.008177240379154682\nStep 468, loss: 0.007693788968026638\nStep 469, loss: 0.009316098876297474\nStep 470, loss: 0.009146071039140224\nStep 471, loss: 0.008873512037098408\nStep 472, loss: 0.009403959847986698\nStep 473, loss: 0.009459185414016247\nStep 474, loss: 0.008951995521783829\nStep 475, loss: 0.0092973243445158\nStep 476, loss: 0.008307360112667084\nStep 477, loss: 0.00865758117288351\nStep 478, loss: 0.00900979619473219\nStep 479, loss: 0.007995071820914745\nStep 480, loss: 0.008432473056018353\nStep 481, loss: 0.007788315415382385\nStep 482, loss: 0.00980622973293066\nStep 483, loss: 0.010403353720903397\nStep 484, loss: 0.009630637243390083\nStep 485, loss: 0.009670045226812363\nStep 486, loss: 0.0090246656909585\nStep 487, loss: 0.00780887296423316\nStep 488, loss: 0.008700089529156685\nStep 489, loss: 0.00829536933451891\nStep 490, loss: 0.01011315081268549\nStep 491, loss: 0.009101804345846176\nStep 492, loss: 0.00783759355545044\nStep 493, loss: 0.007564619183540344\nStep 494, loss: 0.008335075341165066\nStep 552, loss: 0.008611418306827545\nStep 553, loss: 0.008411903865635395\nStep 554, loss: 0.010022832080721855\nStep 555, loss: 0.008083075284957886\nStep 556, loss: 0.008184091188013554\nStep 557, loss: 0.009950775653123856\nStep 558, loss: 0.007724710740149021\nStep 559, loss: 0.010604195296764374\nStep 560, loss: 0.010046249255537987\nStep 561, loss: 0.008444340899586678\nStep 562, loss: 0.00951379630714655\nStep 563, loss: 0.009500275366008282\nStep 564, loss: 0.0098471874371171\nStep 565, loss: 0.009236135520040989\nStep 566, loss: 0.008145434781908989\nStep 567, loss: 0.008019921369850636\nStep 568, loss: 0.008847599849104881\nStep 569, loss: 0.009217997081577778\nStep 570, loss: 0.008589738979935646\nStep 571, loss: 0.008038138970732689\nStep 572, loss: 0.008367457427084446\nStep 573, loss: 0.008337149396538734\nStep 574, loss: 0.007989796809852123\nStep 575, loss: 0.010115966200828552\nStep 576, loss: 0.008840972557663918\nStep 577, loss: 0.007866596803069115\nStep 578, loss: 0.008375775068998337\nStep 495, loss: 0.008907189592719078\nStep 496, loss: 0.009713673032820225\nStep 497, loss: 0.009148812852799892\nStep 498, loss: 0.008763691410422325\nStep 499, loss: 0.008145195432007313\nStep 500, loss: 0.008057684637606144\nStep 501, loss: 0.008521325886249542\nStep 502, loss: 0.009541303850710392\nStep 503, loss: 0.008878038264811039\nStep 504, loss: 0.008646117523312569\nStep 505, loss: 0.008850847370922565\nStep 506, loss: 0.008876285515725613\nStep 507, loss: 0.009227375499904156\nStep 508, loss: 0.010148166678845882\nStep 509, loss: 0.00796295516192913\nStep 510, loss: 0.008789869025349617\nStep 511, loss: 0.008849036879837513\nStep 512, loss: 0.007304091937839985\nStep 513, loss: 0.00913157407194376\nStep 514, loss: 0.010260028764605522\nStep 515, loss: 0.007284593302756548\nStep 516, loss: 0.009409734979271889\nStep 517, loss: 0.009817644022405148\nStep 518, loss: 0.009014584124088287\nStep 519, loss: 0.008146864362061024\nStep 520, loss: 0.007268956862390041\nStep 521, loss: 0.009374018758535385\nStep 579, loss: 0.008921980857849121\nStep 580, loss: 0.008208159357309341\nStep 581, loss: 0.008356332778930664\nStep 582, loss: 0.01047302596271038\nStep 583, loss: 0.010049899108707905\nStep 584, loss: 0.008961194194853306\nStep 585, loss: 0.009844383224844933\nStep 586, loss: 0.008554906584322453\nStep 587, loss: 0.010228397324681282\nStep 588, loss: 0.00855023879557848\nStep 589, loss: 0.008921374566853046\nStep 590, loss: 
0.00938184279948473\nStep 591, loss: 0.007259429432451725\nStep 592, loss: 0.008099344559013844\nStep 593, loss: 0.00787374097853899\nStep 594, loss: 0.009231695905327797\nStep 595, loss: 0.008884775452315807\nStep 596, loss: 0.008983580395579338\nStep 597, loss: 0.008393236435949802\nStep 598, loss: 0.00823807530105114\nStep 599, loss: 0.00898067932575941\nStep 600, loss: 0.008824114687740803\nStep 601, loss: 0.00879320502281189\nStep 602, loss: 0.007165095303207636\nStep 603, loss: 0.00796660128980875\nStep 604, loss: 0.0073905885219573975\nStep 605, loss: 0.008228345774114132\nStep 522, loss: 0.00889340229332447\nStep 523, loss: 0.00863998755812645\nStep 524, loss: 0.007970698177814484\nStep 525, loss: 0.009422356262803078\nStep 526, loss: 0.007659026421606541\nStep 527, loss: 0.009303899481892586\nStep 528, loss: 0.008360748179256916\nStep 529, loss: 0.009176895022392273\nStep 530, loss: 0.00965078640729189\nStep 531, loss: 0.009549814276397228\nStep 532, loss: 0.008250241167843342\nStep 533, loss: 0.008716493844985962\nStep 534, loss: 0.009742394089698792\nStep 535, loss: 0.008888865821063519\nStep 536, loss: 0.008021022193133831\nStep 537, loss: 0.008457610383629799\nStep 538, loss: 0.008732296526432037\nStep 539, loss: 0.008125531487166882\nStep 540, loss: 0.007404815871268511\nStep 541, loss: 0.008951316587626934\nStep 542, loss: 0.009075532667338848\nStep 543, loss: 0.008221290074288845\nStep 544, loss: 0.009991692379117012\nStep 545, loss: 0.010783315636217594\nStep 546, loss: 0.009887837804853916\nStep 547, loss: 0.009044837206602097\nStep 548, loss: 0.008554411120712757\nStep 606, loss: 0.008373823948204517\nStep 607, loss: 0.008092723786830902\nStep 608, loss: 0.009472115896642208\nStep 609, loss: 0.009028247557580471\nStep 610, loss: 0.009447307325899601\nStep 611, loss: 0.006945042870938778\nStep 612, loss: 0.008965948596596718\nStep 613, loss: 0.009417682886123657\nStep 614, loss: 0.01058974489569664\nStep 615, loss: 0.009123829193413258\nStep 616, loss: 0.008527892641723156\nStep 617, loss: 0.006956911645829678\nStep 618, loss: 0.008063074201345444\nStep 619, loss: 0.008881963789463043\nStep 620, loss: 0.009354100562632084\nStep 621, loss: 0.007262085098773241\nStep 622, loss: 0.009552895091474056\nStep 623, loss: 0.007794888224452734\nStep 624, loss: 0.00895579531788826\nStep 625, loss: 0.007704522460699081\nStep 626, loss: 0.00867677852511406\nStep 627, loss: 0.008672015741467476\nStep 628, loss: 0.00815113540738821\nStep 629, loss: 0.009535595774650574\nStep 630, loss: 0.007677331566810608\nStep 631, loss: 0.009011753834784031\nStep 632, loss: 0.009445476345717907\nStep 549, loss: 0.008303544484078884\nStep 550, loss: 0.009198504500091076\nStep 551, loss: 0.008177240379154682\nStep 633, loss: 0.010466964915394783\nStep 634, loss: 0.009875749237835407\nStep 635, loss: 0.008144666440784931\nStep 636, loss: 0.009463016875088215\nStep 637, loss: 0.011229046620428562\nStep 638, loss: 0.009615120477974415\nStep 639, loss: 0.007913789711892605\nStep 640, loss: 0.00920387078076601\nStep 641, loss: 0.009785228408873081\nStep 642, loss: 0.008528223261237144\nStep 643, loss: 0.009180537424981594\nStep 644, loss: 0.007542653940618038\nStep 645, loss: 0.00836759153753519\nStep 646, loss: 0.010244078934192657\nStep 647, loss: 0.00858389399945736\nStep 648, loss: 0.008339446038007736\nStep 649, loss: 0.008229510858654976\nStep 650, loss: 0.009147602133452892\nStep 651, loss: 0.007746502757072449\nStep 652, loss: 0.008200840093195438\nStep 653, loss: 0.007780694402754307\nStep 654, 
loss: 0.008361638523638248\n[duplicated terminal scrollback collapsed: the training log from Step 441 (loss: 0.008795896545052528) through Step 883 (loss: 0.008030290715396404) was re-captured several times verbatim in this recording; loss values stay in the 0.007 to 0.011 range across the span, with no clear downward trend]\nStep 849, loss: 
0.008061840198934078\nStep 850, loss: 0.009906782768666744\nStep 851, loss: 0.009139358066022396\nStep 852, loss: 0.008225596509873867\nStep 853, loss: 0.009153551422059536\nStep 854, loss: 0.011232911609113216\nStep 882, loss: 0.009856571443378925\nStep 883, loss: 0.008030290715396404\nStep 855, loss: 0.007757157552987337\nStep 856, loss: 0.008269049227237701\nStep 857, loss: 0.008513633161783218\nStep 858, loss: 0.00846780650317669\nStep 859, loss: 0.008334596641361713\nStep 860, loss: 0.008069132454693317\nStep 861, loss: 0.008460940793156624\nStep 862, loss: 0.008301100693643093\nStep 863, loss: 0.009122634306550026\nStep 864, loss: 0.009993168525397778\nStep 865, loss: 0.00745188957080245\nStep 866, loss: 0.010728954337537289\nStep 867, loss: 0.01030214224010706\nStep 868, loss: 0.007548552472144365\nStep 869, loss: 0.008158835582435131\nStep 870, loss: 0.008571529760956764\nStep 871, loss: 0.008947694674134254\nStep 872, loss: 0.009027107618749142\nStep 873, loss: 0.007371645420789719\nStep 874, loss: 0.008784396573901176\nStep 875, loss: 0.008641858585178852\nStep 876, loss: 0.007751908153295517\nStep 877, loss: 0.009617197327315807\nStep 878, loss: 0.008151952177286148\nStep 879, loss: 0.008751665242016315\nStep 880, loss: 0.008701476268470287\nStep 881, loss: 0.008113049902021885\nStep 882, loss: 0.009856571443378925\nStep 883, loss: 0.008030290715396404\nStep 663, loss: 0.008035499602556229\nStep 664, loss: 0.008211825042963028\nStep 665, loss: 0.008599837310612202\nStep 666, loss: 0.009215956553816795\nStep 667, loss: 0.00814542081207037\nStep 668, loss: 0.008338076993823051\nStep 669, loss: 0.010524684563279152\nStep 670, loss: 0.00987721886485815\nStep 671, loss: 0.008724103681743145\nStep 672, loss: 0.008830797858536243\nStep 673, loss: 0.009168988093733788\nStep 674, loss: 0.009654577821493149\nStep 675, loss: 0.008422465063631535\nStep 676, loss: 0.008703668601810932\nStep 677, loss: 0.008970540948212147\nStep 678, loss: 0.00894303061068058\nStep 679, loss: 0.009513182565569878\nStep 680, loss: 0.007564784027636051\nStep 681, loss: 0.007781901862472296\nStep 682, loss: 0.009456507861614227\nStep 683, loss: 0.00782136619091034\nStep 684, loss: 0.007891003042459488\nStep 685, loss: 0.009091738611459732\nStep 686, loss: 0.008492560125887394\nStep 687, loss: 0.008510823361575603\nStep 688, loss: 0.007513571064919233\nStep 689, loss: 0.008806859143078327\nStep 690, loss: 0.00823906809091568\nStep 691, loss: 0.008473768830299377\nStep 692, loss: 0.008500708267092705\nStep 693, loss: 0.008233590051531792\nStep 694, loss: 0.00800382811576128\nStep 695, loss: 0.0074405609630048275\nStep 696, loss: 0.00785791128873825\nStep 697, loss: 0.008391282521188259\nStep 698, loss: 0.008259993977844715\nStep 699, loss: 0.008108203299343586\nStep 700, loss: 0.009265334345400333\nStep 701, loss: 0.007261474616825581\nStep 702, loss: 0.008634131401777267\nStep 703, loss: 0.008237874135375023\nStep 704, loss: 0.009144485928118229\nStep 705, loss: 0.007042833138257265\nStep 706, loss: 0.008437125012278557\nStep 707, loss: 0.007596829906105995\nStep 708, loss: 0.007881579920649529\nStep 709, loss: 0.006896924693137407\nStep 710, loss: 0.008413707837462425\nStep 711, loss: 0.009662673808634281\nStep 712, loss: 0.008761812932789326\nStep 713, loss: 0.00932319462299347\nStep 714, loss: 0.008840029127895832\nStep 715, loss: 0.008344517089426517\nStep 716, loss: 0.01156007032841444\nStep 717, loss: 0.008096556179225445\nStep 718, loss: 0.009683777578175068\nStep 719, loss: 0.007742816582322121\nStep 720, 
loss: 0.007568707689642906\nStep 721, loss: 0.008272613398730755\nStep 722, loss: 0.008740808814764023\nStep 723, loss: 0.008758014999330044\nStep 724, loss: 0.007208905648440123\nStep 725, loss: 0.008533834479749203\nStep 726, loss: 0.0073806592263281345\nStep 727, loss: 0.007143620401620865\nStep 728, loss: 0.007509579416364431\nStep 729, loss: 0.006937501486390829\nStep 730, loss: 0.008098526857793331\nStep 731, loss: 0.009508779272437096\nStep 732, loss: 0.008234464563429356\nStep 733, loss: 0.009609385393559933\nStep 734, loss: 0.007654521614313126\nStep 735, loss: 0.00957720261067152\nStep 736, loss: 0.008412269875407219\nStep 737, loss: 0.007896238006651402\nStep 738, loss: 0.008176438510417938\nStep 739, loss: 0.009090610779821873\nStep 740, loss: 0.007537922356277704\nStep 741, loss: 0.008056262508034706\nStep 742, loss: 0.007985595613718033\nStep 743, loss: 0.009384697303175926\nStep 744, loss: 0.00739773316308856\nStep 745, loss: 0.008154237642884254\nStep 746, loss: 0.00837451871484518\nStep 747, loss: 0.007659763563424349\nStep 748, loss: 0.009576207958161831\nStep 749, loss: 0.00963237788528204\nStep 750, loss: 0.009114114567637444\nStep 751, loss: 0.007480993866920471\nStep 752, loss: 0.007710766047239304\nStep 753, loss: 0.009427975863218307\nStep 754, loss: 0.007921767421066761\nStep 755, loss: 0.007757353130728006\nStep 756, loss: 0.008287258446216583\nStep 757, loss: 0.008245065808296204\nStep 758, loss: 0.006868288852274418\nStep 759, loss: 0.00997896771878004\nStep 760, loss: 0.007407574448734522\nStep 761, loss: 0.008717725053429604\nStep 762, loss: 0.008936473168432713\nStep 763, loss: 0.008730635046958923\nStep 764, loss: 0.008745621889829636\nStep 765, loss: 0.007911726832389832\nStep 766, loss: 0.007758614141494036\nStep 767, loss: 0.007411055266857147\nStep 768, loss: 0.00896456465125084\nStep 769, loss: 0.009281367063522339\nStep 770, loss: 0.009555153548717499\nStep 771, loss: 0.007930740714073181\nStep 772, loss: 0.0082415621727705\nStep 773, loss: 0.010451523587107658\nStep 774, loss: 0.009951524436473846\nStep 775, loss: 0.008360268548130989\nStep 776, loss: 0.010047543793916702\nStep 777, loss: 0.007714583072811365\nStep 778, loss: 0.008991550654172897\nStep 779, loss: 0.009161995723843575\nStep 780, loss: 0.009513900615274906\nStep 781, loss: 0.007224121131002903\nStep 782, loss: 0.007405804470181465\nStep 783, loss: 0.00722330529242754\nStep 784, loss: 0.009540022350847721\nStep 785, loss: 0.007725142873823643\nStep 786, loss: 0.009445969946682453\nStep 787, loss: 0.008166223764419556\nStep 788, loss: 0.00848563201725483\nStep 789, loss: 0.008764345198869705\nStep 790, loss: 0.008157470263540745\nStep 791, loss: 0.007171101868152618\nStep 792, loss: 0.008356235921382904\nStep 793, loss: 0.009822948835790157\nStep 794, loss: 0.008953233249485493\nStep 795, loss: 0.009083397686481476\nStep 796, loss: 0.007174731232225895\nStep 797, loss: 0.008239420130848885\nStep 798, loss: 0.007826566696166992\nStep 799, loss: 0.009894120506942272\nStep 800, loss: 0.008832914754748344\nStep 801, loss: 0.008360965177416801\nStep 802, loss: 0.008337488397955894\nStep 803, loss: 0.009190955199301243\nStep 804, loss: 0.010512374341487885\nStep 805, loss: 0.008843621239066124\nStep 806, loss: 0.009071932174265385\nStep 807, loss: 0.008909512311220169\nStep 808, loss: 0.0070503465831279755\nStep 809, loss: 0.007978579960763454\nStep 810, loss: 0.008387651294469833\nStep 811, loss: 0.00875979382544756\nStep 812, loss: 0.00880368985235691\nStep 813, loss: 
0.008246597833931446\nStep 814, loss: 0.009645447134971619\nStep 815, loss: 0.009918425232172012\nStep 816, loss: 0.008361190557479858\nStep 817, loss: 0.009313860908150673\nStep 818, loss: 0.008466468192636967\nStep 819, loss: 0.00854664295911789\nStep 820, loss: 0.008501839824020863\nStep 821, loss: 0.006838631816208363\nStep 822, loss: 0.008860883302986622\nStep 823, loss: 0.006882588379085064\nStep 824, loss: 0.007776821032166481\nStep 825, loss: 0.008018260821700096\nStep 826, loss: 0.009293113835155964\nStep 827, loss: 0.007893583737313747\nStep 828, loss: 0.007864481769502163\nStep 829, loss: 0.00944742001593113\nStep 830, loss: 0.008647500537335873\nStep 831, loss: 0.006644923705607653\nStep 832, loss: 0.009370350278913975\nStep 833, loss: 0.008215242065489292\nStep 834, loss: 0.008311529643833637\nStep 835, loss: 0.007736304774880409\nStep 836, loss: 0.009074011817574501\nStep 837, loss: 0.008486744947731495\nStep 838, loss: 0.0074049122631549835\nStep 839, loss: 0.0076318043284118176\nStep 840, loss: 0.0077585927210748196\nStep 841, loss: 0.009308401495218277\nStep 842, loss: 0.008833185769617558\nStep 843, loss: 0.0078051951713860035\nStep 844, loss: 0.008552715182304382\nStep 845, loss: 0.009183364920318127\nStep 846, loss: 0.00813213363289833\nStep 847, loss: 0.008879457600414753\nStep 848, loss: 0.008119485341012478\nStep 849, loss: 0.008061840198934078\nStep 850, loss: 0.009906782768666744\nStep 851, loss: 0.009139358066022396\nStep 852, loss: 0.008225596509873867\nStep 853, loss: 0.009153551422059536\nStep 854, loss: 0.011232911609113216\nStep 855, loss: 0.007757157552987337\nStep 856, loss: 0.008269049227237701\nStep 857, loss: 0.008513633161783218\nStep 858, loss: 0.00846780650317669\nStep 859, loss: 0.008334596641361713\nStep 860, loss: 0.008069132454693317\nStep 861, loss: 0.008460940793156624\nStep 862, loss: 0.008301100693643093\nStep 863, loss: 0.009122634306550026\nStep 864, loss: 0.009993168525397778\nStep 865, loss: 0.00745188957080245\nStep 866, loss: 0.010728954337537289\nStep 867, loss: 0.01030214224010706\nStep 868, loss: 0.007548552472144365\nStep 869, loss: 0.008158835582435131\nStep 870, loss: 0.008571529760956764\nStep 871, loss: 0.008947694674134254\nStep 872, loss: 0.009027107618749142\nStep 873, loss: 0.007371645420789719\nStep 874, loss: 0.008784396573901176\nStep 875, loss: 0.008641858585178852\nStep 876, loss: 0.007751908153295517\nStep 877, loss: 0.009617197327315807\nStep 878, loss: 0.008151952177286148\nStep 879, loss: 0.008751665242016315\nStep 880, loss: 0.008701476268470287\nStep 881, loss: 0.008113049902021885\nStep 663, loss: 0.008035499602556229\nStep 664, loss: 0.008211825042963028\nStep 665, loss: 0.008599837310612202\nStep 666, loss: 0.009215956553816795\nStep 667, loss: 0.00814542081207037\nStep 668, loss: 0.008338076993823051\nStep 669, loss: 0.010524684563279152\nStep 670, loss: 0.00987721886485815\nStep 671, loss: 0.008724103681743145\nStep 672, loss: 0.008830797858536243\nStep 673, loss: 0.009168988093733788\nStep 674, loss: 0.009654577821493149\nStep 675, loss: 0.008422465063631535\nStep 676, loss: 0.008703668601810932\nStep 677, loss: 0.008970540948212147\nStep 678, loss: 0.00894303061068058\nStep 679, loss: 0.009513182565569878\nStep 680, loss: 0.007564784027636051\nStep 681, loss: 0.007781901862472296\nStep 682, loss: 0.009456507861614227\nStep 683, loss: 0.00782136619091034\nStep 684, loss: 0.007891003042459488\nStep 685, loss: 0.009091738611459732\nStep 686, loss: 0.008492560125887394\nStep 687, loss: 0.008510823361575603\nStep 
688, loss: 0.007513571064919233\nStep 689, loss: 0.008806859143078327\nStep 882, loss: 0.009856571443378925\nStep 883, loss: 0.008030290715396404\nStep 690, loss: 0.00823906809091568\nStep 691, loss: 0.008473768830299377\nStep 692, loss: 0.008500708267092705\nStep 693, loss: 0.008233590051531792\nStep 694, loss: 0.00800382811576128\nStep 695, loss: 0.0074405609630048275\nStep 696, loss: 0.00785791128873825\nStep 697, loss: 0.008391282521188259\nStep 698, loss: 0.008259993977844715\nStep 699, loss: 0.008108203299343586\nStep 700, loss: 0.009265334345400333\nStep 701, loss: 0.007261474616825581\nStep 702, loss: 0.008634131401777267\nStep 703, loss: 0.008237874135375023\nStep 704, loss: 0.009144485928118229\nStep 705, loss: 0.007042833138257265\nStep 706, loss: 0.008437125012278557\nStep 707, loss: 0.007596829906105995\nStep 708, loss: 0.007881579920649529\nStep 709, loss: 0.006896924693137407\nStep 710, loss: 0.008413707837462425\nStep 711, loss: 0.009662673808634281\nStep 712, loss: 0.008761812932789326\nStep 713, loss: 0.00932319462299347\nStep 714, loss: 0.008840029127895832\nStep 715, loss: 0.008344517089426517\nStep 716, loss: 0.01156007032841444\nStep 717, loss: 0.008096556179225445\nStep 718, loss: 0.009683777578175068\nStep 719, loss: 0.007742816582322121\nStep 720, loss: 0.007568707689642906\nStep 721, loss: 0.008272613398730755\nStep 722, loss: 0.008740808814764023\nStep 723, loss: 0.008758014999330044\nStep 724, loss: 0.007208905648440123\nStep 725, loss: 0.008533834479749203\nStep 726, loss: 0.0073806592263281345\nStep 727, loss: 0.007143620401620865\nStep 728, loss: 0.007509579416364431\nStep 729, loss: 0.006937501486390829\nStep 730, loss: 0.008098526857793331\nStep 731, loss: 0.009508779272437096\nStep 732, loss: 0.008234464563429356\nStep 733, loss: 0.009609385393559933\nStep 734, loss: 0.007654521614313126\nStep 735, loss: 0.00957720261067152\nStep 736, loss: 0.008412269875407219\nStep 737, loss: 0.007896238006651402\nStep 738, loss: 0.008176438510417938\nStep 739, loss: 0.009090610779821873\nStep 740, loss: 0.007537922356277704\nStep 741, loss: 0.008056262508034706\nStep 742, loss: 0.007985595613718033\nStep 743, loss: 0.009384697303175926\nStep 744, loss: 0.00739773316308856\nStep 745, loss: 0.008154237642884254\nStep 746, loss: 0.00837451871484518\nStep 747, loss: 0.007659763563424349\nStep 748, loss: 0.009576207958161831\nStep 749, loss: 0.00963237788528204\nStep 750, loss: 0.009114114567637444\nStep 751, loss: 0.007480993866920471\nStep 752, loss: 0.007710766047239304\nStep 753, loss: 0.009427975863218307\nStep 754, loss: 0.007921767421066761\nStep 755, loss: 0.007757353130728006\nStep 756, loss: 0.008287258446216583\nStep 757, loss: 0.008245065808296204\nStep 758, loss: 0.006868288852274418\nStep 759, loss: 0.00997896771878004\nStep 760, loss: 0.007407574448734522\nStep 761, loss: 0.008717725053429604\nStep 762, loss: 0.008936473168432713\nStep 763, loss: 0.008730635046958923\nStep 764, loss: 0.008745621889829636\nStep 765, loss: 0.007911726832389832\nStep 766, loss: 0.007758614141494036\nStep 767, loss: 0.007411055266857147\nStep 768, loss: 0.00896456465125084\nStep 769, loss: 0.009281367063522339\nStep 770, loss: 0.009555153548717499\nStep 771, loss: 0.007930740714073181\nStep 772, loss: 0.0082415621727705\nStep 773, loss: 0.010451523587107658\nStep 774, loss: 0.009951524436473846\nStep 775, loss: 0.008360268548130989\nStep 776, loss: 0.010047543793916702\nStep 777, loss: 0.007714583072811365\nStep 778, loss: 0.008991550654172897\nStep 779, loss: 
0.009161995723843575\nStep 780, loss: 0.009513900615274906\nStep 781, loss: 0.007224121131002903\nStep 782, loss: 0.007405804470181465\nStep 783, loss: 0.00722330529242754\nStep 784, loss: 0.009540022350847721\nStep 785, loss: 0.007725142873823643\nStep 786, loss: 0.009445969946682453\nStep 787, loss: 0.008166223764419556\nStep 788, loss: 0.00848563201725483\nStep 789, loss: 0.008764345198869705\nStep 790, loss: 0.008157470263540745\nStep 791, loss: 0.007171101868152618\nStep 792, loss: 0.008356235921382904\nStep 793, loss: 0.009822948835790157\nStep 794, loss: 0.008953233249485493\nStep 795, loss: 0.009083397686481476\nStep 796, loss: 0.007174731232225895\nStep 797, loss: 0.008239420130848885\nStep 798, loss: 0.007826566696166992\nStep 799, loss: 0.009894120506942272\nStep 800, loss: 0.008832914754748344\nStep 801, loss: 0.008360965177416801\nStep 802, loss: 0.008337488397955894\nStep 803, loss: 0.009190955199301243\nStep 804, loss: 0.010512374341487885\nStep 805, loss: 0.008843621239066124\nStep 806, loss: 0.009071932174265385\nStep 807, loss: 0.008909512311220169\nStep 808, loss: 0.0070503465831279755\nStep 809, loss: 0.007978579960763454\nStep 810, loss: 0.008387651294469833\nStep 811, loss: 0.00875979382544756\nStep 812, loss: 0.00880368985235691\nStep 813, loss: 0.008246597833931446\nStep 814, loss: 0.009645447134971619\nStep 815, loss: 0.009918425232172012\nStep 816, loss: 0.008361190557479858\nStep 817, loss: 0.009313860908150673\nStep 818, loss: 0.008466468192636967\nStep 819, loss: 0.00854664295911789\nStep 820, loss: 0.008501839824020863\nStep 821, loss: 0.006838631816208363\nStep 822, loss: 0.008860883302986622\nStep 823, loss: 0.006882588379085064\nStep 824, loss: 0.007776821032166481\nStep 825, loss: 0.008018260821700096\nStep 826, loss: 0.009293113835155964\nStep 827, loss: 0.007893583737313747\nStep 828, loss: 0.007864481769502163\nStep 829, loss: 0.00944742001593113\nStep 830, loss: 0.008647500537335873\nStep 831, loss: 0.006644923705607653\nStep 832, loss: 0.009370350278913975\nStep 833, loss: 0.008215242065489292\nStep 834, loss: 0.008311529643833637\nStep 835, loss: 0.007736304774880409\nStep 836, loss: 0.009074011817574501\nStep 837, loss: 0.008486744947731495\nStep 838, loss: 0.0074049122631549835\nStep 839, loss: 0.0076318043284118176\nStep 840, loss: 0.0077585927210748196\nStep 841, loss: 0.009308401495218277\nStep 842, loss: 0.008833185769617558\nStep 843, loss: 0.0078051951713860035\nStep 844, loss: 0.008552715182304382\nStep 845, loss: 0.009183364920318127\nStep 846, loss: 0.00813213363289833\nStep 847, loss: 0.008879457600414753\nStep 848, loss: 0.008119485341012478\nStep 849, loss: 0.008061840198934078\nStep 850, loss: 0.009906782768666744\nStep 851, loss: 0.009139358066022396\nStep 852, loss: 0.008225596509873867\nStep 853, loss: 0.009153551422059536\nStep 854, loss: 0.011232911609113216\nStep 855, loss: 0.007757157552987337\nStep 856, loss: 0.008269049227237701\nStep 857, loss: 0.008513633161783218\nStep 858, loss: 0.00846780650317669\nStep 859, loss: 0.008334596641361713\nStep 860, loss: 0.008069132454693317\nStep 861, loss: 0.008460940793156624\nStep 862, loss: 0.008301100693643093\nStep 863, loss: 0.009122634306550026\nStep 864, loss: 0.009993168525397778\nStep 865, loss: 0.00745188957080245\nStep 866, loss: 0.010728954337537289\nStep 867, loss: 0.01030214224010706\nStep 868, loss: 0.007548552472144365\nStep 869, loss: 0.008158835582435131\nStep 870, loss: 0.008571529760956764\nStep 871, loss: 0.008947694674134254\nStep 872, loss: 
0.009027107618749142\nStep 873, loss: 0.007371645420789719\nStep 874, loss: 0.008784396573901176\nStep 875, loss: 0.008641858585178852\nStep 876, loss: 0.007751908153295517\nStep 877, loss: 0.009617197327315807\nStep 878, loss: 0.008151952177286148\nStep 879, loss: 0.008751665242016315\nStep 880, loss: 0.008701476268470287\nStep 881, loss: 0.008113049902021885\nStep 882, loss: 0.009856571443378925\nStep 883, loss: 0.008030290715396404\nsrun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun: got SIGCONT\nslurmstepd: error: *** JOB 3317098 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\nsrun: forcing job termination\nslurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n\n============================= JOB FEEDBACK =============================\n\nJob ID: 3317098\nCluster: hk\nUser/Group: tum_cte0515/hk-project-p0023960\nAccount: hk-project-p0023960\nState: CANCELLED (exit code 0)\nPartition: accelerated\nNodes: 2\nCores per node: 24\nNodelist: hkn[0629,0631]\nCPU Utilized: 03:36:02\nCPU Efficiency: 42.39% of 08:29:36 core-walltime\nJob Wall-clock time: 00:10:37\nStarttime: Fri Jul 4 11:09:47 2025\nEndtime: Fri Jul 4 11:20:24 2025\nMemory Utilized: 85.06 GB (estimated maximum)\nMemory Efficiency: 0.00% of 0.00 MB (0.00 MB/node)\nEnergy Consumed: 1535291 Joule / 426.469722222222 Watthours\nAverage node power draw: 2410.18995290424 Watt\n",log,tab +3938,4861520,"TERMINAL",0,0,"6441",,terminal_output +3939,4862174,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",1169,0,"",log,selection_mouse +3940,4862194,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",1168,0,"",log,selection_command +3941,4862271,"TERMINAL",0,0,"7552",,terminal_output +3942,4862926,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291728,0,"",log,selection_command +3943,4863319,"TERMINAL",0,0,"8663",,terminal_output +3944,4864369,"TERMINAL",0,0,"9774",,terminal_output +3945,4865408,"TERMINAL",0,0,"40885",,terminal_output +3946,4866070,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290982,0,"",log,selection_mouse +3947,4866071,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290981,0,"",log,selection_command +3948,4866421,"TERMINAL",0,0,"1996",,terminal_output +3949,4867008,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290884,0,"",log,selection_mouse +3950,4867153,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290882,5,"error",log,selection_mouse +3951,4867461,"TERMINAL",0,0,"250:0020:007",,terminal_output +3952,4867788,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290856,0,"",log,selection_mouse +3953,4867914,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290852,4,"srun",log,selection_mouse +3954,4868179,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290852,5,"srun:",log,selection_mouse 
+3955,4868179,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290852,6,"srun: ",log,selection_mouse +3956,4868180,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290852,9,"srun: got",log,selection_mouse +3957,4868180,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290852,10,"srun: got ",log,selection_mouse +3958,4868180,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290852,17,"srun: got SIGCONT",log,selection_mouse +3959,4868585,"TERMINAL",0,0,"3118",,terminal_output +3960,4869607,"TERMINAL",0,0,"4229",,terminal_output +3961,4870583,"TERMINAL",0,0,"53330",,terminal_output +3962,4871659,"TERMINAL",0,0,"6441",,terminal_output +3963,4872657,"TERMINAL",0,0,"7552",,terminal_output +3964,4873694,"TERMINAL",0,0,"8663",,terminal_output +3965,4874970,"TERMINAL",0,0,"9885",,terminal_output +3966,4875839,"TERMINAL",0,0,"51996",,terminal_output +3967,4876854,"TERMINAL",0,0,"210107",,terminal_output +3968,4877865,"TERMINAL",0,0,"3118",,terminal_output +3969,4878864,"TERMINAL",0,0,"4229",,terminal_output +3970,4879949,"TERMINAL",0,0,"53340",,terminal_output +3971,4880932,"TERMINAL",0,0,"6441",,terminal_output +3972,4881974,"TERMINAL",0,0,"7552",,terminal_output +3973,4882358,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290982,0,"",log,selection_mouse +3974,4882377,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290981,0,"",log,selection_command +3975,4883049,"TERMINAL",0,0,"8663",,terminal_output +3976,4884047,"TERMINAL",0,0,"9774",,terminal_output +3977,4885028,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290780,0,"",log,selection_mouse +3978,4885099,"TERMINAL",0,0,"6:00885",,terminal_output +3979,4885203,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,4,"srun",log,selection_mouse +3980,4885430,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,77,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun",log,selection_mouse +3981,4885494,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,78,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun:",log,selection_mouse +3982,4886125,"TERMINAL",0,0,"1996",,terminal_output +3983,4887160,"TERMINAL",0,0,"220207",,terminal_output +3984,4887185,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,79,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun: ",log,selection_mouse +3985,4887221,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,82,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun: got",log,selection_mouse +3986,4887222,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,83,"srun: Job step aborted: Waiting up to 
32 seconds for job step to finish.\nsrun: got ",log,selection_mouse +3987,4887238,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,90,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun: got SIGCONT",log,selection_mouse +3988,4887897,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,113,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun: got SIGCONT\nslurmstepd: error: ***",log,selection_mouse +3989,4887936,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,203,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun: got SIGCONT\nslurmstepd: error: *** JOB 3317098 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\nsrun: forcing job termination",log,selection_mouse +3990,4887937,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,224,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun: got SIGCONT\nslurmstepd: error: *** JOB 3317098 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\nsrun: forcing job termination\nslurmstepd: error: *",log,selection_mouse +3991,4888019,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,290,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun: got SIGCONT\nslurmstepd: error: *** JOB 3317098 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\nsrun: forcing job termination\nslurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +3992,4888052,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,231,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun: got SIGCONT\nslurmstepd: error: *** JOB 3317098 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\nsrun: forcing job termination\nslurmstepd: error: *** STEP",log,selection_mouse +3993,4888053,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,239,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun: got SIGCONT\nslurmstepd: error: *** JOB 3317098 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\nsrun: forcing job termination\nslurmstepd: error: *** STEP 3317098",log,selection_mouse +3994,4888074,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,203,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun: got SIGCONT\nslurmstepd: error: *** JOB 3317098 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\nsrun: forcing job termination",log,selection_mouse +3995,4888213,"TERMINAL",0,0,"3118",,terminal_output +3996,4888355,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,289,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun: got SIGCONT\nslurmstepd: error: *** JOB 3317098 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\nsrun: forcing job termination\nslurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***",log,selection_mouse 
+3997,4888436,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,290,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun: got SIGCONT\nslurmstepd: error: *** JOB 3317098 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\nsrun: forcing job termination\nslurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +3998,4888847,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291069,0,"",log,selection_mouse +3999,4889268,"TERMINAL",0,0,"4229",,terminal_output +4000,4889498,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291068,0,"",log,selection_mouse +4001,4889512,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291067,0,"",log,selection_command +4002,4889663,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291067,1,"*",log,selection_mouse +4003,4889666,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291068,0,"",log,selection_command +4004,4889746,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291068,1,"\n",log,selection_mouse +4005,4889746,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291068,61,"\n\n============================= JOB FEEDBACK ================",log,selection_mouse +4006,4889747,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291068,75,"\n\n============================= JOB FEEDBACK =============================\n",log,selection_mouse +4007,4889803,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291068,4,"\n\n==",log,selection_mouse +4008,4889804,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291068,2,"\n\n",log,selection_mouse +4009,4889942,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291068,1,"\n",log,selection_mouse +4010,4889991,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290983,85,"slurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***",log,selection_mouse +4011,4890079,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290953,115,"srun: forcing job termination\nslurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***",log,selection_mouse +4012,4890080,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290870,198,"slurmstepd: error: *** JOB 3317098 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\nsrun: forcing job termination\nslurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***",log,selection_mouse +4013,4890080,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290852,216,"srun: got 
SIGCONT\nslurmstepd: error: *** JOB 3317098 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\nsrun: forcing job termination\nslurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***",log,selection_mouse +4014,4890169,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,289,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun: got SIGCONT\nslurmstepd: error: *** JOB 3317098 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\nsrun: forcing job termination\nslurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***",log,selection_mouse +4015,4890289,"TERMINAL",0,0,"53350",,terminal_output +4016,4891419,"TERMINAL",0,0,"6441",,terminal_output +4017,4892385,"TERMINAL",0,0,"7552",,terminal_output +4018,4893429,"TERMINAL",0,0,"8663",,terminal_output +4019,4894511,"TERMINAL",0,0,"9774",,terminal_output +4020,4895560,"TERMINAL",0,0,"10885",,terminal_output +4021,4896497,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291069,0,"",log,selection_mouse +4022,4896587,"TERMINAL",0,0,"1996",,terminal_output +4023,4897477,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291006,63,"STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +4024,4897528,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291000,69,": *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +4025,4897529,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290997,72,"ror: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +4026,4897529,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290995,74,"error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +4027,4897567,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290993,76,": error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +4028,4897568,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290992,77,"d: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +4029,4897580,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290991,78,"pd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +4030,4897633,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290960,109,"orcing job termination\nslurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +4031,4897646,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290959,110,"forcing job termination\nslurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +4032,4897656,"TERMINAL",0,0,"230307",,terminal_output 
+4033,4897669,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290958,111," forcing job termination\nslurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +4034,4897693,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290957,112,": forcing job termination\nslurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +4035,4897750,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290872,197,"urmstepd: error: *** JOB 3317098 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\nsrun: forcing job termination\nslurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +4036,4897751,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290871,198,"lurmstepd: error: *** JOB 3317098 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\nsrun: forcing job termination\nslurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +4037,4897814,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290870,199,"slurmstepd: error: *** JOB 3317098 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\nsrun: forcing job termination\nslurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +4038,4897836,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290852,217,"srun: got SIGCONT\nslurmstepd: error: *** JOB 3317098 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\nsrun: forcing job termination\nslurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +4039,4897954,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",290779,290,"srun: Job step aborted: Waiting up to 32 seconds for job step to finish.\nsrun: got SIGCONT\nslurmstepd: error: *** JOB 3317098 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\nsrun: forcing job termination\nslurmstepd: error: *** STEP 3317098.0 ON hkn0629 CANCELLED AT 2025-07-04T11:20:24 ***\n",log,selection_mouse +4040,4898689,"TERMINAL",0,0,"3118",,terminal_output +4041,4899516,"TERMINAL",0,0,"Every 1.0s: squeue --mehkn1993.localdomain: Fri Jul 4 11:56:14 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3316022 accelerat train_to tum_cte0 R 7:50:32\t 2 hkn[0507,0520]3316924 accelerat interact tum_cte0 R 1:20:32\t 1 hkn07333316923 accelerat interact tum_cte0 R 1:20:59\t 4 hkn[0625-0628]",,terminal_output +4042,4900534,"TERMINAL",0,0,"5331:00",,terminal_output +4043,4901586,"TERMINAL",0,0,"6441",,terminal_output +4044,4902727,"TERMINAL",0,0,"7552",,terminal_output +4045,4903678,"TERMINAL",0,0,"8663",,terminal_output +4046,4904704,"TERMINAL",0,0,"9774",,terminal_output +4047,4905757,"TERMINAL",0,0,"20996",,terminal_output +4048,4906814,"TERMINAL",0,0,"240407",,terminal_output +4049,4907825,"TERMINAL",0,0,"3118",,terminal_output +4050,4908866,"TERMINAL",0,0,"4229",,terminal_output +4051,4909934,"TERMINAL",0,0,"53310",,terminal_output +4052,4910962,"TERMINAL",0,0,"6441",,terminal_output 
+4053,4912009,"TERMINAL",0,0,"7552",,terminal_output +4054,4913071,"TERMINAL",0,0,"8663",,terminal_output +4055,4914112,"TERMINAL",0,0,"9774",,terminal_output +4056,4915203,"TERMINAL",0,0,"30885",,terminal_output +4057,4916200,"TERMINAL",0,0,"1996",,terminal_output +4058,4917257,"TERMINAL",0,0,"250507",,terminal_output +4059,4918313,"TERMINAL",0,0,"3118",,terminal_output +4060,4919369,"TERMINAL",0,0,"4229",,terminal_output +4061,4920385,"TERMINAL",0,0,"53320",,terminal_output +4062,4921431,"TERMINAL",0,0,"6441",,terminal_output +4063,4922482,"TERMINAL",0,0,"7552",,terminal_output +4064,4923574,"TERMINAL",0,0,"8663",,terminal_output +4065,4924568,"TERMINAL",0,0,"9774",,terminal_output +4066,4925662,"TERMINAL",0,0,"40885",,terminal_output +4067,4926750,"TERMINAL",0,0,"1996",,terminal_output +4068,4927771,"TERMINAL",0,0,"21:001:007",,terminal_output +4069,4928755,"TERMINAL",0,0,"3229",,terminal_output +4070,4929819,"TERMINAL",0,0,"53330",,terminal_output +4071,4930830,"TERMINAL",0,0,"6441",,terminal_output +4072,4931968,"TERMINAL",0,0,"7552",,terminal_output +4073,4932928,"TERMINAL",0,0,"8663",,terminal_output +4074,4934017,"TERMINAL",0,0,"9774",,terminal_output +4075,4935023,"TERMINAL",0,0,"50885",,terminal_output +4076,4936074,"TERMINAL",0,0,"1996",,terminal_output +4077,4937116,"TERMINAL",0,0,"210107",,terminal_output +4078,4938161,"TERMINAL",0,0,"3118",,terminal_output +4079,4939235,"TERMINAL",0,0,"4229",,terminal_output +4080,4940074,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",0,0,"",plaintext,tab +4081,4940075,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",83,0,"",plaintext,selection_mouse +4082,4940257,"TERMINAL",0,0,"53340",,terminal_output +4083,4941202,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",84,0,"",plaintext,selection_command +4084,4941292,"TERMINAL",0,0,"6441",,terminal_output +4085,4941643,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",83,1,"",plaintext,content +4086,4941727,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",82,1,"",plaintext,content +4087,4942414,"TERMINAL",0,0,"7552",,terminal_output +4088,4943280,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",82,0,"3",plaintext,content +4089,4943281,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",83,0,"",plaintext,selection_keyboard +4090,4943335,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",83,0,"6",plaintext,content +4091,4943336,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",84,0,"",plaintext,selection_keyboard +4092,4943445,"TERMINAL",0,0,"8663",,terminal_output +4093,4943671,"scripts_horeka/modelsize_scaling/lam/train_lam_38M.sbatch",83,0,"",plaintext,selection_command +4094,4944437,"TERMINAL",0,0,"9774",,terminal_output +4095,4945484,"TERMINAL",0,0,"7:00885",,terminal_output +4096,4946515,"TERMINAL",0,0,"1996",,terminal_output +4097,4947639,"TERMINAL",0,0,"220207",,terminal_output +4098,4948664,"TERMINAL",0,0,"3118",,terminal_output +4099,4949693,"TERMINAL",0,0,"4229",,terminal_output +4100,4950714,"TERMINAL",0,0,"53350",,terminal_output +4101,4951153,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",0,0,"",log,tab +4102,4951153,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291489,0,"",log,selection_mouse 
+4103,4951194,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291488,0,"",log,selection_command +4104,4951825,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291489,0,"",log,selection_mouse +4105,4951836,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291488,0,"",log,selection_command +4106,4951836,"TERMINAL",0,0,"6552",,terminal_output +4107,4952561,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291400,0,"",log,selection_mouse +4108,4952729,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291398,2,"of",log,selection_mouse +4109,4952868,"TERMINAL",0,0,"8663",,terminal_output +4110,4953011,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291398,52,"of 08:29:36 core-walltime\nJob Wall-clock time: 00:10",log,selection_mouse +4111,4953011,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291398,84,"of 08:29:36 core-walltime\nJob Wall-clock time: 00:10:37\nStarttime: Fri Jul 4 11:09:",log,selection_mouse +4112,4953011,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291398,121,"of 08:29:36 core-walltime\nJob Wall-clock time: 00:10:37\nStarttime: Fri Jul 4 11:09:47 2025\nEndtime: Fri Jul 4 11:20:24 ",log,selection_mouse +4113,4953011,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291398,125,"of 08:29:36 core-walltime\nJob Wall-clock time: 00:10:37\nStarttime: Fri Jul 4 11:09:47 2025\nEndtime: Fri Jul 4 11:20:24 2025",log,selection_mouse +4114,4953011,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291398,162,"of 08:29:36 core-walltime\nJob Wall-clock time: 00:10:37\nStarttime: Fri Jul 4 11:09:47 2025\nEndtime: Fri Jul 4 11:20:24 2025\nMemory Utilized: 85.06 GB (estimated",log,selection_mouse +4115,4953427,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291556,0,"",log,selection_mouse +4116,4953427,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291551,9,"estimated",log,selection_mouse +4117,4953683,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291516,44,"24 2025\nMemory Utilized: 85.06 GB (estimated",log,selection_mouse +4118,4953725,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291515,45,":24 2025\nMemory Utilized: 85.06 GB (estimated",log,selection_mouse +4119,4953763,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291513,47,"20:24 2025\nMemory Utilized: 85.06 GB (estimated",log,selection_mouse +4120,4953823,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291512,48,":20:24 2025\nMemory Utilized: 85.06 GB (estimated",log,selection_mouse 
+4121,4953823,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291510,50,"11:20:24 2025\nMemory Utilized: 85.06 GB (estimated",log,selection_mouse +4122,4953833,"TERMINAL",0,0,"9774",,terminal_output +4123,4953858,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291474,86,"4 11:09:47 2025\nEndtime: Fri Jul 4 11:20:24 2025\nMemory Utilized: 85.06 GB (estimated",log,selection_mouse +4124,4953883,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291472,88," 4 11:09:47 2025\nEndtime: Fri Jul 4 11:20:24 2025\nMemory Utilized: 85.06 GB (estimated",log,selection_mouse +4125,4953903,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291469,91,"Jul 4 11:09:47 2025\nEndtime: Fri Jul 4 11:20:24 2025\nMemory Utilized: 85.06 GB (estimated",log,selection_mouse +4126,4953946,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291439,121,"time: 00:10:37\nStarttime: Fri Jul 4 11:09:47 2025\nEndtime: Fri Jul 4 11:20:24 2025\nMemory Utilized: 85.06 GB (estimated",log,selection_mouse +4127,4953977,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291438,122," time: 00:10:37\nStarttime: Fri Jul 4 11:09:47 2025\nEndtime: Fri Jul 4 11:20:24 2025\nMemory Utilized: 85.06 GB (estimated",log,selection_mouse +4128,4953997,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291433,127,"clock time: 00:10:37\nStarttime: Fri Jul 4 11:09:47 2025\nEndtime: Fri Jul 4 11:20:24 2025\nMemory Utilized: 85.06 GB (estimated",log,selection_mouse +4129,4954024,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291379,181,"Efficiency: 42.39% of 08:29:36 core-walltime\nJob Wall-clock time: 00:10:37\nStarttime: Fri Jul 4 11:09:47 2025\nEndtime: Fri Jul 4 11:20:24 2025\nMemory Utilized: 85.06 GB (estimated",log,selection_mouse +4130,4954531,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291386,0,"",log,selection_mouse +4131,4954906,"TERMINAL",0,0,"10885",,terminal_output +4132,4955933,"TERMINAL",0,0,"1996",,terminal_output +4133,4957059,"TERMINAL",0,0,"230307",,terminal_output +4134,4958011,"TERMINAL",0,0,"3118",,terminal_output +4135,4959060,"TERMINAL",0,0,"4229",,terminal_output +4136,4960132,"TERMINAL",0,0,"5332:00",,terminal_output +4137,4961154,"TERMINAL",0,0,"6441",,terminal_output +4138,4962196,"TERMINAL",0,0,"7552",,terminal_output +4139,4963271,"TERMINAL",0,0,"8663",,terminal_output +4140,4964279,"TERMINAL",0,0,"9774",,terminal_output +4141,4965330,"TERMINAL",0,0,"20885",,terminal_output +4142,4966379,"TERMINAL",0,0,"1996",,terminal_output +4143,4967502,"TERMINAL",0,0,"240407",,terminal_output +4144,4968466,"TERMINAL",0,0,"3118",,terminal_output +4145,4969552,"TERMINAL",0,0,"4229",,terminal_output +4146,4970573,"TERMINAL",0,0,"53310",,terminal_output +4147,4971580,"TERMINAL",0,0,"6441",,terminal_output +4148,4972604,"TERMINAL",0,0,"7552",,terminal_output +4149,4973643,"TERMINAL",0,0,"8663",,terminal_output +4150,4974698,"TERMINAL",0,0,"9774",,terminal_output +4151,4975796,"TERMINAL",0,0,"30996",,terminal_output 
+4152,4976921,"TERMINAL",0,0,"250507",,terminal_output +4153,4977953,"TERMINAL",0,0,"3118",,terminal_output +4154,4978625,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291372,0,"",log,selection_mouse +4155,4978989,"TERMINAL",0,0,"4229",,terminal_output +4156,4979758,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291626,0,"",log,selection_mouse +4157,4980104,"TERMINAL",0,0,"53320",,terminal_output +4158,4980376,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291360,0,"",log,selection_mouse +4159,4981110,"TERMINAL",0,0,"6441",,terminal_output +4160,4981148,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/train_lam_model_size_scaling_38M_3317098.log",291188,0,"",log,selection_mouse +4161,4982175,"TERMINAL",0,0,"7552",,terminal_output +4162,4983299,"TERMINAL",0,0,"8663",,terminal_output +4163,4983370,"TERMINAL",0,0,"\r[tum_cte0515@hkn0733 logs_mihir]$ ",,terminal_output +4164,4984234,"TERMINAL",0,0,"9774",,terminal_output +4165,4985350,"TERMINAL",0,0,"40885",,terminal_output +4166,4986322,"TERMINAL",0,0,"1996",,terminal_output +4167,4987471,"TERMINAL",0,0,"22:002:007",,terminal_output +4168,4988429,"TERMINAL",0,0,"3118",,terminal_output +4169,4989518,"TERMINAL",0,0,"4229",,terminal_output +4170,4990520,"TERMINAL",0,0,"53330",,terminal_output +4171,4991568,"TERMINAL",0,0,"6441",,terminal_output +4172,4992593,"TERMINAL",0,0,"7552",,terminal_output +4173,4993715,"TERMINAL",0,0,"8663",,terminal_output +4174,4994233,"TERMINAL",0,0,"srun",,terminal_focus +4175,4994689,"TERMINAL",0,0,"9774",,terminal_output +4176,4995768,"TERMINAL",0,0,"50996",,terminal_output +4177,4996783,"TERMINAL",0,0,"210107",,terminal_output +4178,4997836,"TERMINAL",0,0,"3118",,terminal_output +4179,4998868,"TERMINAL",0,0,"4229",,terminal_output +4180,4999925,"TERMINAL",0,0,"53340",,terminal_output +4181,5000968,"TERMINAL",0,0,"6441",,terminal_output +4182,5002018,"TERMINAL",0,0,"7552",,terminal_output +4183,5003137,"TERMINAL",0,0,"8663",,terminal_output +4184,5004125,"TERMINAL",0,0,"9774",,terminal_output +4185,5005187,"TERMINAL",0,0,"8:00885",,terminal_output +4186,5006218,"TERMINAL",0,0,"1996",,terminal_output +4187,5007255,"TERMINAL",0,0,"220207",,terminal_output +4188,5008383,"TERMINAL",0,0,"3118",,terminal_output +4189,5009349,"TERMINAL",0,0,"4229",,terminal_output +4190,5010404,"TERMINAL",0,0,"53350",,terminal_output +4191,5011459,"TERMINAL",0,0,"6441",,terminal_output +4192,5012513,"TERMINAL",0,0,"7552",,terminal_output +4193,5013589,"TERMINAL",0,0,"8663",,terminal_output +4194,5014600,"TERMINAL",0,0,"9774",,terminal_output +4195,5015737,"TERMINAL",0,0,"10885",,terminal_output +4196,5016703,"TERMINAL",0,0,"1996",,terminal_output +4197,5017782,"TERMINAL",0,0,"231318",,terminal_output +4198,5018804,"TERMINAL",0,0,"4229",,terminal_output +4199,5019932,"TERMINAL",0,0,"5333:00",,terminal_output +4200,5020956,"TERMINAL",0,0,"6441",,terminal_output +4201,5021921,"TERMINAL",0,0,"7552",,terminal_output +4202,5022972,"TERMINAL",0,0,"8663",,terminal_output +4203,5024021,"TERMINAL",0,0,"9774",,terminal_output +4204,5025059,"TERMINAL",0,0,"20885",,terminal_output +4205,5026159,"TERMINAL",0,0,"1996",,terminal_output +4206,5027203,"TERMINAL",0,0,"240407",,terminal_output +4207,5028248,"TERMINAL",0,0,"3118",,terminal_output +4208,5029306,"TERMINAL",0,0,"4229",,terminal_output 
+4209,5030348,"TERMINAL",0,0,"53310",,terminal_output +4210,5031396,"TERMINAL",0,0,"6441",,terminal_output +4211,5032444,"TERMINAL",0,0,"7552",,terminal_output +4212,5033551,"TERMINAL",0,0,"8663",,terminal_output +4213,5034578,"TERMINAL",0,0,"9774",,terminal_output +4214,5035599,"TERMINAL",0,0,"30885",,terminal_output +4215,5036631,"TERMINAL",0,0,"1996",,terminal_output +4216,5037684,"TERMINAL",0,0,"250507",,terminal_output +4217,5038774,"TERMINAL",0,0,"3229",,terminal_output +4218,5039799,"TERMINAL",0,0,"53320",,terminal_output +4219,5040924,"TERMINAL",0,0,"6441",,terminal_output +4220,5041948,"TERMINAL",0,0,"7552",,terminal_output +4221,5042923,"TERMINAL",0,0,"8663",,terminal_output +4222,5044000,"TERMINAL",0,0,"9774",,terminal_output +4223,5045022,"TERMINAL",0,0,"40885",,terminal_output +4224,5046045,"TERMINAL",0,0,"1996",,terminal_output +4225,5047069,"TERMINAL",0,0,"23:003:007",,terminal_output +4226,5048194,"TERMINAL",0,0,"3118",,terminal_output +4227,5049218,"TERMINAL",0,0,"4229",,terminal_output +4228,5050211,"TERMINAL",0,0,"53330",,terminal_output +4229,5051235,"TERMINAL",0,0,"6441",,terminal_output +4230,5052286,"TERMINAL",0,0,"7552",,terminal_output +4231,5053325,"TERMINAL",0,0,"8663",,terminal_output +4232,5054380,"TERMINAL",0,0,"9774",,terminal_output +4233,5055404,"TERMINAL",0,0,"50885",,terminal_output +4234,5056447,"TERMINAL",0,0,"1996",,terminal_output +4235,5057501,"TERMINAL",0,0,"210107",,terminal_output +4236,5058536,"TERMINAL",0,0,"3118",,terminal_output +4237,5059664,"TERMINAL",0,0,"4229",,terminal_output +4238,5060687,"TERMINAL",0,0,"53340",,terminal_output +4239,5061712,"TERMINAL",0,0,"6441",,terminal_output +4240,5062725,"TERMINAL",0,0,"7663",,terminal_output +4241,5063769,"TERMINAL",0,0,"9774",,terminal_output +4242,5064888,"TERMINAL",0,0,"9:00885",,terminal_output +4243,5065860,"TERMINAL",0,0,"1996",,terminal_output +4244,5066938,"TERMINAL",0,0,"220207",,terminal_output +4245,5067958,"TERMINAL",0,0,"3118",,terminal_output +4246,5068988,"TERMINAL",0,0,"4229",,terminal_output +4247,5070110,"TERMINAL",0,0,"srun",,terminal_focus +4248,5070141,"TERMINAL",0,0,"53350",,terminal_output +4249,5071098,"TERMINAL",0,0,"6441",,terminal_output +4250,5072168,"TERMINAL",0,0,"7552",,terminal_output +4251,5073181,"TERMINAL",0,0,"8663",,terminal_output +4252,5074228,"TERMINAL",0,0,"9774",,terminal_output +4253,5075280,"TERMINAL",0,0,"10885",,terminal_output +4254,5076355,"TERMINAL",0,0,"1996",,terminal_output +4255,5077379,"TERMINAL",0,0,"230307",,terminal_output +4256,5078511,"TERMINAL",0,0,"3118",,terminal_output +4257,5079527,"TERMINAL",0,0,"4229",,terminal_output +4258,5080518,"TERMINAL",0,0,"5334:00",,terminal_output +4259,5081567,"TERMINAL",0,0,"6441",,terminal_output +4260,5082703,"TERMINAL",0,0,"7552",,terminal_output +4261,5083725,"TERMINAL",0,0,"8663",,terminal_output +4262,5084751,"TERMINAL",0,0,"9774",,terminal_output +4263,5085737,"TERMINAL",0,0,"20996",,terminal_output +4264,5086798,"TERMINAL",0,0,"240407",,terminal_output +4265,5087834,"TERMINAL",0,0,"3118",,terminal_output +4266,5088961,"TERMINAL",0,0,"4229",,terminal_output +4267,5089916,"TERMINAL",0,0,"53310",,terminal_output +4268,5091001,"TERMINAL",0,0,"6441",,terminal_output +4269,5092021,"TERMINAL",0,0,"7552",,terminal_output +4270,5093155,"TERMINAL",0,0,"8663",,terminal_output +4271,5094175,"TERMINAL",0,0,"9774",,terminal_output +4272,5095193,"TERMINAL",0,0,"30885",,terminal_output +4273,5096225,"TERMINAL",0,0,"1996",,terminal_output +4274,5097245,"TERMINAL",0,0,"250507",,terminal_output 
+4275,5098315,"TERMINAL",0,0,"3118",,terminal_output +4276,5099404,"TERMINAL",0,0,"4229",,terminal_output diff --git a/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-66c1dffb-e395-48ae-8676-da72a2b6a5cb1751540512935-2025_07_03-13.02.33.440/source.csv b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-66c1dffb-e395-48ae-8676-da72a2b6a5cb1751540512935-2025_07_03-13.02.33.440/source.csv new file mode 100644 index 0000000000000000000000000000000000000000..e5d9d54ece6ba7b5bedacf5bcbef6f12ef54496b --- /dev/null +++ b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-66c1dffb-e395-48ae-8676-da72a2b6a5cb1751540512935-2025_07_03-13.02.33.440/source.csv @@ -0,0 +1,2727 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +2,499,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"1:02:33 PM [info] Activating crowd-code\n1:02:33 PM [info] Recording started\n1:02:33 PM [info] Initializing git provider using file system watchers...\n1:02:33 PM [info] Git repository found\n1:02:33 PM [info] Git provider initialized successfully\n1:02:33 PM [info] Initial git state: [object Object]\n",Log,tab +3,688,"extension-output-pdoom-org.crowd-code-#1-crowd-code",1,0,"",Log,selection_command +4,890,"extension-output-pdoom-org.crowd-code-#1-crowd-code",2,0,"",Log,selection_command +5,3350,"TERMINAL",0,0,"/bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt",,terminal_command +6,3406,"TERMINAL",0,0,"]633;E;2025-07-03 13:02:36 /bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt;7c27158d-a2f0-42de-86f9-3cc2f3a1b706]633;C",,terminal_output +7,3432,"TERMINAL",0,0,"]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash]633;D;0",,terminal_output +8,15794,"TERMINAL",0,0,"salloc --time=03:30:00 --partition=accelerated --nodes=2 --ntasks-per-node=5 --gres=gpu:4 --cpus-per-task=10",,terminal_command +9,15848,"TERMINAL",0,0,"]633;E;2025-07-03 13:02:49 salloc --time=03:30:00 --partition=accelerated --nodes=2 --ntasks-per-node=5 --gres=gpu:4 --cpus-per-task=10 ;dd830a57-5de1-42ee-9f5b-a467117add9f]633;Csalloc: Granted job allocation 3314634\r\n",,terminal_output +10,16009,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output +11,43015,"TERMINAL",0,0,"salloc: Nodes hkn[0615-0616] are ready for job\r\n",,terminal_output +12,43855,"TERMINAL",0,0,"]0;tum_cte0515@hkn0615:~/Projects/jafar[?2004h[tum_cte0515@hkn0615 jafar]$ ",,terminal_output +13,301819,"TERMINAL",0,0,"q",,terminal_output +14,301884,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +15,302031,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +16,302097,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +17,302175,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +18,302323,"TERMINAL",0,0,"[?25l[?2004l\r[?25h[?1049h(B[?7hEvery 1.0s: squeue --mehkn0615.localdomain: Thu Jul 3 13:07:35 2025JOBID PARTITION NAME USER ST\tTIME 
NODES NODELIST(REASON)3313564 accelerat train_to tum_cte0 R 5:36:19\t 4 hkn[0405,0532,0729,0814]3314634 accelerat interact tum_cte0 R\t4:46\t 2 hkn[0615-0616]",,terminal_output +19,303301,"TERMINAL",0,0,"6207",,terminal_output +20,303467,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0615:~/Projects/jafar[?2004h[tum_cte0515@hkn0615 jafar]$ ",,terminal_output +21,303772,"TERMINAL",0,0,"s",,terminal_output +22,303886,"TERMINAL",0,0,"[?25lm[?25h[?25li[?25h",,terminal_output +23,304111,"TERMINAL",0,0,"[?25l[?2004l\r[?25h",,terminal_output +24,305621,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: nvidia-smihkn0615.localdomain: Thu Jul 3 13:07:37 2025Thu Jul 3 13:07:37 2025\r+-----------------------------------------------------------------------------------------+\r| NVIDIA-SMI 570.133.20Driver Version: 570.133.20 CUDA Version: 12.8 |\r|-----------------------------------------+------------------------+----------------------+\r| GPU NamePersistence-M | Bus-IdDisp.A | Volatile Uncorr. ECC |\r| Fan Temp PerfPwr:Usage/Cap |Memory-Usage | GPU-Util Compute M. |\r|||MIG M. |\r|=========================================+========================+======================|\r| 0 NVIDIA A100-SXM4-40GBOn | 00000000:31:00.0 Off |0 |\r| N/A 45C P055W / 300W |\t 27MiB / 40960MiB |\t 0%\t Default |\r|||Disabled |\r+-----------------------------------------+------------------------+----------------------+\r| 1 NVIDIA A100-SXM4-40GBOn | 00000000:4B:00.0 Off |0 |\r| N/A 45C P059W / 300W |\t 27MiB / 40960MiB |\t 0%\t Default |\r|||Disabled |\r+-----------------------------------------+------------------------+----------------------+\r| 2 NVIDIA A100-SXM4-40GBOn | 00000000:CA:00.0 Off |0 |\r| N/A 45C P054W / 300W |\t 27MiB / 40960MiB |\t 0%\t Default |\r|||Disabled |\r+-----------------------------------------+------------------------+----------------------+\r| 3 NVIDIA A100-SXM4-40GBOn | 00000000:E3:00.0 Off |0 |\r| N/A 45C P056W / 300W |\t 27MiB / 40960MiB |\t 0%\t Default |\r|||Disabled |\r+-----------------------------------------+------------------------+----------------------+\r+-----------------------------------------------------------------------------------------+\r| Processes:|\r| GPU GI CIPID Type Process nameGPU Memory |\r|ID IDUsage\t |\r|=========================================================================================|\r| 0 N/A N/A2569G /usr/libexec/Xorg17MiB |\r| 1 N/A N/A2569G /usr/libexec/Xorg17MiB |\r| 2 N/A N/A2569G /usr/libexec/Xorg17MiB |\r| 3 N/A N/A2569G /usr/libexec/Xorg17MiB |\r+-----------------------------------------------------------------------------------------+",,terminal_output +25,306799,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0615:~/Projects/jafar[?2004h[tum_cte0515@hkn0615 jafar]$ ",,terminal_output +26,308706,"TERMINAL",0,0,"s",,terminal_output +27,308778,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +28,308927,"TERMINAL",0,0,"[?25lu[?25h[?25lr[?25h",,terminal_output +29,309114,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +30,309227,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +31,309339,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +32,309445,"TERMINAL",0,0,"[?25l.[?25h[?25lv[?25h",,terminal_output +33,309660,"TERMINAL",0,0,"env/",,terminal_output +34,309968,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +35,310034,"TERMINAL",0,0,"[?25lq[?25h",,terminal_output +36,310596,"TERMINAL",0,0,"in/",,terminal_output +37,310906,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +38,311083,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output 
+39,311205,"TERMINAL",0,0,"tivate",,terminal_output +40,311503,"TERMINAL",0,0,"[?25l[?2004l\r[?25h]0;tum_cte0515@hkn0615:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0615 jafar]$ ",,terminal_output +41,318872,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,tab +42,323328,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M copy.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,tab +43,337729,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload 
devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log_checkpoint_interval=500 \\n --log \\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\n --tags tokenizer model-size-scaling 38M \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir\n",shellscript,tab +44,339033,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",714,0,"",shellscript,selection_mouse +45,339627,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",116,0,"",shellscript,selection_mouse +46,340457,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",424,0,"",shellscript,selection_mouse +47,340464,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",423,0,"",shellscript,selection_command +48,340592,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",423,1,"M",shellscript,selection_mouse +49,340609,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",424,0,"",shellscript,selection_command +50,340674,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",420,4,"_37M",shellscript,selection_mouse +51,340675,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",307,117,"cratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M",shellscript,selection_mouse +52,340676,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",197,227,"/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M",shellscript,selection_mouse +53,340712,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",138,286,"per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M",shellscript,selection_mouse +54,340738,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",99,325,"--partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M",shellscript,selection_mouse +55,340765,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",97,327,"H --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M",shellscript,selection_mouse +56,340795,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",71,353,"TCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH 
--output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M",shellscript,selection_mouse +57,340852,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",42,382,"ATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M",shellscript,selection_mouse +58,340854,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",23,401,"BATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M",shellscript,selection_mouse +59,340953,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",22,402,"SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M",shellscript,selection_mouse +60,341024,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",20,404,"\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M",shellscript,selection_mouse +61,341130,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,424,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=15:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_tokenizer_model_size_scaling_37M",shellscript,selection_mouse +62,342866,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,424,"",shellscript,content +63,343818,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,1,"",shellscript,content +64,345287,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",1,0,"",shellscript,selection_command +65,345773,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",25,0,"",shellscript,selection_command +66,345869,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",32,0,"",shellscript,selection_command +67,345869,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",33,0,"",shellscript,selection_command 
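[annotation] Note the checkpoint-path line in the recorded sbatch scripts: CHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id. Because "_" is a valid identifier character, bash parses $job_name_ as a single (unset) variable, so the intended <job_name>_<job_id> suffix silently collapses to just the job id. The edits recorded below (a one-character deletion and an inserted "/" at offset 370) apply one fix, $job_name/$slurm_job_id, which the later terminal replay confirms; an equivalent fix that keeps the flat underscore layout would be ${job_name}_${slurm_job_id} — the braced form is my suggestion, not something the session types.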
+68,347734,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",63,0,"",shellscript,selection_command +69,347885,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",93,0,"",shellscript,selection_command +70,348035,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",119,0,"",shellscript,selection_command +71,348180,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",120,0,"",shellscript,selection_command +72,348350,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",223,0,"",shellscript,selection_command +73,348467,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",289,0,"",shellscript,selection_command +74,348617,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",290,0,"",shellscript,selection_command +75,348867,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",314,0,"",shellscript,selection_command +76,349186,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",300,14,"",shellscript,content +77,349654,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",299,1,"",shellscript,content +78,350135,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",299,0,"""",shellscript,content +79,350136,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",300,0,"",shellscript,selection_keyboard +80,350802,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",300,0,"s",shellscript,content +81,350803,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",301,0,"",shellscript,selection_keyboard +82,351480,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",300,1,"",shellscript,content +83,351543,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",300,0,"d",shellscript,content +84,351544,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",301,0,"",shellscript,selection_keyboard +85,351706,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",301,0,"e",shellscript,content +86,351707,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",302,0,"",shellscript,selection_keyboard +87,351820,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",302,0,"b",shellscript,content +88,351821,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",303,0,"",shellscript,selection_keyboard +89,351933,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",303,0,"u",shellscript,content +90,351934,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",304,0,"",shellscript,selection_keyboard +91,352066,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",304,0,"g",shellscript,content +92,352067,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",305,0,"",shellscript,selection_keyboard +93,353372,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",305,0,"""",shellscript,content +94,353373,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",306,0,"",shellscript,selection_keyboard +95,353851,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",306,0,"j",shellscript,content +96,353851,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",307,0,"",shellscript,selection_keyboard +97,354597,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",306,1,"",shellscript,content +98,354735,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",305,0,"",shellscript,selection_command +99,354975,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",322,0,"",shellscript,selection_command +100,355201,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",333,0,"",shellscript,selection_command +101,355372,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",332,1,"",shellscript,content +102,355792,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",321,11,"",shellscript,content 
+103,356294,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",320,1,"",shellscript,content +104,358266,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",320,0,"0",shellscript,content +105,358267,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",321,0,"",shellscript,selection_keyboard +106,358505,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",321,0,"0",shellscript,content +107,358506,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",322,0,"",shellscript,selection_keyboard +108,358644,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",322,0,"0",shellscript,content +109,358645,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",323,0,"",shellscript,selection_keyboard +110,359065,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",323,0,"0",shellscript,content +111,359066,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",324,0,"",shellscript,selection_keyboard +112,359292,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",323,0,"",shellscript,selection_command +113,359666,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",325,0,"",shellscript,selection_command +114,360041,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",342,0,"",shellscript,selection_command +115,360632,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",384,0,"",shellscript,selection_command +116,361574,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",371,0,"",shellscript,selection_command +117,361778,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",370,1,"",shellscript,content +118,362514,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",370,0,"/",shellscript,content +119,362515,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",371,0,"",shellscript,selection_keyboard +120,362664,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",370,0,"",shellscript,selection_command +121,364464,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",681,0,"",shellscript,selection_mouse +122,366216,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",320,0,"",shellscript,selection_mouse +123,366587,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",320,4,"",shellscript,content +124,366603,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",319,0,"",shellscript,selection_command +125,367967,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",320,0,"",shellscript,selection_command +126,369878,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",320,0,"d",shellscript,content +127,369879,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",321,0,"",shellscript,selection_keyboard +128,370441,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",321,0,"e",shellscript,content +129,370442,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",322,0,"",shellscript,selection_keyboard +130,370500,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",322,0,"b",shellscript,content +131,370501,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",323,0,"",shellscript,selection_keyboard +132,370623,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",323,0,"u",shellscript,content +133,370624,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",324,0,"",shellscript,selection_keyboard +134,370941,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",324,0,"g",shellscript,content +135,370942,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",325,0,"",shellscript,selection_keyboard +136,371103,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",325,0,"-",shellscript,content 
+137,371104,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",326,0,"",shellscript,selection_keyboard +138,371362,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",326,0,"m",shellscript,content +139,371363,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",327,0,"",shellscript,selection_keyboard +140,371589,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",327,0,"i",shellscript,content +141,371590,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",328,0,"",shellscript,selection_keyboard +142,371734,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",328,0,"h",shellscript,content +143,371735,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",329,0,"",shellscript,selection_keyboard +144,371813,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",329,0,"i",shellscript,content +145,371814,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",330,0,"",shellscript,selection_keyboard +146,371896,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",330,0,"r",shellscript,content +147,371897,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",331,0,"",shellscript,selection_keyboard +148,373296,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",331,0,"""",shellscript,content +149,373297,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",332,0,"",shellscript,selection_keyboard +150,374096,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",325,0,"",shellscript,selection_command +151,374688,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",320,0,"""",shellscript,content +152,374689,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",321,0,"",shellscript,selection_keyboard +153,375364,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",565,0,"",shellscript,selection_mouse +154,376034,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",470,0,"",shellscript,selection_mouse +155,377435,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",830,0,"",shellscript,selection_mouse +156,378822,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",829,0,"",shellscript,selection_mouse +157,389964,"TERMINAL",0,0,"[?25lsh[?25h[?25lh[?25h",,terminal_output +158,390074,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +159,390238,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +160,390350,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +161,390554,"TERMINAL",0,0,"ripts_",,terminal_output +162,391835,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +163,391947,"TERMINAL",0,0,"oreka/",,terminal_output +164,392525,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +165,392670,"TERMINAL",0,0,"odelsize_scaling/",,terminal_output +166,394111,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +167,394317,"TERMINAL",0,0,"okenizer/",,terminal_output +168,394804,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +169,395002,"TERMINAL",0,0,"ester.sh ",,terminal_output +170,397313,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",0,0,"",shellscript,tab +171,398329,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",545,0,"",shellscript,selection_mouse +172,398946,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",525,0,"",shellscript,selection_mouse +173,399708,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",525,0,"\n ",shellscript,content +174,401455,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",530,0,"-",shellscript,content +175,401456,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",531,0,"",shellscript,selection_keyboard +176,401578,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",531,0,"-",shellscript,content 
+177,401579,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",532,0,"",shellscript,selection_keyboard +178,402167,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",475,0,"",shellscript,selection_command +179,408191,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",471,0,"",shellscript,selection_command +180,409783,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",526,7,"",shellscript,content +181,409989,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",597,0," --log_checkpoint_interval=5 \\n",shellscript,content +182,409990,"scripts_horeka/modelsize_scaling/tokenizer/tester.sh",631,36,"",shellscript,content +183,424618,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",0,0,"",shellscript,tab +184,428045,"TERMINAL",0,0,"bash",,terminal_focus +185,430263,"TERMINAL",0,0,"[?25l[?2004l\r[?25h\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\r\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\r\n\r\njob_name=""debug""\r\nslurm_job_id=""debug-mihir""\r\n\r\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\nenv | grep SLURM\r\n\r\nsrun python train_tokenizer.py \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=96 \\r\n --min_lr=1e-4 \\r\n --max_lr=1e-4 \\r\n --log_image_interval=500 \\r\n --log_checkpoint_interval=5 \\r\n --log \\r\n --name=tokenizer-model-size-scaling-38M-$slurm_job_id \\r\n --tags tokenizer model-size-scaling 38M \\r\n --entity instant-uv \\r\n --project jafar \\r\n --data_dir $tf_records_dir\r\n",,terminal_output +186,430556,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=5(x2)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2115172\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0615\r\nSLURM_JOB_START_TIME=1751540569\r\nSLURM_STEP_NODELIST=hkn0615\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751553169\r\nSLURM_PMI2_SRUN_PORT=45493\r\nSLURM_CPUS_ON_NODE=50\r\nSLURM_JOB_CPUS_PER_NODE=50(x2)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=10\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=2\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3314634\r\nSLURM_PTY_PORT=44647\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.199\r\nSLURM_PTY_WIN_ROW=58\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=10\r\nSLURM_NTASKS=10\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e29.hkn0615\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.199\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=185\r\nSLURM_NODELIST=hkn[0615-0616]\r\nSLURM_SRUN_COMM_PORT=42127\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=10\r\nSLURM_NNODES=2\r\nSLURM_SUBMIT_HOST=hkn1991.localdomain\r\nSLURM_JOB_ID=3314634\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0615\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=5\r\nSLURM_STEP_LAUNCHER_PORT=42127\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0615-0616]\r\n",,terminal_output 
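[annotation] The srun replay that follows fails with CUDA_ERROR_INVALID_DEVICE: invalid device ordinal for CUDA:4. The recorded interactive allocation requested --ntasks-per-node=5 but only --gres=gpu:4, so the fifth task on each node asks JAX for a GPU ordinal that does not exist. The clean fix is to launch at most four tasks per node; a minimal sketch of the related env-pinning pattern (my assumption, not code from the recorded session) makes exactly one device visible to each task before JAX initializes:

    # Sketch only: pin each SLURM task to one of the node's four GPUs
    # before importing jax, so no task requests the missing ordinal 4.
    import os

    local_id = int(os.environ.get("SLURM_LOCALID", "0"))
    gpus_per_node = 4  # matches --gres=gpu:4 in the recorded salloc
    os.environ["CUDA_VISIBLE_DEVICES"] = str(local_id % gpus_per_node)

    import jax  # import only after CUDA_VISIBLE_DEVICES is set

    print(jax.local_device_count())  # 1 visible device per task

With five tasks per node the modulo would still double-book GPU 0, which is why shrinking --ntasks-per-node to match the GPU count is the preferable fix.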
+187,430665,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +188,436167,"TERMINAL",0,0,"srun",,terminal_focus +189,449777,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",0,0,"",shellscript,tab +190,449779,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",1183,0,"",shellscript,selection_mouse +191,449825,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",1182,0,"",shellscript,selection_command +192,450800,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab +193,450801,"extension-output-pdoom-org.crowd-code-#1-crowd-code",298,0,"",Log,selection_mouse +194,452444,"scripts_horeka/modelsize_scaling/tokenizer/train_tokenizer_37M.sbatch",0,0,"",shellscript,tab +195,452466,"TERMINAL",0,0,"bash",,terminal_focus +196,454105,"TERMINAL",0,0,"idling",,terminal_command +197,454179,"TERMINAL",0,0,"]633;E;2025-07-03 13:10:07 idling;0b978d27-9494-48c4-9d0c-5b0440e5ea51]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1991.localdomain: Thu Jul 3 13:10:07 2025Partition dev_cpuonly: 11 nodes idle\rPartition cpuonly: 127 nodes idle\rPartition dev_accelerated:\t 3 nodes idle\rPartition accelerated:\t 1 nodes idle\rPartition dev_accelerated-h100 :\t 1 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 7 nodes idle",,terminal_output +198,455214,"TERMINAL",0,0,"8",,terminal_output +199,456266,"TERMINAL",0,0,"9",,terminal_output +200,457343,"TERMINAL",0,0,"10",,terminal_output +201,458341,"TERMINAL",0,0,"1",,terminal_output +202,459390,"TERMINAL",0,0,"2",,terminal_output +203,459862,"TERMINAL",0,0,"srun",,terminal_focus +204,460230,"TERMINAL",0,0,"2025-07-03 13:10:13.561716: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-03 13:10:13.561716: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-03 13:10:13.561717: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-03 13:10:13.561714: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-03 13:10:13.561717: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-03 13:10:13.564121: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-03 13:10:13.564118: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-03 13:10:13.564119: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-03 13:10:13.564118: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory 
for plugin cuFFT when one has already been registered\r\n2025-07-03 13:10:13.564113: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n",,terminal_output +205,460302,"TERMINAL",0,0,"WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751541013.642437 2116607 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751541013.642324 2116609 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751541013.642336 2116610 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751541013.642320 2116611 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751541013.642655 2116608 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751541013.642287 2057644 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751541013.642376 2057645 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751541013.642282 2057646 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751541013.642392 2057647 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751541013.642473 2057648 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751541013.648302 2116607 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751541013.648297 2116608 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751541013.648290 2116609 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 
00:00:1751541013.648295 2116610 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751541013.648292 2116611 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751541013.652457 2057645 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751541013.652459 2057647 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751541013.652484 2057648 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751541013.652645 2057646 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751541013.652920 2057644 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\n",,terminal_output +206,460426,"TERMINAL",0,0,"3",,terminal_output +207,460545,"TERMINAL",0,0,"W0000 00:00:1751541013.897989 2057644 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.898015 2057644 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.898017 2057644 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.898019 2057644 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.897993 2057645 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.898012 2057645 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.898014 2057645 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.898016 2057645 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.897986 2057646 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.898015 2057646 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.898017 2057646 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.898019 2057646 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.897981 2057647 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.898011 2057647 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.898014 2057647 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.898016 2057647 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.897994 2057648 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.898012 2057648 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.898014 2057648 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.898016 2057648 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904899 2116607 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904943 2116607 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904945 2116607 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904947 2116607 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904894 2116608 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904939 2116608 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904941 2116608 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904942 2116608 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904897 2116609 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904943 2116609 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904945 2116609 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904947 2116609 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904895 2116610 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904938 2116610 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904941 2116610 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904943 2116610 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904894 2116611 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904937 2116611 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904940 2116611 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751541013.904942 2116611 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +208,461560,"TERMINAL",0,0,"4",,terminal_output +209,462583,"TERMINAL",0,0,"5",,terminal_output +210,463606,"TERMINAL",0,0,"6",,terminal_output +211,464631,"TERMINAL",0,0,"7",,terminal_output +212,465642,"TERMINAL",0,0,"8",,terminal_output +213,466685,"TERMINAL",0,0,"200",,terminal_output +214,467805,"TERMINAL",0,0,"1",,terminal_output +215,468761,"TERMINAL",0,0,"2",,terminal_output +216,469801,"TERMINAL",0,0,"3",,terminal_output +217,470878,"TERMINAL",0,0,"45",,terminal_output +218,471901,"TERMINAL",0,0,"5",,terminal_output +219,472925,"TERMINAL",0,0,"6",,terminal_output +220,474052,"TERMINAL",0,0,"7",,terminal_output +221,475078,"TERMINAL",0,0,"8",,terminal_output +222,476050,"TERMINAL",0,0,"9",,terminal_output +223,477092,"TERMINAL",0,0,"30",,terminal_output +224,478149,"TERMINAL",0,0,"1",,terminal_output +225,479175,"TERMINAL",0,0,"2",,terminal_output +226,480298,"TERMINAL",0,0,"3",,terminal_output +227,481398,"TERMINAL",0,0,"4",,terminal_output +228,482348,"TERMINAL",0,0,"5",,terminal_output +229,483347,"TERMINAL",0,0,"6",,terminal_output +230,484395,"TERMINAL",0,0,"7",,terminal_output +231,485516,"TERMINAL",0,0,"8",,terminal_output +232,486545,"TERMINAL",0,0,"9",,terminal_output +233,487607,"TERMINAL",0,0,"40",,terminal_output +234,488592,"TERMINAL",0,0,"1",,terminal_output +235,489615,"TERMINAL",0,0,"2",,terminal_output +236,490642,"TERMINAL",0,0,"3",,terminal_output +237,491669,"TERMINAL",0,0,"4",,terminal_output +238,492791,"TERMINAL",0,0,"6",,terminal_output +239,493751,"TERMINAL",0,0,"7",,terminal_output +240,494788,"TERMINAL",0,0,"8",,terminal_output +241,495825,"TERMINAL",0,0,"9",,terminal_output +242,496001,"TERMINAL",0,0,"W0000 00:00:1751541049.358593 2057644 gpu_device.cc:2341] Cannot dlopen some GPU libraries. 
Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751541049.358508 2057645 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751541049.358597 2057646 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751541049.358537 2057647 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751541049.358541 2057648 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751541049.360308 2116611 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751541049.361015 2116607 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751541049.361062 2116608 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751541049.361036 2116609 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751541049.360956 2116610 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. 
Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +243,496870,"TERMINAL",0,0,"50",,terminal_output +244,497836,"TERMINAL",0,0,"2025-07-03 13:10:51.117718: W external/xla/xla/service/platform_util.cc:220] unable to create StreamExecutor for CUDA:4: CUDA error: Failed call to cuDeviceGet: CUDA_ERROR_INVALID_DEVICE: invalid device ordinal\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 739, in backends\r\n backend = _init_backend(platform)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 825, in _init_backend\r\n backend = registration.factory()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 525, in factory\r\n return xla_client.make_c_api_client(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jaxlib/xla_client.py"", line 149, in make_c_api_client\r\n2025-07-03 13:10:51.137832: W external/xla/xla/service/platform_util.cc:220] unable to create StreamExecutor for CUDA:4: CUDA error: Failed call to cuDeviceGet: CUDA_ERROR_INVALID_DEVICE: invalid device ordinal\r\n return _xla.get_c_api_client(plugin_name, options, distributed_client)\r\njaxlib._jax.XlaRuntimeError: INTERNAL: no supported devices found for platform CUDA\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 125, in \r\n num_devices = jax.device_count()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 904, in device_count\r\n return int(get_backend(backend).device_count())\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 873, in get_backend\r\n return _get_backend_uncached(platform)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 852, in _get_backend_uncached\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 739, in backends\r\n bs = backends()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 755, in backends\r\n raise RuntimeError(err_msg)\r\nRuntimeError: Unable to initialize backend 'cuda': INTERNAL: no supported devices found for platform CUDA (you may need to uninstall the failing plugin package, or set JAX_PLATFORMS=cpu to skip this backend.)\r\n backend = _init_backend(platform)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 825, in _init_backend\r\n backend = registration.factory()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 525, in factory\r\n return 
xla_client.make_c_api_client(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jaxlib/xla_client.py"", line 149, in make_c_api_client\r\n return _xla.get_c_api_client(plugin_name, options, distributed_client)\r\njaxlib._jax.XlaRuntimeError: INTERNAL: no supported devices found for platform CUDA\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 125, in \r\n num_devices = jax.device_count()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 904, in device_count\r\n return int(get_backend(backend).device_count())\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 873, in get_backend\r\n return _get_backend_uncached(platform)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 852, in _get_backend_uncached\r\n bs = backends()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 755, in backends\r\n raise RuntimeError(err_msg)\r\nRuntimeError: Unable to initialize backend 'cuda': INTERNAL: no supported devices found for platform CUDA (you may need to uninstall the failing plugin package, or set JAX_PLATFORMS=cpu to skip this backend.)\r\n",,terminal_output +245,497949,"TERMINAL",0,0,"1",,terminal_output +246,498951,"TERMINAL",0,0,"2",,terminal_output +247,500061,"TERMINAL",0,0,"3",,terminal_output +248,501331,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py",0,0,"import functools\nimport jax\n\nimport tensorflow as tf\n\n# reserve GPU memory for JAX only if tensorflow is built with GPU support\ntf.config.experimental.set_visible_devices([], ""GPU"")\n\n\n# --- TensorFlow function for processing: slicing, normalization ---\ndef _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c, seed):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The width of each frame.\n image_c: The number of channels in each frame.\n seed: The seed for the random number generator.\n Returns:\n A TensorFlow tensor representing the processed video sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32, seed=seed\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return 
processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef _create_processed_dataset_from_file(file_path, image_h, image_w, image_c, seq_len, num_parallel_calls, seed):\n """"""Creates a fully processed dataset from a single TFRecord file.""""""\n dataset = tf.data.TFRecordDataset([file_path])\n \n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n # Filter out episodes that are too short\n def filter_short_episodes(episode_tensor):\n return tf.shape(episode_tensor)[0] >= seq_len\n \n dataset = dataset.filter(filter_short_episodes)\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n seed=seed,\n )\n dataset = dataset.map(tf_process_fn, num_parallel_calls=num_parallel_calls)\n \n return dataset\n\n\ndef get_dataloader(\n tfrecord_paths: list[str],\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 10,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n seed: int = 42,\n cycle_length: int = 4,\n block_length: int = 1,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert (\n global_batch_size % num_processes == 0\n ), f""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n def dataset_fn(file_path):\n return _create_processed_dataset_from_file(\n file_path, image_h, image_w, image_c, seq_len, num_parallel_calls, seed\n )\n \n dataset = tf.data.Dataset.from_tensor_slices(tfrecord_paths)\n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n \n dataset = dataset.interleave(\n dataset_fn,\n cycle_length=cycle_length,\n block_length=block_length,\n num_parallel_calls=num_parallel_calls,\n deterministic=False\n )\n \n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, reshuffle_each_iteration=True\n )\n\n dataset = dataset.repeat(None)\n dataset = dataset.batch(per_process_batch_size, drop_remainder=True)\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset.as_numpy_iterator()\n",python,tab +249,501382,"TERMINAL",0,0,"4",,terminal_output +250,502077,"TERMINAL",0,0,"5",,terminal_output +251,503119,"TERMINAL",0,0,"6",,terminal_output +252,504172,"TERMINAL",0,0,"7",,terminal_output +253,504522,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport 
time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} 
devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args,\n )\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer__\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n seed=args.seed,\n )\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in dataloader) # type: ignore\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % 
args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +363,618618,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 739, in backends\r\n backend = _init_backend(platform)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 825, in _init_backend\r\n backend = registration.factory()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 525, in factory\r\n return xla_client.make_c_api_client(\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jaxlib/xla_client.py"", line 149, in make_c_api_client\r\n return _xla.get_c_api_client(plugin_name, options, distributed_client)\r\njaxlib._jax.XlaRuntimeError: INTERNAL: Getting local topologies failed: Error 1: GetKeyValue() timed out with key: cuda:local_topology/cuda/4 and duration: 2m\r\n\r\nError 2: GetKeyValue() timed out with key: cuda:local_topology/cuda/9 and duration: 2m\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_tokenizer.py"", line 125, in \r\n num_devices = jax.device_count()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 904, in device_count\r\n return int(get_backend(backend).device_count())\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 873, in get_backend\r\n return _get_backend_uncached(platform)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 852, in _get_backend_uncached\r\n bs = backends()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/xla_bridge.py"", line 755, in backends\r\n raise RuntimeError(err_msg)\r\nRuntimeError: Unable to initialize backend 'cuda': INTERNAL: Getting local topologies failed: Error 1: GetKeyValue() timed out with key: cuda:local_topology/cuda/4 and duration: 2m\r\n\r\nError 2: GetKeyValue() timed out with key: cuda:local_topology/cuda/9 and duration: 2m (you may need to uninstall the failing plugin package, or set JAX_PLATFORMS=cpu to skip this backend.)\r\n",,terminal_output +364,618750,"TERMINAL",0,0,"2",,terminal_output +365,619787,"TERMINAL",0,0,"3",,terminal_output +366,620828,"TERMINAL",0,0,"4",,terminal_output +367,621868,"TERMINAL",0,0,"5",,terminal_output +368,622943,"TERMINAL",0,0,"6",,terminal_output +369,623947,"TERMINAL",0,0,"7",,terminal_output +370,624991,"TERMINAL",0,0,"8",,terminal_output +371,626035,"TERMINAL",0,0,"99",,terminal_output +372,627075,"TERMINAL",0,0,"3:00",,terminal_output +373,628165,"TERMINAL",0,0,"1",,terminal_output +374,629189,"TERMINAL",0,0,"2",,terminal_output +375,630213,"TERMINAL",0,0,"3",,terminal_output +376,631260,"TERMINAL",0,0,"4",,terminal_output +377,632299,"TERMINAL",0,0,"5",,terminal_output +378,633362,"TERMINAL",0,0,"6",,terminal_output +379,634379,"TERMINAL",0,0,"7",,terminal_output +380,635422,"TERMINAL",0,0,"8",,terminal_output +381,636463,"TERMINAL",0,0,"9",,terminal_output +382,637509,"TERMINAL",0,0,"10",,terminal_output +383,638548,"TERMINAL",0,0,"1",,terminal_output +384,639634,"TERMINAL",0,0,"2",,terminal_output +385,640637,"TERMINAL",0,0,"3",,terminal_output +386,641688,"TERMINAL",0,0,"5",,terminal_output +387,642810,"TERMINAL",0,0,"6",,terminal_output +388,643769,"TERMINAL",0,0,"7",,terminal_output +389,644820,"TERMINAL",0,0,"8",,terminal_output +390,645880,"TERMINAL",0,0,"9",,terminal_output +391,646893,"TERMINAL",0,0,"20",,terminal_output +392,648031,"TERMINAL",0,0,"1",,terminal_output +393,649054,"TERMINAL",0,0,"2",,terminal_output +394,650023,"TERMINAL",0,0,"3",,terminal_output +395,651064,"TERMINAL",0,0,"4",,terminal_output 
+396,652108,"TERMINAL",0,0,"5",,terminal_output +397,653253,"TERMINAL",0,0,"6",,terminal_output +398,654280,"TERMINAL",0,0,"7",,terminal_output +399,655301,"TERMINAL",0,0,"8",,terminal_output +400,656326,"TERMINAL",0,0,"930",,terminal_output +401,657316,"TERMINAL",0,0,"30",,terminal_output +402,658393,"TERMINAL",0,0,"1",,terminal_output +403,659403,"TERMINAL",0,0,"2",,terminal_output +404,660444,"TERMINAL",0,0,"3",,terminal_output +405,661483,"TERMINAL",0,0,"4",,terminal_output +406,662527,"TERMINAL",0,0,"5",,terminal_output +407,663561,"TERMINAL",0,0,"6",,terminal_output +408,664607,"TERMINAL",0,0,"7",,terminal_output +409,665661,"TERMINAL",0,0,"8",,terminal_output +410,666691,"TERMINAL",0,0,"40",,terminal_output +411,667735,"TERMINAL",0,0,"1",,terminal_output +412,668785,"TERMINAL",0,0,"2",,terminal_output +413,670297,"TERMINAL",0,0,"3",,terminal_output +414,671381,"TERMINAL",0,0,"4",,terminal_output +415,672416,"TERMINAL",0,0,"5",,terminal_output +416,673433,"TERMINAL",0,0,"6",,terminal_output +417,674461,"TERMINAL",0,0,"7",,terminal_output +418,675502,"TERMINAL",0,0,"8",,terminal_output +419,676546,"TERMINAL",0,0,"9",,terminal_output +420,677597,"TERMINAL",0,0,"50",,terminal_output +421,678651,"TERMINAL",0,0,"1",,terminal_output +422,679673,"TERMINAL",0,0,"2",,terminal_output +423,680802,"TERMINAL",0,0,"4",,terminal_output +424,681756,"TERMINAL",0,0,"5",,terminal_output +425,682852,"TERMINAL",0,0,"6",,terminal_output +426,683835,"TERMINAL",0,0,"7",,terminal_output +427,684877,"TERMINAL",0,0,"8",,terminal_output +428,685916,"TERMINAL",0,0,"9",,terminal_output +429,687049,"TERMINAL",0,0,"4:00",,terminal_output +430,688002,"TERMINAL",0,0,"1",,terminal_output +431,689094,"TERMINAL",0,0,"2",,terminal_output +432,690087,"TERMINAL",0,0,"3",,terminal_output +433,691145,"TERMINAL",0,0,"4",,terminal_output +434,692269,"TERMINAL",0,0,"5",,terminal_output +435,693292,"TERMINAL",0,0,"6",,terminal_output +436,694319,"TERMINAL",0,0,"7",,terminal_output +437,695300,"TERMINAL",0,0,"8",,terminal_output +438,696365,"TERMINAL",0,0,"9",,terminal_output +439,697457,"TERMINAL",0,0,"10",,terminal_output +440,698430,"TERMINAL",0,0,"1",,terminal_output +441,699464,"TERMINAL",0,0,"2",,terminal_output +442,700504,"TERMINAL",0,0,"3",,terminal_output +443,701543,"TERMINAL",0,0,"4",,terminal_output +444,702582,"TERMINAL",0,0,"5",,terminal_output +445,703626,"TERMINAL",0,0,"6",,terminal_output +446,704669,"TERMINAL",0,0,"7",,terminal_output +447,705715,"TERMINAL",0,0,"9",,terminal_output +448,706750,"TERMINAL",0,0,"20",,terminal_output +449,707835,"TERMINAL",0,0,"1",,terminal_output +450,708832,"TERMINAL",0,0,"2",,terminal_output +451,709872,"TERMINAL",0,0,"3",,terminal_output +452,710922,"TERMINAL",0,0,"4",,terminal_output +453,711955,"TERMINAL",0,0,"5",,terminal_output +454,713005,"TERMINAL",0,0,"6",,terminal_output +455,714029,"TERMINAL",0,0,"7",,terminal_output +456,715070,"TERMINAL",0,0,"8",,terminal_output +457,716109,"TERMINAL",0,0,"9",,terminal_output +458,717150,"TERMINAL",0,0,"30",,terminal_output +459,718191,"TERMINAL",0,0,"1",,terminal_output +460,719233,"TERMINAL",0,0,"2",,terminal_output +461,720328,"TERMINAL",0,0,"3",,terminal_output +462,721350,"TERMINAL",0,0,"4",,terminal_output +463,722400,"TERMINAL",0,0,"5",,terminal_output +464,723403,"TERMINAL",0,0,"6",,terminal_output +465,724433,"TERMINAL",0,0,"7",,terminal_output +466,725469,"TERMINAL",0,0,"8",,terminal_output +467,726507,"TERMINAL",0,0,"9",,terminal_output +468,727542,"TERMINAL",0,0,"40",,terminal_output 
+469,728580,"TERMINAL",0,0,"1",,terminal_output +470,729646,"TERMINAL",0,0,"21",,terminal_output +471,730656,"TERMINAL",0,0,"3",,terminal_output +472,731695,"TERMINAL",0,0,"5",,terminal_output +473,732820,"TERMINAL",0,0,"6",,terminal_output +474,733847,"TERMINAL",0,0,"7",,terminal_output +475,734877,"TERMINAL",0,0,"8",,terminal_output +476,735892,"TERMINAL",0,0,"9",,terminal_output +477,736916,"TERMINAL",0,0,"50",,terminal_output +478,737936,"TERMINAL",0,0,"1",,terminal_output +479,738974,"TERMINAL",0,0,"2",,terminal_output +480,740016,"TERMINAL",0,0,"3",,terminal_output +481,741052,"TERMINAL",0,0,"4",,terminal_output +482,742093,"TERMINAL",0,0,"5",,terminal_output +483,743167,"TERMINAL",0,0,"6",,terminal_output +484,744187,"TERMINAL",0,0,"7",,terminal_output +485,745312,"TERMINAL",0,0,"8",,terminal_output +486,746246,"TERMINAL",0,0,"9",,terminal_output +487,747283,"TERMINAL",0,0,"5:00",,terminal_output +488,748319,"TERMINAL",0,0,"1",,terminal_output +489,749421,"TERMINAL",0,0,"2",,terminal_output +490,750432,"TERMINAL",0,0,"3",,terminal_output +491,751442,"TERMINAL",0,0,"4",,terminal_output +492,752478,"TERMINAL",0,0,"5",,terminal_output +493,753526,"TERMINAL",0,0,"6",,terminal_output +494,754569,"TERMINAL",0,0,"7",,terminal_output +495,755607,"TERMINAL",0,0,"8",,terminal_output +496,756690,"TERMINAL",0,0,"9",,terminal_output +497,757703,"TERMINAL",0,0,"11",,terminal_output +498,758734,"TERMINAL",0,0,"2",,terminal_output +499,759777,"TERMINAL",0,0,"3",,terminal_output +500,760884,"TERMINAL",0,0,"4",,terminal_output +501,761874,"TERMINAL",0,0,"5",,terminal_output +502,762926,"TERMINAL",0,0,"6",,terminal_output +503,763946,"TERMINAL",0,0,"7",,terminal_output +504,765076,"TERMINAL",0,0,"8",,terminal_output +505,766024,"TERMINAL",0,0,"90",,terminal_output +506,767060,"TERMINAL",0,0,"20",,terminal_output +507,768100,"TERMINAL",0,0,"1",,terminal_output +508,769162,"TERMINAL",0,0,"2",,terminal_output +509,770183,"TERMINAL",0,0,"3",,terminal_output +510,771220,"TERMINAL",0,0,"4",,terminal_output +511,772260,"TERMINAL",0,0,"5",,terminal_output +512,773305,"TERMINAL",0,0,"6",,terminal_output +513,774341,"TERMINAL",0,0,"7",,terminal_output +514,775389,"TERMINAL",0,0,"8",,terminal_output +515,776457,"TERMINAL",0,0,"9",,terminal_output +516,777519,"TERMINAL",0,0,"30",,terminal_output +517,778504,"TERMINAL",0,0,"1",,terminal_output +518,779539,"TERMINAL",0,0,"2",,terminal_output +519,780586,"TERMINAL",0,0,"3",,terminal_output +520,781623,"TERMINAL",0,0,"4",,terminal_output +521,782665,"TERMINAL",0,0,"5",,terminal_output +522,783814,"TERMINAL",0,0,"7",,terminal_output +523,784744,"TERMINAL",0,0,"8",,terminal_output +524,785848,"TERMINAL",0,0,"9",,terminal_output +525,786834,"TERMINAL",0,0,"40",,terminal_output +526,787871,"TERMINAL",0,0,"1",,terminal_output +527,788911,"TERMINAL",0,0,"2",,terminal_output +528,789954,"TERMINAL",0,0,"3",,terminal_output +529,790997,"TERMINAL",0,0,"4",,terminal_output +530,792041,"TERMINAL",0,0,"5",,terminal_output +531,793080,"TERMINAL",0,0,"6",,terminal_output +532,794158,"TERMINAL",0,0,"7",,terminal_output +533,795182,"TERMINAL",0,0,"8",,terminal_output +534,796211,"TERMINAL",0,0,"9",,terminal_output +535,797236,"TERMINAL",0,0,"50",,terminal_output +536,798100,"TERMINAL",0,0,"2025-07-03 13:15:51.436483: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service.cc:1689] Shutdown barrier in coordination service has failed:\r\nDEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::1119967448801037984::0. 
This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 3/10.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:4. Some timed out task names:\r\n/job:jax_worker/replica:0/task:7\r\n/job:jax_worker/replica:0/task:3\r\n [type.googleapis.com/tensorflow.BarrierError='\n\x1dShutdown::1119967448801037984'] [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\nThis suggests that the workers are out of sync. Either at least one worker (a) crashed early due to program error or scheduler events (e.g. preemption, eviction), (b) was too fast in its execution, or (c) too slow / hanging. Check the logs (both the program and scheduler events) for an earlier error to identify the root cause.\r\n2025-07-03 13:15:51.436566: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service.cc:1730] Use error polling to propagate the following error to all tasks: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::1119967448801037984::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 3/10.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:4. Some timed out task names:\r\n/job:jax_worker/replica:0/task:7\r\n/job:jax_worker/replica:0/task:3\r\n [type.googleapis.com/tensorflow.BarrierError='\n\x1dShutdown::1119967448801037984'] [type.googleapis.com/tensorflow.CoordinationServiceError=''] [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n2025-07-03 13:15:51.436823: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:427] Polled an error from coordination service (this can be an error from this or another task).\r\n2025-07-03 13:15:51.436996: F external/xla/xla/pjrt/distributed/client.h:80] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::1119967448801037984::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 3/10.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:4. 
Some timed out task names:\r\n/job:jax_worker/replica:0/task:7\r\n/job:jax_worker/replica:0/task:3\r\n [type.googleapis.com/tensorflow.BarrierError='\n\x1dShutdown::1119967448801037984'] [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n\r\nRPC: /tensorflow.CoordinationService/PollForError [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n2025-07-03 13:15:51.437035: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:427] Polled an error from coordination service (this can be an error from this or another task).\r\n2025-07-03 13:15:51.437021: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:427] Polled an error from coordination service (this can be an error from this or another task).\r\n2025-07-03 13:15:51.436974: F external/xla/xla/pjrt/distributed/client.h:80] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: INTERNAL: Failed to disconnect from coordination service with status: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::1119967448801037984::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 3/10.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:4. Some timed out task names:\r\n/job:jax_worker/replica:0/task:7\r\n/job:jax_worker/replica:0/task:3\r\n [type.googleapis.com/tensorflow.BarrierError='\n\x1dShutdown::1119967448801037984'] [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n\r\nRPC: /tensorflow.CoordinationService/ShutdownTask [type.googleapis.com/tensorflow.CoordinationServiceError='']Proceeding with agent shutdown anyway. This is usually caused by an earlier error during execution. Check the logs of (a) this task, (b) the leader (usually slice 0 task 0) and (c) the scheduler (e.g. preemption, eviction) for an earlier error to debug further. [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n2025-07-03 13:15:51.437097: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:427] Polled an error from coordination service (this can be an error from this or another task).\r\n2025-07-03 13:15:51.437178: F external/xla/xla/pjrt/distributed/client.h:80] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::1119967448801037984::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 3/10.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:4. 
Some timed out task names:\r\n/job:jax_worker/replica:0/task:7\r\n/job:jax_worker/replica:0/task:3\r\n [type.googleapis.com/tensorflow.BarrierError='\n\x1dShutdown::1119967448801037984'] [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n\r\n2025-07-03 13:15:51.437272: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:427] Polled an error from coordination service (this can be an error from this or another task).\r\nRPC: /tensorflow.CoordinationService/PollForError [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n2025-07-03 13:15:51.437301: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:427] Polled an error from coordination service (this can be an error from this or another task).\r\n2025-07-03 13:15:51.437481: F external/xla/xla/pjrt/distributed/client.h:80] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::1119967448801037984::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 3/10.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:4. Some timed out task names:\r\n/job:jax_worker/replica:0/task:7\r\n/job:jax_worker/replica:0/task:3\r\n [type.googleapis.com/tensorflow.BarrierError='\n\x1dShutdown::1119967448801037984'] [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n\r\n2025-07-03 13:15:51.437409: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:427] Polled an error from coordination service (this can be an error from this or another task).\r\nRPC: /tensorflow.CoordinationService/PollForError [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n2025-07-03 13:15:51.437284: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:427] Polled an error from coordination service (this can be an error from this or another task).\r\n2025-07-03 13:15:51.437577: F external/xla/xla/pjrt/distributed/client.h:80] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::1119967448801037984::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 3/10.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:4. 
Some timed out task names:\r\n/job:jax_worker/replica:0/task:7\r\n/job:jax_worker/replica:0/task:3\r\n [type.googleapis.com/tensorflow.BarrierError='\n\x1dShutdown::1119967448801037984'] [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n\r\n2025-07-03 13:15:51.437281: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:427] Polled an error from coordination service (this can be an error from this or another task).\r\nRPC: /tensorflow.CoordinationService/PollForError [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n2025-07-03 13:15:51.437551: F external/xla/xla/pjrt/distributed/client.h:80] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::1119967448801037984::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 3/10.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:4. Some timed out task names:\r\n/job:jax_worker/replica:0/task:7\r\n/job:jax_worker/replica:0/task:3\r\n [type.googleapis.com/tensorflow.BarrierError='\n\x1dShutdown::1119967448801037984'] [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n\r\nRPC: /tensorflow.CoordinationService/PollForError [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n2025-07-03 13:15:51.437547: F external/xla/xla/pjrt/distributed/client.h:80] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::1119967448801037984::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 3/10.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:4. Some timed out task names:\r\n/job:jax_worker/replica:0/task:7\r\n/job:jax_worker/replica:0/task:3\r\n [type.googleapis.com/tensorflow.BarrierError='\n\x1dShutdown::1119967448801037984'] [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n\r\nRPC: /tensorflow.CoordinationService/PollForError [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n2025-07-03 13:15:51.437564: F external/xla/xla/pjrt/distributed/client.h:80] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::1119967448801037984::0. This usually happens because a task triggered the barrier too early or too slowly. 
Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 3/10.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:4. Some timed out task names:\r\n/job:jax_worker/replica:0/task:7\r\n/job:jax_worker/replica:0/task:3\r\n [type.googleapis.com/tensorflow.BarrierError='\n\x1dShutdown::1119967448801037984'] [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n\r\nRPC: /tensorflow.CoordinationService/PollForError [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n2025-07-03 13:15:51.437603: F external/xla/xla/pjrt/distributed/client.h:80] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::1119967448801037984::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 3/10.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:4. Some timed out task names:\r\n/job:jax_worker/replica:0/task:7\r\n/job:jax_worker/replica:0/task:3\r\n [type.googleapis.com/tensorflow.BarrierError='\n\x1dShutdown::1119967448801037984'] [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n\r\nRPC: /tensorflow.CoordinationService/PollForError [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n2025-07-03 13:15:51.437805: F external/xla/xla/pjrt/distributed/client.h:80] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::1119967448801037984::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 3/10.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:4. 
Some timed out task names:\r\n/job:jax_worker/replica:0/task:7\r\n/job:jax_worker/replica:0/task:3\r\n [type.googleapis.com/tensorflow.BarrierError='\n\x1dShutdown::1119967448801037984'] [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n\r\nRPC: /tensorflow.CoordinationService/PollForError [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n",,terminal_output +537,798277,"TERMINAL",0,0,"1",,terminal_output +538,798524,"TERMINAL",0,0,"srun: error: hkn0615: task 4: Aborted (core dumped)\r\n",,terminal_output +539,798590,"TERMINAL",0,0,"srun: error: hkn0616: tasks 7,9: Aborted (core dumped)\r\n",,terminal_output +540,798713,"TERMINAL",0,0,"srun: error: hkn0615: tasks 2-3: Aborted (core dumped)\r\n",,terminal_output +541,798776,"TERMINAL",0,0,"srun: error: hkn0616: tasks 5-6: Aborted (core dumped)\r\n",,terminal_output +542,798894,"TERMINAL",0,0,"srun: error: hkn0615: task 1: Aborted (core dumped)\r\n",,terminal_output +543,799077,"TERMINAL",0,0,"srun: error: hkn0616: task 8: Aborted (core dumped)\r\n",,terminal_output +544,799206,"TERMINAL",0,0,"srun: error: hkn0615: task 0: Aborted (core dumped)\r\n]0;tum_cte0515@hkn0615:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0615 jafar]$ ",,terminal_output +545,799316,"TERMINAL",0,0,"2",,terminal_output +546,800363,"TERMINAL",0,0,"3",,terminal_output +547,801395,"TERMINAL",0,0,"4",,terminal_output +548,802452,"TERMINAL",0,0,"5",,terminal_output +549,803489,"TERMINAL",0,0,"6",,terminal_output +550,804509,"TERMINAL",0,0,"7",,terminal_output +551,805558,"TERMINAL",0,0,"8",,terminal_output +552,806612,"TERMINAL",0,0,"9",,terminal_output +553,807682,"TERMINAL",0,0,"6:00",,terminal_output +554,808676,"TERMINAL",0,0,"1",,terminal_output +555,809751,"TERMINAL",0,0,"3",,terminal_output +556,810758,"TERMINAL",0,0,"4",,terminal_output +557,811787,"TERMINAL",0,0,"5",,terminal_output +558,812825,"TERMINAL",0,0,"6",,terminal_output +559,813923,"TERMINAL",0,0,"7",,terminal_output +560,814946,"TERMINAL",0,0,"8",,terminal_output +561,815976,"TERMINAL",0,0,"9",,terminal_output +562,816994,"TERMINAL",0,0,"10",,terminal_output +563,818017,"TERMINAL",0,0,"1",,terminal_output +564,819056,"TERMINAL",0,0,"2",,terminal_output +565,820095,"TERMINAL",0,0,"3",,terminal_output +566,821192,"TERMINAL",0,0,"4",,terminal_output +567,822216,"TERMINAL",0,0,"5",,terminal_output +568,823240,"TERMINAL",0,0,"6",,terminal_output +569,824265,"TERMINAL",0,0,"7",,terminal_output +570,825291,"TERMINAL",0,0,"8",,terminal_output +571,826431,"TERMINAL",0,0,"9",,terminal_output +572,827370,"TERMINAL",0,0,"20",,terminal_output +573,828404,"TERMINAL",0,0,"1",,terminal_output +574,829499,"TERMINAL",0,0,"2",,terminal_output +575,830558,"TERMINAL",0,0,"3 75",,terminal_output +576,831579,"TERMINAL",0,0,"4",,terminal_output +577,832619,"TERMINAL",0,0,"5",,terminal_output +578,833684,"TERMINAL",0,0,"6",,terminal_output +579,834702,"TERMINAL",0,0,"8",,terminal_output +580,835745,"TERMINAL",0,0,"9",,terminal_output +581,836785,"TERMINAL",0,0,"30",,terminal_output +582,837884,"TERMINAL",0,0,"1",,terminal_output +583,838901,"TERMINAL",0,0,"2",,terminal_output +584,839932,"TERMINAL",0,0,"3",,terminal_output +585,840956,"TERMINAL",0,0,"4",,terminal_output +586,841993,"TERMINAL",0,0,"5",,terminal_output +587,843035,"TERMINAL",0,0,"6",,terminal_output +588,844130,"TERMINAL",0,0,"7",,terminal_output +589,845117,"TERMINAL",0,0,"8",,terminal_output +590,846178,"TERMINAL",0,0,"96",,terminal_output +591,847198,"TERMINAL",0,0,"40",,terminal_output 
+592,848332,"TERMINAL",0,0,"1",,terminal_output +593,849274,"TERMINAL",0,0,"2",,terminal_output +594,850311,"TERMINAL",0,0,"3",,terminal_output +595,851403,"TERMINAL",0,0,"4",,terminal_output +596,852425,"TERMINAL",0,0,"5",,terminal_output +597,853474,"TERMINAL",0,0,"6",,terminal_output +598,854484,"TERMINAL",0,0,"7",,terminal_output +599,855522,"TERMINAL",0,0,"8",,terminal_output +600,856566,"TERMINAL",0,0,"9",,terminal_output +601,857678,"TERMINAL",0,0,"50",,terminal_output +602,858702,"TERMINAL",0,0,"1",,terminal_output +603,859689,"TERMINAL",0,0,"3",,terminal_output +604,860729,"TERMINAL",0,0,"4",,terminal_output +605,861766,"TERMINAL",0,0,"5",,terminal_output +606,862808,"TERMINAL",0,0,"6",,terminal_output +607,863851,"TERMINAL",0,0,"7",,terminal_output +608,864917,"TERMINAL",0,0,"8",,terminal_output +609,865935,"TERMINAL",0,0,"9",,terminal_output +610,867068,"TERMINAL",0,0,"7:00",,terminal_output +611,868092,"TERMINAL",0,0,"1",,terminal_output +612,869115,"TERMINAL",0,0,"2",,terminal_output +613,870085,"TERMINAL",0,0,"3",,terminal_output +614,871173,"TERMINAL",0,0,"4",,terminal_output +615,872189,"TERMINAL",0,0,"5",,terminal_output +616,873315,"TERMINAL",0,0,"6",,terminal_output +617,874338,"TERMINAL",0,0,"7",,terminal_output +618,875308,"TERMINAL",0,0,"8",,terminal_output +619,876338,"TERMINAL",0,0,"9",,terminal_output +620,877410,"TERMINAL",0,0,"10",,terminal_output +621,878616,"TERMINAL",0,0,"1",,terminal_output +622,879659,"TERMINAL",0,0,"2",,terminal_output +623,880792,"TERMINAL",0,0,"4",,terminal_output +624,881748,"TERMINAL",0,0,"5",,terminal_output +625,882839,"TERMINAL",0,0,"6",,terminal_output +626,883859,"TERMINAL",0,0,"7",,terminal_output +627,884867,"TERMINAL",0,0,"8",,terminal_output +628,885911,"TERMINAL",0,0,"92",,terminal_output +629,887039,"TERMINAL",0,0,"20",,terminal_output +630,887992,"TERMINAL",0,0,"1",,terminal_output +631,889087,"TERMINAL",0,0,"2",,terminal_output +632,890095,"TERMINAL",0,0,"3",,terminal_output +633,891135,"TERMINAL",0,0,"4",,terminal_output +634,892165,"TERMINAL",0,0,"5",,terminal_output +635,893283,"TERMINAL",0,0,"6",,terminal_output +636,894251,"TERMINAL",0,0,"7",,terminal_output +637,895334,"TERMINAL",0,0,"8",,terminal_output +638,896355,"TERMINAL",0,0,"9",,terminal_output +639,897482,"TERMINAL",0,0,"30",,terminal_output +640,898508,"TERMINAL",0,0,"1",,terminal_output +641,899532,"TERMINAL",0,0,"2",,terminal_output +642,900528,"TERMINAL",0,0,"3",,terminal_output +643,901578,"TERMINAL",0,0,"4",,terminal_output +644,902584,"TERMINAL",0,0,"5",,terminal_output +645,903627,"TERMINAL",0,0,"6",,terminal_output +646,904669,"TERMINAL",0,0,"7",,terminal_output +647,905710,"TERMINAL",0,0,"9",,terminal_output +648,906751,"TERMINAL",0,0,"40",,terminal_output +649,907825,"TERMINAL",0,0,"1",,terminal_output +650,908850,"TERMINAL",0,0,"2",,terminal_output +651,909974,"TERMINAL",0,0,"3",,terminal_output +652,910916,"TERMINAL",0,0,"4",,terminal_output +653,912022,"TERMINAL",0,0,"5",,terminal_output +654,913049,"TERMINAL",0,0,"6",,terminal_output +655,914047,"TERMINAL",0,0,"7",,terminal_output +656,915097,"TERMINAL",0,0,"8",,terminal_output +657,916134,"TERMINAL",0,0,"9",,terminal_output +658,917250,"TERMINAL",0,0,"50",,terminal_output +659,918269,"TERMINAL",0,0,"1",,terminal_output +660,919256,"TERMINAL",0,0,"2",,terminal_output +661,920374,"TERMINAL",0,0,"3",,terminal_output +662,921348,"TERMINAL",0,0,"41",,terminal_output +663,922492,"TERMINAL",0,0,"5",,terminal_output +664,923468,"TERMINAL",0,0,"6",,terminal_output 
+665,924510,"TERMINAL",0,0,"7",,terminal_output +666,925509,"TERMINAL",0,0,"8",,terminal_output +667,926555,"TERMINAL",0,0,"9",,terminal_output +668,927598,"TERMINAL",0,0,"8:00",,terminal_output +669,928647,"TERMINAL",0,0,"1",,terminal_output +670,929682,"TERMINAL",0,0,"37",,terminal_output +671,930722,"TERMINAL",0,0,"4",,terminal_output +672,931768,"TERMINAL",0,0,"5",,terminal_output +673,932810,"TERMINAL",0,0,"6",,terminal_output +674,933850,"TERMINAL",0,0,"7",,terminal_output +675,934920,"TERMINAL",0,0,"8",,terminal_output +676,935940,"TERMINAL",0,0,"9",,terminal_output +677,937007,"TERMINAL",0,0,"10",,terminal_output +678,938024,"TERMINAL",0,0,"1",,terminal_output +679,939158,"TERMINAL",0,0,"2",,terminal_output +680,940100,"TERMINAL",0,0,"3",,terminal_output +681,941142,"TERMINAL",0,0,"4",,terminal_output +682,942230,"TERMINAL",0,0,"5",,terminal_output +683,943226,"TERMINAL",0,0,"6",,terminal_output +684,944279,"TERMINAL",0,0,"7",,terminal_output +685,945343,"TERMINAL",0,0,"8",,terminal_output +686,946347,"TERMINAL",0,0,"9",,terminal_output +687,947388,"TERMINAL",0,0,"20",,terminal_output +688,948429,"TERMINAL",0,0,"1",,terminal_output +689,949527,"TERMINAL",0,0,"2",,terminal_output +690,950534,"TERMINAL",0,0,"3",,terminal_output +691,951543,"TERMINAL",0,0,"44",,terminal_output +692,952587,"TERMINAL",0,0,"5",,terminal_output +693,953630,"TERMINAL",0,0,"6",,terminal_output +694,954668,"TERMINAL",0,0,"7",,terminal_output +695,955708,"TERMINAL",0,0,"9",,terminal_output +696,956749,"TERMINAL",0,0,"30",,terminal_output +697,957793,"TERMINAL",0,0,"1",,terminal_output +698,958922,"TERMINAL",0,0,"2",,terminal_output +699,959961,"TERMINAL",0,0,"3",,terminal_output +700,960921,"TERMINAL",0,0,"4",,terminal_output +701,961993,"TERMINAL",0,0,"5",,terminal_output +702,963121,"TERMINAL",0,0,"6",,terminal_output +703,964048,"TERMINAL",0,0,"7",,terminal_output +704,965169,"TERMINAL",0,0,"8",,terminal_output +705,966135,"TERMINAL",0,0,"9",,terminal_output +706,967175,"TERMINAL",0,0,"40",,terminal_output +707,968237,"TERMINAL",0,0,"1",,terminal_output +708,969367,"TERMINAL",0,0,"2",,terminal_output +709,970333,"TERMINAL",0,0,"3",,terminal_output +710,971418,"TERMINAL",0,0,"4",,terminal_output +711,972439,"TERMINAL",0,0,"5",,terminal_output +712,973463,"TERMINAL",0,0,"6",,terminal_output +713,974473,"TERMINAL",0,0,"7",,terminal_output +714,975516,"TERMINAL",0,0,"8",,terminal_output +715,976560,"TERMINAL",0,0,"9",,terminal_output +716,977603,"TERMINAL",0,0,"50",,terminal_output +717,978638,"TERMINAL",0,0,"1",,terminal_output +718,979683,"TERMINAL",0,0,"3",,terminal_output +719,980726,"TERMINAL",0,0,"45",,terminal_output +720,981770,"TERMINAL",0,0,"5",,terminal_output +721,982811,"TERMINAL",0,0,"6",,terminal_output +722,983856,"TERMINAL",0,0,"7",,terminal_output +723,984893,"TERMINAL",0,0,"8",,terminal_output +724,985957,"TERMINAL",0,0,"9",,terminal_output +725,986977,"TERMINAL",0,0,"9:00",,terminal_output +726,988020,"TERMINAL",0,0,"1",,terminal_output +727,989063,"TERMINAL",0,0,"2",,terminal_output +728,990166,"TERMINAL",0,0,"3",,terminal_output +729,991156,"TERMINAL",0,0,"4",,terminal_output +730,992197,"TERMINAL",0,0,"5",,terminal_output +731,993241,"TERMINAL",0,0,"6",,terminal_output +732,994363,"TERMINAL",0,0,"7",,terminal_output +733,995329,"TERMINAL",0,0,"8",,terminal_output +734,996401,"TERMINAL",0,0,"9",,terminal_output +735,997404,"TERMINAL",0,0,"10",,terminal_output +736,998552,"TERMINAL",0,0,"1",,terminal_output +737,999486,"TERMINAL",0,0,"2",,terminal_output 
+738,1000547,"TERMINAL",0,0,"39",,terminal_output +739,1001632,"TERMINAL",0,0,"4",,terminal_output +740,1002608,"TERMINAL",0,0,"5",,terminal_output +741,1003648,"TERMINAL",0,0,"6",,terminal_output +742,1004687,"TERMINAL",0,0,"8",,terminal_output +743,1005732,"TERMINAL",0,0,"9",,terminal_output +744,1006776,"TERMINAL",0,0,"20",,terminal_output +745,1007818,"TERMINAL",0,0,"1",,terminal_output +746,1008859,"TERMINAL",0,0,"2",,terminal_output +747,1009901,"TERMINAL",0,0,"3",,terminal_output +748,1010942,"TERMINAL",0,0,"4",,terminal_output +749,1012175,"TERMINAL",0,0,"5",,terminal_output +750,1013029,"TERMINAL",0,0,"6",,terminal_output +751,1014118,"TERMINAL",0,0,"7",,terminal_output +752,1015111,"TERMINAL",0,0,"8",,terminal_output +753,1016168,"TERMINAL",0,0,"9",,terminal_output +754,1017189,"TERMINAL",0,0,"30",,terminal_output +755,1018315,"TERMINAL",0,0,"1",,terminal_output +756,1019338,"TERMINAL",0,0,"2",,terminal_output +757,1020315,"TERMINAL",0,0,"3",,terminal_output +758,1021404,"TERMINAL",0,0,"4",,terminal_output +759,1022418,"TERMINAL",0,0,"5",,terminal_output +760,1023454,"TERMINAL",0,0,"6",,terminal_output +761,1024562,"TERMINAL",0,0,"7",,terminal_output +762,1025589,"TERMINAL",0,0,"8",,terminal_output +763,1026616,"TERMINAL",0,0,"9",,terminal_output +764,1027615,"TERMINAL",0,0,"40",,terminal_output +765,1028655,"TERMINAL",0,0,"1",,terminal_output +766,1029695,"TERMINAL",0,0,"3",,terminal_output +767,1030735,"TERMINAL",0,0,"4",,terminal_output +768,1031776,"TERMINAL",0,0,"5",,terminal_output +769,1032812,"TERMINAL",0,0,"6",,terminal_output +770,1033880,"TERMINAL",0,0,"7",,terminal_output +771,1034890,"TERMINAL",0,0,"8",,terminal_output +772,1035935,"TERMINAL",0,0,"9",,terminal_output +773,1036969,"TERMINAL",0,0,"50",,terminal_output +774,1038079,"TERMINAL",0,0,"1",,terminal_output +775,1039103,"TERMINAL",0,0,"2",,terminal_output +776,1040129,"TERMINAL",0,0,"3",,terminal_output +777,1041125,"TERMINAL",0,0,"4",,terminal_output +778,1042176,"TERMINAL",0,0,"5",,terminal_output +779,1043301,"TERMINAL",0,0,"6",,terminal_output +780,1044250,"TERMINAL",0,0,"7",,terminal_output +781,1045350,"TERMINAL",0,0,"8",,terminal_output +782,1046326,"TERMINAL",0,0,"9",,terminal_output +783,1047399,"TERMINAL",0,0,"20:00",,terminal_output +784,1048409,"TERMINAL",0,0,"1",,terminal_output +785,1049452,"TERMINAL",0,0,"2",,terminal_output +786,1050571,"TERMINAL",0,0,"3",,terminal_output +787,1051596,"TERMINAL",0,0,"480",,terminal_output +788,1052623,"TERMINAL",0,0,"5",,terminal_output +789,1053644,"TERMINAL",0,0,"6",,terminal_output +790,1054649,"TERMINAL",0,0,"7",,terminal_output +791,1055694,"TERMINAL",0,0,"9",,terminal_output +792,1056757,"TERMINAL",0,0,"10",,terminal_output +793,1057842,"TERMINAL",0,0,"1",,terminal_output +794,1058866,"TERMINAL",0,0,"2",,terminal_output +795,1059889,"TERMINAL",0,0,"3",,terminal_output +796,1060902,"TERMINAL",0,0,"4",,terminal_output +797,1062041,"TERMINAL",0,0,"5",,terminal_output +798,1062994,"TERMINAL",0,0,"6",,terminal_output +799,1064088,"TERMINAL",0,0,"7",,terminal_output +800,1065112,"TERMINAL",0,0,"8",,terminal_output +801,1066137,"TERMINAL",0,0,"9",,terminal_output +802,1067155,"TERMINAL",0,0,"20",,terminal_output +803,1068289,"TERMINAL",0,0,"1",,terminal_output +804,1069310,"TERMINAL",0,0,"2",,terminal_output +805,1070376,"TERMINAL",0,0,"3",,terminal_output +806,1078916,"TERMINAL",0,0,"477567hkn1991.localdomain: Thu Jul 3 13:20:28 2025Partition dev_cpuonly: 11 nodes idle\rPartition cpuonly: 77 nodes idle\rPartition dev_accelerated:\t 2 nodes idle\rPartition 
accelerated:\t 1 nodes idle\rPartition dev_accelerated-h100 :\t 1 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 7 nodes idle 9301",,terminal_output +807,1078939,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0615 jafar]$ \r(jafar) [tum_cte0515@hkn0615 jafar]$ \r(jafar) [tum_cte0515@hkn0615 jafar]$ ",,terminal_output +808,1079077,"TERMINAL",0,0,"2",,terminal_output +809,1080165,"TERMINAL",0,0,"3",,terminal_output +810,1081131,"TERMINAL",0,0,"4",,terminal_output +811,1082215,"TERMINAL",0,0,"5",,terminal_output +812,1083239,"TERMINAL",0,0,"6",,terminal_output +813,1084367,"TERMINAL",0,0,"78",,terminal_output +814,1085326,"TERMINAL",0,0,"8",,terminal_output +815,1086412,"TERMINAL",0,0,"9",,terminal_output +816,1087437,"TERMINAL",0,0,"40",,terminal_output +817,1088426,"TERMINAL",0,0,"12",,terminal_output +818,1089485,"TERMINAL",0,0,"2",,terminal_output +819,1090508,"TERMINAL",0,0,"380",,terminal_output +820,1091665,"TERMINAL",0,0,"4",,terminal_output +821,1092581,"TERMINAL",0,0,"5",,terminal_output +822,1093622,"TERMINAL",0,0,"6",,terminal_output +823,1094663,"TERMINAL",0,0,"7",,terminal_output +824,1095706,"TERMINAL",0,0,"9",,terminal_output +825,1096760,"TERMINAL",0,0,"50",,terminal_output +826,1097881,"TERMINAL",0,0,"1",,terminal_output +827,1098905,"TERMINAL",0,0,"2",,terminal_output +828,1099874,"TERMINAL",0,0,"3",,terminal_output +829,1100956,"TERMINAL",0,0,"4",,terminal_output +830,1101956,"TERMINAL",0,0,"53",,terminal_output +831,1103001,"TERMINAL",0,0,"6",,terminal_output +832,1104128,"TERMINAL",0,0,"7",,terminal_output +833,1105081,"TERMINAL",0,0,"8",,terminal_output +834,1106193,"TERMINAL",0,0,"9",,terminal_output +835,1107201,"TERMINAL",0,0,"1:00",,terminal_output +836,1108209,"TERMINAL",0,0,"1",,terminal_output +837,1109351,"TERMINAL",0,0,"2",,terminal_output +838,1110314,"TERMINAL",0,0,"3",,terminal_output +839,1111327,"TERMINAL",0,0,"4",,terminal_output +840,1112422,"TERMINAL",0,0,"5",,terminal_output +841,1113446,"TERMINAL",0,0,"6",,terminal_output +842,1114470,"TERMINAL",0,0,"7",,terminal_output +843,1115493,"TERMINAL",0,0,"8",,terminal_output +844,1116525,"TERMINAL",0,0,"9",,terminal_output +845,1117645,"TERMINAL",0,0,"10",,terminal_output +846,1118604,"TERMINAL",0,0,"1",,terminal_output +847,1119642,"TERMINAL",0,0,"2",,terminal_output +848,1120681,"TERMINAL",0,0,"4",,terminal_output +849,1121720,"TERMINAL",0,0,"5",,terminal_output +850,1122767,"TERMINAL",0,0,"6",,terminal_output +851,1123807,"TERMINAL",0,0,"7",,terminal_output +852,1124914,"TERMINAL",0,0,"8",,terminal_output +853,1125883,"TERMINAL",0,0,"9",,terminal_output +854,1126966,"TERMINAL",0,0,"20",,terminal_output +855,1127990,"TERMINAL",0,0,"1",,terminal_output +856,1129099,"TERMINAL",0,0,"2",,terminal_output +857,1130046,"TERMINAL",0,0,"3",,terminal_output +858,1131162,"TERMINAL",0,0,"4",,terminal_output +859,1132185,"TERMINAL",0,0,"5",,terminal_output +860,1133170,"TERMINAL",0,0,"6",,terminal_output +861,1134271,"TERMINAL",0,0,"7",,terminal_output +862,1135367,"TERMINAL",0,0,"8",,terminal_output +863,1136293,"TERMINAL",0,0,"9",,terminal_output +864,1137340,"TERMINAL",0,0,"30",,terminal_output +865,1138432,"TERMINAL",0,0,"1",,terminal_output +866,1139457,"TERMINAL",0,0,"2",,terminal_output +867,1140481,"TERMINAL",0,0,"3",,terminal_output +868,1141504,"TERMINAL",0,0,"4",,terminal_output +869,1142528,"TERMINAL",0,0,"5",,terminal_output +870,1143656,"TERMINAL",0,0,"6",,terminal_output +871,1144682,"TERMINAL",0,0,"7",,terminal_output +872,1145635,"TERMINAL",0,0,"8",,terminal_output 
+873,1146670,"TERMINAL",0,0,"40",,terminal_output +874,1147710,"TERMINAL",0,0,"1",,terminal_output +875,1148750,"TERMINAL",0,0,"2",,terminal_output +876,1149800,"TERMINAL",0,0,"3",,terminal_output +877,1150927,"TERMINAL",0,0,"4",,terminal_output +878,1151863,"TERMINAL",0,0,"5",,terminal_output +879,1152973,"TERMINAL",0,0,"6",,terminal_output +880,1153944,"TERMINAL",0,0,"7",,terminal_output +881,1155025,"TERMINAL",0,0,"8",,terminal_output +882,1156033,"TERMINAL",0,0,"9",,terminal_output +883,1157082,"TERMINAL",0,0,"50",,terminal_output +884,1158112,"TERMINAL",0,0,"1",,terminal_output +885,1159150,"TERMINAL",0,0,"2",,terminal_output +886,1160249,"TERMINAL",0,0,"3",,terminal_output +887,1161278,"TERMINAL",0,0,"4",,terminal_output +888,1162265,"TERMINAL",0,0,"5",,terminal_output +889,1163312,"TERMINAL",0,0,"6",,terminal_output +890,1164442,"TERMINAL",0,0,"7",,terminal_output +891,1165382,"TERMINAL",0,0,"8",,terminal_output +892,1166452,"TERMINAL",0,0,"9",,terminal_output +893,1167455,"TERMINAL",0,0,"2:00",,terminal_output +894,1168540,"TERMINAL",0,0,"1",,terminal_output +895,1169566,"TERMINAL",0,0,"2",,terminal_output +896,1170613,"TERMINAL",0,0,"3",,terminal_output +897,1171685,"TERMINAL",0,0,"4",,terminal_output +898,1172658,"TERMINAL",0,0,"5",,terminal_output +899,1173693,"TERMINAL",0,0,"7",,terminal_output +900,1174735,"TERMINAL",0,0,"8",,terminal_output +901,1175771,"TERMINAL",0,0,"9",,terminal_output +902,1176807,"TERMINAL",0,0,"10",,terminal_output +903,1177847,"TERMINAL",0,0,"1",,terminal_output +904,1178888,"TERMINAL",0,0,"2",,terminal_output +905,1180007,"TERMINAL",0,0,"3",,terminal_output +906,1181034,"TERMINAL",0,0,"4",,terminal_output +907,1182003,"TERMINAL",0,0,"5",,terminal_output +908,1183038,"TERMINAL",0,0,"6",,terminal_output +909,1184104,"TERMINAL",0,0,"7",,terminal_output +910,1185114,"TERMINAL",0,0,"8",,terminal_output +911,1186162,"TERMINAL",0,0,"9",,terminal_output +912,1187278,"TERMINAL",0,0,"20",,terminal_output +913,1188238,"TERMINAL",0,0,"1",,terminal_output +914,1189286,"TERMINAL",0,0,"2",,terminal_output +915,1190349,"TERMINAL",0,0,"3",,terminal_output +916,1191864,"TERMINAL",0,0,"471",,terminal_output +917,1192911,"TERMINAL",0,0,"6",,terminal_output +918,1194037,"TERMINAL",0,0,"7",,terminal_output +919,1194979,"TERMINAL",0,0,"8",,terminal_output +920,1196087,"TERMINAL",0,0,"9",,terminal_output +921,1197063,"TERMINAL",0,0,"30",,terminal_output +922,1198101,"TERMINAL",0,0,"1",,terminal_output +923,1199142,"TERMINAL",0,0,"2",,terminal_output +924,1200183,"TERMINAL",0,0,"3",,terminal_output +925,1201567,"TERMINAL",0,0,"4",,terminal_output +926,1202329,"TERMINAL",0,0,"5",,terminal_output +927,1203313,"TERMINAL",0,0,"6",,terminal_output +928,1204345,"TERMINAL",0,0,"7",,terminal_output +929,1205607,"TERMINAL",0,0,"8",,terminal_output +930,1206507,"TERMINAL",0,0,"9",,terminal_output +931,1207554,"TERMINAL",0,0,"40",,terminal_output +932,1208496,"TERMINAL",0,0,"1",,terminal_output +933,1209639,"TERMINAL",0,0,"2",,terminal_output +934,1210646,"TERMINAL",0,0,"32",,terminal_output +935,1211640,"TERMINAL",0,0,"4",,terminal_output +936,1212649,"TERMINAL",0,0,"5",,terminal_output +937,1213704,"TERMINAL",0,0,"73",,terminal_output +938,1214747,"TERMINAL",0,0,"84",,terminal_output +939,1216052,"TERMINAL",0,0,"9",,terminal_output +940,1217385,"TERMINAL",0,0,"50",,terminal_output +941,1217896,"TERMINAL",0,0,"1",,terminal_output +942,1218882,"TERMINAL",0,0,"2",,terminal_output +943,1219938,"TERMINAL",0,0,"3",,terminal_output +944,1220964,"TERMINAL",0,0,"4",,terminal_output 
+945,1222010,"TERMINAL",0,0,"5",,terminal_output +946,1223118,"TERMINAL",0,0,"6",,terminal_output +947,1224450,"TERMINAL",0,0,"7",,terminal_output +948,1225116,"TERMINAL",0,0,"8",,terminal_output +949,1226191,"TERMINAL",0,0,"9",,terminal_output +950,1227405,"TERMINAL",0,0,"3:00",,terminal_output +951,1228238,"TERMINAL",0,0,"1",,terminal_output +952,1229364,"TERMINAL",0,0,"2",,terminal_output +953,1230370,"TERMINAL",0,0,"3",,terminal_output +954,1231342,"TERMINAL",0,0,"4",,terminal_output +955,1232437,"TERMINAL",0,0,"5",,terminal_output +956,1233461,"TERMINAL",0,0,"6",,terminal_output +957,1235102,"TERMINAL",0,0,"7",,terminal_output +958,1235611,"TERMINAL",0,0,"8",,terminal_output +959,1236650,"TERMINAL",0,0,"9",,terminal_output +960,1237658,"TERMINAL",0,0,"10",,terminal_output +961,1238650,"TERMINAL",0,0,"1",,terminal_output +962,1239680,"TERMINAL",0,0,"2",,terminal_output +963,1240731,"TERMINAL",0,0,"4",,terminal_output +964,1241767,"TERMINAL",0,0,"5",,terminal_output +965,1242789,"TERMINAL",0,0,"6",,terminal_output +966,1243827,"TERMINAL",0,0,"7",,terminal_output +967,1244862,"TERMINAL",0,0,"8",,terminal_output +968,1245954,"TERMINAL",0,0,"9",,terminal_output +969,1246978,"TERMINAL",0,0,"20",,terminal_output +970,1247996,"TERMINAL",0,0,"1",,terminal_output +971,1249017,"TERMINAL",0,0,"2",,terminal_output +972,1250051,"TERMINAL",0,0,"3",,terminal_output +973,1251092,"TERMINAL",0,0,"4",,terminal_output +974,1252200,"TERMINAL",0,0,"5",,terminal_output +975,1253224,"TERMINAL",0,0,"6",,terminal_output +976,1254247,"TERMINAL",0,0,"7",,terminal_output +977,1255272,"TERMINAL",0,0,"8",,terminal_output +978,1256399,"TERMINAL",0,0,"9",,terminal_output +979,1257335,"TERMINAL",0,0,"30",,terminal_output +980,1258446,"TERMINAL",0,0,"1",,terminal_output +981,1259472,"TERMINAL",0,0,"2",,terminal_output +982,1260494,"TERMINAL",0,0,"3",,terminal_output +983,1261520,"TERMINAL",0,0,"4",,terminal_output +984,1262542,"TERMINAL",0,0,"5",,terminal_output +985,1263675,"TERMINAL",0,0,"63",,terminal_output +986,1264618,"TERMINAL",0,0,"7",,terminal_output +987,1265644,"TERMINAL",0,0,"85",,terminal_output +988,1266682,"TERMINAL",0,0,"40",,terminal_output +989,1267728,"TERMINAL",0,0,"1",,terminal_output +990,1268773,"TERMINAL",0,0,"2",,terminal_output +991,1269813,"TERMINAL",0,0,"3",,terminal_output +992,1270849,"TERMINAL",0,0,"4",,terminal_output +993,1271964,"TERMINAL",0,0,"5",,terminal_output +994,1272987,"TERMINAL",0,0,"6",,terminal_output +995,1273969,"TERMINAL",0,0,"7",,terminal_output +996,1275035,"TERMINAL",0,0,"8",,terminal_output +997,1276060,"TERMINAL",0,0,"9",,terminal_output +998,1277088,"TERMINAL",0,0,"50",,terminal_output +999,1278208,"TERMINAL",0,0,"1",,terminal_output +1000,1279227,"TERMINAL",0,0,"2",,terminal_output +1001,1280195,"TERMINAL",0,0,"3",,terminal_output +1002,1281240,"TERMINAL",0,0,"4",,terminal_output +1003,1282337,"TERMINAL",0,0,"5",,terminal_output +1004,1283434,"TERMINAL",0,0,"6",,terminal_output +1005,1284365,"TERMINAL",0,0,"7",,terminal_output +1006,1285401,"TERMINAL",0,0,"8",,terminal_output +1007,1286504,"TERMINAL",0,0,"9",,terminal_output +1008,1287531,"TERMINAL",0,0,"4:006",,terminal_output +1009,1288520,"TERMINAL",0,0,"1",,terminal_output +1010,1289577,"TERMINAL",0,0,"2",,terminal_output +1011,1290703,"TERMINAL",0,0,"3",,terminal_output +1012,1291690,"TERMINAL",0,0,"4",,terminal_output +1013,1292673,"TERMINAL",0,0,"5",,terminal_output +1014,1293779,"TERMINAL",0,0,"7",,terminal_output +1015,1294751,"TERMINAL",0,0,"8",,terminal_output 
+1016,1295791,"TERMINAL",0,0,"9",,terminal_output +1017,1296827,"TERMINAL",0,0,"10",,terminal_output +1018,1297881,"TERMINAL",0,0,"1",,terminal_output +1019,1298998,"TERMINAL",0,0,"2",,terminal_output +1020,1299958,"TERMINAL",0,0,"3",,terminal_output +1021,1301046,"TERMINAL",0,0,"4",,terminal_output +1022,1302031,"TERMINAL",0,0,"5",,terminal_output +1023,1303094,"TERMINAL",0,0,"6",,terminal_output +1024,1304122,"TERMINAL",0,0,"7",,terminal_output +1025,1305244,"TERMINAL",0,0,"8",,terminal_output +1026,1306268,"TERMINAL",0,0,"9",,terminal_output +1027,1307292,"TERMINAL",0,0,"20",,terminal_output +1028,1308296,"TERMINAL",0,0,"17",,terminal_output +1029,1309321,"TERMINAL",0,0,"2",,terminal_output +1030,1310406,"TERMINAL",0,0,"3",,terminal_output +1031,1311403,"TERMINAL",0,0,"4",,terminal_output +1032,1312515,"TERMINAL",0,0,"52",,terminal_output +1033,1313485,"TERMINAL",0,0,"6",,terminal_output +1034,1314562,"TERMINAL",0,0,"7",,terminal_output +1035,1315566,"TERMINAL",0,0,"8",,terminal_output +1036,1316611,"TERMINAL",0,0,"9",,terminal_output +1037,1317751,"TERMINAL",0,0,"30",,terminal_output +1038,1318763,"TERMINAL",0,0,"2",,terminal_output +1039,1319786,"TERMINAL",0,0,"3",,terminal_output +1040,1320779,"TERMINAL",0,0,"4",,terminal_output +1041,1321815,"TERMINAL",0,0,"5",,terminal_output +1042,1322849,"TERMINAL",0,0,"6",,terminal_output +1043,1323893,"TERMINAL",0,0,"7",,terminal_output +1044,1324937,"TERMINAL",0,0,"8",,terminal_output +1045,1326031,"TERMINAL",0,0,"9",,terminal_output +1046,1327022,"TERMINAL",0,0,"40",,terminal_output +1047,1328080,"TERMINAL",0,0,"1",,terminal_output +1048,1329108,"TERMINAL",0,0,"2",,terminal_output +1049,1330143,"TERMINAL",0,0,"3",,terminal_output +1050,1331255,"TERMINAL",0,0,"4",,terminal_output +1051,1332279,"TERMINAL",0,0,"5",,terminal_output +1052,1333304,"TERMINAL",0,0,"6",,terminal_output +1053,1334301,"TERMINAL",0,0,"7",,terminal_output +1054,1335348,"TERMINAL",0,0,"8",,terminal_output +1055,1336478,"TERMINAL",0,0,"9",,terminal_output +1056,1337504,"TERMINAL",0,0,"50",,terminal_output +1057,1338485,"TERMINAL",0,0,"1",,terminal_output +1058,1339600,"TERMINAL",0,0,"2",,terminal_output +1059,1340588,"TERMINAL",0,0,"3",,terminal_output +1060,1341616,"TERMINAL",0,0,"4",,terminal_output +1061,1342664,"TERMINAL",0,0,"5",,terminal_output +1062,1343711,"TERMINAL",0,0,"7",,terminal_output +1063,1344773,"TERMINAL",0,0,"8",,terminal_output +1064,1345797,"TERMINAL",0,0,"9",,terminal_output +1065,1346820,"TERMINAL",0,0,"5:00",,terminal_output +1066,1347860,"TERMINAL",0,0,"1",,terminal_output +1067,1348905,"TERMINAL",0,0,"2",,terminal_output +1068,1349943,"TERMINAL",0,0,"3",,terminal_output +1069,1351019,"TERMINAL",0,0,"4",,terminal_output +1070,1352034,"TERMINAL",0,0,"5",,terminal_output +1071,1353169,"TERMINAL",0,0,"6",,terminal_output +1072,1354193,"TERMINAL",0,0,"7128",,terminal_output +1073,1355175,"TERMINAL",0,0,"8",,terminal_output +1074,1356228,"TERMINAL",0,0,"9",,terminal_output +1075,1357368,"TERMINAL",0,0,"10",,terminal_output +1076,1358314,"TERMINAL",0,0,"1",,terminal_output +1077,1359358,"TERMINAL",0,0,"2",,terminal_output +1078,1360400,"TERMINAL",0,0,"3",,terminal_output +1079,1361435,"TERMINAL",0,0,"4",,terminal_output +1080,1362488,"TERMINAL",0,0,"5",,terminal_output +1081,1363530,"TERMINAL",0,0,"6",,terminal_output +1082,1364638,"TERMINAL",0,0,"7",,terminal_output +1083,1365661,"TERMINAL",0,0,"8",,terminal_output +1084,1366720,"TERMINAL",0,0,"90",,terminal_output +1085,1367699,"TERMINAL",0,0,"21",,terminal_output 
+1086,1368740,"TERMINAL",0,0,"2",,terminal_output +1087,1369777,"TERMINAL",0,0,"330",,terminal_output +1088,1370824,"TERMINAL",0,0,"43",,terminal_output +1089,1371867,"TERMINAL",0,0,"56",,terminal_output +1090,1372916,"TERMINAL",0,0,"67",,terminal_output +1091,1373956,"TERMINAL",0,0,"7",,terminal_output +1092,1375083,"TERMINAL",0,0,"8",,terminal_output +1093,1376105,"TERMINAL",0,0,"9",,terminal_output +1094,1377087,"TERMINAL",0,0,"30",,terminal_output +1095,1378154,"TERMINAL",0,0,"1",,terminal_output +1096,1379282,"TERMINAL",0,0,"2",,terminal_output +1097,1380304,"TERMINAL",0,0,"3",,terminal_output +1098,1381329,"TERMINAL",0,0,"4",,terminal_output +1099,1382356,"TERMINAL",0,0,"5",,terminal_output +1100,1383337,"TERMINAL",0,0,"6",,terminal_output +1101,1384394,"TERMINAL",0,0,"7",,terminal_output +1102,1385414,"TERMINAL",0,0,"8",,terminal_output +1103,1386450,"TERMINAL",0,0,"9",,terminal_output +1104,1387496,"TERMINAL",0,0,"40",,terminal_output +1105,1388542,"TERMINAL",0,0,"1",,terminal_output +1106,1389623,"TERMINAL",0,0,"2",,terminal_output +1107,1390647,"TERMINAL",0,0,"3",,terminal_output +1108,1391663,"TERMINAL",0,0,"4",,terminal_output +1109,1392797,"TERMINAL",0,0,"6",,terminal_output +1110,1393823,"TERMINAL",0,0,"7",,terminal_output +1111,1394798,"TERMINAL",0,0,"8",,terminal_output +1112,1395838,"TERMINAL",0,0,"9",,terminal_output +1113,1396901,"TERMINAL",0,0,"50",,terminal_output +1114,1397930,"TERMINAL",0,0,"1",,terminal_output +1115,1398999,"TERMINAL",0,0,"2",,terminal_output +1116,1400069,"TERMINAL",0,0,"3",,terminal_output +1117,1401109,"TERMINAL",0,0,"4",,terminal_output +1118,1402107,"TERMINAL",0,0,"5",,terminal_output +1119,1403159,"TERMINAL",0,0,"6",,terminal_output +1120,1404266,"TERMINAL",0,0,"7",,terminal_output +1121,1405304,"TERMINAL",0,0,"8",,terminal_output +1122,1406315,"TERMINAL",0,0,"9",,terminal_output +1123,1407373,"TERMINAL",0,0,"6:00",,terminal_output +1124,1408465,"TERMINAL",0,0,"1",,terminal_output +1125,1409489,"TERMINAL",0,0,"2",,terminal_output +1126,1410515,"TERMINAL",0,0,"3",,terminal_output +1127,1411577,"TERMINAL",0,0,"4",,terminal_output +1128,1412535,"TERMINAL",0,0,"5",,terminal_output +1129,1413587,"TERMINAL",0,0,"6",,terminal_output +1130,1414626,"TERMINAL",0,0,"7",,terminal_output +1131,1415739,"TERMINAL",0,0,"8",,terminal_output +1132,1416692,"TERMINAL",0,0,"10",,terminal_output +1133,1417785,"TERMINAL",0,0,"18",,terminal_output +1134,1418777,"TERMINAL",0,0,"240",,terminal_output +1135,1419817,"TERMINAL",0,0,"31",,terminal_output +1136,1420858,"TERMINAL",0,0,"42",,terminal_output +1137,1421899,"TERMINAL",0,0,"5",,terminal_output +1138,1422945,"TERMINAL",0,0,"6",,terminal_output +1139,1424030,"TERMINAL",0,0,"7",,terminal_output +1140,1425055,"TERMINAL",0,0,"8",,terminal_output +1141,1426179,"TERMINAL",0,0,"90",,terminal_output +1142,1427118,"TERMINAL",0,0,"20",,terminal_output +1143,1428227,"TERMINAL",0,0,"1",,terminal_output +1144,1429226,"TERMINAL",0,0,"2",,terminal_output +1145,1430276,"TERMINAL",0,0,"3",,terminal_output +1146,1431303,"TERMINAL",0,0,"4",,terminal_output +1147,1432428,"TERMINAL",0,0,"51",,terminal_output +1148,1433364,"TERMINAL",0,0,"6",,terminal_output +1149,1434475,"TERMINAL",0,0,"7",,terminal_output +1150,1435582,"TERMINAL",0,0,"8",,terminal_output +1151,1436545,"TERMINAL",0,0,"9",,terminal_output +1152,1437550,"TERMINAL",0,0,"30",,terminal_output +1153,1438580,"TERMINAL",0,0,"1",,terminal_output +1154,1439698,"TERMINAL",0,0,"2",,terminal_output +1155,1440662,"TERMINAL",0,0,"3",,terminal_output 
+1156,1441705,"TERMINAL",0,0,"5",,terminal_output +1157,1442784,"TERMINAL",0,0,"6",,terminal_output +1158,1443794,"TERMINAL",0,0,"7",,terminal_output +1159,1444834,"TERMINAL",0,0,"8",,terminal_output +1160,1445886,"TERMINAL",0,0,"9",,terminal_output +1161,1446966,"TERMINAL",0,0,"40",,terminal_output +1162,1447992,"TERMINAL",0,0,"1",,terminal_output +1163,1449015,"TERMINAL",0,0,"2",,terminal_output +1164,1450142,"TERMINAL",0,0,"3",,terminal_output +1165,1451095,"TERMINAL",0,0,"4",,terminal_output +1166,1452153,"TERMINAL",0,0,"5",,terminal_output +1167,1453195,"TERMINAL",0,0,"6",,terminal_output +1168,1454341,"TERMINAL",0,0,"7",,terminal_output +1169,1455365,"TERMINAL",0,0,"8",,terminal_output +1170,1456389,"TERMINAL",0,0,"9",,terminal_output +1171,1457358,"TERMINAL",0,0,"50",,terminal_output +1172,1458406,"TERMINAL",0,0,"1",,terminal_output +1173,1459445,"TERMINAL",0,0,"2",,terminal_output +1174,1460488,"TERMINAL",0,0,"3",,terminal_output +1175,1461530,"TERMINAL",0,0,"4",,terminal_output +1176,1462646,"TERMINAL",0,0,"5",,terminal_output +1177,1463670,"TERMINAL",0,0,"6",,terminal_output +1178,1464693,"TERMINAL",0,0,"7",,terminal_output +1179,1465714,"TERMINAL",0,0,"9",,terminal_output +1180,1466768,"TERMINAL",0,0,"7:00",,terminal_output +1181,1467859,"TERMINAL",0,0,"1",,terminal_output +1182,1468832,"TERMINAL",0,0,"2",,terminal_output +1183,1469880,"TERMINAL",0,0,"3",,terminal_output +1184,1470924,"TERMINAL",0,0,"4",,terminal_output +1185,1472056,"TERMINAL",0,0,"5",,terminal_output +1186,1473011,"TERMINAL",0,0,"6",,terminal_output +1187,1474104,"TERMINAL",0,0,"7",,terminal_output +1188,1475128,"TERMINAL",0,0,"8",,terminal_output +1189,1476256,"TERMINAL",0,0,"9",,terminal_output +1190,1477279,"TERMINAL",0,0,"10",,terminal_output +1191,1478304,"TERMINAL",0,0,"1",,terminal_output +1192,1479279,"TERMINAL",0,0,"2",,terminal_output +1193,1480341,"TERMINAL",0,0,"3",,terminal_output +1194,1481375,"TERMINAL",0,0,"4",,terminal_output +1195,1482501,"TERMINAL",0,0,"5",,terminal_output +1196,1483531,"TERMINAL",0,0,"6",,terminal_output +1197,1484549,"TERMINAL",0,0,"7",,terminal_output +1198,1485573,"TERMINAL",0,0,"8",,terminal_output +1199,1486596,"TERMINAL",0,0,"9",,terminal_output +1200,1487723,"TERMINAL",0,0,"20",,terminal_output +1201,1488664,"TERMINAL",0,0,"1",,terminal_output +1202,1489772,"TERMINAL",0,0,"3",,terminal_output +1203,1490795,"TERMINAL",0,0,"4",,terminal_output +1204,1491797,"TERMINAL",0,0,"5",,terminal_output +1205,1492844,"TERMINAL",0,0,"6",,terminal_output +1206,1493872,"TERMINAL",0,0,"7",,terminal_output +1207,1494916,"TERMINAL",0,0,"8",,terminal_output +1208,1495961,"TERMINAL",0,0,"9",,terminal_output +1209,1497042,"TERMINAL",0,0,"30",,terminal_output +1210,1498066,"TERMINAL",0,0,"1",,terminal_output +1211,1499193,"TERMINAL",0,0,"2",,terminal_output +1212,1500121,"TERMINAL",0,0,"3",,terminal_output +1213,1501180,"TERMINAL",0,0,"4",,terminal_output +1214,1502209,"TERMINAL",0,0,"5",,terminal_output +1215,1503243,"TERMINAL",0,0,"6",,terminal_output +1216,1504316,"TERMINAL",0,0,"7",,terminal_output +1217,1505336,"TERMINAL",0,0,"8",,terminal_output +1218,1506368,"TERMINAL",0,0,"9",,terminal_output +1219,1507408,"TERMINAL",0,0,"40",,terminal_output +1220,1508511,"TERMINAL",0,0,"1",,terminal_output +1221,1509535,"TERMINAL",0,0,"2",,terminal_output +1222,1510560,"TERMINAL",0,0,"3",,terminal_output +1223,1511585,"TERMINAL",0,0,"4",,terminal_output +1224,1512620,"TERMINAL",0,0,"5",,terminal_output +1225,1513734,"TERMINAL",0,0,"6",,terminal_output 
+1226,1514782,"TERMINAL",0,0,"8",,terminal_output +1227,1515782,"TERMINAL",0,0,"9",,terminal_output +1228,1516799,"TERMINAL",0,0,"50",,terminal_output +1229,1517816,"TERMINAL",0,0,"1",,terminal_output +1230,1518863,"TERMINAL",0,0,"2",,terminal_output +1231,1519908,"TERMINAL",0,0,"3",,terminal_output +1232,1520947,"TERMINAL",0,0,"4",,terminal_output +1233,1521988,"TERMINAL",0,0,"5",,terminal_output +1234,1523035,"TERMINAL",0,0,"6",,terminal_output +1235,1524195,"TERMINAL",0,0,"7",,terminal_output +1236,1525115,"TERMINAL",0,0,"8",,terminal_output +1237,1526228,"TERMINAL",0,0,"9",,terminal_output +1238,1527250,"TERMINAL",0,0,"8:00",,terminal_output +1239,1528274,"TERMINAL",0,0,"1",,terminal_output +1240,1529299,"TERMINAL",0,0,"2",,terminal_output +1241,1530327,"TERMINAL",0,0,"3",,terminal_output +1242,1531451,"TERMINAL",0,0,"4",,terminal_output +1243,1532473,"TERMINAL",0,0,"5",,terminal_output +1244,1533446,"TERMINAL",0,0,"6",,terminal_output +1245,1534521,"TERMINAL",0,0,"7",,terminal_output +1246,1535544,"TERMINAL",0,0,"8",,terminal_output +1247,1536672,"TERMINAL",0,0,"9",,terminal_output +1248,1537606,"TERMINAL",0,0,"10",,terminal_output +1249,1538670,"TERMINAL",0,0,"1",,terminal_output +1250,1539744,"TERMINAL",0,0,"3",,terminal_output +1251,1540779,"TERMINAL",0,0,"4",,terminal_output +1252,1541795,"TERMINAL",0,0,"5",,terminal_output +1253,1542818,"TERMINAL",0,0,"6",,terminal_output +1254,1543859,"TERMINAL",0,0,"7",,terminal_output +1255,1544897,"TERMINAL",0,0,"8",,terminal_output +1256,1545945,"TERMINAL",0,0,"9",,terminal_output +1257,1547014,"TERMINAL",0,0,"20",,terminal_output +1258,1548139,"TERMINAL",0,0,"1",,terminal_output +1259,1549074,"TERMINAL",0,0,"2",,terminal_output +1260,1550188,"TERMINAL",0,0,"3",,terminal_output +1261,1551151,"TERMINAL",0,0,"4",,terminal_output +1262,1552339,"TERMINAL",0,0,"5",,terminal_output +1263,1553292,"TERMINAL",0,0,"6",,terminal_output +1264,1554335,"TERMINAL",0,0,"7",,terminal_output +1265,1555423,"TERMINAL",0,0,"8",,terminal_output +1266,1556434,"TERMINAL",0,0,"9",,terminal_output +1267,1557461,"TERMINAL",0,0,"30",,terminal_output +1268,1558498,"TERMINAL",0,0,"1",,terminal_output +1269,1559609,"TERMINAL",0,0,"2",,terminal_output +1270,1560581,"TERMINAL",0,0,"3",,terminal_output +1271,1561621,"TERMINAL",0,0,"4",,terminal_output +1272,1562681,"TERMINAL",0,0,"5",,terminal_output +1273,1563706,"TERMINAL",0,0,"7",,terminal_output +1274,1564741,"TERMINAL",0,0,"8",,terminal_output +1275,1565856,"TERMINAL",0,0,"9",,terminal_output +1276,1566813,"TERMINAL",0,0,"40",,terminal_output +1277,1568001,"TERMINAL",0,0,"1",,terminal_output +1278,1568886,"TERMINAL",0,0,"2",,terminal_output +1279,1569918,"TERMINAL",0,0,"3",,terminal_output +1280,1570959,"TERMINAL",0,0,"4",,terminal_output +1281,1572104,"TERMINAL",0,0,"5",,terminal_output +1282,1573105,"TERMINAL",0,0,"6",,terminal_output +1283,1574151,"TERMINAL",0,0,"7",,terminal_output +1284,1575175,"TERMINAL",0,0,"8",,terminal_output +1285,1576198,"TERMINAL",0,0,"9",,terminal_output +1286,1577223,"TERMINAL",0,0,"50",,terminal_output +1287,1578245,"TERMINAL",0,0,"1",,terminal_output +1288,1579373,"TERMINAL",0,0,"2",,terminal_output +1289,1580439,"TERMINAL",0,0,"3",,terminal_output +1290,1581367,"TERMINAL",0,0,"4",,terminal_output +1291,1582436,"TERMINAL",0,0,"5",,terminal_output +1292,1583452,"TERMINAL",0,0,"6",,terminal_output +1293,1584594,"TERMINAL",0,0,"7",,terminal_output +1294,1585619,"TERMINAL",0,0,"8",,terminal_output +1295,1586582,"TERMINAL",0,0,"9",,terminal_output 
+1296,1587614,"TERMINAL",0,0,"9:00",,terminal_output +1297,1588690,"TERMINAL",0,0,"1",,terminal_output +1298,1589716,"TERMINAL",0,0,"3",,terminal_output +1299,1590740,"TERMINAL",0,0,"4",,terminal_output +1300,1591812,"TERMINAL",0,0,"5",,terminal_output +1301,1592891,"TERMINAL",0,0,"6",,terminal_output +1302,1593916,"TERMINAL",0,0,"7",,terminal_output +1303,1594939,"TERMINAL",0,0,"8",,terminal_output +1304,1595960,"TERMINAL",0,0,"9",,terminal_output +1305,1597102,"TERMINAL",0,0,"10",,terminal_output +1306,1598113,"TERMINAL",0,0,"1",,terminal_output +1307,1599137,"TERMINAL",0,0,"2",,terminal_output +1308,1600175,"TERMINAL",0,0,"3",,terminal_output +1309,1601186,"TERMINAL",0,0,"4",,terminal_output +1310,1602313,"TERMINAL",0,0,"5",,terminal_output +1311,1603376,"TERMINAL",0,0,"6",,terminal_output +1312,1604294,"TERMINAL",0,0,"7",,terminal_output +1313,1605383,"TERMINAL",0,0,"8",,terminal_output +1314,1606406,"TERMINAL",0,0,"92",,terminal_output +1315,1607430,"TERMINAL",0,0,"20",,terminal_output +1316,1608460,"TERMINAL",0,0,"1",,terminal_output +1317,1609562,"TERMINAL",0,0,"2",,terminal_output +1318,1610546,"TERMINAL",0,0,"3",,terminal_output +1319,1611587,"TERMINAL",0,0,"4",,terminal_output +1320,1612652,"TERMINAL",0,0,"5",,terminal_output +1321,1613677,"TERMINAL",0,0,"6",,terminal_output +1322,1614815,"TERMINAL",0,0,"8",,terminal_output +1323,1615830,"TERMINAL",0,0,"9",,terminal_output +1324,1616839,"TERMINAL",0,0,"30",,terminal_output +1325,1617853,"TERMINAL",0,0,"1",,terminal_output +1326,1618894,"TERMINAL",0,0,"2",,terminal_output +1327,1619936,"TERMINAL",0,0,"3",,terminal_output +1328,1621049,"TERMINAL",0,0,"4",,terminal_output +1329,1622076,"TERMINAL",0,0,"5",,terminal_output +1330,1623097,"TERMINAL",0,0,"6",,terminal_output +1331,1624121,"TERMINAL",0,0,"7",,terminal_output +1332,1625154,"TERMINAL",0,0,"8",,terminal_output +1333,1626273,"TERMINAL",0,0,"9",,terminal_output +1334,1627242,"TERMINAL",0,0,"40",,terminal_output +1335,1628273,"TERMINAL",0,0,"1",,terminal_output +1336,1629344,"TERMINAL",0,0,"2",,terminal_output +1337,1630355,"TERMINAL",0,0,"3",,terminal_output +1338,1631396,"TERMINAL",0,0,"4",,terminal_output +1339,1632438,"TERMINAL",0,0,"5",,terminal_output +1340,1633543,"TERMINAL",0,0,"6",,terminal_output +1341,1634568,"TERMINAL",0,0,"7",,terminal_output +1342,1635568,"TERMINAL",0,0,"8",,terminal_output +1343,1636618,"TERMINAL",0,0,"9",,terminal_output +1344,1637654,"TERMINAL",0,0,"50",,terminal_output +1345,1638765,"TERMINAL",0,0,"2",,terminal_output +1346,1639790,"TERMINAL",0,0,"3",,terminal_output +1347,1640776,"TERMINAL",0,0,"4",,terminal_output +1348,1641842,"TERMINAL",0,0,"5",,terminal_output +1349,1642861,"TERMINAL",0,0,"6",,terminal_output +1350,1643900,"TERMINAL",0,0,"7",,terminal_output +1351,1644946,"TERMINAL",0,0,"8",,terminal_output +1352,1645990,"TERMINAL",0,0,"9",,terminal_output +1353,1647061,"TERMINAL",0,0,"30:00",,terminal_output +1354,1648085,"TERMINAL",0,0,"1",,terminal_output +1355,1649107,"TERMINAL",0,0,"2",,terminal_output +1356,1650146,"TERMINAL",0,0,"3",,terminal_output +1357,1651261,"TERMINAL",0,0,"4",,terminal_output +1358,1652283,"TERMINAL",0,0,"5",,terminal_output +1359,1653307,"TERMINAL",0,0,"6",,terminal_output +1360,1654307,"TERMINAL",0,0,"7",,terminal_output +1361,1655354,"TERMINAL",0,0,"8",,terminal_output +1362,1656481,"TERMINAL",0,0,"9",,terminal_output +1363,1657431,"TERMINAL",0,0,"10",,terminal_output +1364,1658530,"TERMINAL",0,0,"1",,terminal_output +1365,1659555,"TERMINAL",0,0,"2",,terminal_output 
+1366,1660580,"TERMINAL",0,0,"3",,terminal_output +1367,1661601,"TERMINAL",0,0,"4",,terminal_output +1368,1662732,"TERMINAL",0,0,"5",,terminal_output +1369,1663753,"TERMINAL",0,0,"7",,terminal_output +1370,1664777,"TERMINAL",0,0,"8",,terminal_output +1371,1665800,"TERMINAL",0,0,"9",,terminal_output +1372,1666802,"TERMINAL",0,0,"20",,terminal_output +1373,1667865,"TERMINAL",0,0,"1",,terminal_output +1374,1668897,"TERMINAL",0,0,"2",,terminal_output +1375,1669935,"TERMINAL",0,0,"3",,terminal_output +1376,1670975,"TERMINAL",0,0,"4",,terminal_output +1377,1672023,"TERMINAL",0,0,"5",,terminal_output +1378,1673173,"TERMINAL",0,0,"6",,terminal_output +1379,1674197,"TERMINAL",0,0,"7",,terminal_output +1380,1675229,"TERMINAL",0,0,"8",,terminal_output +1381,1676246,"TERMINAL",0,0,"9",,terminal_output +1382,1677267,"TERMINAL",0,0,"30",,terminal_output +1383,1678292,"TERMINAL",0,0,"1",,terminal_output +1384,1679418,"TERMINAL",0,0,"2",,terminal_output +1385,1680384,"TERMINAL",0,0,"3",,terminal_output +1386,1681468,"TERMINAL",0,0,"4",,terminal_output +1387,1682446,"TERMINAL",0,0,"5",,terminal_output +1388,1683516,"TERMINAL",0,0,"6",,terminal_output +1389,1684539,"TERMINAL",0,0,"7",,terminal_output +1390,1685569,"TERMINAL",0,0,"8",,terminal_output +1391,1686684,"TERMINAL",0,0,"9",,terminal_output +1392,1687714,"TERMINAL",0,0,"40",,terminal_output +1393,1688738,"TERMINAL",0,0,"2",,terminal_output +1394,1689735,"TERMINAL",0,0,"3",,terminal_output +1395,1690776,"TERMINAL",0,0,"4",,terminal_output +1396,1691816,"TERMINAL",0,0,"5",,terminal_output +1397,1692866,"TERMINAL",0,0,"6",,terminal_output +1398,1693962,"TERMINAL",0,0,"7",,terminal_output +1399,1694938,"TERMINAL",0,0,"8",,terminal_output +1400,1695980,"TERMINAL",0,0,"9",,terminal_output +1401,1697023,"TERMINAL",0,0,"50",,terminal_output +1402,1698063,"TERMINAL",0,0,"1",,terminal_output +1403,1699184,"TERMINAL",0,0,"2",,terminal_output +1404,1700143,"TERMINAL",0,0,"3",,terminal_output +1405,1701185,"TERMINAL",0,0,"4",,terminal_output +1406,1702222,"TERMINAL",0,0,"5",,terminal_output +1407,1703279,"TERMINAL",0,0,"6",,terminal_output +1408,1704317,"TERMINAL",0,0,"7",,terminal_output +1409,1705370,"TERMINAL",0,0,"8",,terminal_output +1410,1706453,"TERMINAL",0,0,"9",,terminal_output +1411,1707477,"TERMINAL",0,0,"1:00",,terminal_output +1412,1708500,"TERMINAL",0,0,"1",,terminal_output +1413,1709521,"TERMINAL",0,0,"2",,terminal_output +1414,1710581,"TERMINAL",0,0,"3",,terminal_output +1415,1711677,"TERMINAL",0,0,"4",,terminal_output +1416,1712700,"TERMINAL",0,0,"5",,terminal_output +1417,1713728,"TERMINAL",0,0,"7",,terminal_output +1418,1714720,"TERMINAL",0,0,"8",,terminal_output +1419,1715773,"TERMINAL",0,0,"9",,terminal_output +1420,1716800,"TERMINAL",0,0,"10",,terminal_output +1421,1717921,"TERMINAL",0,0,"1",,terminal_output +1422,1718884,"TERMINAL",0,0,"2",,terminal_output +1423,1719971,"TERMINAL",0,0,"3",,terminal_output +1424,1720960,"TERMINAL",0,0,"4",,terminal_output +1425,1722003,"TERMINAL",0,0,"5",,terminal_output +1426,1723146,"TERMINAL",0,0,"6",,terminal_output +1427,1724081,"TERMINAL",0,0,"7",,terminal_output +1428,1725181,"TERMINAL",0,0,"8",,terminal_output +1429,1726217,"TERMINAL",0,0,"9",,terminal_output +1430,1727240,"TERMINAL",0,0,"20",,terminal_output +1431,1728264,"TERMINAL",0,0,"1",,terminal_output +1432,1729284,"TERMINAL",0,0,"2",,terminal_output +1433,1730415,"TERMINAL",0,0,"3",,terminal_output +1434,1731361,"TERMINAL",0,0,"4",,terminal_output +1435,1732400,"TERMINAL",0,0,"5",,terminal_output 
+1436,1733486,"TERMINAL",0,0,"6",,terminal_output +1437,1734488,"TERMINAL",0,0,"7",,terminal_output +1438,1735539,"TERMINAL",0,0,"8",,terminal_output +1439,1736564,"TERMINAL",0,0,"9",,terminal_output +1440,1737686,"TERMINAL",0,0,"30",,terminal_output +1441,1738709,"TERMINAL",0,0,"1",,terminal_output +1442,1739772,"TERMINAL",0,0,"3",,terminal_output +1443,1740736,"TERMINAL",0,0,"4",,terminal_output +1444,1741779,"TERMINAL",0,0,"5",,terminal_output +1445,1742817,"TERMINAL",0,0,"6",,terminal_output +1446,1743933,"TERMINAL",0,0,"7",,terminal_output +1447,1744958,"TERMINAL",0,0,"8",,terminal_output +1448,1745999,"TERMINAL",0,0,"9",,terminal_output +1449,1746976,"TERMINAL",0,0,"40",,terminal_output +1450,1748014,"TERMINAL",0,0,"1",,terminal_output +1451,1749154,"TERMINAL",0,0,"2",,terminal_output +1452,1750093,"TERMINAL",0,0,"3",,terminal_output +1453,1751203,"TERMINAL",0,0,"4",,terminal_output +1454,1752227,"TERMINAL",0,0,"52",,terminal_output +1455,1753258,"TERMINAL",0,0,"6",,terminal_output +1456,1754287,"TERMINAL",0,0,"7",,terminal_output +1457,1755299,"TERMINAL",0,0,"8",,terminal_output +1458,1756427,"TERMINAL",0,0,"9",,terminal_output +1459,1757375,"TERMINAL",0,0,"50",,terminal_output +1460,1758472,"TERMINAL",0,0,"1",,terminal_output +1461,1759468,"TERMINAL",0,0,"2",,terminal_output +1462,1760521,"TERMINAL",0,0,"3",,terminal_output +1463,1761558,"TERMINAL",0,0,"4",,terminal_output +1464,1762599,"TERMINAL",0,0,"5",,terminal_output +1465,1763642,"TERMINAL",0,0,"6",,terminal_output +1466,1764720,"TERMINAL",0,0,"8",,terminal_output +1467,1765764,"TERMINAL",0,0,"9",,terminal_output +1468,1766771,"TERMINAL",0,0,"2:00",,terminal_output +1469,1767814,"TERMINAL",0,0,"1",,terminal_output +1470,1768918,"TERMINAL",0,0,"2",,terminal_output +1471,1769889,"TERMINAL",0,0,"3",,terminal_output +1472,1770930,"TERMINAL",0,0,"4",,terminal_output +1473,1771969,"TERMINAL",0,0,"5",,terminal_output +1474,1773012,"TERMINAL",0,0,"6",,terminal_output +1475,1774055,"TERMINAL",0,0,"7",,terminal_output +1476,1775094,"TERMINAL",0,0,"83",,terminal_output +1477,1776136,"TERMINAL",0,0,"9",,terminal_output +1478,1777212,"TERMINAL",0,0,"10",,terminal_output +1479,1778237,"TERMINAL",0,0,"1",,terminal_output +1480,1779364,"TERMINAL",0,0,"2",,terminal_output +1481,1780387,"TERMINAL",0,0,"3",,terminal_output +1482,1781339,"TERMINAL",0,0,"4",,terminal_output +1483,1782435,"TERMINAL",0,0,"5",,terminal_output +1484,1783424,"TERMINAL",0,0,"6",,terminal_output +1485,1784463,"TERMINAL",0,0,"7",,terminal_output +1486,1785501,"TERMINAL",0,0,"8",,terminal_output +1487,1786542,"TERMINAL",0,0,"9",,terminal_output +1488,1787585,"TERMINAL",0,0,"20",,terminal_output +1489,1788703,"TERMINAL",0,0,"1",,terminal_output +1490,1789705,"TERMINAL",0,0,"2",,terminal_output +1491,1790713,"TERMINAL",0,0,"4",,terminal_output +1492,1791782,"TERMINAL",0,0,"5",,terminal_output +1493,1792892,"TERMINAL",0,0,"638",,terminal_output +1494,1793900,"TERMINAL",0,0,"7",,terminal_output +1495,1794942,"TERMINAL",0,0,"8",,terminal_output +1496,1795928,"TERMINAL",0,0,"9",,terminal_output +1497,1796966,"TERMINAL",0,0,"30",,terminal_output +1498,1798010,"TERMINAL",0,0,"1",,terminal_output +1499,1799050,"TERMINAL",0,0,"2",,terminal_output +1500,1800151,"TERMINAL",0,0,"3",,terminal_output +1501,1801175,"TERMINAL",0,0,"4",,terminal_output +1502,1802174,"TERMINAL",0,0,"5",,terminal_output +1503,1803326,"TERMINAL",0,0,"6",,terminal_output +1504,1804348,"TERMINAL",0,0,"7",,terminal_output +1505,1805373,"TERMINAL",0,0,"8",,terminal_output 
+1506,1806396,"TERMINAL",0,0,"9",,terminal_output +1507,1807388,"TERMINAL",0,0,"4044",,terminal_output +1508,1808444,"TERMINAL",0,0,"1",,terminal_output +1509,1809573,"TERMINAL",0,0,"26",,terminal_output +1510,1810595,"TERMINAL",0,0,"3",,terminal_output +1511,1811618,"TERMINAL",0,0,"4",,terminal_output +1512,1812591,"TERMINAL",0,0,"5",,terminal_output +1513,1813631,"TERMINAL",0,0,"6",,terminal_output +1514,1814693,"TERMINAL",0,0,"7",,terminal_output +1515,1815818,"TERMINAL",0,0,"9",,terminal_output +1516,1816780,"TERMINAL",0,0,"50",,terminal_output +1517,1817865,"TERMINAL",0,0,"1",,terminal_output +1518,1818851,"TERMINAL",0,0,"2",,terminal_output +1519,1819919,"TERMINAL",0,0,"3",,terminal_output +1520,1820939,"TERMINAL",0,0,"4",,terminal_output +1521,1821975,"TERMINAL",0,0,"5",,terminal_output +1522,1823018,"TERMINAL",0,0,"6",,terminal_output +1523,1824063,"TERMINAL",0,0,"7",,terminal_output +1524,1825137,"TERMINAL",0,0,"8",,terminal_output +1525,1826161,"TERMINAL",0,0,"9",,terminal_output +1526,1827286,"TERMINAL",0,0,"3:00",,terminal_output +1527,1828223,"TERMINAL",0,0,"1",,terminal_output +1528,1829335,"TERMINAL",0,0,"2",,terminal_output +1529,1830303,"TERMINAL",0,0,"3",,terminal_output +1530,1831385,"TERMINAL",0,0,"4",,terminal_output +1531,1832406,"TERMINAL",0,0,"5",,terminal_output +1532,1833533,"TERMINAL",0,0,"6",,terminal_output +1533,1834558,"TERMINAL",0,0,"7",,terminal_output +1534,1835600,"TERMINAL",0,0,"8",,terminal_output +1535,1836604,"TERMINAL",0,0,"9",,terminal_output +1536,1837630,"TERMINAL",0,0,"10",,terminal_output +1537,1838652,"TERMINAL",0,0,"1",,terminal_output +1538,1839881,"TERMINAL",0,0,"2",,terminal_output +1539,1840843,"TERMINAL",0,0,"4",,terminal_output +1540,1841883,"TERMINAL",0,0,"5",,terminal_output +1541,1842960,"TERMINAL",0,0,"6",,terminal_output +1542,1843980,"TERMINAL",0,0,"7",,terminal_output +1543,1845006,"TERMINAL",0,0,"8",,terminal_output +1544,1846436,"TERMINAL",0,0,"97",,terminal_output +1545,1847461,"TERMINAL",0,0,"20",,terminal_output +1546,1848483,"TERMINAL",0,0,"1",,terminal_output +1547,1849456,"TERMINAL",0,0,"2",,terminal_output +1548,1850535,"TERMINAL",0,0,"3",,terminal_output +1549,1851542,"TERMINAL",0,0,"4",,terminal_output +1550,1852582,"TERMINAL",0,0,"5",,terminal_output +1551,1853707,"TERMINAL",0,0,"61",,terminal_output +1552,1854730,"TERMINAL",0,0,"7",,terminal_output +1553,1855754,"TERMINAL",0,0,"9",,terminal_output +1554,1856751,"TERMINAL",0,0,"30",,terminal_output +1555,1857803,"TERMINAL",0,0,"1",,terminal_output +1556,1858928,"TERMINAL",0,0,"2",,terminal_output +1557,1859953,"TERMINAL",0,0,"3",,terminal_output +1558,1860929,"TERMINAL",0,0,"4",,terminal_output +1559,1862002,"TERMINAL",0,0,"5",,terminal_output +1560,1863026,"TERMINAL",0,0,"6",,terminal_output +1561,1864041,"TERMINAL",0,0,"76",,terminal_output +1562,1865081,"TERMINAL",0,0,"8",,terminal_output +1563,1866120,"TERMINAL",0,0,"9",,terminal_output +1564,1867164,"TERMINAL",0,0,"40",,terminal_output +1565,1868204,"TERMINAL",0,0,"1",,terminal_output +1566,1869271,"TERMINAL",0,0,"2",,terminal_output +1567,1870294,"TERMINAL",0,0,"3",,terminal_output +1568,1871422,"TERMINAL",0,0,"4",,terminal_output +1569,1872366,"TERMINAL",0,0,"5",,terminal_output +1570,1873470,"TERMINAL",0,0,"6",,terminal_output +1571,1874454,"TERMINAL",0,0,"7",,terminal_output +1572,1875518,"TERMINAL",0,0,"8",,terminal_output +1573,1876534,"TERMINAL",0,0,"9",,terminal_output +1574,1877670,"TERMINAL",0,0,"50",,terminal_output +1575,1878692,"TERMINAL",0,0,"1",,terminal_output 
+1576,1879717,"TERMINAL",0,0,"2",,terminal_output +1577,1880741,"TERMINAL",0,0,"4",,terminal_output +1578,1881783,"TERMINAL",0,0,"5",,terminal_output +1579,1882892,"TERMINAL",0,0,"6",,terminal_output +1580,1883839,"TERMINAL",0,0,"7",,terminal_output +1581,1884939,"TERMINAL",0,0,"8",,terminal_output +1582,1885963,"TERMINAL",0,0,"9",,terminal_output +1583,1886987,"TERMINAL",0,0,"4:00",,terminal_output +1584,1888012,"TERMINAL",0,0,"1",,terminal_output +1585,1889056,"TERMINAL",0,0,"2",,terminal_output +1586,1890077,"TERMINAL",0,0,"37",,terminal_output +1587,1891196,"TERMINAL",0,0,"4",,terminal_output +1588,1892161,"TERMINAL",0,0,"5",,terminal_output +1589,1893237,"TERMINAL",0,0,"6",,terminal_output +1590,1894259,"TERMINAL",0,0,"7",,terminal_output +1591,1895385,"TERMINAL",0,0,"8",,terminal_output +1592,1896408,"TERMINAL",0,0,"9",,terminal_output +1593,1897432,"TERMINAL",0,0,"10",,terminal_output +1594,1898456,"TERMINAL",0,0,"1",,terminal_output +1595,1899480,"TERMINAL",0,0,"2",,terminal_output +1596,1900497,"TERMINAL",0,0,"3",,terminal_output +1597,1901547,"TERMINAL",0,0,"4",,terminal_output +1598,1902589,"TERMINAL",0,0,"5",,terminal_output +1599,1903678,"TERMINAL",0,0,"6",,terminal_output +1600,1904704,"TERMINAL",0,0,"7",,terminal_output +1601,1905727,"TERMINAL",0,0,"9",,terminal_output +1602,1907004,"TERMINAL",0,0,"200",,terminal_output +1603,1907979,"TERMINAL",0,0,"1",,terminal_output +1604,1909006,"TERMINAL",0,0,"2",,terminal_output +1605,1910050,"TERMINAL",0,0,"3",,terminal_output +1606,1911092,"TERMINAL",0,0,"4",,terminal_output +1607,1912178,"TERMINAL",0,0,"5",,terminal_output +1608,1913201,"TERMINAL",0,0,"66",,terminal_output +1609,1914225,"TERMINAL",0,0,"7",,terminal_output +1610,1915254,"TERMINAL",0,0,"8",,terminal_output +1611,1916376,"TERMINAL",0,0,"9",,terminal_output +1612,1917333,"TERMINAL",0,0,"30",,terminal_output +1613,1918425,"TERMINAL",0,0,"1",,terminal_output +1614,1919448,"TERMINAL",0,0,"2",,terminal_output +1615,1920447,"TERMINAL",0,0,"3",,terminal_output +1616,1921597,"TERMINAL",0,0,"4",,terminal_output +1617,1922622,"TERMINAL",0,0,"5",,terminal_output +1618,1923646,"TERMINAL",0,0,"6",,terminal_output +1619,1924670,"TERMINAL",0,0,"7",,terminal_output +1620,1925664,"TERMINAL",0,0,"8",,terminal_output +1621,1926704,"TERMINAL",0,0,"40",,terminal_output +1622,1927738,"TERMINAL",0,0,"17",,terminal_output +1623,1928819,"TERMINAL",0,0,"2",,terminal_output +1624,1929893,"TERMINAL",0,0,"3",,terminal_output +1625,1930873,"TERMINAL",0,0,"4",,terminal_output +1626,1931948,"TERMINAL",0,0,"5",,terminal_output +1627,1932953,"TERMINAL",0,0,"6",,terminal_output +1628,1934007,"TERMINAL",0,0,"7",,terminal_output +1629,1935033,"TERMINAL",0,0,"8",,terminal_output +1630,1936073,"TERMINAL",0,0,"98",,terminal_output +1631,1937114,"TERMINAL",0,0,"50",,terminal_output +1632,1938151,"TERMINAL",0,0,"1",,terminal_output +1633,1939182,"TERMINAL",0,0,"2",,terminal_output +1634,1940222,"TERMINAL",0,0,"39",,terminal_output +1635,1941260,"TERMINAL",0,0,"451",,terminal_output +1636,1942387,"TERMINAL",0,0,"5",,terminal_output +1637,1943409,"TERMINAL",0,0,"6",,terminal_output +1638,1944432,"TERMINAL",0,0,"7",,terminal_output +1639,1945421,"TERMINAL",0,0,"8",,terminal_output +1640,1946482,"TERMINAL",0,0,"9",,terminal_output +1641,1947500,"TERMINAL",0,0,"5:00",,terminal_output +1642,1948543,"TERMINAL",0,0,"1",,terminal_output +1643,1949655,"TERMINAL",0,0,"2",,terminal_output +1644,1950618,"TERMINAL",0,0,"3",,terminal_output +1645,1951684,"TERMINAL",0,0,"4",,terminal_output 
+1646,1952728,"TERMINAL",0,0,"6",,terminal_output +1647,1953738,"TERMINAL",0,0,"7",,terminal_output +1648,1954878,"TERMINAL",0,0,"8",,terminal_output +1649,1955821,"TERMINAL",0,0,"9",,terminal_output +1650,1956867,"TERMINAL",0,0,"10",,terminal_output +1651,1957962,"TERMINAL",0,0,"1",,terminal_output +1652,1958982,"TERMINAL",0,0,"2",,terminal_output +1653,1959998,"TERMINAL",0,0,"3",,terminal_output +1654,1961037,"TERMINAL",0,0,"4",,terminal_output +1655,1962086,"TERMINAL",0,0,"5",,terminal_output +1656,1963171,"TERMINAL",0,0,"61",,terminal_output +1657,1964197,"TERMINAL",0,0,"7",,terminal_output +1658,1965212,"TERMINAL",0,0,"8",,terminal_output +1659,1966253,"TERMINAL",0,0,"9",,terminal_output +1660,1967371,"TERMINAL",0,0,"20",,terminal_output +1661,1968394,"TERMINAL",0,0,"1",,terminal_output +1662,1969384,"TERMINAL",0,0,"23",,terminal_output +1663,1970429,"TERMINAL",0,0,"3",,terminal_output +1664,1971472,"TERMINAL",0,0,"4",,terminal_output +1665,1972594,"TERMINAL",0,0,"5",,terminal_output +1666,1973616,"TERMINAL",0,0,"6",,terminal_output +1667,1974603,"TERMINAL",0,0,"7",,terminal_output +1668,1975644,"TERMINAL",0,0,"8",,terminal_output +1669,1976693,"TERMINAL",0,0,"30",,terminal_output +1670,1977739,"TERMINAL",0,0,"1",,terminal_output +1671,1978841,"TERMINAL",0,0,"2",,terminal_output +1672,1979826,"TERMINAL",0,0,"3",,terminal_output +1673,1980872,"TERMINAL",0,0,"4",,terminal_output +1674,1982014,"TERMINAL",0,0,"5",,terminal_output +1675,1983039,"TERMINAL",0,0,"6",,terminal_output +1676,1984064,"TERMINAL",0,0,"7",,terminal_output +1677,1985043,"TERMINAL",0,0,"8",,terminal_output +1678,1986091,"TERMINAL",0,0,"9",,terminal_output +1679,1987135,"TERMINAL",0,0,"40",,terminal_output +1680,1988179,"TERMINAL",0,0,"1",,terminal_output +1681,1989223,"TERMINAL",0,0,"2",,terminal_output +1682,1990308,"TERMINAL",0,0,"3",,terminal_output +1683,1991332,"TERMINAL",0,0,"4",,terminal_output +1684,1992459,"TERMINAL",0,0,"5",,terminal_output +1685,1993401,"TERMINAL",0,0,"6",,terminal_output +1686,1994441,"TERMINAL",0,0,"7",,terminal_output +1687,1995482,"TERMINAL",0,0,"8",,terminal_output +1688,1996524,"TERMINAL",0,0,"9",,terminal_output +1689,1997564,"TERMINAL",0,0,"50",,terminal_output +1690,1998707,"TERMINAL",0,0,"1",,terminal_output +1691,1999637,"TERMINAL",0,0,"2",,terminal_output +1692,2000680,"TERMINAL",0,0,"4",,terminal_output +1693,2001723,"TERMINAL",0,0,"5",,terminal_output +1694,2002802,"TERMINAL",0,0,"6",,terminal_output +1695,2003828,"TERMINAL",0,0,"7",,terminal_output +1696,2004952,"TERMINAL",0,0,"8",,terminal_output +1697,2005897,"TERMINAL",0,0,"9",,terminal_output +1698,2006941,"TERMINAL",0,0,"6:006",,terminal_output +1699,2008025,"TERMINAL",0,0,"1",,terminal_output +1700,2009049,"TERMINAL",0,0,"2",,terminal_output +1701,2010073,"TERMINAL",0,0,"3",,terminal_output +1702,2011110,"TERMINAL",0,0,"4",,terminal_output +1703,2012224,"TERMINAL",0,0,"5",,terminal_output +1704,2013248,"TERMINAL",0,0,"6",,terminal_output +1705,2014245,"TERMINAL",0,0,"7",,terminal_output +1706,2015285,"TERMINAL",0,0,"8",,terminal_output +1707,2016322,"TERMINAL",0,0,"9",,terminal_output +1708,2017431,"TERMINAL",0,0,"10",,terminal_output +1709,2018469,"TERMINAL",0,0,"1",,terminal_output +1710,2019495,"TERMINAL",0,0,"2",,terminal_output +1711,2020507,"TERMINAL",0,0,"3",,terminal_output +1712,2021544,"TERMINAL",0,0,"4",,terminal_output +1713,2022667,"TERMINAL",0,0,"5",,terminal_output +1714,2023621,"TERMINAL",0,0,"6",,terminal_output +1715,2024716,"TERMINAL",0,0,"7",,terminal_output 
+1716,2025739,"TERMINAL",0,0,"9",,terminal_output +1717,2026743,"TERMINAL",0,0,"20",,terminal_output +1718,2027861,"TERMINAL",0,0,"1",,terminal_output +1719,2028844,"TERMINAL",0,0,"2",,terminal_output +1720,2029938,"TERMINAL",0,0,"3",,terminal_output +1721,2030967,"TERMINAL",0,0,"4",,terminal_output +1722,2031944,"TERMINAL",0,0,"5",,terminal_output +1723,2032978,"TERMINAL",0,0,"61",,terminal_output +1724,2034050,"TERMINAL",0,0,"7",,terminal_output +1725,2035094,"TERMINAL",0,0,"8",,terminal_output +1726,2036095,"TERMINAL",0,0,"9",,terminal_output +1727,2037135,"TERMINAL",0,0,"30",,terminal_output +1728,2038232,"TERMINAL",0,0,"1",,terminal_output +1729,2039256,"TERMINAL",0,0,"2",,terminal_output +1730,2040280,"TERMINAL",0,0,"3",,terminal_output +1731,2041283,"TERMINAL",0,0,"4",,terminal_output +1732,2042329,"TERMINAL",0,0,"5",,terminal_output +1733,2043455,"TERMINAL",0,0,"6",,terminal_output +1734,2044481,"TERMINAL",0,0,"7",,terminal_output +1735,2045443,"TERMINAL",0,0,"8",,terminal_output +1736,2046532,"TERMINAL",0,0,"9",,terminal_output +1737,2047550,"TERMINAL",0,0,"40",,terminal_output +1738,2048554,"TERMINAL",0,0,"1",,terminal_output +1739,2049593,"TERMINAL",0,0,"2",,terminal_output +1740,2050633,"TERMINAL",0,0,"32",,terminal_output +1741,2051683,"TERMINAL",0,0,"4",,terminal_output +1742,2052774,"TERMINAL",0,0,"6",,terminal_output +1743,2053799,"TERMINAL",0,0,"7",,terminal_output +1744,2054822,"TERMINAL",0,0,"8",,terminal_output +1745,2055869,"TERMINAL",0,0,"9",,terminal_output +1746,2056868,"TERMINAL",0,0,"50",,terminal_output +1747,2057904,"TERMINAL",0,0,"1",,terminal_output +1748,2059023,"TERMINAL",0,0,"20",,terminal_output +1749,2060044,"TERMINAL",0,0,"3",,terminal_output +1750,2061070,"TERMINAL",0,0,"4",,terminal_output +1751,2062060,"TERMINAL",0,0,"5",,terminal_output +1752,2063117,"TERMINAL",0,0,"6",,terminal_output +1753,2064137,"TERMINAL",0,0,"7",,terminal_output +1754,2065179,"TERMINAL",0,0,"8",,terminal_output +1755,2066212,"TERMINAL",0,0,"9",,terminal_output +1756,2067448,"TERMINAL",0,0,"7:00",,terminal_output +1757,2068396,"TERMINAL",0,0,"1",,terminal_output +1758,2069365,"TERMINAL",0,0,"2",,terminal_output +1759,2070416,"TERMINAL",0,0,"3",,terminal_output +1760,2071420,"TERMINAL",0,0,"4",,terminal_output +1761,2072538,"TERMINAL",0,0,"5",,terminal_output +1762,2073501,"TERMINAL",0,0,"6",,terminal_output +1763,2074562,"TERMINAL",0,0,"7",,terminal_output +1764,2075570,"TERMINAL",0,0,"8",,terminal_output +1765,2076668,"TERMINAL",0,0,"9",,terminal_output +1766,2077675,"TERMINAL",0,0,"101",,terminal_output +1767,2078783,"TERMINAL",0,0,"2",,terminal_output +1768,2079809,"TERMINAL",0,0,"3",,terminal_output +1769,2080833,"TERMINAL",0,0,"4",,terminal_output +1770,2081842,"TERMINAL",0,0,"5",,terminal_output +1771,2082880,"TERMINAL",0,0,"6",,terminal_output +1772,2083921,"TERMINAL",0,0,"7",,terminal_output +1773,2084981,"TERMINAL",0,0,"8",,terminal_output +1774,2085996,"TERMINAL",0,0,"9",,terminal_output +1775,2087079,"TERMINAL",0,0,"20",,terminal_output +1776,2088128,"TERMINAL",0,0,"1",,terminal_output +1777,2089168,"TERMINAL",0,0,"2",,terminal_output +1778,2090156,"TERMINAL",0,0,"3",,terminal_output +1779,2091194,"TERMINAL",0,0,"4",,terminal_output +1780,2092403,"TERMINAL",0,0,"5",,terminal_output +1781,2093325,"TERMINAL",0,0,"6",,terminal_output +1782,2094350,"TERMINAL",0,0,"7",,terminal_output +1783,2095375,"TERMINAL",0,0,"8",,terminal_output +1784,2096500,"TERMINAL",0,0,"9",,terminal_output +1785,2097523,"TERMINAL",0,0,"30",,terminal_output 
+1786,2098478,"TERMINAL",0,0,"1",,terminal_output +1787,2099571,"TERMINAL",0,0,"2",,terminal_output +1788,2100560,"TERMINAL",0,0,"3",,terminal_output +1789,2101621,"TERMINAL",0,0,"4",,terminal_output +1790,2102747,"TERMINAL",0,0,"5",,terminal_output +1791,2103685,"TERMINAL",0,0,"7",,terminal_output +1792,2104796,"TERMINAL",0,0,"8",,terminal_output +1793,2105767,"TERMINAL",0,0,"9",,terminal_output +1794,2106810,"TERMINAL",0,0,"40",,terminal_output +1795,2107866,"TERMINAL",0,0,"1",,terminal_output +1796,2108993,"TERMINAL",0,0,"2",,terminal_output +1797,2110030,"TERMINAL",0,0,"3",,terminal_output +1798,2111041,"TERMINAL",0,0,"4",,terminal_output +1799,2112063,"TERMINAL",0,0,"53",,terminal_output +1800,2113091,"TERMINAL",0,0,"6",,terminal_output +1801,2114119,"TERMINAL",0,0,"7",,terminal_output +1802,2115160,"TERMINAL",0,0,"8",,terminal_output +1803,2116263,"TERMINAL",0,0,"9",,terminal_output +1804,2117289,"TERMINAL",0,0,"50",,terminal_output +1805,2118313,"TERMINAL",0,0,"1",,terminal_output +1806,2119436,"TERMINAL",0,0,"2",,terminal_output +1807,2120369,"TERMINAL",0,0,"3",,terminal_output +1808,2121418,"TERMINAL",0,0,"4",,terminal_output +1809,2122453,"TERMINAL",0,0,"5",,terminal_output +1810,2123497,"TERMINAL",0,0,"6",,terminal_output +1811,2124546,"TERMINAL",0,0,"7",,terminal_output +1812,2125684,"TERMINAL",0,0,"8",,terminal_output +1813,2126676,"TERMINAL",0,0,"9",,terminal_output +1814,2127735,"TERMINAL",0,0,"8:00",,terminal_output +1815,2128707,"TERMINAL",0,0,"2",,terminal_output +1816,2129781,"TERMINAL",0,0,"3",,terminal_output +1817,2130787,"TERMINAL",0,0,"4",,terminal_output +1818,2131833,"TERMINAL",0,0,"5",,terminal_output +1819,2132955,"TERMINAL",0,0,"6",,terminal_output +1820,2133978,"TERMINAL",0,0,"7",,terminal_output +1821,2135002,"TERMINAL",0,0,"8",,terminal_output +1822,2136000,"TERMINAL",0,0,"9",,terminal_output +1823,2137057,"TERMINAL",0,0,"10",,terminal_output +1824,2138077,"TERMINAL",0,0,"1",,terminal_output +1825,2139121,"TERMINAL",0,0,"2",,terminal_output +1826,2140165,"TERMINAL",0,0,"3",,terminal_output +1827,2141204,"TERMINAL",0,0,"4",,terminal_output +1828,2142273,"TERMINAL",0,0,"5",,terminal_output +1829,2143283,"TERMINAL",0,0,"6",,terminal_output +1830,2144424,"TERMINAL",0,0,"7",,terminal_output +1831,2145385,"TERMINAL",0,0,"8",,terminal_output +1832,2146465,"TERMINAL",0,0,"90",,terminal_output +1833,2147492,"TERMINAL",0,0,"20",,terminal_output +1834,2148621,"TERMINAL",0,0,"1",,terminal_output +1835,2149571,"TERMINAL",0,0,"2",,terminal_output +1836,2150670,"TERMINAL",0,0,"3",,terminal_output +1837,2151684,"TERMINAL",0,0,"4",,terminal_output +1838,2152924,"TERMINAL",0,0,"60",,terminal_output +1839,2153946,"TERMINAL",0,0,"7",,terminal_output +1840,2154971,"TERMINAL",0,0,"8",,terminal_output +1841,2155954,"TERMINAL",0,0,"9",,terminal_output +1842,2157020,"TERMINAL",0,0,"30",,terminal_output +1843,2158068,"TERMINAL",0,0,"1",,terminal_output +1844,2159061,"TERMINAL",0,0,"2",,terminal_output +1845,2160099,"TERMINAL",0,0,"3",,terminal_output +1846,2161136,"TERMINAL",0,0,"4",,terminal_output +1847,2162178,"TERMINAL",0,0,"5",,terminal_output +1848,2163215,"TERMINAL",0,0,"6",,terminal_output +1849,2164289,"TERMINAL",0,0,"7",,terminal_output +1850,2165314,"TERMINAL",0,0,"8",,terminal_output +1851,2166338,"TERMINAL",0,0,"9",,terminal_output +1852,2167463,"TERMINAL",0,0,"40",,terminal_output +1853,2168416,"TERMINAL",0,0,"1",,terminal_output +1854,2169495,"TERMINAL",0,0,"2",,terminal_output +1855,2170502,"TERMINAL",0,0,"3",,terminal_output 
+1856,2171564,"TERMINAL",0,0,"4",,terminal_output +1857,2172590,"TERMINAL",0,0,"5",,terminal_output +1858,2173623,"TERMINAL",0,0,"6",,terminal_output +1859,2174733,"TERMINAL",0,0,"7",,terminal_output +1860,2175702,"TERMINAL",0,0,"9",,terminal_output +1861,2176743,"TERMINAL",0,0,"50",,terminal_output +1862,2177807,"TERMINAL",0,0,"1",,terminal_output +1863,2178830,"TERMINAL",0,0,"2",,terminal_output +1864,2179956,"TERMINAL",0,0,"3",,terminal_output +1865,2180896,"TERMINAL",0,0,"4",,terminal_output +1866,2181935,"TERMINAL",0,0,"5",,terminal_output +1867,2183027,"TERMINAL",0,0,"6",,terminal_output +1868,2184014,"TERMINAL",0,0,"7",,terminal_output +1869,2185094,"TERMINAL",0,0,"8",,terminal_output +1870,2186104,"TERMINAL",0,0,"9",,terminal_output +1871,2187141,"TERMINAL",0,0,"9:00",,terminal_output +1872,2188185,"TERMINAL",0,0,"1",,terminal_output +1873,2189224,"TERMINAL",0,0,"2",,terminal_output +1874,2190299,"TERMINAL",0,0,"3",,terminal_output +1875,2191323,"TERMINAL",0,0,"4",,terminal_output +1876,2192348,"TERMINAL",0,0,"5",,terminal_output +1877,2193473,"TERMINAL",0,0,"6",,terminal_output +1878,2194409,"TERMINAL",0,0,"7",,terminal_output +1879,2195444,"TERMINAL",0,0,"8",,terminal_output +1880,2196546,"TERMINAL",0,0,"9",,terminal_output +1881,2197519,"TERMINAL",0,0,"10",,terminal_output +1882,2198560,"TERMINAL",0,0,"1",,terminal_output +1883,2199618,"TERMINAL",0,0,"2",,terminal_output +1884,2200743,"TERMINAL",0,0,"3",,terminal_output +1885,2201680,"TERMINAL",0,0,"5",,terminal_output +1886,2202722,"TERMINAL",0,0,"6",,terminal_output +1887,2203816,"TERMINAL",0,0,"7",,terminal_output +1888,2204844,"TERMINAL",0,0,"8",,terminal_output +1889,2205863,"TERMINAL",0,0,"9",,terminal_output +1890,2206879,"TERMINAL",0,0,"20",,terminal_output +1891,2207931,"TERMINAL",0,0,"1",,terminal_output +1892,2208974,"TERMINAL",0,0,"2",,terminal_output +1893,2210065,"TERMINAL",0,0,"3",,terminal_output +1894,2211033,"TERMINAL",0,0,"4",,terminal_output +1895,2212108,"TERMINAL",0,0,"51",,terminal_output +1896,2213135,"TERMINAL",0,0,"6",,terminal_output +1897,2214161,"TERMINAL",0,0,"7",,terminal_output +1898,2215200,"TERMINAL",0,0,"8",,terminal_output +1899,2216242,"TERMINAL",0,0,"9",,terminal_output +1900,2217332,"TERMINAL",0,0,"30",,terminal_output +1901,2218356,"TERMINAL",0,0,"1",,terminal_output +1902,2219380,"TERMINAL",0,0,"2",,terminal_output +1903,2220401,"TERMINAL",0,0,"3",,terminal_output +1904,2221436,"TERMINAL",0,0,"4",,terminal_output +1905,2222479,"TERMINAL",0,0,"5",,terminal_output +1906,2223515,"TERMINAL",0,0,"6",,terminal_output +1907,2224602,"TERMINAL",0,0,"7",,terminal_output +1908,2225593,"TERMINAL",0,0,"8",,terminal_output +1909,2226668,"TERMINAL",0,0,"9",,terminal_output +1910,2227682,"TERMINAL",0,0,"40",,terminal_output +1911,2228803,"TERMINAL",0,0,"2",,terminal_output +1912,2229826,"TERMINAL",0,0,"3",,terminal_output +1913,2230851,"TERMINAL",0,0,"4",,terminal_output +1914,2231858,"TERMINAL",0,0,"5",,terminal_output +1915,2232900,"TERMINAL",0,0,"6",,terminal_output +1916,2234023,"TERMINAL",0,0,"7",,terminal_output +1917,2235048,"TERMINAL",0,0,"8",,terminal_output +1918,2236073,"TERMINAL",0,0,"9",,terminal_output +1919,2237108,"TERMINAL",0,0,"50",,terminal_output +1920,2238120,"TERMINAL",0,0,"1",,terminal_output +1921,2239146,"TERMINAL",0,0,"2",,terminal_output +1922,2240184,"TERMINAL",0,0,"3",,terminal_output +1923,2241225,"TERMINAL",0,0,"4",,terminal_output +1924,2242265,"TERMINAL",0,0,"5",,terminal_output +1925,2243304,"TERMINAL",0,0,"6",,terminal_output 
+1926,2244356,"TERMINAL",0,0,"7",,terminal_output +1927,2245388,"TERMINAL",0,0,"8",,terminal_output +1928,2246445,"TERMINAL",0,0,"9",,terminal_output +1929,2247474,"TERMINAL",0,0,"40:00",,terminal_output +1930,2248512,"TERMINAL",0,0,"1",,terminal_output +1931,2249595,"TERMINAL",0,0,"2",,terminal_output +1932,2250614,"TERMINAL",0,0,"3",,terminal_output +1933,2251668,"TERMINAL",0,0,"4",,terminal_output +1934,2252764,"TERMINAL",0,0,"6",,terminal_output +1935,2253787,"TERMINAL",0,0,"7",,terminal_output +1936,2254752,"TERMINAL",0,0,"81",,terminal_output +1937,2255835,"TERMINAL",0,0,"9",,terminal_output +1938,2256840,"TERMINAL",0,0,"10",,terminal_output +1939,2257878,"TERMINAL",0,0,"1",,terminal_output +1940,2259010,"TERMINAL",0,0,"2",,terminal_output +1941,2259962,"TERMINAL",0,0,"3",,terminal_output +1942,2261058,"TERMINAL",0,0,"4",,terminal_output +1943,2262051,"TERMINAL",0,0,"5",,terminal_output +1944,2263132,"TERMINAL",0,0,"6",,terminal_output +1945,2264136,"TERMINAL",0,0,"7",,terminal_output +1946,2265179,"TERMINAL",0,0,"8",,terminal_output +1947,2266225,"TERMINAL",0,0,"9",,terminal_output +1948,2267270,"TERMINAL",0,0,"20",,terminal_output +1949,2268314,"TERMINAL",0,0,"1",,terminal_output +1950,2269455,"TERMINAL",0,0,"2",,terminal_output +1951,2270405,"TERMINAL",0,0,"3",,terminal_output +1952,2271505,"TERMINAL",0,0,"4",,terminal_output +1953,2272497,"TERMINAL",0,0,"5",,terminal_output +1954,2273534,"TERMINAL",0,0,"6 1",,terminal_output +1955,2274678,"TERMINAL",0,0,"7",,terminal_output +1956,2275702,"TERMINAL",0,0,"8",,terminal_output +1957,2276681,"TERMINAL",0,0,"9",,terminal_output +1958,2277750,"TERMINAL",0,0,"31",,terminal_output +1959,2278774,"TERMINAL",0,0,"2",,terminal_output +1960,2279795,"TERMINAL",0,0,"3",,terminal_output +1961,2280841,"TERMINAL",0,0,"4",,terminal_output +1962,2281892,"TERMINAL",0,0,"5",,terminal_output +1963,2282931,"TERMINAL",0,0,"6",,terminal_output +1964,2283974,"TERMINAL",0,0,"7",,terminal_output +1965,2285135,"TERMINAL",0,0,"8",,terminal_output +1966,2286146,"TERMINAL",0,0,"92",,terminal_output +1967,2287171,"TERMINAL",0,0,"408",,terminal_output +1968,2288143,"TERMINAL",0,0,"1",,terminal_output +1969,2289233,"TERMINAL",0,0,"2",,terminal_output +1970,2290244,"TERMINAL",0,0,"3",,terminal_output +1971,2291273,"TERMINAL",0,0,"4",,terminal_output +1972,2292314,"TERMINAL",0,0,"5",,terminal_output +1973,2293352,"TERMINAL",0,0,"69",,terminal_output +1974,2294392,"TERMINAL",0,0,"7",,terminal_output +1975,2295437,"TERMINAL",0,0,"8",,terminal_output +1976,2296488,"TERMINAL",0,0,"9",,terminal_output +1977,2297616,"TERMINAL",0,0,"50",,terminal_output +1978,2298564,"TERMINAL",0,0,"1",,terminal_output +1979,2299667,"TERMINAL",0,0,"2",,terminal_output +1980,2300688,"TERMINAL",0,0,"3",,terminal_output +1981,2301693,"TERMINAL",0,0,"521",,terminal_output +1982,2302839,"TERMINAL",0,0,"6",,terminal_output +1983,2303863,"TERMINAL",0,0,"7",,terminal_output +1984,2304825,"TERMINAL",0,0,"8",,terminal_output +1985,2305911,"TERMINAL",0,0,"9",,terminal_output +1986,2306934,"TERMINAL",0,0,"1:00",,terminal_output +1987,2307955,"TERMINAL",0,0,"1",,terminal_output +1988,2309083,"TERMINAL",0,0,"2",,terminal_output +1989,2310108,"TERMINAL",0,0,"3",,terminal_output +1990,2311148,"TERMINAL",0,0,"4",,terminal_output +1991,2312163,"TERMINAL",0,0,"5",,terminal_output +1992,2313183,"TERMINAL",0,0,"6",,terminal_output +1993,2314206,"TERMINAL",0,0,"7",,terminal_output +1994,2315251,"TERMINAL",0,0,"8",,terminal_output +1995,2316293,"TERMINAL",0,0,"9",,terminal_output 
+1996,2317379,"TERMINAL",0,0,"10",,terminal_output +1997,2318398,"TERMINAL",0,0,"1",,terminal_output +1998,2319423,"TERMINAL",0,0,"2",,terminal_output +1999,2320464,"TERMINAL",0,0,"3",,terminal_output +2000,2321577,"TERMINAL",0,0,"4",,terminal_output +2001,2322602,"TERMINAL",0,0,"52",,terminal_output +2002,2323624,"TERMINAL",0,0,"6",,terminal_output +2003,2324650,"TERMINAL",0,0,"7",,terminal_output +2004,2325777,"TERMINAL",0,0,"9",,terminal_output +2005,2326716,"TERMINAL",0,0,"20",,terminal_output +2006,2327823,"TERMINAL",0,0,"1",,terminal_output +2007,2328847,"TERMINAL",0,0,"2",,terminal_output +2008,2329887,"TERMINAL",0,0,"3",,terminal_output +2009,2330884,"TERMINAL",0,0,"4",,terminal_output +2010,2331924,"TERMINAL",0,0,"5",,terminal_output +2011,2333027,"TERMINAL",0,0,"6",,terminal_output +2012,2334070,"TERMINAL",0,0,"7",,terminal_output +2013,2335095,"TERMINAL",0,0,"8",,terminal_output +2014,2336117,"TERMINAL",0,0,"9",,terminal_output +2015,2337131,"TERMINAL",0,0,"30",,terminal_output +2016,2338271,"TERMINAL",0,0,"1",,terminal_output +2017,2339214,"TERMINAL",0,0,"2",,terminal_output +2018,2340259,"TERMINAL",0,0,"3",,terminal_output +2019,2341298,"TERMINAL",0,0,"4",,terminal_output +2020,2342364,"TERMINAL",0,0,"5",,terminal_output +2021,2343380,"TERMINAL",0,0,"6",,terminal_output +2022,2344515,"TERMINAL",0,0,"7",,terminal_output +2023,2345479,"TERMINAL",0,0,"8",,terminal_output +2024,2346574,"TERMINAL",0,0,"9",,terminal_output +2025,2347590,"TERMINAL",0,0,"40",,terminal_output +2026,2348610,"TERMINAL",0,0,"1",,terminal_output +2027,2349635,"TERMINAL",0,0,"2",,terminal_output +2028,2350761,"TERMINAL",0,0,"3",,terminal_output +2029,2351709,"TERMINAL",0,0,"5",,terminal_output +2030,2352810,"TERMINAL",0,0,"6",,terminal_output +2031,2353834,"TERMINAL",0,0,"7",,terminal_output +2032,2354827,"TERMINAL",0,0,"8",,terminal_output +2033,2355882,"TERMINAL",0,0,"9",,terminal_output +2034,2357008,"TERMINAL",0,0,"50",,terminal_output +2035,2357947,"TERMINAL",0,0,"1",,terminal_output +2036,2359057,"TERMINAL",0,0,"2",,terminal_output +2037,2360032,"TERMINAL",0,0,"3",,terminal_output +2038,2361072,"TERMINAL",0,0,"4",,terminal_output +2039,2362116,"TERMINAL",0,0,"5",,terminal_output +2040,2363255,"TERMINAL",0,0,"6",,terminal_output +2041,2364281,"TERMINAL",0,0,"7",,terminal_output +2042,2365243,"TERMINAL",0,0,"8",,terminal_output +2043,2366283,"TERMINAL",0,0,"92",,terminal_output +2044,2367395,"TERMINAL",0,0,"2:00",,terminal_output +2045,2368371,"TERMINAL",0,0,"1",,terminal_output +2046,2369425,"TERMINAL",0,0,"2",,terminal_output +2047,2370489,"TERMINAL",0,0,"3",,terminal_output +2048,2371550,"TERMINAL",0,0,"4",,terminal_output +2049,2372575,"TERMINAL",0,0,"5",,terminal_output +2050,2373597,"TERMINAL",0,0,"6",,terminal_output +2051,2374624,"TERMINAL",0,0,"7",,terminal_output +2052,2375744,"TERMINAL",0,0,"8",,terminal_output +2053,2376683,"TERMINAL",0,0,"10",,terminal_output +2054,2377745,"TERMINAL",0,0,"1",,terminal_output +2055,2378821,"TERMINAL",0,0,"2",,terminal_output +2056,2379843,"TERMINAL",0,0,"3",,terminal_output +2057,2380842,"TERMINAL",0,0,"4",,terminal_output +2058,2381868,"TERMINAL",0,0,"53",,terminal_output +2059,2382917,"TERMINAL",0,0,"6",,terminal_output +2060,2383952,"TERMINAL",0,0,"7",,terminal_output +2061,2384988,"TERMINAL",0,0,"8",,terminal_output +2062,2386031,"TERMINAL",0,0,"9",,terminal_output +2063,2387072,"TERMINAL",0,0,"20",,terminal_output +2064,2388139,"TERMINAL",0,0,"1",,terminal_output +2065,2389161,"TERMINAL",0,0,"2",,terminal_output 
+2066,2390290,"TERMINAL",0,0,"3",,terminal_output +2067,2391242,"TERMINAL",0,0,"4",,terminal_output +2068,2392287,"TERMINAL",0,0,"5",,terminal_output +2069,2393325,"TERMINAL",0,0,"6 5",,terminal_output +2070,2394386,"TERMINAL",0,0,"7",,terminal_output +2071,2395410,"TERMINAL",0,0,"8",,terminal_output +2072,2396544,"TERMINAL",0,0,"9",,terminal_output +2073,2397496,"TERMINAL",0,0,"30",,terminal_output +2074,2398583,"TERMINAL",0,0,"1",,terminal_output +2075,2399607,"TERMINAL",0,0,"2",,terminal_output +2076,2400619,"TERMINAL",0,0,"3",,terminal_output +2077,2401662,"TERMINAL",0,0,"4",,terminal_output +2078,2402780,"TERMINAL",0,0,"6",,terminal_output +2079,2403805,"TERMINAL",0,0,"72",,terminal_output +2080,2404830,"TERMINAL",0,0,"8",,terminal_output +2081,2405854,"TERMINAL",0,0,"9",,terminal_output +2082,2406870,"TERMINAL",0,0,"40",,terminal_output +2083,2407913,"TERMINAL",0,0,"1",,terminal_output +2084,2409029,"TERMINAL",0,0,"2",,terminal_output +2085,2410053,"TERMINAL",0,0,"3",,terminal_output +2086,2411037,"TERMINAL",0,0,"4",,terminal_output +2087,2412079,"TERMINAL",0,0,"5",,terminal_output +2088,2413233,"TERMINAL",0,0,"6",,terminal_output +2089,2414250,"TERMINAL",0,0,"7",,terminal_output +2090,2415211,"TERMINAL",0,0,"8",,terminal_output +2091,2416253,"TERMINAL",0,0,"9",,terminal_output +2092,2417295,"TERMINAL",0,0,"50",,terminal_output +2093,2418338,"TERMINAL",0,0,"1",,terminal_output +2094,2419378,"TERMINAL",0,0,"2",,terminal_output +2095,2420419,"TERMINAL",0,0,"3",,terminal_output +2096,2421456,"TERMINAL",0,0,"4",,terminal_output +2097,2422500,"TERMINAL",0,0,"5",,terminal_output +2098,2423570,"TERMINAL",0,0,"6",,terminal_output +2099,2424596,"TERMINAL",0,0,"7",,terminal_output +2100,2425606,"TERMINAL",0,0,"8",,terminal_output +2101,2426668,"TERMINAL",0,0,"9",,terminal_output +2102,2427767,"TERMINAL",0,0,"3:01",,terminal_output +2103,2428791,"TERMINAL",0,0,"2",,terminal_output +2104,2429817,"TERMINAL",0,0,"3",,terminal_output +2105,2430829,"TERMINAL",0,0,"4",,terminal_output +2106,2431869,"TERMINAL",0,0,"5",,terminal_output +2107,2432913,"TERMINAL",0,0,"6",,terminal_output +2108,2433959,"TERMINAL",0,0,"7",,terminal_output +2109,2435038,"TERMINAL",0,0,"8",,terminal_output +2110,2436063,"TERMINAL",0,0,"9",,terminal_output +2111,2437208,"TERMINAL",0,0,"10",,terminal_output +2112,2438133,"TERMINAL",0,0,"1",,terminal_output +2113,2439238,"TERMINAL",0,0,"2",,terminal_output +2114,2440260,"TERMINAL",0,0,"3",,terminal_output +2115,2441256,"TERMINAL",0,0,"4",,terminal_output +2116,2442297,"TERMINAL",0,0,"5",,terminal_output +2117,2443353,"TERMINAL",0,0,"6",,terminal_output +2118,2444372,"TERMINAL",0,0,"7",,terminal_output +2119,2445432,"TERMINAL",0,0,"8",,terminal_output +2120,2446457,"TERMINAL",0,0,"9",,terminal_output +2121,2447532,"TERMINAL",0,0,"20",,terminal_output +2122,2448556,"TERMINAL",0,0,"1",,terminal_output +2123,2449683,"TERMINAL",0,0,"2",,terminal_output +2124,2450620,"TERMINAL",0,0,"3",,terminal_output +2125,2451666,"TERMINAL",0,0,"4",,terminal_output +2126,2452759,"TERMINAL",0,0,"6",,terminal_output +2127,2453747,"TERMINAL",0,0,"7",,terminal_output +2128,2454802,"TERMINAL",0,0,"8",,terminal_output +2129,2455835,"TERMINAL",0,0,"9",,terminal_output +2130,2456870,"TERMINAL",0,0,"30",,terminal_output +2131,2457977,"TERMINAL",0,0,"1",,terminal_output +2132,2458969,"TERMINAL",0,0,"2",,terminal_output +2133,2460026,"TERMINAL",0,0,"3",,terminal_output +2134,2461074,"TERMINAL",0,0,"4",,terminal_output +2135,2462129,"TERMINAL",0,0,"5",,terminal_output 
+2136,2463199,"TERMINAL",0,0,"6",,terminal_output +2137,2464175,"TERMINAL",0,0,"7",,terminal_output +2138,2465247,"TERMINAL",0,0,"8",,terminal_output +2139,2466274,"TERMINAL",0,0,"9",,terminal_output +2140,2467297,"TERMINAL",0,0,"40",,terminal_output +2141,2468336,"TERMINAL",0,0,"1",,terminal_output +2142,2469370,"TERMINAL",0,0,"2",,terminal_output +2143,2470416,"TERMINAL",0,0,"3",,terminal_output +2144,2471532,"TERMINAL",0,0,"4",,terminal_output +2145,2472519,"TERMINAL",0,0,"5",,terminal_output +2146,2473645,"TERMINAL",0,0,"6",,terminal_output +2147,2474671,"TERMINAL",0,0,"7",,terminal_output +2148,2475632,"TERMINAL",0,0,"8",,terminal_output +2149,2476682,"TERMINAL",0,0,"9",,terminal_output +2150,2477742,"TERMINAL",0,0,"51",,terminal_output +2151,2478814,"TERMINAL",0,0,"2",,terminal_output +2152,2479827,"TERMINAL",0,0,"3",,terminal_output +2153,2480842,"TERMINAL",0,0,"4",,terminal_output +2154,2481883,"TERMINAL",0,0,"5",,terminal_output +2155,2482933,"TERMINAL",0,0,"6",,terminal_output +2156,2483986,"TERMINAL",0,0,"7",,terminal_output +2157,2485011,"TERMINAL",0,0,"8",,terminal_output +2158,2486138,"TERMINAL",0,0,"9",,terminal_output +2159,2487163,"TERMINAL",0,0,"4:00",,terminal_output +2160,2488130,"TERMINAL",0,0,"1",,terminal_output +2161,2489165,"TERMINAL",0,0,"2",,terminal_output +2162,2490205,"TERMINAL",0,0,"3",,terminal_output +2163,2491245,"TERMINAL",0,0,"4",,terminal_output +2164,2492283,"TERMINAL",0,0,"5",,terminal_output +2165,2493324,"TERMINAL",0,0,"6",,terminal_output +2166,2494369,"TERMINAL",0,0,"7",,terminal_output +2167,2495461,"TERMINAL",0,0,"8",,terminal_output +2168,2496445,"TERMINAL",0,0,"9",,terminal_output +2169,2497491,"TERMINAL",0,0,"10",,terminal_output +2170,2498630,"TERMINAL",0,0,"1",,terminal_output +2171,2499653,"TERMINAL",0,0,"2",,terminal_output +2172,2500612,"TERMINAL",0,0,"39",,terminal_output +2173,2501667,"TERMINAL",0,0,"410",,terminal_output +2174,2502720,"TERMINAL",0,0,"6",,terminal_output +2175,2503750,"TERMINAL",0,0,"7",,terminal_output +2176,2504773,"TERMINAL",0,0,"8",,terminal_output +2177,2505902,"TERMINAL",0,0,"9",,terminal_output +2178,2506854,"TERMINAL",0,0,"20",,terminal_output +2179,2507949,"TERMINAL",0,0,"1",,terminal_output +2180,2508956,"TERMINAL",0,0,"2",,terminal_output +2181,2510008,"TERMINAL",0,0,"3",,terminal_output +2182,2511017,"TERMINAL",0,0,"4",,terminal_output +2183,2512060,"TERMINAL",0,0,"5",,terminal_output +2184,2513273,"TERMINAL",0,0,"61",,terminal_output +2185,2514296,"TERMINAL",0,0,"7",,terminal_output +2186,2515257,"TERMINAL",0,0,"8",,terminal_output +2187,2516346,"TERMINAL",0,0,"9",,terminal_output +2188,2517330,"TERMINAL",0,0,"30",,terminal_output +2189,2518372,"TERMINAL",0,0,"12",,terminal_output +2190,2519408,"TERMINAL",0,0,"2",,terminal_output +2191,2520446,"TERMINAL",0,0,"3",,terminal_output +2192,2521480,"TERMINAL",0,0,"4",,terminal_output +2193,2522591,"TERMINAL",0,0,"5",,terminal_output +2194,2523615,"TERMINAL",0,0,"6",,terminal_output +2195,2524602,"TERMINAL",0,0,"7",,terminal_output +2196,2525644,"TERMINAL",0,0,"8",,terminal_output +2197,2526681,"TERMINAL",0,0,"40",,terminal_output +2198,2527719,"TERMINAL",0,0,"1",,terminal_output +2199,2528840,"TERMINAL",0,0,"2",,terminal_output +2200,2529863,"TERMINAL",0,0,"3",,terminal_output +2201,2530888,"TERMINAL",0,0,"4",,terminal_output +2202,2531886,"TERMINAL",0,0,"5",,terminal_output +2203,2532924,"TERMINAL",0,0,"6",,terminal_output +2204,2533970,"TERMINAL",0,0,"7",,terminal_output +2205,2535085,"TERMINAL",0,0,"8",,terminal_output 
+2206,2536056,"TERMINAL",0,0,"9",,terminal_output +2207,2537132,"TERMINAL",0,0,"50",,terminal_output +2208,2538159,"TERMINAL",0,0,"1",,terminal_output +2209,2539282,"TERMINAL",0,0,"2",,terminal_output +2210,2540302,"TERMINAL",0,0,"3",,terminal_output +2211,2541278,"TERMINAL",0,0,"4",,terminal_output +2212,2542300,"TERMINAL",0,0,"5",,terminal_output +2213,2543382,"TERMINAL",0,0,"6",,terminal_output +2214,2544377,"TERMINAL",0,0,"7",,terminal_output +2215,2545419,"TERMINAL",0,0,"8",,terminal_output +2216,2546454,"TERMINAL",0,0,"9",,terminal_output +2217,2547579,"TERMINAL",0,0,"5:00",,terminal_output +2218,2548537,"TERMINAL",0,0,"1",,terminal_output +2219,2549581,"TERMINAL",0,0,"2",,terminal_output +2220,2550653,"TERMINAL",0,0,"3",,terminal_output +2221,2551677,"TERMINAL",0,0,"4",,terminal_output +2222,2552707,"TERMINAL",0,0,"6",,terminal_output +2223,2553826,"TERMINAL",0,0,"7",,terminal_output +2224,2554851,"TERMINAL",0,0,"8",,terminal_output +2225,2555872,"TERMINAL",0,0,"9",,terminal_output +2226,2556879,"TERMINAL",0,0,"10",,terminal_output +2227,2558023,"TERMINAL",0,0,"1",,terminal_output +2228,2558962,"TERMINAL",0,0,"2",,terminal_output +2229,2560070,"TERMINAL",0,0,"3",,terminal_output +2230,2561047,"TERMINAL",0,0,"4",,terminal_output +2231,2562120,"TERMINAL",0,0,"5",,terminal_output +2232,2563142,"TERMINAL",0,0,"6",,terminal_output +2233,2564179,"TERMINAL",0,0,"7",,terminal_output +2234,2565217,"TERMINAL",0,0,"8",,terminal_output +2235,2566258,"TERMINAL",0,0,"9",,terminal_output +2236,2567324,"TERMINAL",0,0,"20",,terminal_output +2237,2568367,"TERMINAL",0,0,"1",,terminal_output +2238,2569383,"TERMINAL",0,0,"2",,terminal_output +2239,2570425,"TERMINAL",0,0,"3",,terminal_output +2240,2571466,"TERMINAL",0,0,"4",,terminal_output +2241,2572515,"TERMINAL",0,0,"5",,terminal_output +2242,2573551,"TERMINAL",0,0,"6",,terminal_output +2243,2574611,"TERMINAL",0,0,"7",,terminal_output +2244,2575626,"TERMINAL",0,0,"8",,terminal_output +2245,2576681,"TERMINAL",0,0,"9",,terminal_output +2246,2577789,"TERMINAL",0,0,"31",,terminal_output +2247,2578765,"TERMINAL",0,0,"2",,terminal_output +2248,2579836,"TERMINAL",0,0,"3",,terminal_output +2249,2580962,"TERMINAL",0,0,"4",,terminal_output +2250,2581896,"TERMINAL",0,0,"5",,terminal_output +2251,2582994,"TERMINAL",0,0,"6",,terminal_output +2252,2583983,"TERMINAL",0,0,"7",,terminal_output +2253,2585058,"TERMINAL",0,0,"8",,terminal_output +2254,2586069,"TERMINAL",0,0,"9",,terminal_output +2255,2587209,"TERMINAL",0,0,"40",,terminal_output +2256,2588148,"TERMINAL",0,0,"1",,terminal_output +2257,2589199,"TERMINAL",0,0,"2",,terminal_output +2258,2590243,"TERMINAL",0,0,"3",,terminal_output +2259,2591306,"TERMINAL",0,0,"4",,terminal_output +2260,2592331,"TERMINAL",0,0,"5",,terminal_output +2261,2593374,"TERMINAL",0,0,"6",,terminal_output +2262,2594409,"TERMINAL",0,0,"7",,terminal_output +2263,2595453,"TERMINAL",0,0,"8",,terminal_output +2264,2596526,"TERMINAL",0,0,"9",,terminal_output +2265,2597537,"TERMINAL",0,0,"50",,terminal_output +2266,2598575,"TERMINAL",0,0,"1",,terminal_output +2267,2599700,"TERMINAL",0,0,"2",,terminal_output +2268,2600723,"TERMINAL",0,0,"3",,terminal_output +2269,2601699,"TERMINAL",0,0,"5",,terminal_output +2270,2602772,"TERMINAL",0,0,"6",,terminal_output +2271,2603898,"TERMINAL",0,0,"7",,terminal_output +2272,2604927,"TERMINAL",0,0,"8",,terminal_output +2273,2605960,"TERMINAL",0,0,"9",,terminal_output +2274,2606972,"TERMINAL",0,0,"6:00",,terminal_output +2275,2607996,"TERMINAL",0,0,"1",,terminal_output 
+2276,2609020,"TERMINAL",0,0,"2",,terminal_output +2277,2610146,"TERMINAL",0,0,"3",,terminal_output +2278,2611084,"TERMINAL",0,0,"4",,terminal_output +2279,2612193,"TERMINAL",0,0,"5",,terminal_output +2280,2613157,"TERMINAL",0,0,"6",,terminal_output +2281,2614240,"TERMINAL",0,0,"7",,terminal_output +2282,2615252,"TERMINAL",0,0,"8",,terminal_output +2283,2616321,"TERMINAL",0,0,"9",,terminal_output +2284,2617418,"TERMINAL",0,0,"10",,terminal_output +2285,2618384,"TERMINAL",0,0,"1",,terminal_output +2286,2619426,"TERMINAL",0,0,"2",,terminal_output +2287,2620467,"TERMINAL",0,0,"3",,terminal_output +2288,2621508,"TERMINAL",0,0,"4",,terminal_output +2289,2622638,"TERMINAL",0,0,"5",,terminal_output +2290,2623663,"TERMINAL",0,0,"6",,terminal_output +2291,2624630,"TERMINAL",0,0,"7",,terminal_output +2292,2625709,"TERMINAL",0,0,"8",,terminal_output +2293,2626717,"TERMINAL",0,0,"20",,terminal_output +2294,2627763,"TERMINAL",0,0,"1",,terminal_output +2295,2628803,"TERMINAL",0,0,"2",,terminal_output +2296,2629847,"TERMINAL",0,0,"3",,terminal_output +2297,2630913,"TERMINAL",0,0,"4",,terminal_output +2298,2631943,"TERMINAL",0,0,"5",,terminal_output +2299,2632980,"TERMINAL",0,0,"6",,terminal_output +2300,2634108,"TERMINAL",0,0,"71",,terminal_output +2301,2635137,"TERMINAL",0,0,"8",,terminal_output +2302,2636100,"TERMINAL",0,0,"9",,terminal_output +2303,2637140,"TERMINAL",0,0,"30",,terminal_output +2304,2638205,"TERMINAL",0,0,"1",,terminal_output +2305,2639227,"TERMINAL",0,0,"2",,terminal_output +2306,2640357,"TERMINAL",0,0,"3",,terminal_output +2307,2641335,"TERMINAL",0,0,"4",,terminal_output +2308,2642363,"TERMINAL",0,0,"5",,terminal_output +2309,2643417,"TERMINAL",0,0,"62",,terminal_output +2310,2644469,"TERMINAL",0,0,"7",,terminal_output +2311,2645576,"TERMINAL",0,0,"8",,terminal_output +2312,2646546,"TERMINAL",0,0,"9",,terminal_output +2313,2647624,"TERMINAL",0,0,"40",,terminal_output +2314,2648624,"TERMINAL",0,0,"1",,terminal_output +2315,2649670,"TERMINAL",0,0,"2",,terminal_output +2316,2650704,"TERMINAL",0,0,"4",,terminal_output +2317,2651745,"TERMINAL",0,0,"5",,terminal_output +2318,2652787,"TERMINAL",0,0,"6",,terminal_output +2319,2653870,"TERMINAL",0,0,"7",,terminal_output +2320,2654898,"TERMINAL",0,0,"8",,terminal_output +2321,2655920,"TERMINAL",0,0,"9",,terminal_output +2322,2656952,"TERMINAL",0,0,"50",,terminal_output +2323,2657999,"TERMINAL",0,0,"1",,terminal_output +2324,2659039,"TERMINAL",0,0,"2",,terminal_output +2325,2660079,"TERMINAL",0,0,"3",,terminal_output +2326,2661159,"TERMINAL",0,0,"4",,terminal_output +2327,2662172,"TERMINAL",0,0,"5",,terminal_output +2328,2663210,"TERMINAL",0,0,"6",,terminal_output +2329,2664316,"TERMINAL",0,0,"7",,terminal_output +2330,2665361,"TERMINAL",0,0,"8",,terminal_output +2331,2666366,"TERMINAL",0,0,"9",,terminal_output +2332,2667375,"TERMINAL",0,0,"7:00",,terminal_output +2333,2668423,"TERMINAL",0,0,"1",,terminal_output +2334,2669479,"TERMINAL",0,0,"2",,terminal_output +2335,2670518,"TERMINAL",0,0,"3",,terminal_output +2336,2671621,"TERMINAL",0,0,"4",,terminal_output +2337,2672609,"TERMINAL",0,0,"5",,terminal_output +2338,2673735,"TERMINAL",0,0,"6",,terminal_output +2339,2674759,"TERMINAL",0,0,"8",,terminal_output +2340,2675783,"TERMINAL",0,0,"9",,terminal_output +2341,2676771,"TERMINAL",0,0,"10",,terminal_output +2342,2677832,"TERMINAL",0,0,"1",,terminal_output +2343,2678861,"TERMINAL",0,0,"2",,terminal_output +2344,2679899,"TERMINAL",0,0,"3",,terminal_output +2345,2680938,"TERMINAL",0,0,"4",,terminal_output 
+2346,2682029,"TERMINAL",0,0,"5",,terminal_output +2347,2683015,"TERMINAL",0,0,"6",,terminal_output +2348,2684079,"TERMINAL",0,0,"7",,terminal_output +2349,2685105,"TERMINAL",0,0,"8",,terminal_output +2350,2686229,"TERMINAL",0,0,"9",,terminal_output +2351,2687253,"TERMINAL",0,0,"20",,terminal_output +2352,2688273,"TERMINAL",0,0,"1",,terminal_output +2353,2689294,"TERMINAL",0,0,"2",,terminal_output +2354,2690317,"TERMINAL",0,0,"3",,terminal_output +2355,2691370,"TERMINAL",0,0,"4",,terminal_output +2356,2692399,"TERMINAL",0,0,"5",,terminal_output +2357,2693442,"TERMINAL",0,0,"6",,terminal_output +2358,2694483,"TERMINAL",0,0,"7",,terminal_output +2359,2695547,"TERMINAL",0,0,"8",,terminal_output +2360,2696587,"TERMINAL",0,0,"9",,terminal_output +2361,2703470,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0615 jafar]$ ",,terminal_output +2362,2703471,"TERMINAL",0,0,"301345Every 1.0s: sinfo_t_idlehkn1991.localdomain: Thu Jul 3 13:47:35 2025Partition dev_cpuonly: 12 nodes idle\rPartition cpuonly: 12 nodes idle\rPartition dev_accelerated:\t 2 nodes idle\rPartition accelerated:\t 0 nodes idle\rPartition dev_accelerated-h100 :\t 1 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 7 nodes idle6",,terminal_output +2363,2703567,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0615 jafar]$ ",,terminal_output +2364,2706530,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0615 jafar]$ ",,terminal_output +2365,2706530,"TERMINAL",0,0,"789",,terminal_output +2366,2707581,"TERMINAL",0,0,"403",,terminal_output +2367,2708084,"train_tokenizer.py",9576,0,"",python,selection_mouse +2368,2708559,"TERMINAL",0,0,"1",,terminal_output +2369,2709602,"TERMINAL",0,0,"2",,terminal_output +2370,2710360,"TERMINAL",0,0,"[?25lqu[?25h",,terminal_output +2371,2710420,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +2372,2710515,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +2373,2710644,"TERMINAL",0,0,"[?25lu[?25h[?25le[?25h",,terminal_output +2374,2710658,"TERMINAL",0,0,"3",,terminal_output +2375,2710794,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0615.localdomain: Thu Jul 3 13:47:44 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3313564 accelerat train_to tum_cte0 R 6:16:28\t 4 hkn[0405,0532,0729,0814]3314634 accelerat interact tum_cte0 R44:55\t 2 hkn[0615-0616]",,terminal_output +2376,2711454,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0615:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0615 jafar]$ ",,terminal_output +2377,2711682,"TERMINAL",0,0,"5",,terminal_output +2378,2712501,"TERMINAL",0,0,"queue",,terminal_output +2379,2712711,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0615.localdomain: Thu Jul 3 13:47:46 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3313564 accelerat train_to tum_cte0 R 6:16:30\t 4 hkn[0405,0532,0729,0814]3314634 accelerat interact tum_cte0 R44:57\t 2 hkn[0615-0616]",,terminal_output +2380,2712738,"TERMINAL",0,0,"6",,terminal_output +2381,2713776,"TERMINAL",0,0,"718",,terminal_output +2382,2713789,"TERMINAL",0,0,"7",,terminal_output +2383,2713860,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0615:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0615 jafar]$ ",,terminal_output +2384,2714903,"TERMINAL",0,0,"8",,terminal_output +2385,2715851,"TERMINAL",0,0,"9",,terminal_output +2386,2716903,"TERMINAL",0,0,"50",,terminal_output +2387,2717973,"TERMINAL",0,0,"1",,terminal_output +2388,2719001,"TERMINAL",0,0,"2",,terminal_output +2389,2720031,"TERMINAL",0,0,"3",,terminal_output 
+2390,2721160,"TERMINAL",0,0,"4",,terminal_output +2391,2722106,"TERMINAL",0,0,"5",,terminal_output +2392,2723197,"TERMINAL",0,0,"6",,terminal_output +2393,2724204,"TERMINAL",0,0,"7",,terminal_output +2394,2725247,"TERMINAL",0,0,"84",,terminal_output +2395,2726381,"TERMINAL",0,0,"9",,terminal_output +2396,2727319,"TERMINAL",0,0,"8:00",,terminal_output +2397,2728521,"TERMINAL",0,0,"1",,terminal_output +2398,2729446,"TERMINAL",0,0,"2",,terminal_output +2399,2730499,"TERMINAL",0,0,"3",,terminal_output +2400,2731504,"TERMINAL",0,0,"4",,terminal_output +2401,2732550,"TERMINAL",0,0,"5",,terminal_output +2402,2732582,"TERMINAL",0,0,"watch",,terminal_focus +2403,2733593,"TERMINAL",0,0,"6",,terminal_output +2404,2735118,"TERMINAL",0,0,"",,terminal_focus +2405,2735212,"TERMINAL",0,0,"7",,terminal_output +2406,2735677,"TERMINAL",0,0,"8",,terminal_output +2407,2736716,"TERMINAL",0,0,"10",,terminal_output +2408,2737760,"TERMINAL",0,0,"1",,terminal_output +2409,2738803,"TERMINAL",0,0,"2",,terminal_output +2410,2739845,"TERMINAL",0,0,"3",,terminal_output +2411,2740912,"TERMINAL",0,0,"4",,terminal_output +2412,2741952,"TERMINAL",0,0,"5",,terminal_output +2413,2742999,"TERMINAL",0,0,"6",,terminal_output +2414,2744020,"TERMINAL",0,0,"7",,terminal_output +2415,2745110,"TERMINAL",0,0,"8",,terminal_output +2416,2746134,"TERMINAL",0,0,"9",,terminal_output +2417,2747159,"TERMINAL",0,0,"20",,terminal_output +2418,2748172,"TERMINAL",0,0,"1",,terminal_output +2419,2749228,"TERMINAL",0,0,"2",,terminal_output +2420,2750312,"TERMINAL",0,0,"3",,terminal_output +2421,2751294,"TERMINAL",0,0,"4",,terminal_output +2422,2752332,"TERMINAL",0,0,"5",,terminal_output +2423,2753393,"TERMINAL",0,0,"63",,terminal_output +2424,2754430,"TERMINAL",0,0,"7",,terminal_output +2425,2754747,"TERMINAL",0,0,"salloc --time=01:30:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5",,terminal_command +2426,2754820,"TERMINAL",0,0,"]633;E;2025-07-03 13:48:28 salloc --time=01:30:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5;3d7e79a2-e817-4582-b692-6332553dbb63]633;Csalloc: Pending job allocation 3314747\r\nsalloc: job 3314747 queued and waiting for resources\r\n",,terminal_output +2427,2755479,"TERMINAL",0,0,"8",,terminal_output +2428,2756560,"TERMINAL",0,0,"9",,terminal_output +2429,2757144,"TERMINAL",0,0,"srun",,terminal_focus +2430,2757582,"TERMINAL",0,0,"30",,terminal_output +2431,2758211,"TERMINAL",0,0,"[?2004l\r\r\nexit\r\n",,terminal_output +2432,2758280,"TERMINAL",0,0,"salloc: Relinquishing job allocation 3314634\r\nsalloc: Job allocation 3314634 has been revoked.\r\n",,terminal_output +2433,2758627,"TERMINAL",0,0,"12",,terminal_output +2434,2759811,"TERMINAL",0,0,"salloc",,terminal_focus +2435,2759857,"TERMINAL",0,0,"2",,terminal_output +2436,2760776,"TERMINAL",0,0,"4",,terminal_output +2437,2761742,"TERMINAL",0,0,"5",,terminal_output +2438,2762113,"TERMINAL",0,0,"watch",,terminal_focus +2439,2762786,"TERMINAL",0,0,"6",,terminal_output +2440,2763826,"TERMINAL",0,0,"7",,terminal_output +2441,2764976,"TERMINAL",0,0,"8",,terminal_output +2442,2766092,"TERMINAL",0,0,"9",,terminal_output +2443,2766971,"TERMINAL",0,0,"404",,terminal_output +2444,2768048,"TERMINAL",0,0,"salloc",,terminal_focus +2445,2768123,"TERMINAL",0,0,"1",,terminal_output +2446,2769072,"TERMINAL",0,0,"2",,terminal_output +2447,2770170,"TERMINAL",0,0,"3",,terminal_output +2448,2771225,"TERMINAL",0,0,"4",,terminal_output +2449,2772246,"TERMINAL",0,0,"5",,terminal_output 
+2450,2773304,"TERMINAL",0,0,"6",,terminal_output +2451,2774294,"TERMINAL",0,0,"7",,terminal_output +2452,2775311,"TERMINAL",0,0,"8",,terminal_output +2453,2776355,"TERMINAL",0,0,"9",,terminal_output +2454,2777403,"TERMINAL",0,0,"50",,terminal_output +2455,2778443,"TERMINAL",0,0,"1",,terminal_output +2456,2779487,"TERMINAL",0,0,"2",,terminal_output +2457,2780535,"TERMINAL",0,0,"3",,terminal_output +2458,2781585,"TERMINAL",0,0,"4",,terminal_output +2459,2782694,"TERMINAL",0,0,"5",,terminal_output +2460,2783706,"TERMINAL",0,0,"6",,terminal_output +2461,2784740,"TERMINAL",0,0,"8",,terminal_output +2462,2785865,"TERMINAL",0,0,"9",,terminal_output +2463,2786811,"TERMINAL",0,0,"9:00",,terminal_output +2464,2787844,"TERMINAL",0,0,"1",,terminal_output +2465,2788716,"TERMINAL",0,0,"watch",,terminal_focus +2466,2788934,"TERMINAL",0,0,"2",,terminal_output +2467,2789977,"TERMINAL",0,0,"3",,terminal_output +2468,2790889,"TERMINAL",0,0,"salloc",,terminal_focus +2469,2790978,"TERMINAL",0,0,"4",,terminal_output +2470,2792116,"TERMINAL",0,0,"5",,terminal_output +2471,2793136,"TERMINAL",0,0,"6",,terminal_output +2472,2794146,"TERMINAL",0,0,"7",,terminal_output +2473,2795188,"TERMINAL",0,0,"8",,terminal_output +2474,2796219,"TERMINAL",0,0,"9",,terminal_output +2475,2797285,"TERMINAL",0,0,"10",,terminal_output +2476,2798289,"TERMINAL",0,0,"1",,terminal_output +2477,2799326,"TERMINAL",0,0,"2",,terminal_output +2478,2800398,"TERMINAL",0,0,"3",,terminal_output +2479,2801421,"TERMINAL",0,0,"4",,terminal_output +2480,2802467,"TERMINAL",0,0,"5",,terminal_output +2481,2803516,"TERMINAL",0,0,"6",,terminal_output +2482,2804550,"TERMINAL",0,0,"7",,terminal_output +2483,2805629,"TERMINAL",0,0,"8",,terminal_output +2484,2806672,"TERMINAL",0,0,"9",,terminal_output +2485,2807688,"TERMINAL",0,0,"2111",,terminal_output +2486,2808803,"TERMINAL",0,0,"2",,terminal_output +2487,2809830,"TERMINAL",0,0,"3",,terminal_output +2488,2810851,"TERMINAL",0,0,"4",,terminal_output +2489,2811869,"TERMINAL",0,0,"5",,terminal_output +2490,2813002,"TERMINAL",0,0,"6",,terminal_output +2491,2813957,"TERMINAL",0,0,"7",,terminal_output +2492,2815051,"TERMINAL",0,0,"8",,terminal_output +2493,2816074,"TERMINAL",0,0,"9",,terminal_output +2494,2817091,"TERMINAL",0,0,"30",,terminal_output +2495,2818227,"TERMINAL",0,0,"1",,terminal_output +2496,2819249,"TERMINAL",0,0,"2",,terminal_output +2497,2820230,"TERMINAL",0,0,"3",,terminal_output +2498,2821292,"TERMINAL",0,0,"4",,terminal_output +2499,2822314,"TERMINAL",0,0,"5",,terminal_output +2500,2823446,"TERMINAL",0,0,"6",,terminal_output +2501,2824472,"TERMINAL",0,0,"7",,terminal_output +2502,2825447,"TERMINAL",0,0,"8",,terminal_output +2503,2826492,"TERMINAL",0,0,"9",,terminal_output +2504,2827588,"TERMINAL",0,0,"40",,terminal_output +2505,2828668,"TERMINAL",0,0,"1",,terminal_output +2506,2829734,"TERMINAL",0,0,"2",,terminal_output +2507,2830721,"TERMINAL",0,0,"4",,terminal_output +2508,2831759,"TERMINAL",0,0,"5",,terminal_output +2509,2832871,"TERMINAL",0,0,"6",,terminal_output +2510,2833892,"TERMINAL",0,0,"75",,terminal_output +2511,2834938,"TERMINAL",0,0,"8",,terminal_output +2512,2836043,"TERMINAL",0,0,"9",,terminal_output +2513,2837003,"TERMINAL",0,0,"50",,terminal_output +2514,2838060,"TERMINAL",0,0,"1",,terminal_output +2515,2838932,"TERMINAL",0,0,"^Csalloc: Job allocation 3314747 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;1",,terminal_output +2516,2839123,"TERMINAL",0,0,"2",,terminal_output +2517,2839380,"TERMINAL",0,0,"salloc 
--time=01:30:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5",,terminal_command +2518,2839469,"TERMINAL",0,0,"]633;E;2025-07-03 13:49:52 salloc --time=01:30:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5;3d7e79a2-e817-4582-b692-6332553dbb63]633;Csalloc: Pending job allocation 3314819\r\nsalloc: job 3314819 queued and waiting for resources\r\n",,terminal_output +2519,2840138,"TERMINAL",0,0,"3",,terminal_output +2520,2841215,"TERMINAL",0,0,"^Csalloc: Job allocation 3314819 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;1",,terminal_output +2521,2841267,"TERMINAL",0,0,"4",,terminal_output +2522,2842288,"TERMINAL",0,0,"5",,terminal_output +2523,2843313,"TERMINAL",0,0,"6",,terminal_output +2524,2844322,"TERMINAL",0,0,"7",,terminal_output +2525,2845407,"TERMINAL",0,0,"8",,terminal_output +2526,2846487,"TERMINAL",0,0,"96",,terminal_output +2527,2846746,"TERMINAL",0,0,"salloc --time=01:30:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5",,terminal_command +2528,2846796,"TERMINAL",0,0,"]633;E;2025-07-03 13:50:00 salloc --time=01:30:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5;3d7e79a2-e817-4582-b692-6332553dbb63]633;C",,terminal_output +2529,2846849,"TERMINAL",0,0,"salloc: Pending job allocation 3314820\r\nsalloc: job 3314820 queued and waiting for resources\r\n",,terminal_output +2530,2847458,"TERMINAL",0,0,"50:00",,terminal_output +2531,2848542,"TERMINAL",0,0,"1",,terminal_output +2532,2849553,"TERMINAL",0,0,"2",,terminal_output +2533,2849964,"TERMINAL",0,0,"^Csalloc: Job allocation 3314820 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;1",,terminal_output +2534,2850615,"TERMINAL",0,0,"3",,terminal_output +2535,2851649,"TERMINAL",0,0,"4",,terminal_output +2536,2852675,"TERMINAL",0,0,"5",,terminal_output +2537,2853758,"TERMINAL",0,0,"7",,terminal_output +2538,2854317,"TERMINAL",0,0,"salloc --time=01:30:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5",,terminal_command +2539,2854381,"TERMINAL",0,0,"]633;E;2025-07-03 13:50:07 salloc --time=01:30:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5;3d7e79a2-e817-4582-b692-6332553dbb63]633;Csalloc: error: Job submit/allocate failed: Requested time limit is invalid (missing or exceeds some limit)\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;1",,terminal_output +2540,2854782,"TERMINAL",0,0,"8",,terminal_output +2541,2855841,"TERMINAL",0,0,"9",,terminal_output +2542,2856846,"TERMINAL",0,0,"10",,terminal_output +2543,2857933,"TERMINAL",0,0,"1",,terminal_output +2544,2858638,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5",,terminal_command +2545,2858712,"TERMINAL",0,0,"]633;E;2025-07-03 13:50:11 salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5;3d7e79a2-e817-4582-b692-6332553dbb63]633;Csalloc: Granted job allocation 3314821\r\n",,terminal_output +2546,2858825,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output +2547,2858935,"TERMINAL",0,0,"21",,terminal_output +2548,2860006,"TERMINAL",0,0,"3",,terminal_output +2549,2861029,"TERMINAL",0,0,"4",,terminal_output +2550,2862064,"TERMINAL",0,0,"5",,terminal_output 
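The salloc attempts recorded above fail on the dev_accelerated partition with "Requested time limit is invalid" at --time=01:30:00 and succeed at --time=01:00:00, consistent with the dev partitions enforcing a shorter maximum walltime than the accelerated partition. Below is a minimal sketch of checking a partition's limit before submitting; the partition names come from the log, but the helper itself is hypothetical and assumes a standard Slurm installation where `sinfo -h -p <name> -o %l` prints the TIMELIMIT column:

```python
import subprocess

def partition_time_limit(partition: str) -> str:
    """Hypothetical helper: query Slurm for a partition's maximum walltime.

    Uses only standard sinfo flags: -h (no header), -p (partition),
    -o "%l" (print the TIMELIMIT column, e.g. "1:00:00" or "infinite").
    """
    out = subprocess.run(
        ["sinfo", "-h", "-p", partition, "-o", "%l"],
        check=True, capture_output=True, text=True,
    )
    # sinfo may print one line per node state; the limit is the same token.
    return out.stdout.split()[0]

# Partition names as they appear in the recording above.
for p in ("dev_accelerated", "accelerated"):
    print(p, "max walltime:", partition_time_limit(p))
```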
+2551,2863107,"TERMINAL",0,0,"6",,terminal_output +2552,2864203,"TERMINAL",0,0,"7",,terminal_output +2553,2865187,"TERMINAL",0,0,"8",,terminal_output +2554,2866250,"TERMINAL",0,0,"9",,terminal_output +2555,2867279,"TERMINAL",0,0,"20",,terminal_output +2556,2868443,"TERMINAL",0,0,"10",,terminal_output +2557,2869357,"TERMINAL",0,0,"2",,terminal_output +2558,2870461,"TERMINAL",0,0,"3",,terminal_output +2559,2871474,"TERMINAL",0,0,"4",,terminal_output +2560,2872489,"TERMINAL",0,0,"5",,terminal_output +2561,2873932,"TERMINAL",0,0,"6 2",,terminal_output +2562,2875057,"TERMINAL",0,0,"8",,terminal_output +2563,2876007,"TERMINAL",0,0,"9",,terminal_output +2564,2877054,"TERMINAL",0,0,"30",,terminal_output +2565,2878107,"TERMINAL",0,0,"1",,terminal_output +2566,2879148,"TERMINAL",0,0,"2",,terminal_output +2567,2880186,"TERMINAL",0,0,"3",,terminal_output +2568,2881231,"TERMINAL",0,0,"4",,terminal_output +2569,2882277,"TERMINAL",0,0,"5",,terminal_output +2570,2883321,"TERMINAL",0,0,"6",,terminal_output +2571,2884369,"TERMINAL",0,0,"7",,terminal_output +2572,2885423,"TERMINAL",0,0,"8",,terminal_output +2573,2885847,"TERMINAL",0,0,"salloc: Nodes hkn0402 are ready for job\r\n",,terminal_output +2574,2886461,"TERMINAL",0,0,"9",,terminal_output +2575,2886700,"TERMINAL",0,0,"]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h[tum_cte0515@hkn0402 jafar]$ ",,terminal_output +2576,2887552,"TERMINAL",0,0,"40",,terminal_output +2577,2888552,"TERMINAL",0,0,"1",,terminal_output +2578,2889598,"TERMINAL",0,0,"2",,terminal_output +2579,2890724,"TERMINAL",0,0,"3",,terminal_output +2580,2891690,"TERMINAL",0,0,"5",,terminal_output +2581,2892744,"TERMINAL",0,0,"6",,terminal_output +2582,2893796,"TERMINAL",0,0,"7",,terminal_output +2583,2894764,"TERMINAL",0,0,"s",,terminal_output +2584,2894830,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +2585,2894831,"TERMINAL",0,0,"8",,terminal_output +2586,2895023,"TERMINAL",0,0,"[?25lu[?25h[?25lr[?25h",,terminal_output +2587,2895290,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +2588,2895363,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +2589,2895430,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +2590,2895576,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +2591,2895629,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +2592,2895841,"TERMINAL",0,0,"env/",,terminal_output +2593,2895902,"TERMINAL",0,0,"9",,terminal_output +2594,2896092,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +2595,2896141,"TERMINAL",0,0,"in/",,terminal_output +2596,2896412,"TERMINAL",0,0,"[?25la[?25h[?25lc[?25h",,terminal_output +2597,2896596,"TERMINAL",0,0,"tivate",,terminal_output +2598,2896825,"TERMINAL",0,0,"[?25l[?2004l\r[?25h]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +2599,2896935,"TERMINAL",0,0,"50",,terminal_output +2600,2897112,"TERMINAL",0,0,"l",,terminal_output +2601,2897241,"TERMINAL",0,0,"[?25ls[?25h[?25l[?2004l\r[?25h",,terminal_output +2602,2897405,"TERMINAL",0,0,"data frame-knoms.png generate_dataset.py LICENSE overfit_dir read_tf_record.py sample.py slurm train_dynamics.py utils\r\ndata_tfrecord_duplicated frame.png genie.py logs __pycache__ requirements-franz.txt scripts_cremers slurm-3309772.out train_lam.py wandb\r\ndata_tfrecords frames gifs models README.md requirements.txt scripts_horeka tests train_tokenizer.py\r\n]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +2603,2897962,"TERMINAL",0,0,"1",,terminal_output +2604,2898508,"train_tokenizer.py",0,0,"",python,tab 
+2605,2898509,"train_tokenizer.py",7794,0,"",python,selection_mouse +2606,2898574,"train_tokenizer.py",7793,0,"",python,selection_command +2607,2899017,"TERMINAL",0,0,"2",,terminal_output +2608,2900160,"TERMINAL",0,0,"3",,terminal_output +2609,2901173,"TERMINAL",0,0,"4",,terminal_output +2610,2902148,"TERMINAL",0,0,"5",,terminal_output +2611,2903408,"TERMINAL",0,0,"6",,terminal_output +2612,2904232,"TERMINAL",0,0,"7",,terminal_output +2613,2905369,"TERMINAL",0,0,"8",,terminal_output +2614,2906390,"TERMINAL",0,0,"9",,terminal_output +2615,2907363,"TERMINAL",0,0,"1:00",,terminal_output +2616,2908480,"TERMINAL",0,0,"1",,terminal_output +2617,2909498,"TERMINAL",0,0,"2",,terminal_output +2618,2910506,"TERMINAL",0,0,"3",,terminal_output +2619,2911544,"TERMINAL",0,0,"4",,terminal_output +2620,2912592,"TERMINAL",0,0,"5",,terminal_output +2621,2913636,"TERMINAL",0,0,"6",,terminal_output +2622,2914679,"TERMINAL",0,0,"8",,terminal_output +2623,2915784,"TERMINAL",0,0,"9",,terminal_output +2624,2916771,"TERMINAL",0,0,"10",,terminal_output +2625,2917813,"TERMINAL",0,0,"1",,terminal_output +2626,2918884,"TERMINAL",0,0,"2",,terminal_output +2627,2920010,"TERMINAL",0,0,"3",,terminal_output +2628,2921034,"TERMINAL",0,0,"4",,terminal_output +2629,2922059,"TERMINAL",0,0,"5",,terminal_output +2630,2923083,"TERMINAL",0,0,"6",,terminal_output +2631,2924088,"TERMINAL",0,0,"7",,terminal_output +2632,2925128,"TERMINAL",0,0,"8",,terminal_output +2633,2926168,"TERMINAL",0,0,"9",,terminal_output +2634,2927214,"TERMINAL",0,0,"20",,terminal_output +2635,2928306,"TERMINAL",0,0,"1",,terminal_output +2636,2929321,"TERMINAL",0,0,"2",,terminal_output +2637,2930360,"TERMINAL",0,0,"3",,terminal_output +2638,2931400,"TERMINAL",0,0,"4",,terminal_output +2639,2932504,"TERMINAL",0,0,"5",,terminal_output +2640,2933529,"TERMINAL",0,0,"63",,terminal_output +2641,2934542,"TERMINAL",0,0,"74",,terminal_output +2642,2935576,"TERMINAL",0,0,"8",,terminal_output +2643,2936669,"TERMINAL",0,0,"9",,terminal_output +2644,2937737,"TERMINAL",0,0,"30",,terminal_output +2645,2938754,"TERMINAL",0,0,"2",,terminal_output +2646,2939878,"TERMINAL",0,0,"3",,terminal_output +2647,2940901,"TERMINAL",0,0,"4",,terminal_output +2648,2941844,"TERMINAL",0,0,"5",,terminal_output +2649,2942957,"TERMINAL",0,0,"6",,terminal_output +2650,2943972,"TERMINAL",0,0,"7",,terminal_output +2651,2944980,"TERMINAL",0,0,"8",,terminal_output +2652,2945807,"train_tokenizer.py",0,0,"",python,tab +2653,2945808,"train_tokenizer.py",8677,0,"",python,selection_mouse +2654,2945987,"train_tokenizer.py",8676,0,"",python,selection_command +2655,2946064,"TERMINAL",0,0,"9",,terminal_output +2656,2947066,"TERMINAL",0,0,"40",,terminal_output +2657,2948311,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py",0,0,"",python,tab +2658,2948381,"TERMINAL",0,0,"1",,terminal_output +2659,2949204,"TERMINAL",0,0,"2",,terminal_output +2660,2950209,"TERMINAL",0,0,"3",,terminal_output +2661,2951244,"TERMINAL",0,0,"4",,terminal_output +2662,2952401,"TERMINAL",0,0,"5",,terminal_output +2663,2952911,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py",3508,0,"",python,selection_mouse +2664,2952914,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py",3507,0,"",python,selection_command +2665,2953323,"TERMINAL",0,0,"6",,terminal_output +2666,2954432,"TERMINAL",0,0,"7",,terminal_output +2667,2955446,"TERMINAL",0,0,"8",,terminal_output +2668,2956483,"TERMINAL",0,0,"9",,terminal_output 
+2669,2957480,"TERMINAL",0,0,"50",,terminal_output +2670,2958531,"TERMINAL",0,0,"1",,terminal_output +2671,2959578,"TERMINAL",0,0,"2",,terminal_output +2672,2960662,"TERMINAL",0,0,"3",,terminal_output +2673,2960743,"scripts_horeka/batchsize_scaling/adjusted_lr/tester.sh",0,0,"#!/usr/bin/env bash\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/open_ai_minecraft_tfrecord\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""0000""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=48 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=10 \\n --log \\n --name=tokenizer-batch-size-scaling-1-node-$slurm_job_id \\n --tags tokenizer batch-size-scaling 1-node debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,tab +2674,2961205,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/dataloader.py",0,0,"",python,tab +2675,2961695,"TERMINAL",0,0,"4",,terminal_output +2676,2962832,"TERMINAL",0,0,"6",,terminal_output +2677,2963842,"TERMINAL",0,0,"7",,terminal_output +2678,2964790,"TERMINAL",0,0,"8",,terminal_output +2679,2965926,"TERMINAL",0,0,"9",,terminal_output +2680,2966881,"TERMINAL",0,0,"2:00",,terminal_output +2681,2967946,"TERMINAL",0,0,"1",,terminal_output +2682,2968960,"TERMINAL",0,0,"2",,terminal_output +2683,2970065,"TERMINAL",0,0,"3",,terminal_output +2684,2971195,"TERMINAL",0,0,"4",,terminal_output +2685,2972093,"sample.py",0,0,"from dataclasses import dataclass\nimport time\nimport os\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n\n\nargs = tyro.cli(Args)\nrng = jax.random.PRNGKey(args.seed)\n\n# --- Load Genie checkpoint ---\ngenie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n # Dynamics\n 
dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\nckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n for frame_idx in range(args.start_frame + 1, args.seq_len):\n # --- Sample next frame ---\n print(""Frame"", frame_idx)\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=vid, latent_actions=action_batch[:, :frame_idx], rng=_rng)\n new_frame = genie.apply(\n params,\n batch,\n args.maskgit_steps,\n args.temperature,\n args.sample_argmax,\n method=Genie.sample,\n )\n vid = jnp.concatenate([vid, new_frame], axis=1)\n return vid\n\n\n# --- Get video + latent actions ---\ntfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n]\ndataloader = get_dataloader(\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n seed=args.seed,\n)\nvideo_batch = next(iter(dataloader))\n# Get latent actions from first video only\nfirst_video = video_batch[:1]\nbatch = dict(videos=first_video)\naction_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\naction_batch = action_batch.reshape(1, args.seq_len - 1, 1)\n# Use actions from first video for all videos\naction_batch = jnp.repeat(action_batch, video_batch.shape[0], axis=0)\n\n# --- Sample + evaluate video ---\nvid = _autoreg_sample(rng, video_batch, action_batch)\ngt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\nrecon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\nssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\nprint(f""SSIM: {ssim}"")\n\n# --- Construct video ---\nfirst_true = (video_batch[0:1] * 255).astype(np.uint8)\nfirst_pred = (vid[0:1] * 255).astype(np.uint8)\nfirst_video_comparison = np.zeros((2, *vid.shape[1:5]), dtype=np.uint8)\nfirst_video_comparison[0] = first_true[:, : vid.shape[1]]\nfirst_video_comparison[1] = first_pred\n# For other videos, only show generated video\nother_preds = (vid[1:] * 255).astype(np.uint8)\nall_frames = np.concatenate([first_video_comparison, other_preds], axis=0)\nflat_vid = einops.rearrange(all_frames, ""n t h w c -> t h (n w) c"")\n\n# --- Save video ---\nimgs = [Image.fromarray(img) for img in flat_vid]\n# Write actions on each frame\nfor img, action in zip(imgs[1:], action_batch[0, :, 0]):\n d = ImageDraw.Draw(img)\n d.text((2, 2), f""{action}"", fill=255)\nimgs[0].save(\n f""generation_{time.time()}.gif"",\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n)\n",python,tab +2686,2972370,"TERMINAL",0,0,"5",,terminal_output +2687,2973274,"TERMINAL",0,0,"6",,terminal_output +2688,2974648,"TERMINAL",0,0,"7",,terminal_output +2689,2975317,"TERMINAL",0,0,"8",,terminal_output +2690,2976281,"TERMINAL",0,0,"916",,terminal_output +2691,2977375,"TERMINAL",0,0,"10",,terminal_output +2692,2978502,"TERMINAL",0,0,"1",,terminal_output +2693,2979420,"TERMINAL",0,0,"2",,terminal_output 
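The sample.py source captured above conditions every sampled video on the latent actions of the first video only: vq_encode runs on video_batch[:1], the result is reshaped to (1, seq_len - 1, 1), and jnp.repeat broadcasts it across the batch. A self-contained reduction of that broadcast step follows; the toy shapes and the arange stand-in for the encoder output are illustrative, and only the reshape/repeat pattern is taken from the recorded file:

```python
import jax.numpy as jnp

# Toy stand-ins: 4 videos per batch, 16-frame sequences.
batch_size, seq_len = 4, 16

# Stand-in for the flattened vq_encode output of the first video:
# one latent action per frame transition (seq_len - 1 of them).
action_batch = jnp.arange(seq_len - 1)

# As in the recorded sample.py: reshape to (1, seq_len - 1, 1), then
# repeat the first video's actions for every video in the batch.
action_batch = action_batch.reshape(1, seq_len - 1, 1)
action_batch = jnp.repeat(action_batch, batch_size, axis=0)
assert action_batch.shape == (batch_size, seq_len - 1, 1)
```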
+2694,2980500,"TERMINAL",0,0,"340",,terminal_output +2695,2981555,"TERMINAL",0,0,"452",,terminal_output +2696,2982580,"TERMINAL",0,0,"5",,terminal_output +2697,2983604,"TERMINAL",0,0,"6",,terminal_output +2698,2984636,"TERMINAL",0,0,"7",,terminal_output +2699,2985753,"TERMINAL",0,0,"864",,terminal_output +2700,2986418,"TERMINAL",0,0,"watch",,terminal_focus +2701,2986719,"TERMINAL",0,0,"2076",,terminal_output +2702,2986955,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +2703,2987963,"TERMINAL",0,0,"srun",,terminal_focus +2704,2988769,"TERMINAL",0,0,"[?25lgi[?25h[?25li[?25h",,terminal_output +2705,2988843,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +2706,2988908,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +2707,2989076,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +2708,2989172,"TERMINAL",0,0,"[?25lt[?25h[?25la[?25h",,terminal_output +2709,2989320,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +2710,2989380,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +2711,2989448,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +2712,2989642,"TERMINAL",0,0,"[?25l[?2004l\r[?25h",,terminal_output +2713,2989830,"TERMINAL",0,0,"On branch runner\r\nChanges not staged for commit:\r\n (use ""git add ..."" to update what will be committed)\r\n (use ""git restore ..."" to discard changes in working directory)\r\n\tmodified: utils/dataloader.py\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tdata_tfrecord_duplicated/\r\n\tdata_tfrecords/\r\n\tlogs/\r\n\tread_tf_record.py\r\n\trequirements-franz.txt\r\n\tscripts_cremers/\r\n\tscripts_horeka/\r\n\tslurm-3309772.out\r\n\tslurm/\r\n\tutils/visualizer.py\r\n\r\nno changes added to commit (use ""git add"" and/or ""git commit -a"")\r\n]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +2714,2990599,"TERMINAL",0,0,"g",,terminal_output +2715,2990729,"TERMINAL",0,0,"[?25li[?25h[?25lt[?25h",,terminal_output +2716,2990795,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +2717,2991011,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +2718,2991076,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +2719,2991214,"TERMINAL",0,0,"[?25lf[?25h",,terminal_output +2720,2991428,"TERMINAL",0,0,"[?25lf[?25h[?25l [?25h",,terminal_output +2721,2991531,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +2722,2991768,"TERMINAL",0,0,"tils/",,terminal_output +2723,2992515,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +2724,2992746,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +2725,2992904,"TERMINAL",0,0,"taloader.py",,terminal_output +2726,2993384,"TERMINAL",0,0,"[?25l[?2004l\r[?25h[?1h=\rdiff --git a/utils/dataloader.py b/utils/dataloader.py\r\nindex 19147d7..19652b5 100644\r\n--- a/utils/dataloader.py\r\n+++ b/utils/dataloader.py\r\n@@ -64,7 +64,7 @@ def _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\r\n return episode_tensor\r\n \r\n \r\n-def _create_processed_dataset_from_file(file_path, image_h, image_w, image_c, seq_len, num_parallel_calls):\r\n+def _create_processed_dataset_from_file(file_path, image_h, image_w, image_c, seq_len, num_parallel_calls, seed):\r\n """"""Creates a fully processed dataset from a single TFRecord file.""""""\r\n dataset = tf.data.TFRecordDataset([file_path])\r\n \r\n@@ -73,6 +73,12 @@ def _create_processed_dataset_from_file(file_path, image_h, image_w, image_c, se\r\n )\r\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\r\n \r\n+ # Filter out episodes that are too short\r\n+ def 
filter_short_episodes(episode_tensor):\r\n+ return tf.shape(episode_tensor)[0] >= seq_len\r\n+ \r\n+ dataset = dataset.filter(filter_short_episodes)\r\n+\r\n tf_process_fn = functools.partial(\r\n _tf_process_episode,\r\n seq_len=seq_len,\r\n@@ -93,7 +99,7 @@ def get_dataloader(\r\n image_h: int,\r\n image_w: int,\r\n image_c: int,\r\n- shuffle_buffer_size: int = 1000,\r\n+ shuffle_buffer_size: int = 10,\r\n num_parallel_calls: int = tf.data.AUTOTUNE,\r\n seed: int = 42,\r\n cycle_length: int = 4,\r\n@@ -116,7 +122,7 @@ def get_dataloader(\r\n \r\n def dataset_fn(file_path):\r\n return _create_processed_dataset_from_file(\r\n- file_path, image_h, image_w, image_c, seq_len, num_parallel_calls\r\n+ file_path, image_h, image_w, image_c, seq_len, num_parallel_calls, seed\r\n )\r\n \r\n dataset = tf.data.Dataset.from_tensor_slices(tfrecord_paths)\r\n",,terminal_output +2727,2993444,"TERMINAL",0,0,"\r[?1l>]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output diff --git a/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-6791460b-ec38-4da2-872f-193943c12d601753274780799-2025_07_23-14.47.19.396/source.csv b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-6791460b-ec38-4da2-872f-193943c12d601753274780799-2025_07_23-14.47.19.396/source.csv new file mode 100644 index 0000000000000000000000000000000000000000..21b016da800282df0d40a8fcddbe1b4d415e6ef7 --- /dev/null +++ b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-6791460b-ec38-4da2-872f-193943c12d601753274780799-2025_07_23-14.47.19.396/source.csv @@ -0,0 +1,7092 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +2,393,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"2:47:19 PM [info] Activating crowd-code\n2:47:19 PM [info] Recording started\n2:47:19 PM [info] Initializing git provider using file system watchers...\n2:47:19 PM [info] Git repository found\n2:47:19 PM [info] Git provider initialized successfully\n",Log,tab +3,464,"extension-output-pdoom-org.crowd-code-#1-crowd-code",245,0,"2:47:19 PM [info] Initial git state: [object Object]\n",Log,content +4,4068,"TERMINAL",0,0,"queue",,terminal_command +5,4097,"TERMINAL",0,0,"]633;E;2025-07-23 14:47:23 queue;469e5d18-6e08-4909-a55e-e2644c9abc02]633;C",,terminal_output +6,4191,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Wed Jul 23 14:47:23 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3370434 accelerat interact tum_cte0 PD\t0:00\t 2 (Priority)",,terminal_output +7,5216,"TERMINAL",0,0,"4\t ",,terminal_output +8,6289,"TERMINAL",0,0,"5\t ",,terminal_output +9,7314,"TERMINAL",0,0,"6\t ",,terminal_output +10,8344,"TERMINAL",0,0,"7\t ",,terminal_output +11,9392,"TERMINAL",0,0,"8\t ",,terminal_output +12,9936,"TERMINAL",0,0,"bash",,terminal_focus +13,10442,"TERMINAL",0,0,"9\t ",,terminal_output +14,10501,"TERMINAL",0,0,"watch",,terminal_focus +15,10981,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +16,14957,"TERMINAL",0,0,"scancel --me",,terminal_command +17,14987,"TERMINAL",0,0,"]633;E;2025-07-23 14:47:34 scancel --me;469e5d18-6e08-4909-a55e-e2644c9abc02]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar",,terminal_output +18,16394,"TERMINAL",0,0,"queue",,terminal_command +19,16474,"TERMINAL",0,0,"]633;E;2025-07-23 14:47:35 queue;469e5d18-6e08-4909-a55e-e2644c9abc02]633;C[?1049h(B[?7hEvery 1.0s: 
squeue --mehkn1990.localdomain: Wed Jul 23 14:47:35 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)",,terminal_output +20,16831,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +21,18166,"TERMINAL",0,0,"bash",,terminal_focus +22,107784,"models/dynamics.py",0,0,"from typing import Dict, Any\n\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\n\nfrom utils.nn import STTransformer\n\n\nclass DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n ffn_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:\n mask = None\n\n # --- Predict transition ---\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n logits = self.dynamics(vid_embed)\n return dict(token_logits=logits, mask=mask)\n",python,tab +23,107790,"models/dynamics.py",1896,0,"",python,selection_mouse +24,108384,"models/dynamics.py",2002,0,"",python,selection_mouse +25,108385,"models/dynamics.py",2001,0,"",python,selection_command +26,108562,"models/dynamics.py",2001,1,":",python,selection_mouse +27,108573,"models/dynamics.py",1922,79,"= jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else",python,selection_mouse +28,108574,"models/dynamics.py",2002,0,"",python,selection_command +29,108592,"models/dynamics.py",1873,129,"= mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:",python,selection_mouse +30,108606,"models/dynamics.py",1833,169,"(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:",python,selection_mouse +31,108624,"models/dynamics.py",1710,292," lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:",python,selection_mouse +32,108647,"models/dynamics.py",1679,323," mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n 
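The git diff of utils/dataloader.py captured shortly above adds a filter that drops episodes shorter than seq_len (presumably guarding the downstream seq_len-frame slicing against too-short episodes) and shrinks the shuffle buffer from 1000 to 10. A minimal sketch of that filter in isolation; TFRecord parsing is elided, and the variable-length toy episodes stand in for decoded episode tensors:

```python
import tensorflow as tf

seq_len = 16

def filter_short_episodes(episode_tensor):
    # Keep only episodes with at least seq_len frames, as in the diff.
    return tf.shape(episode_tensor)[0] >= seq_len

# Toy episodes of 8, 20, and 16 frames standing in for parsed TFRecords.
dataset = tf.data.Dataset.from_generator(
    lambda: (tf.zeros((n, 2)) for n in (8, 20, 16)),
    output_signature=tf.TensorSpec(shape=(None, 2), dtype=tf.float32),
)
dataset = dataset.filter(filter_short_episodes)
print([int(tf.shape(x)[0]) for x in dataset])  # -> [20, 16]
```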
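models/dynamics.py, opened above (its masking block is what the selection events that follow traverse), draws one mask probability per batch element from [mask_limit, 1), samples a Bernoulli mask over the per-sample token grid with jax.vmap, never masks the first frame, and swaps masked embeddings for a learned mask token. A runnable reduction with toy shapes; the dimensions and the zero/one placeholder tensors are illustrative, while the sampling logic mirrors the recorded file:

```python
import jax
import jax.numpy as jnp

# Toy dimensions standing in for the real model/batch sizes.
batch_size, seq_len, tokens_per_frame, model_dim = 2, 4, 9, 8
mask_limit = 0.5

rng1, rng2 = jax.random.split(jax.random.PRNGKey(0))
# One mask probability per batch element, uniform in [mask_limit, 1).
mask_prob = jax.random.uniform(rng1, (batch_size,), minval=mask_limit)
mask_rngs = jax.random.split(rng2, batch_size)
per_sample_shape = (seq_len, tokens_per_frame)
mask = jax.vmap(
    lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),
    in_axes=(0, 0),
)(mask_rngs, mask_prob)
mask = mask.at[:, 0].set(False)  # the first frame is never masked

# Placeholder embeddings / mask token; in the model these are learned.
vid_embed = jnp.zeros((batch_size, seq_len, tokens_per_frame, model_dim))
mask_token = jnp.ones((1, 1, 1, model_dim))
vid_embed = jnp.where(jnp.expand_dims(mask, -1), mask_token, vid_embed)
```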
)(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:",python,selection_mouse +33,108685,"models/dynamics.py",1624,378," per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:",python,selection_mouse +34,108708,"models/dynamics.py",1564,438," mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:",python,selection_mouse +35,108736,"models/dynamics.py",1563,439," mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:",python,selection_mouse +36,108736,"models/dynamics.py",1549,453," )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:",python,selection_mouse +37,108798,"models/dynamics.py",1483,519," rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:",python,selection_mouse +38,108799,"models/dynamics.py",1484,518," rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:",python,selection_mouse +39,108817,"models/dynamics.py",1440,562," mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:",python,selection_mouse +40,108874,"models/dynamics.py",1441,561," mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n 
)(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:",python,selection_mouse +41,108881,"models/dynamics.py",1442,560," mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:",python,selection_mouse +42,108902,"models/dynamics.py",1381,621," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:",python,selection_mouse +43,108957,"models/dynamics.py",1338,664," batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:",python,selection_mouse +44,108958,"models/dynamics.py",1317,685," if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:",python,selection_mouse +45,109487,"models/dynamics.py",1317,0,"",python,selection_mouse +46,109488,"models/dynamics.py",1312,8," ",python,selection_mouse +47,109648,"models/dynamics.py",1312,21," if training:\n",python,selection_mouse +48,109801,"models/dynamics.py",1312,65," if training:\n batch_size = vid_embed.shape[0]\n",python,selection_mouse +49,109817,"models/dynamics.py",1312,170," if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n",python,selection_mouse +50,109875,"models/dynamics.py",1312,236," if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n",python,selection_mouse +51,109876,"models/dynamics.py",1312,309," if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n",python,selection_mouse +52,109877,"models/dynamics.py",1312,362," 
if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n",python,selection_mouse +53,109889,"models/dynamics.py",1312,391," if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n",python,selection_mouse +54,109903,"models/dynamics.py",1312,476," if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n",python,selection_mouse +55,109964,"models/dynamics.py",1312,508," if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n",python,selection_mouse +56,109965,"models/dynamics.py",1312,544," if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n",python,selection_mouse +57,109971,"models/dynamics.py",1312,588," if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n",python,selection_mouse +58,110029,"models/dynamics.py",1312,677," if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +59,110118,"models/dynamics.py",1312,691," if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: 
jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:\n",python,selection_mouse +60,110518,"models/dynamics.py",2002,0,"",python,selection_mouse +61,110526,"models/dynamics.py",2001,0,"",python,selection_command +62,110686,"models/dynamics.py",2002,0,"",python,selection_mouse +63,110706,"models/dynamics.py",2001,0,"",python,selection_command +64,110829,"models/dynamics.py",1989,14," else:\n",python,selection_mouse +65,110832,"models/dynamics.py",1990,13," else:\n",python,selection_command +66,110990,"models/dynamics.py",1900,90," vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n ",python,selection_mouse +67,110990,"models/dynamics.py",1856,134," mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n ",python,selection_mouse +68,111007,"models/dynamics.py",1703,287," lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n ",python,selection_mouse +69,111058,"models/dynamics.py",1621,369," per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n ",python,selection_mouse +70,111059,"models/dynamics.py",1562,428," mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n ",python,selection_mouse +71,111082,"models/dynamics.py",1548,442," )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n ",python,selection_mouse +72,111099,"models/dynamics.py",1482,508," rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n ",python,selection_mouse +73,111159,"models/dynamics.py",1438,552," mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n ",python,selection_mouse +74,111221,"models/dynamics.py",1377,613," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, 
shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n ",python,selection_mouse +75,111339,"models/dynamics.py",1333,657," batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n ",python,selection_mouse +76,111404,"models/dynamics.py",1312,678," if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n ",python,selection_mouse +77,111458,"models/dynamics.py",1252,738," vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n ",python,selection_mouse +78,111854,"models/dynamics.py",1252,0,"",python,selection_mouse +79,111895,"models/dynamics.py",1252,8," ",python,selection_mouse +80,112065,"models/dynamics.py",1252,60," vid_embed = self.patch_embed(batch[""video_tokens""])\n",python,selection_mouse +81,112242,"models/dynamics.py",1252,81," vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n",python,selection_mouse +82,112300,"models/dynamics.py",1252,125," vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n batch_size = vid_embed.shape[0]\n",python,selection_mouse +83,112301,"models/dynamics.py",1252,186," vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n",python,selection_mouse +84,112311,"models/dynamics.py",1252,296," vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n",python,selection_mouse +85,112375,"models/dynamics.py",1252,310," vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n 
mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n",python,selection_mouse +86,112376,"models/dynamics.py",1252,369," vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n",python,selection_mouse +87,112379,"models/dynamics.py",1252,422," vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n",python,selection_mouse +88,112391,"models/dynamics.py",1252,451," vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n",python,selection_mouse +89,112451,"models/dynamics.py",1252,536," vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n",python,selection_mouse +90,112452,"models/dynamics.py",1252,568," vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n",python,selection_mouse +91,112476,"models/dynamics.py",1252,604," vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n",python,selection_mouse +92,112536,"models/dynamics.py",1252,648," vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n",python,selection_mouse +93,112599,"models/dynamics.py",1252,737," vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n 
batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +94,113130,"models/dynamics.py",1909,0,"",python,selection_mouse +95,113131,"models/dynamics.py",1900,12," ",python,selection_mouse +96,113296,"models/dynamics.py",1900,89," vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +97,113451,"models/dynamics.py",1856,133," mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +98,113508,"models/dynamics.py",1788,201," in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +99,113509,"models/dynamics.py",1562,427," mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +100,113510,"models/dynamics.py",1548,441," )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +101,113525,"models/dynamics.py",1438,551," mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +102,113549,"models/dynamics.py",1377,612," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +103,113598,"models/dynamics.py",1333,656," batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 
0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +104,162381,"slurm/jobs/mihir/horeka/batchsize_scaling/dynamics_cotraining/sqrt_lr/start_runs.sh",0,0,"sbatch scripts_horeka/batchsize_scaling/sqrt_lr/train_tokenizer_16_nodes.sbatch\nsbatch scripts_horeka/batchsize_scaling/sqrt_lr/train_tokenizer_1_nodes.sbatch\nsbatch scripts_horeka/batchsize_scaling/sqrt_lr/train_tokenizer_2_nodes.sbatch\nsbatch scripts_horeka/batchsize_scaling/sqrt_lr/train_tokenizer_4_nodes.sbatch\nsbatch scripts_horeka/batchsize_scaling/sqrt_lr/train_tokenizer_8_nodes.sbatch\n\n# sbatch scripts_horeka/batchsize_scaling/sqrt_lr/train_tokenizer_32_nodes.sbatch",shellscript,tab +105,174015,"models/dynamics.py",0,0,"",python,tab +106,226247,"TERMINAL",0,0,"bash",,terminal_focus +107,231620,"slurm/jobs/mihir/horeka/batchsize_scaling/dynamics_cotraining/sqrt_lr/tester.sh",0,0,"#!/usr/bin/env bash\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=""debug""\nslurm_job_id=""0000""\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=12 \\n --max_lr=1e-4 \\n --log_image_interval=1000 \\n --log \\n --num_latent_actions=20 \\n --log_checkpoint_interval=1000 \\n --name=dynamics-debug-$slurm_job_id \\n --tags dynamics debug \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir\n",shellscript,tab +108,237565,"slurm/jobs/mihir/horeka/batchsize_scaling/dynamics_cotraining/sqrt_lr/tester.sh",623,0,"",shellscript,selection_mouse +109,237566,"slurm/jobs/mihir/horeka/batchsize_scaling/dynamics_cotraining/sqrt_lr/tester.sh",622,0,"",shellscript,selection_command +110,241496,"models/dynamics.py",0,0,"",python,tab +111,241498,"models/dynamics.py",2002,0,"",python,selection_mouse +112,241515,"models/dynamics.py",2001,0,"",python,selection_command +113,242171,"models/dynamics.py",1918,0,"",python,selection_mouse +114,242319,"models/dynamics.py",1912,9,"vid_embed",python,selection_mouse +115,242470,"models/dynamics.py",1900,89," vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +116,242633,"models/dynamics.py",1856,133," mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +117,242650,"models/dynamics.py",1820,169," )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +118,242710,"models/dynamics.py",1788,201," in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +119,242711,"models/dynamics.py",1703,286," lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, 
vid_embed)\n",python,selection_mouse +120,242718,"models/dynamics.py",1674,315," mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +121,242747,"models/dynamics.py",1621,368," per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +122,242804,"models/dynamics.py",1562,427," mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +123,242818,"models/dynamics.py",1548,441," )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +124,242878,"models/dynamics.py",1482,507," rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +125,242896,"models/dynamics.py",1438,551," mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +126,242998,"models/dynamics.py",1377,612," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +127,243084,"models/dynamics.py",1333,656," batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = 
jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n",python,selection_mouse +128,246577,"models/dynamics.py",1333,657,"",python,content +129,246645,"models/dynamics.py",1333,0,"d",python,content +130,246646,"models/dynamics.py",1334,0,"",python,selection_keyboard +131,246699,"models/dynamics.py",1334,0," ",python,content +132,246700,"models/dynamics.py",1335,0,"",python,selection_keyboard +133,246873,"models/dynamics.py",1335,0,"s",python,content +134,246874,"models/dynamics.py",1336,0,"",python,selection_keyboard +135,248882,"models/dynamics.py",1334,2,"",python,content +136,249246,"models/dynamics.py",1333,1,"",python,content +137,250220,"models/dynamics.py",1333,0," batch_size = vid_embed.shape[0]\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(\n rng1, shape=(batch_size,), minval=self.mask_limit\n )\n mask_rngs = jax.random.split(rng2, batch_size)\n per_sample_shape = vid_embed.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(mask_rngs, mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n ",python,content +138,252164,"TERMINAL",0,0,"bash",,terminal_focus +139,254015,"TERMINAL",0,0,"cd slurm/",,terminal_command +140,255841,"TERMINAL",0,0,"git pull",,terminal_command +141,255892,"TERMINAL",0,0,"]633;E;2025-07-23 14:51:35 git pull;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C",,terminal_output +142,257473,"TERMINAL",0,0,"remote: Enumerating objects: 29, done.\r\nremote: Counting objects: 3% (1/29)\rremote: Counting objects: 6% (2/29)\rremote: Counting objects: 10% (3/29)\rremote: Counting objects: 13% (4/29)\rremote: Counting objects: 17% (5/29)\rremote: Counting objects: 20% (6/29)\rremote: Counting objects: 24% (7/29)\rremote: Counting objects: 27% (8/29)\rremote: Counting objects: 31% (9/29)\rremote: Counting objects: 34% (10/29)\rremote: Counting objects: 37% (11/29)\rremote: Counting objects: 41% (12/29)\rremote: Counting objects: 44% (13/29)\rremote: Counting objects: 48% (14/29)\rremote: Counting objects: 51% (15/29)\rremote: Counting objects: 55% (16/29)\rremote: Counting objects: 58% (17/29)\rremote: Counting objects: 62% (18/29)\rremote: Counting objects: 65% (19/29)\rremote: Counting objects: 68% (20/29)\rremote: Counting objects: 72% (21/29)\rremote: Counting objects: 75% (22/29)\rremote: Counting objects: 79% (23/29)\rremote: Counting objects: 82% (24/29)\rremote: Counting objects: 86% (25/29)\rremote: Counting objects: 89% (26/29)\rremote: Counting objects: 93% (27/29)\rremote: Counting objects: 96% (28/29)\rremote: Counting objects: 100% (29/29)\rremote: Counting objects: 100% (29/29), done.\r\nremote: Compressing objects: 11% (1/9)\rremote: Compressing objects: 22% (2/9)\rremote: Compressing objects: 33% (3/9)\rremote: Compressing objects: 44% (4/9)\rremote: Compressing objects: 55% (5/9)\rremote: Compressing objects: 66% (6/9)\rremote: Compressing objects: 77% (7/9)\rremote: Compressing objects: 88% (8/9)\rremote: Compressing objects: 100% (9/9)\rremote: Compressing objects: 100% (9/9), done.\r\nremote: Total 23 (delta 13), reused 23 (delta 13), pack-reused 0 (from 0)\r\nUnpacking objects: 4% (1/23)\rUnpacking objects: 8% (2/23)\rUnpacking objects: 13% (3/23)\rUnpacking objects: 17% (4/23)\rUnpacking objects: 21% (5/23)\rUnpacking objects: 26% (6/23)\rUnpacking objects: 30% (7/23)\rUnpacking objects: 34% (8/23)\rUnpacking objects: 39% (9/23)\rUnpacking objects: 43% 
(10/23)\rUnpacking objects: 47% (11/23)\rUnpacking objects: 52% (12/23)\rUnpacking objects: 56% (13/23)\rUnpacking objects: 60% (14/23)\rUnpacking objects: 65% (15/23)\rUnpacking objects: 69% (16/23)\rUnpacking objects: 73% (17/23)\rUnpacking objects: 78% (18/23)\rUnpacking objects: 82% (19/23)\rUnpacking objects: 86% (20/23)\rUnpacking objects: 91% (21/23)\rUnpacking objects: 95% (22/23)\rUnpacking objects: 100% (23/23)\rUnpacking objects: 100% (23/23), 2.49 KiB | 23.00 KiB/s, done.\r\n",,terminal_output +143,257602,"TERMINAL",0,0,"From github.com:p-doom/slurm\r\n ec57c8a..71220f9 dev-dir-args -> origin/dev-dir-args\r\n",,terminal_output +144,257647,"TERMINAL",0,0,"Already up to date.\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar/slurm]633;D;0",,terminal_output +145,334226,"TERMINAL",0,0,"git pull",,terminal_command +146,334278,"TERMINAL",0,0,"]633;E;2025-07-23 14:52:53 git pull;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C",,terminal_output +147,335878,"TERMINAL",0,0,"remote: Enumerating objects: 7, done.\r\nremote: Counting objects: 14% (1/7)\rremote: Counting objects: 28% (2/7)\rremote: Counting objects: 42% (3/7)\rremote: Counting objects: 57% (4/7)\rremote: Counting objects: 71% (5/7)\rremote: Counting objects: 85% (6/7)\rremote: Counting objects: 100% (7/7)\rremote: Counting objects: 100% (7/7), done.\r\nremote: Compressing objects: 33% (1/3)\rremote: Compressing objects: 66% (2/3)\rremote: Compressing objects: 100% (3/3)\rremote: Compressing objects: 100% (3/3), done.\r\nremote: Total 3 (delta 2), reused 0 (delta 0), pack-reused 0 (from 0)\r\nUnpacking objects: 33% (1/3)\rUnpacking objects: 66% (2/3)\rUnpacking objects: 100% (3/3)\rUnpacking objects: 100% (3/3), 1.03 KiB | 50.00 KiB/s, done.\r\n",,terminal_output +148,336082,"TERMINAL",0,0,"From github.com:p-doom/slurm\r\n cb16dab..5d7446e main -> origin/main\r\n",,terminal_output +149,336156,"TERMINAL",0,0,"Updating cb16dab..5d7446e\r\n",,terminal_output +150,336557,"TERMINAL",0,0,"Fast-forward\r\n",,terminal_output +151,336627,"TERMINAL",0,0," jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/start_runs.sh | 7 +++++++\r\n jobs/franz/horeka/batchsize_scaling/{dynamics_cotraining => dynamics_cotraining_new_arch}/linear_lr/tester.sh | 4 ++--\r\n jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_16_nodes.sbatch | 47 +++++++++++++++++++++++++++++++++++++++++++++++\r\n jobs/franz/horeka/batchsize_scaling/{dynamics_cotraining => dynamics_cotraining_new_arch}/linear_lr/train_dynamics_1_nodes.sbatch | 4 ++--\r\n jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_2_nodes.sbatch | 47 +++++++++++++++++++++++++++++++++++++++++++++++\r\n jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_4_nodes.sbatch | 47 +++++++++++++++++++++++++++++++++++++++++++++++\r\n jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch | 47 +++++++++++++++++++++++++++++++++++++++++++++++\r\n jobs/franz/horeka/batchsize_scaling/{dynamics_cotraining/linear_lr => dynamics_cotraining_new_arch/sqrt_lr}/start_runs.sh | 12 ++++++------\r\n jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/sqrt_lr/tester.sh | 27 +++++++++++++++++++++++++++\r\n jobs/franz/horeka/batchsize_scaling/{dynamics_cotraining/linear_lr => dynamics_cotraining_new_arch/sqrt_lr}/train_dynamics_16_nodes.sbatch | 4 ++--\r\n 
jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/sqrt_lr/train_dynamics_1_nodes.sbatch | 47 +++++++++++++++++++++++++++++++++++++++++++++++\r\n jobs/franz/horeka/batchsize_scaling/{dynamics_cotraining/linear_lr => dynamics_cotraining_new_arch/sqrt_lr}/train_dynamics_2_nodes.sbatch | 4 ++--\r\n jobs/franz/horeka/batchsize_scaling/{dynamics_cotraining/linear_lr => dynamics_cotraining_new_arch/sqrt_lr}/train_dynamics_4_nodes.sbatch | 4 ++--\r\n jobs/franz/horeka/batchsize_scaling/{dynamics_cotraining/linear_lr => dynamics_cotraining_new_arch/sqrt_lr}/train_dynamics_8_nodes.sbatch | 4 ++--\r\n utils/create_dev_dir.sh | 21 +++++++++++++++++----\r\n utils/init_slurm_repo.sh | 62 --------------------------------------------------------------\r\n 16 files changed, 304 insertions(+), 84 deletions(-)\r\n create mode 100644 jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/start_runs.sh\r\n rename jobs/franz/horeka/batchsize_scaling/{dynamics_cotraining => dynamics_cotraining_new_arch}/linear_lr/tester.sh (96%)\r\n create mode 100644 jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_16_nodes.sbatch\r\n rename jobs/franz/horeka/batchsize_scaling/{dynamics_cotraining => dynamics_cotraining_new_arch}/linear_lr/train_dynamics_1_nodes.sbatch (94%)\r\n create mode 100644 jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_2_nodes.sbatch\r\n create mode 100644 jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_4_nodes.sbatch\r\n create mode 100644 jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch\r\n rename jobs/franz/horeka/batchsize_scaling/{dynamics_cotraining/linear_lr => dynamics_cotraining_new_arch/sqrt_lr}/start_runs.sh (55%)\r\n create mode 100644 jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/sqrt_lr/tester.sh\r\n rename jobs/franz/horeka/batchsize_scaling/{dynamics_cotraining/linear_lr => dynamics_cotraining_new_arch/sqrt_lr}/train_dynamics_16_nodes.sbatch (94%)\r\n create mode 100644 jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/sqrt_lr/train_dynamics_1_nodes.sbatch\r\n rename jobs/franz/horeka/batchsize_scaling/{dynamics_cotraining/linear_lr => dynamics_cotraining_new_arch/sqrt_lr}/train_dynamics_2_nodes.sbatch (94%)\r\n rename jobs/franz/horeka/batchsize_scaling/{dynamics_cotraining/linear_lr => dynamics_cotraining_new_arch/sqrt_lr}/train_dynamics_4_nodes.sbatch (94%)\r\n rename jobs/franz/horeka/batchsize_scaling/{dynamics_cotraining/linear_lr => dynamics_cotraining_new_arch/sqrt_lr}/train_dynamics_8_nodes.sbatch (94%)\r\n delete mode 100755 utils/init_slurm_repo.sh\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar/slurm]633;D;0",,terminal_output +152,349819,"TERMINAL",0,0,"cd ..",,terminal_command +153,381891,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/tester.sh",0,0,"#!/usr/bin/env bash\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=""debug""\nslurm_job_id=""0000""\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger 
ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --init_lr=0 \\n --max_lr=2e-5 \\n --num_latent_actions=20 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir \\n",shellscript,tab +154,387143,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/tester.sh",414,0,"",shellscript,selection_mouse +155,387252,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/tester.sh",409,9,"tokenizer",shellscript,selection_mouse +156,387427,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/tester.sh",409,14,"tokenizer with",shellscript,selection_mouse +157,387433,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/tester.sh",409,18,"tokenizer with the",shellscript,selection_mouse +158,387449,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/tester.sh",409,32,"tokenizer with the new structure",shellscript,selection_mouse +159,387504,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/tester.sh",409,43,"tokenizer with the new structure supporting",shellscript,selection_mouse +160,387566,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/tester.sh",409,44,"tokenizer with the new structure supporting ",shellscript,selection_mouse +161,387583,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/tester.sh",409,50,"tokenizer with the new structure supporting larger",shellscript,selection_mouse +162,387698,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/tester.sh",409,51,"tokenizer with the new structure supporting larger ",shellscript,selection_mouse +163,387717,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/tester.sh",409,58,"tokenizer with the new structure supporting larger ffn_dim",shellscript,selection_mouse +164,389319,"models/dynamics.py",0,0,"",python,tab +165,390465,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/start_runs.sh",0,0,"sbatch slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_16_nodes.sbatch\nsbatch slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch\nsbatch slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_2_nodes.sbatch\nsbatch slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_4_nodes.sbatch\nsbatch slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch\n\n# sbatch slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_32_nodes.sbatch",shellscript,tab +166,414301,"TERMINAL",0,0,"bash",,terminal_focus +167,424815,"TERMINAL",0,0,"idling",,terminal_command +168,424871,"TERMINAL",0,0,"]633;E;2025-07-23 14:54:24 idling;469e5d18-6e08-4909-a55e-e2644c9abc02]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1990.localdomain: Wed Jul 23 14:54:24 2025Partition dev_cpuonly: 10 nodes idle\rPartition cpuonly: 16 nodes idle\rPartition dev_accelerated:\t 1 nodes idle\rPartition accelerated:\t 0 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes 
idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 7 nodes idle",,terminal_output +169,425916,"TERMINAL",0,0,"5",,terminal_output +170,426201,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +171,433402,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5",,terminal_command +172,433543,"TERMINAL",0,0,"]633;E;2025-07-23 14:54:32 salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5;469e5d18-6e08-4909-a55e-e2644c9abc02]633;Csalloc: Granted job allocation 3370713\r\n",,terminal_output +173,433618,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output +174,434112,"TERMINAL",0,0,"^Csalloc: Job allocation 3370713 has been revoked.\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;1]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar",,terminal_output +175,441849,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:4 --cpus-per-task=5",,terminal_command +176,441898,"TERMINAL",0,0,"]633;E;2025-07-23 14:54:41 salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:4 --cpus-per-task=5;469e5d18-6e08-4909-a55e-e2644c9abc02]633;C",,terminal_output +177,441954,"TERMINAL",0,0,"salloc: Pending job allocation 3370714\r\nsalloc: job 3370714 queued and waiting for resources\r\n",,terminal_output +178,442398,"TERMINAL",0,0,"^Csalloc: Job allocation 3370714 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;1",,terminal_output +179,446187,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5",,terminal_command +180,446248,"TERMINAL",0,0,"]633;E;2025-07-23 14:54:45 salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5;469e5d18-6e08-4909-a55e-e2644c9abc02]633;Csalloc: Pending job allocation 3370715\r\nsalloc: job 3370715 queued and waiting for resources\r\n",,terminal_output +181,446940,"TERMINAL",0,0,"bash",,terminal_focus +182,447464,"TERMINAL",0,0,"^C",,terminal_command +183,448640,"TERMINAL",0,0,"idling",,terminal_command +184,448706,"TERMINAL",0,0,"]633;E;2025-07-23 14:54:47 idling;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1990.localdomain: Wed Jul 23 14:54:47 2025Partition dev_cpuonly:\t 8 nodes idle\rPartition cpuonly:\t 4 nodes idle\rPartition dev_accelerated:\t 1 nodes idle\rPartition accelerated:\t 0 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 7 nodes idle",,terminal_output +185,449745,"TERMINAL",0,0,"9",,terminal_output +186,450808,"TERMINAL",0,0,"505",,terminal_output +187,450862,"TERMINAL",0,0,"salloc",,terminal_focus +188,451830,"TERMINAL",0,0,"1",,terminal_output +189,452882,"TERMINAL",0,0,"2",,terminal_output +190,453988,"TERMINAL",0,0,"3",,terminal_output +191,455006,"TERMINAL",0,0,"4",,terminal_output +192,456033,"TERMINAL",0,0,"5",,terminal_output +193,457020,"TERMINAL",0,0,"6",,terminal_output +194,458058,"TERMINAL",0,0,"7",,terminal_output +195,459105,"TERMINAL",0,0,"8",,terminal_output +196,460235,"TERMINAL",0,0,"9",,terminal_output +197,461176,"TERMINAL",0,0,"5:00",,terminal_output +198,462277,"TERMINAL",0,0,"1",,terminal_output 
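The models/dynamics.py buffer captured above centres on per-sample masking: every sample in the batch draws its own masking probability, the Bernoulli mask is generated per sample via jax.vmap, the first frame is always left unmasked, and masked positions are overwritten with a learned mask token. A minimal, self-contained sketch of that scheme, with names following the recorded code (array sizes, the mask_limit default, and the mask-token stand-in are illustrative assumptions):

    import jax
    import jax.numpy as jnp

    def make_mask(rng, vid_embed, mask_limit=0.5):
        # vid_embed: (B, T, N, D) token embeddings
        batch_size = vid_embed.shape[0]
        rng1, rng2 = jax.random.split(rng)
        # One masking probability per sample, drawn from [mask_limit, 1).
        mask_prob = jax.random.uniform(rng1, shape=(batch_size,), minval=mask_limit)
        mask_rngs = jax.random.split(rng2, batch_size)
        per_sample_shape = vid_embed.shape[1:-1]  # (T, N)
        mask = jax.vmap(
            lambda r, p: jax.random.bernoulli(r, p, per_sample_shape),
            in_axes=(0, 0),
        )(mask_rngs, mask_prob)
        # Never mask the first frame so the model always sees a context frame.
        return mask.at[:, 0].set(False)

    rng = jax.random.PRNGKey(0)
    vid_embed = jnp.zeros((2, 4, 16, 8))  # (B, T, N, D), dummy values
    mask_token = jnp.ones((1, 1, 1, 8))   # stand-in for the learned parameter
    mask = make_mask(rng, vid_embed)
    vid_embed = jnp.where(jnp.expand_dims(mask, -1), mask_token, vid_embed)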
+199,463247,"TERMINAL",0,0,"2",,terminal_output +200,464331,"TERMINAL",0,0,"3",,terminal_output +201,465350,"TERMINAL",0,0,"4",,terminal_output +202,466375,"TERMINAL",0,0,"5",,terminal_output +203,467401,"TERMINAL",0,0,"6",,terminal_output +204,468444,"TERMINAL",0,0,"7",,terminal_output +205,469555,"TERMINAL",0,0,"8",,terminal_output +206,470518,"TERMINAL",0,0,"9",,terminal_output +207,471595,"TERMINAL",0,0,"10",,terminal_output +208,472619,"TERMINAL",0,0,"1",,terminal_output +209,473690,"TERMINAL",0,0,"2",,terminal_output +210,474773,"TERMINAL",0,0,"3",,terminal_output +211,475725,"TERMINAL",0,0,"5",,terminal_output +212,476820,"TERMINAL",0,0,"6",,terminal_output +213,477849,"TERMINAL",0,0,"7",,terminal_output +214,478867,"TERMINAL",0,0,"8",,terminal_output +215,479892,"TERMINAL",0,0,"9",,terminal_output +216,480925,"TERMINAL",0,0,"20",,terminal_output +217,481969,"TERMINAL",0,0,"1",,terminal_output +218,483079,"TERMINAL",0,0,"2",,terminal_output +219,484091,"TERMINAL",0,0,"3",,terminal_output +220,485076,"TERMINAL",0,0,"4",,terminal_output +221,486141,"TERMINAL",0,0,"5",,terminal_output +222,487152,"TERMINAL",0,0,"6",,terminal_output +223,488303,"TERMINAL",0,0,"7",,terminal_output +224,489233,"TERMINAL",0,0,"8",,terminal_output +225,490283,"TERMINAL",0,0,"9",,terminal_output +226,491371,"TERMINAL",0,0,"30",,terminal_output +227,492389,"TERMINAL",0,0,"1",,terminal_output +228,493386,"TERMINAL",0,0,"2",,terminal_output +229,494425,"TERMINAL",0,0,"3",,terminal_output +230,495462,"TERMINAL",0,0,"4",,terminal_output +231,496581,"TERMINAL",0,0,"5",,terminal_output +232,497553,"TERMINAL",0,0,"6",,terminal_output +233,498650,"TERMINAL",0,0,"7",,terminal_output +234,499643,"TERMINAL",0,0,"8",,terminal_output +235,500782,"TERMINAL",0,0,"9",,terminal_output +236,501811,"TERMINAL",0,0,"40",,terminal_output +237,502840,"TERMINAL",0,0,"2",,terminal_output +238,503789,"TERMINAL",0,0,"3",,terminal_output +239,504830,"TERMINAL",0,0,"4",,terminal_output +240,505873,"TERMINAL",0,0,"5",,terminal_output +241,506927,"TERMINAL",0,0,"6",,terminal_output +242,508092,"TERMINAL",0,0,"7",,terminal_output +243,509087,"TERMINAL",0,0,"8",,terminal_output +244,509809,"TERMINAL",0,0,"salloc: job 3370715 has been allocated resources\r\nsalloc: Granted job allocation 3370715\r\nsalloc: Waiting for resource configuration\r\n",,terminal_output +245,510100,"TERMINAL",0,0,"90",,terminal_output +246,511073,"TERMINAL",0,0,"50",,terminal_output +247,512149,"TERMINAL",0,0,"1",,terminal_output +248,513172,"TERMINAL",0,0,"2",,terminal_output +249,514197,"TERMINAL",0,0,"3",,terminal_output +250,515323,"TERMINAL",0,0,"4",,terminal_output +251,516355,"TERMINAL",0,0,"5",,terminal_output +252,517320,"TERMINAL",0,0,"6",,terminal_output +253,518398,"TERMINAL",0,0,"7",,terminal_output +254,519391,"TERMINAL",0,0,"8",,terminal_output +255,520432,"TERMINAL",0,0,"9",,terminal_output +256,521475,"TERMINAL",0,0,"6:00",,terminal_output +257,522518,"TERMINAL",0,0,"1",,terminal_output +258,523618,"TERMINAL",0,0,"2",,terminal_output +259,524594,"TERMINAL",0,0,"3",,terminal_output +260,525665,"TERMINAL",0,0,"4",,terminal_output +261,526687,"TERMINAL",0,0,"5",,terminal_output +262,527816,"TERMINAL",0,0,"6",,terminal_output +263,528763,"TERMINAL",0,0,"8",,terminal_output +264,529866,"TERMINAL",0,0,"9",,terminal_output +265,530853,"TERMINAL",0,0,"10",,terminal_output +266,531913,"TERMINAL",0,0,"1",,terminal_output +267,532934,"TERMINAL",0,0,"2",,terminal_output +268,533976,"TERMINAL",0,0,"3",,terminal_output 
+269,535085,"TERMINAL",0,0,"4",,terminal_output +270,536062,"TERMINAL",0,0,"5",,terminal_output +271,536864,"TERMINAL",0,0,"salloc: Nodes hkn0402 are ready for job\r\n",,terminal_output +272,537097,"TERMINAL",0,0,"6",,terminal_output +273,537752,"TERMINAL",0,0,"]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h[tum_cte0515@hkn0402 jafar]$ ",,terminal_output +274,538136,"TERMINAL",0,0,"7",,terminal_output +275,539282,"TERMINAL",0,0,"8",,terminal_output +276,540320,"TERMINAL",0,0,"9",,terminal_output +277,541272,"TERMINAL",0,0,"20",,terminal_output +278,542334,"TERMINAL",0,0,"1",,terminal_output +279,543345,"TERMINAL",0,0,"2",,terminal_output +280,544387,"TERMINAL",0,0,"3",,terminal_output +281,545425,"TERMINAL",0,0,"4",,terminal_output +282,546487,"TERMINAL",0,0,"5",,terminal_output +283,547577,"TERMINAL",0,0,"6",,terminal_output +284,548601,"TERMINAL",0,0,"7",,terminal_output +285,549623,"TERMINAL",0,0,"8",,terminal_output +286,550649,"TERMINAL",0,0,"9",,terminal_output +287,551665,"TERMINAL",0,0,"30",,terminal_output +288,552799,"TERMINAL",0,0,"1",,terminal_output +289,553741,"TERMINAL",0,0,"3",,terminal_output +290,554848,"TERMINAL",0,0,"4",,terminal_output +291,555871,"TERMINAL",0,0,"5",,terminal_output +292,556895,"TERMINAL",0,0,"6",,terminal_output +293,557922,"TERMINAL",0,0,"7",,terminal_output +294,558940,"TERMINAL",0,0,"8",,terminal_output +295,559975,"TERMINAL",0,0,"9",,terminal_output +296,561103,"TERMINAL",0,0,"40",,terminal_output +297,562068,"TERMINAL",0,0,"1",,terminal_output +298,563101,"TERMINAL",0,0,"2",,terminal_output +299,564166,"TERMINAL",0,0,"3",,terminal_output +300,565192,"TERMINAL",0,0,"4",,terminal_output +301,566334,"TERMINAL",0,0,"5",,terminal_output +302,567259,"TERMINAL",0,0,"6",,terminal_output +303,568297,"TERMINAL",0,0,"7",,terminal_output +304,570104,"TERMINAL",0,0,"874",,terminal_output +305,571134,"TERMINAL",0,0,"50",,terminal_output +306,572112,"TERMINAL",0,0,"1",,terminal_output +307,573180,"TERMINAL",0,0,"2",,terminal_output +308,574192,"TERMINAL",0,0,"3",,terminal_output +309,575344,"TERMINAL",0,0,"4",,terminal_output +310,576274,"TERMINAL",0,0,"5",,terminal_output +311,577373,"TERMINAL",0,0,"6",,terminal_output +312,578398,"TERMINAL",0,0,"7",,terminal_output +313,579427,"TERMINAL",0,0,"8",,terminal_output +314,580437,"TERMINAL",0,0,"9",,terminal_output +315,581481,"TERMINAL",0,0,"7:00",,terminal_output +316,582598,"TERMINAL",0,0,"1",,terminal_output +317,583558,"TERMINAL",0,0,"2",,terminal_output +318,584648,"TERMINAL",0,0,"3",,terminal_output +319,585674,"TERMINAL",0,0,"4",,terminal_output +320,586692,"TERMINAL",0,0,"5",,terminal_output +321,587825,"TERMINAL",0,0,"7",,terminal_output +322,588754,"TERMINAL",0,0,"8",,terminal_output +323,589791,"TERMINAL",0,0,"9",,terminal_output +324,590839,"TERMINAL",0,0,"10",,terminal_output +325,591914,"TERMINAL",0,0,"1",,terminal_output +326,592916,"TERMINAL",0,0,"2",,terminal_output +327,593968,"TERMINAL",0,0,"3",,terminal_output +328,594990,"TERMINAL",0,0,"4",,terminal_output +329,596118,"TERMINAL",0,0,"5",,terminal_output +330,597140,"TERMINAL",0,0,"6",,terminal_output +331,598167,"TERMINAL",0,0,"7",,terminal_output +332,599189,"TERMINAL",0,0,"8",,terminal_output +333,600198,"TERMINAL",0,0,"9",,terminal_output +334,601347,"TERMINAL",0,0,"20",,terminal_output +335,602361,"TERMINAL",0,0,"1",,terminal_output +336,603326,"TERMINAL",0,0,"2",,terminal_output +337,604374,"TERMINAL",0,0,"3",,terminal_output +338,605439,"TERMINAL",0,0,"4",,terminal_output +339,606461,"TERMINAL",0,0,"5",,terminal_output 
+340,607587,"TERMINAL",0,0,"6",,terminal_output +341,608537,"TERMINAL",0,0,"7",,terminal_output +342,609632,"TERMINAL",0,0,"8",,terminal_output +343,610653,"TERMINAL",0,0,"9",,terminal_output +344,611686,"TERMINAL",0,0,"30",,terminal_output +345,612726,"TERMINAL",0,0,"1",,terminal_output +346,613747,"TERMINAL",0,0,"3",,terminal_output +347,614857,"TERMINAL",0,0,"4",,terminal_output +348,615834,"TERMINAL",0,0,"5",,terminal_output +349,616888,"TERMINAL",0,0,"6",,terminal_output +350,618032,"TERMINAL",0,0,"7",,terminal_output +351,619106,"TERMINAL",0,0,"8",,terminal_output +352,620092,"TERMINAL",0,0,"9",,terminal_output +353,621104,"TERMINAL",0,0,"40",,terminal_output +354,622082,"TERMINAL",0,0,"1",,terminal_output +355,623148,"TERMINAL",0,0,"2",,terminal_output +356,624278,"TERMINAL",0,0,"3",,terminal_output +357,625217,"TERMINAL",0,0,"4",,terminal_output +358,626325,"TERMINAL",0,0,"5",,terminal_output +359,627361,"TERMINAL",0,0,"6",,terminal_output +360,628342,"TERMINAL",0,0,"7",,terminal_output +361,629394,"TERMINAL",0,0,"8",,terminal_output +362,630423,"TERMINAL",0,0,"9",,terminal_output +363,631482,"TERMINAL",0,0,"50",,terminal_output +364,632508,"TERMINAL",0,0,"1",,terminal_output +365,633550,"TERMINAL",0,0,"2",,terminal_output +366,634617,"TERMINAL",0,0,"3",,terminal_output +367,635625,"TERMINAL",0,0,"4",,terminal_output +368,636767,"TERMINAL",0,0,"5",,terminal_output +369,637713,"TERMINAL",0,0,"6",,terminal_output +370,638551,"TERMINAL",0,0,"[?25lso[?25h[?25lo[?25h",,terminal_output +371,638697,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +372,638755,"TERMINAL",0,0,"8",,terminal_output +373,638806,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +374,638972,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +375,639109,"TERMINAL",0,0,"[?25le[?25h[?25l [?25h",,terminal_output +376,639384,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +377,639495,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +378,639769,"TERMINAL",0,0,"env/",,terminal_output +379,639837,"TERMINAL",0,0,"9",,terminal_output +380,640264,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +381,640598,"TERMINAL",0,0,"in/",,terminal_output +382,640875,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +383,640890,"TERMINAL",0,0,"8:00",,terminal_output +384,641016,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +385,641220,"TERMINAL",0,0,"tivate",,terminal_output +386,641893,"TERMINAL",0,0,"1",,terminal_output +387,642014,"TERMINAL",0,0,"[?25l[?2004l\r[?25h]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +388,643016,"TERMINAL",0,0,"2",,terminal_output +389,643965,"TERMINAL",0,0,"3",,terminal_output +390,645007,"TERMINAL",0,0,"4",,terminal_output +391,646090,"TERMINAL",0,0,"5",,terminal_output +392,647110,"TERMINAL",0,0,"6",,terminal_output +393,648237,"TERMINAL",0,0,"7",,terminal_output +394,649182,"TERMINAL",0,0,"8",,terminal_output +395,650287,"TERMINAL",0,0,"9",,terminal_output +396,651258,"TERMINAL",0,0,"10",,terminal_output +397,652332,"TERMINAL",0,0,"1",,terminal_output +398,653371,"TERMINAL",0,0,"28",,terminal_output +399,654486,"TERMINAL",0,0,"3",,terminal_output +400,655419,"TERMINAL",0,0,"4",,terminal_output +401,656463,"TERMINAL",0,0,"5",,terminal_output +402,657507,"TERMINAL",0,0,"6",,terminal_output +403,658556,"TERMINAL",0,0,"7",,terminal_output +404,659607,"TERMINAL",0,0,"8",,terminal_output +405,660722,"TERMINAL",0,0,"9",,terminal_output +406,661671,"TERMINAL",0,0,"20",,terminal_output +407,662707,"TERMINAL",0,0,"1",,terminal_output +408,663753,"TERMINAL",0,0,"3",,terminal_output 
+409,664785,"TERMINAL",0,0,"45",,terminal_output +410,665825,"TERMINAL",0,0,"5",,terminal_output +411,666878,"TERMINAL",0,0,"6",,terminal_output +412,667998,"TERMINAL",0,0,"7",,terminal_output +413,668945,"TERMINAL",0,0,"8",,terminal_output +414,670051,"TERMINAL",0,0,"9",,terminal_output +415,671032,"TERMINAL",0,0,"30",,terminal_output +416,672094,"TERMINAL",0,0,"1",,terminal_output +417,673114,"TERMINAL",0,0,"2",,terminal_output +418,674248,"TERMINAL",0,0,"3",,terminal_output +419,675208,"TERMINAL",0,0,"4",,terminal_output +420,676247,"TERMINAL",0,0,"5",,terminal_output +421,677291,"TERMINAL",0,0,"6",,terminal_output +422,678332,"TERMINAL",0,0,"7",,terminal_output +423,679474,"TERMINAL",0,0,"8",,terminal_output +424,680495,"TERMINAL",0,0,"9",,terminal_output +425,681461,"TERMINAL",0,0,"40",,terminal_output +426,682502,"TERMINAL",0,0,"1",,terminal_output +427,683548,"TERMINAL",0,0,"2",,terminal_output +428,684586,"TERMINAL",0,0,"3",,terminal_output +429,685717,"TERMINAL",0,0,"4",,terminal_output +430,686662,"TERMINAL",0,0,"5",,terminal_output +431,687715,"TERMINAL",0,0,"6",,terminal_output +432,688770,"TERMINAL",0,0,"8",,terminal_output +433,689921,"TERMINAL",0,0,"9",,terminal_output +434,690857,"TERMINAL",0,0,"50",,terminal_output +435,691902,"TERMINAL",0,0,"1",,terminal_output +436,692986,"TERMINAL",0,0,"2",,terminal_output +437,694010,"TERMINAL",0,0,"3",,terminal_output +438,695034,"TERMINAL",0,0,"4",,terminal_output +439,696162,"TERMINAL",0,0,"5",,terminal_output +440,697185,"TERMINAL",0,0,"6",,terminal_output +441,697791,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_franz/big-runs/dynamics-cotraining-batchsize-scaling-linear-lr/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_franz/big-runs/dynamics-cotraining-batchsize-scaling-linear-lr/%x_%j.log\n#SBATCH --job-name=train_dynamics_batch_size_scaling_linear_lr_1_node\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/batchsize_scaling/dynamics_cotraining/linear-lr/$job_name\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=48 \\n --init_lr=0 \\n --max_lr=1e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-batch-size-scaling-linear-lr-1-node-$slurm_job_id \\n --tags dynamics batch-size-scaling linear-lr 1-node \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir\n",shellscript,tab +442,698155,"TERMINAL",0,0,"7",,terminal_output +443,699237,"TERMINAL",0,0,"8",,terminal_output 
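Note on the sbatch file recorded above (sequence 441): its SBATCH directives request one node with four tasks and four GPUs, i.e. one srun task per GPU. A minimal sketch of the world size implied by that reading; the variable names are illustrative and not part of the recording:

# Sketch only: world size implied by the SBATCH directives in
# train_dynamics_1_nodes.sbatch (names are illustrative).
nodes = 1                  # SBATCH --nodes=1
ntasks_per_node = 4        # SBATCH --ntasks-per-node=4
gpus_per_node = 4          # SBATCH --gres=gpu:4
world_size = nodes * ntasks_per_node
assert ntasks_per_node == gpus_per_node  # one srun task per GPU
print(world_size)  # 4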
+444,700258,"TERMINAL",0,0,"9",,terminal_output +445,701383,"TERMINAL",0,0,"9:00",,terminal_output +446,702314,"TERMINAL",0,0,"1",,terminal_output +447,703351,"TERMINAL",0,0,"2",,terminal_output +448,703617,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1104,0,"",shellscript,selection_mouse +449,703741,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1104,4,"hkfs",shellscript,selection_mouse +450,703876,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1104,114,"hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n",shellscript,selection_mouse +451,703934,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1104,54,"hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared",shellscript,selection_mouse +452,703945,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1104,71,"hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints",shellscript,selection_mouse +453,703964,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1104,72,"hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/",shellscript,selection_mouse +454,703992,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1104,99,"hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e",shellscript,selection_mouse +455,704009,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1104,114,"hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n",shellscript,selection_mouse +456,704064,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1104,131,"hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n\nenv | grep SLURM",shellscript,selection_mouse +457,704325,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1104,114,"hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n",shellscript,selection_mouse +458,704406,"TERMINAL",0,0,"3",,terminal_output +459,704426,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1104,113,"hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +460,705488,"TERMINAL",0,0,"4",,terminal_output +461,706502,"TERMINAL",0,0,"5",,terminal_output +462,707516,"TERMINAL",0,0,"6",,terminal_output +463,708100,"TERMINAL",0,0,"watch",,terminal_focus +464,708555,"TERMINAL",0,0,"7",,terminal_output +465,708935,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +466,712311,"TERMINAL",0,0,"ls hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",,terminal_command +467,730090,"TERMINAL",0,0,"ls hkfs/work/workspace/scratch/tu^C",,terminal_command 
+468,730115,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +469,735150,"TERMINAL",0,0,"ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",,terminal_command +470,735173,"TERMINAL",0,0,"]633;E;2025-07-23 14:59:34 ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C020000 040000 051000 052000 053000\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +471,764814,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_franz/big-runs/dynamics-cotraining-batchsize-scaling-linear-lr/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_franz/big-runs/dynamics-cotraining-batchsize-scaling-linear-lr/%x_%j.log\n#SBATCH --job-name=train_dynamics_batch_size_scaling_linear_lr_8_node\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/batchsize_scaling/dynamics_cotraining/linear-lr/$job_name\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=384 \\n --init_lr=0 \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-batch-size-scaling-linear-lr-8-node-$slurm_job_id \\n --tags dynamics batch-size-scaling linear-lr 8-node \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir\n",shellscript,tab +472,780678,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",1022,0,"",shellscript,selection_mouse +473,796322,"TERMINAL",0,0,"srun",,terminal_focus +474,798355,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",0,0,"",shellscript,tab +475,800906,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",0,0,"",shellscript,tab +476,802687,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/start_runs.sh",0,0,"",shellscript,tab +477,803182,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",0,0,"",shellscript,tab +478,822094,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",0,0,"",shellscript,tab 
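The 8-node variant opened above (sequence 471) differs from the 1-node file only in --nodes, the job name, --batch_size=384, and --max_lr=8e-5: both batch size and peak LR grow by the same 8x factor as the node count, consistent with the linear_lr directory name. A minimal sketch of that scaling rule, assuming it is intended to be exactly linear in node count:

# Sketch only: linear scaling relating the two sbatch files
# (1 node: batch 48 / max_lr 1e-5; 8 nodes: batch 384 / max_lr 8e-5).
base_nodes, base_batch, base_max_lr = 1, 48, 1e-5

def scaled_config(nodes: int) -> tuple[int, float]:
    factor = nodes // base_nodes
    return base_batch * factor, base_max_lr * factor

print(scaled_config(8))  # (384, 8e-05), matching train_dynamics_8_nodes.sbatch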
+479,822096,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1516,0,"",shellscript,selection_mouse +480,823424,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1516,0,"-",shellscript,content +481,823426,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1517,0,"",shellscript,selection_keyboard +482,823761,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1517,0,"d",shellscript,content +483,823762,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1518,0,"",shellscript,selection_keyboard +484,823863,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1518,0,"e",shellscript,content +485,823864,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1519,0,"",shellscript,selection_keyboard +486,824047,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1519,0,"v",shellscript,content +487,824048,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1520,0,"",shellscript,selection_keyboard +488,824709,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1519,0,"",shellscript,selection_command +489,832671,"TERMINAL",0,0,"g",,terminal_output +490,832743,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +491,832828,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +492,832896,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +493,833038,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +494,833120,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +495,833288,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +496,833353,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +497,833545,"TERMINAL",0,0,"[?25lc[?25h[?25lh[?25h",,terminal_output +498,833699,"TERMINAL",0,0,"[?25l[?2004l\r[?25h[?1h=\r",,terminal_output +499,833852,"TERMINAL",0,0," add-wandb-name-and-tags\r\n causal-transformer-dynamics-model\r\n convert-to-jax-array-in-iter\r\n correct-batched-sampling\r\n dev\r\n dont-let-tf-see-gpu\r\n feat/explicit-image-dims\r\n fix-sampling\r\n grad-norm-log-and-clip\r\n grain-dataloader\r\n logging-variants\r\n lr-schedules\r\n main\r\n* maskgit-different-maskprob-per-sample\r\n metrics-logging-for-dynamics-model\r\n monkey-patch\r\n new-arch-sampling\r\n preprocess_video\r\n refactor-tmp\r\n revised-dataloader\r\n runner\r\n runner-grain\r\n sample-from-different-topologies\r\n speedup-tfrecord-preprocessing\r\n tmp\r\n\r[?1l>]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +500,836149,"TERMINAL",0,0,"s",,terminal_output +501,836216,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +502,836282,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +503,837370,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",0,0,"",shellscript,tab +504,842810,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1726,0,"",shellscript,selection_mouse 
+505,845094,"TERMINAL",0,0,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",,terminal_output +506,846137,"TERMINAL",0,0,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch\r\n[?2004l\r#!/usr/bin/env bash\r\n\r\n#SBATCH --nodes=1\r\n#SBATCH --ntasks-per-node=4\r\n#SBATCH --time=48:00:00\r\n#SBATCH --partition=accelerated\r\n#SBATCH --cpus-per-task=5\r\n#SBATCH --gres=gpu:4\r\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_franz/big-runs/dynamics-cotraining-batchsize-scaling-linear-lr/%x_%j.log\r\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_franz/big-runs/dynamics-cotraining-batchsize-scaling-linear-lr/%x_%j.log\r\n#SBATCH --job-name=train_dynamics_batch_size_scaling_linear_lr_1_node\r\n\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/batchsize_scaling/dynamics_cotraining/linear-lr/$job_name\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\n# tokenizer with the new structure supporting larger ffn_dim\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\r\n\r\nenv | grep SLURM\r\n\r\nsrun python train_dynamics.py \\r\n --save_ckpt \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=48 \\r\n --init_lr=0 \\r\n --max_lr=1e-5 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-batch-size-scaling-linear-lr-1-node-dev-$slurm_job_id \\r\n --tags dynamics batch-size-scaling linear-lr 1-node \\r\n --entity instant-uv \\r\n --project jafar \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $array_records_dir\r\n",,terminal_output 
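Later in this run's output, absl reports missing <step>/metrics/metrics files for exactly the step directories the earlier ls listed under the tokenizer checkpoint (020000, 040000, 051000, 052000, 053000). A minimal sketch of how one might enumerate such steps, assuming the plain directory layout shown; steps_missing_metrics is a hypothetical helper, not part of the project:

# Sketch only: list checkpoint step dirs lacking a metrics/metrics file,
# assuming the <ckpt_dir>/<step>/metrics/metrics layout seen in the log.
from pathlib import Path

def steps_missing_metrics(ckpt_dir: str) -> list[str]:
    root = Path(ckpt_dir)
    return sorted(
        d.name for d in root.iterdir()
        if d.is_dir() and not (d / "metrics" / "metrics").exists()
    )

# e.g. steps_missing_metrics(".../train_tokenizer_lr_sweep_1e-4_larger_ffn")
# would report 020000, 040000, 051000, 052000, 053000 per the errors below.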
+507,846300,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=1909774\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0402\r\nSLURM_JOB_START_TIME=1753275349\r\nSLURM_STEP_NODELIST=hkn0402\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1753278949\r\nSLURM_PMI2_SRUN_PORT=33437\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=dev_accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3370715\r\nSLURM_PTY_PORT=44769\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=41\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=4\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e11.hkn0402\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=124\r\nSLURM_NODELIST=hkn0402\r\nSLURM_SRUN_COMM_PORT=43989\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=4\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3370715\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0402\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=43989\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0402\r\n",,terminal_output +508,846418,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +509,866901,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +510,872396,"TERMINAL",0,0,"2025-07-23 15:01:51.613333: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-23 15:01:51.613359: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-23 15:01:51.614290: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-23 15:01:51.614269: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +511,899761,"TERMINAL",0,0,"2025-07-23 15:02:19.053210: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-23 15:02:19.069930: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +512,902519,"TERMINAL",0,0,"2025-07-23 15:02:21.807966: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +513,902952,"TERMINAL",0,0,"2025-07-23 15:02:22.270045: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +514,904577,"TERMINAL",0,0,"2025-07-23 15:02:23.894093: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +515,904857,"TERMINAL",0,0,"2025-07-23 15:02:24.142906: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +516,908136,"TERMINAL",0,0,"2025-07-23 15:02:27.426817: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +517,908807,"TERMINAL",0,0,"2025-07-23 15:02:28.120854: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +518,911520,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. 
Use `wandb login --relogin` to force relogin\r\n",,terminal_output +519,912234,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250723_150230-tkx38tzf\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-batch-size-scaling-linear-lr-1-node-dev-3370715\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/tkx38tzf\r\n",,terminal_output +520,976153,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 4).\r\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 4).\r\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 4).\r\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 4).\r\n",,terminal_output +521,976264,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 51000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/051000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 51000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/051000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 51000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/051000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 53000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/053000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 53000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/053000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 53000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/053000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 52000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/052000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 52000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/052000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 52000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/052000/metrics/metrics not found.\r\n",,terminal_output +522,976326,"TERMINAL",0,0,"WARNING:absl:Missing metrics for 
step 20000\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 51000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/051000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 52000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/052000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 53000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/053000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/020000/metrics/metrics not found.\r\n",,terminal_output +523,978390,"TERMINAL",0,0,"Running on 4 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 33750256, 'lam': 17229792, 'dynamics': 26555904, 'total': 77535952}\r\nRunning on 4 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 33750256, 'lam': 17229792, 'dynamics': 26555904, 'total': 77535952}\r\nRunning on 4 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 33750256, 'lam': 17229792, 'dynamics': 26555904, 'total': 77535952}\r\nRunning on 4 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 33750256, 'lam': 17229792, 'dynamics': 26555904, 'total': 77535952}\r\n",,terminal_output +524,983420,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +525,983609,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +526,984206,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +527,984690,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +528,984806,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +529,984914,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +530,985022,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +531,985089,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +532,985698,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +533,985824,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +534,986084,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +535,986579,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +536,986633,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type ``, but the default value `` has type ``. 
We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type ``, but the default value `` has type ``. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n",,terminal_output +537,1004871,"TERMINAL",0,0,"2025-07-23 15:04:04.191515: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-23 15:04:04.191964: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +538,1005417,"TERMINAL",0,0,"2025-07-23 15:04:04.723023: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-23 15:04:04.723067: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +539,1009816,"TERMINAL",0,0,"2025-07-23 15:04:09.112343: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +540,1015814,"TERMINAL",0,0,"2025-07-23 15:04:15.082284: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +541,1053513,"TERMINAL",0,0,"bash",,terminal_focus +542,1056623,"TERMINAL",0,0,"fsacct_week",,terminal_command +543,1056670,"TERMINAL",0,0,"]633;E;2025-07-23 15:04:55 fsacct_week;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C JobID JobName Partition All State Elapsed Timelimit \r\n--------------- ------------------------------ ---------------- --- ------------ ---------- ---------- \r\n 3345116 train_dynamics_modelsize_scal+ accelerated 48 TIMEOUT 2-00:00:17 2-00:00:00 \r\n 3348397 train_dynamics_lr_schedule_co+ accelerated 48 COMPLETED 1-03:21:05 2-00:00:00 \r\n 3348399 train_dynamics_lr_schedule_cos accelerated 48 COMPLETED 1-03:28:39 2-00:00:00 \r\n 3348400 train_dynamics_lr_schedule_wsd accelerated 48 COMPLETED 1-03:19:17 2-00:00:00 \r\n 3358457 train_dyn_yolorun_new_arch accelerated 48 FAILED 00:00:28 2-00:00:00 \r\n 3359334 wrap accelerated 6 TIMEOUT 10:00:29 10:00:00 \r\n 3359338 wrap accelerated 6 TIMEOUT 10:00:16 10:00:00 \r\n 3359343 train_dyn_new_arch-bugfixed-s+ accelerated 48 COMPLETED 23:19:14 2-00:00:00 \r\n 3359349 train_dyn_new_arch-bugfixed-t+ accelerated 48 COMPLETED 1-01:00:55 2-00:00:00 \r\n 3365873 train_dynamics_overfit_sample+ accelerated 6 COMPLETED 01:26:52 2-00:00:00 \r\n 3365876 train_dynamics_overfit_sample+ accelerated 6 COMPLETED 01:40:07 2-00:00:00 \r\n 3366883 train_dynamics_overfit_sample+ accelerated 6 COMPLETED 01:33:31 2-00:00:00 \r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +544,1075551,"TERMINAL",0,0,"alias",,terminal_command +545,1075563,"TERMINAL",0,0,"]633;E;2025-07-23 15:05:14 alias;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;Calias egrep='egrep --color=auto'\r\nalias fgrep='fgrep --color=auto'\r\nalias fqueue='watch -n 1 ""squeue -o \""%.10i %.16P %.30j %.8u %.8T %.10M %.9l %.6D %R\""""'\r\nalias fsacct_week='sacct --format=""JobID%15,JobName%30,Partition%16,AllocCPUS%3,State%12,Elapsed%10,Timelimit%10"" --starttime $(date -d ""last week"" +%Y-%m-%d) | grep -vE ""*.batch|*.extern|*.inter|bash|python|CANCELLED|echo""'\r\nalias grep='grep --color=auto'\r\nalias idle='sinfo_t_idle'\r\nalias idling='watch -n1 sinfo_t_idle'\r\nalias l.='ls -d .* --color=auto'\r\nalias ll='ls -l --color=auto'\r\nalias ls='ls --color=auto'\r\nalias mc='. 
/usr/libexec/mc/mc-wrapper.sh'\r\nalias queue='watch -n1 squeue --me'\r\nalias runner='cd /home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/'\r\nalias salloc_cpu='salloc --time=01:00:00 --partition=dev_cpuonly --nodes=1 --cpus-per-task=128'\r\nalias salloc_node='salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --gres=gpu:1 --cpus-per-task=8'\r\nalias smi='watch -n1 nvidia-smi'\r\nalias sync-runner='sh /home/hk-project-p0023960/tum_cte0515/sync_runner.sh /home/hk-project-p0023960/tum_cte0515/Projects/jafar /home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/'\r\nalias xzegrep='xzegrep --color=auto'\r\nalias xzfgrep='xzfgrep --color=auto'\r\nalias xzgrep='xzgrep --color=auto'\r\nalias zegrep='zegrep --color=auto'\r\nalias zfgrep='zfgrep --color=auto'\r\nalias zgrep='zgrep --color=auto'\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +546,1261437,"TERMINAL",0,0,"Step 0, loss: 13.384492874145508\r\nStep 1, loss: 13.427678108215332\r\nStep 2, loss: 13.41768741607666\r\nStep 3, loss: 13.526819229125977\r\nStep 4, loss: 13.285727500915527\r\nStep 5, loss: 13.3654146194458\r\nStep 6, loss: 13.535348892211914\r\nStep 7, loss: 13.251749992370605\r\nStep 8, loss: 13.198036193847656\r\nStep 9, loss: 13.523465156555176\r\nStep 10, loss: 13.20858097076416\r\nStep 11, loss: 13.36640453338623\r\nStep 12, loss: 13.336483001708984\r\nStep 13, loss: 13.538276672363281\r\nStep 14, loss: 13.350528717041016\r\nStep 15, loss: 13.182315826416016\r\nStep 16, loss: 13.515462875366211\r\nStep 17, loss: 13.380617141723633\r\nStep 18, loss: 13.30540657043457\r\nStep 19, loss: 13.270244598388672\r\nStep 20, loss: 13.312971115112305\r\nStep 21, loss: 13.330161094665527\r\nStep 22, loss: 13.234780311584473\r\nStep 23, loss: 13.449820518493652\r\nStep 24, loss: 13.327351570129395\r\nStep 25, loss: 13.309706687927246\r\nStep 26, loss: 13.227757453918457\r\nStep 27, loss: 13.307857513427734\r\nStep 28, loss: 12.929142951965332\r\nStep 29, loss: 12.978145599365234\r\nStep 30, loss: 13.010326385498047\r\nStep 31, loss: 13.1903076171875\r\nStep 32, loss: 13.181324005126953\r\nStep 33, loss: 13.31438159942627\r\nStep 34, loss: 13.08935546875\r\nStep 35, loss: 13.111137390136719\r\nStep 36, loss: 13.158140182495117\r\nStep 37, loss: 13.269023895263672\r\nStep 38, loss: 13.077432632446289\r\nStep 39, loss: 13.06289005279541\r\nStep 40, loss: 12.860542297363281\r\nStep 41, loss: 12.856751441955566\r\nStep 42, loss: 12.869312286376953\r\nStep 43, loss: 13.064133644104004\r\nStep 44, loss: 12.938496589660645\r\nStep 45, loss: 12.908574104309082\r\nStep 46, loss: 12.93155574798584\r\nStep 47, loss: 12.868438720703125\r\nStep 48, loss: 12.939332008361816\r\nStep 49, loss: 12.604743957519531\r\nStep 50, loss: 12.854501724243164\r\nStep 51, loss: 12.79870891571045\r\nStep 52, loss: 12.796146392822266\r\nStep 53, loss: 12.810787200927734\r\nStep 54, loss: 12.810173988342285\r\nStep 55, loss: 13.020254135131836\r\nStep 56, loss: 12.810023307800293\r\nStep 57, loss: 12.655186653137207\r\nStep 58, loss: 12.692059516906738\r\nStep 59, loss: 12.783642768859863\r\nStep 60, loss: 12.768692970275879\r\nStep 61, loss: 12.505507469177246\r\nStep 62, loss: 12.68720817565918\r\nStep 63, loss: 12.598185539245605\r\nStep 64, loss: 12.455301284790039\r\nStep 65, loss: 12.59665584564209\r\nStep 66, loss: 12.519197463989258\r\nStep 67, loss: 12.54979133605957\r\nStep 68, loss: 12.375537872314453\r\nStep 69, loss: 12.428227424621582\r\nStep 70, loss: 12.505049705505371\r\nStep 71, loss: 
12.53386116027832\r\nStep 72, loss: 12.547608375549316\r\nStep 73, loss: 12.267524719238281\r\nStep 74, loss: 12.256860733032227\r\nStep 75, loss: 12.457502365112305\r\nStep 76, loss: 12.275701522827148\r\nStep 77, loss: 12.409900665283203\r\nStep 78, loss: 12.37984561920166\r\nStep 79, loss: 12.292445182800293\r\nStep 80, loss: 12.372243881225586\r\nStep 81, loss: 12.25374984741211\r\nStep 82, loss: 12.393712043762207\r\nStep 83, loss: 12.259675025939941\r\nStep 84, loss: 12.01478099822998\r\nStep 85, loss: 12.147265434265137\r\nStep 86, loss: 12.035223007202148\r\nStep 87, loss: 12.121434211730957\r\nStep 88, loss: 12.115534782409668\r\nStep 89, loss: 12.05721664428711\r\nStep 90, loss: 11.967655181884766\r\nStep 91, loss: 12.01622486114502\r\nStep 92, loss: 12.02457332611084\r\nStep 93, loss: 12.041686058044434\r\nStep 94, loss: 11.967320442199707\r\nStep 95, loss: 12.092466354370117\r\nStep 96, loss: 11.966588020324707\r\nStep 97, loss: 11.938796997070312\r\nStep 98, loss: 11.885150909423828\r\nStep 99, loss: 11.921113967895508\r\nStep 100, loss: 11.814417839050293\r\nStep 101, loss: 11.705466270446777\r\nStep 102, loss: 11.92094898223877\r\nStep 103, loss: 11.835418701171875\r\nStep 104, loss: 11.99173355102539\r\nStep 105, loss: 11.883056640625\r\nStep 106, loss: 11.87367057800293\r\nStep 107, loss: 11.811823844909668\r\nStep 108, loss: 11.843890190124512\r\nStep 109, loss: 11.631610870361328\r\nStep 110, loss: 11.705374717712402\r\nStep 111, loss: 11.703887939453125\r\nStep 112, loss: 11.64559268951416\r\nStep 113, loss: 11.510262489318848\r\nStep 114, loss: 11.537859916687012\r\nStep 115, loss: 11.553618431091309\r\nStep 116, loss: 11.568038940429688\r\nStep 117, loss: 11.450733184814453\r\nStep 118, loss: 11.679144859313965\r\nStep 119, loss: 11.4474515914917\r\nStep 120, loss: 11.510746002197266\r\n
Step 121, loss: 11.56981086730957\r\nStep 122, loss: 11.496781349182129\r\nStep 123, loss: 11.388836860656738\r\nStep 124, loss: 11.475391387939453\r\nStep 125, loss: 11.363513946533203\r\nStep 126, loss: 11.443411827087402\r\nStep 127, loss: 11.288275718688965\r\nStep 128, loss: 11.240981101989746\r\nStep 129, loss: 11.36435317993164\r\nStep 130, loss: 11.254955291748047\r\nStep 131, loss: 11.446842193603516\r\nStep 132, loss: 11.139900207519531\r\nStep 133, loss: 11.190834999084473\r\nStep 134, loss: 11.278471946716309\r\nStep 135, loss: 11.32027530670166\r\nStep 136, loss: 11.088859558105469\r\nStep 137, loss: 11.135394096374512\r\nStep 138, loss: 11.177473068237305\r\nStep 139, loss: 11.23405933380127\r\nStep 140, loss: 11.08298110961914\r\nStep 141, loss: 11.202191352844238\r\nStep 142, loss: 11.048303604125977\r\nStep 143, loss: 10.978487968444824\r\nStep 144, loss: 11.075052261352539\r\nStep 145, loss: 11.054147720336914\r\nStep 146, loss: 10.895940780639648\r\nStep 147, loss: 10.922086715698242\r\nStep 148, loss: 10.918930053710938\r\nStep 149, loss: 10.89687728881836\r\nStep 150, loss: 10.764581680297852\r\nStep 151, loss: 10.817301750183105\r\nStep 152, loss: 10.946759223937988\r\nStep 153, loss: 10.930502891540527\r\nStep 154, loss: 10.75655460357666\r\nStep 155, loss: 10.799771308898926\r\nStep 156, loss: 10.828421592712402\r\nStep 157, loss: 10.779156684875488\r\nStep 158, loss: 10.779332160949707\r\nStep 159, loss: 10.735968589782715\r\nStep 160, loss: 10.727903366088867\r\nStep 161, loss: 10.790885925292969\r\nStep 162, loss: 10.72120475769043\r\nStep 163, loss: 10.649282455444336\r\nStep 164, loss: 10.577407836914062\r\nStep 165, loss: 10.578060150146484\r\nStep 166, loss: 10.560288429260254\r\nStep 167, loss: 10.520197868347168\r\nStep 168, loss: 10.533600807189941\r\nStep 169, loss: 10.507055282592773\r\nStep 170, loss: 10.434101104736328\r\nStep 171, loss: 10.510315895080566\r\nStep 172, loss: 10.432265281677246\r\nStep 173, loss: 10.584399223327637\r\nStep 174, loss: 10.299633026123047\r\nStep 175, loss: 10.43995475769043\r\nStep 176, loss: 10.365023612976074\r\nStep 177, loss: 10.461203575134277\r\nStep 178, loss: 10.347628593444824\r\nStep 179, loss: 10.431109428405762\r\nStep 180, loss: 10.297615051269531\r\nStep 181, loss: 10.400114059448242\r\nStep 182, loss: 10.26560115814209\r\nStep 183, loss: 10.370589256286621\r\nStep 184, loss: 10.315866470336914\r\nStep 185, loss: 10.231348991394043\r\nStep 186, loss: 10.154208183288574\r\nStep 187, loss: 10.333128929138184\r\nStep 188, loss: 10.272704124450684\r\nStep 189, loss: 10.174359321594238\r\nStep 190, loss: 10.168438911437988\r\nStep 191, loss: 10.096136093139648\r\nStep 192, loss: 10.216461181640625\r\nStep 193, loss: 10.187451362609863\r\nStep 194, loss: 10.097491264343262\r\nStep 195, loss: 10.079633712768555\r\nStep 196, loss: 10.051545143127441\r\nStep 197, loss: 10.009090423583984\r\nStep 198, loss: 10.088085174560547\r\nStep 199, loss: 10.088445663452148\r\nStep 200, loss: 10.041621208190918\r\nStep 201, loss: 9.929361343383789\r\nStep 202, loss: 9.920825004577637\r\nStep 203, loss: 9.930505752563477\r\nStep 204, loss: 9.932853698730469\r\nStep 205, loss: 9.92989730834961\r\nStep 206, loss: 9.902012825012207\r\nStep 207, loss: 9.840082168579102\r\nStep 208, loss: 9.814620971679688\r\nStep 209, loss: 9.894298553466797\r\nStep 210, loss: 
10.030668258666992\r\nStep 211, loss: 9.759446144104004\r\nStep 212, loss: 9.910297393798828\r\nStep 213, loss: 9.761031150817871\r\nStep 214, loss: 9.851898193359375\r\nStep 215, loss: 9.810975074768066\r\nStep 216, loss: 9.772217750549316\r\nStep 217, loss: 9.875381469726562\r\nStep 218, loss: 9.591219902038574\r\nStep 219, loss: 9.717686653137207\r\nStep 220, loss: 9.577218055725098\r\nStep 221, loss: 9.735997200012207\r\nStep 222, loss: 9.597052574157715\r\nStep 223, loss: 9.398141860961914\r\nStep 224, loss: 9.628983497619629\r\nStep 225, loss: 9.639044761657715\r\nStep 226, loss: 9.594630241394043\r\nStep 227, loss: 9.588937759399414\r\nStep 228, loss: 9.650166511535645\r\nStep 229, loss: 9.535862922668457\r\nStep 230, loss: 9.545808792114258\r\nStep 231, loss: 9.621722221374512\r\nStep 232, loss: 9.607393264770508\r\nStep 233, loss: 9.588334083557129\r\nStep 234, loss: 9.480213165283203\r\nStep 235, loss: 9.391374588012695\r\nStep 236, loss: 9.275531768798828\r\nStep 237, loss: 9.436307907104492\r\nStep 238, loss: 9.50087833404541\r\n",,terminal_output +547,1402400,"TERMINAL",0,0,"sacct --format=""JobID%15,JobName%30,Partition%16,AllocCPUS%3,State%12,Elapsed%10,Timelimit%10"" --starttime $(date -d ""last week"" +%Y-%m-%d) | grep -vE "".batch|.extern""",,terminal_command +548,1402475,"TERMINAL",0,0,"]633;E;2025-07-23 15:10:41 sacct --format=""JobID%15,JobName%30,Partition%16,AllocCPUS%3,State%12,Elapsed%10,Timelimit%10"" --starttime $(date -d ""last week"" +%Y-%m-%d) | grep -vE "".batch|.extern"";3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C JobID JobName Partition All State Elapsed Timelimit \r\n--------------- ------------------------------ ---------------- --- ------------ ---------- ---------- \r\n 3345116 train_dynamics_modelsize_scal+ accelerated 48 TIMEOUT 2-00:00:17 2-00:00:00 \r\n 3345116.0 python 40 CANCELLED 2-00:00:25 \r\n 3348397 train_dynamics_lr_schedule_co+ accelerated 48 COMPLETED 1-03:21:05 2-00:00:00 \r\n 3348397.0 python 40 COMPLETED 1-03:20:36 \r\n 3348399 train_dynamics_lr_schedule_cos accelerated 48 COMPLETED 1-03:28:39 2-00:00:00 \r\n 3348399.0 python 40 COMPLETED 1-03:28:10 \r\n 3348400 train_dynamics_lr_schedule_wsd accelerated 48 COMPLETED 1-03:19:17 2-00:00:00 \r\n 3348400.0 python 40 COMPLETED 1-03:18:48 \r\n 3348592 train_dyn_yolorun accelerated 48 CANCELLED b+ 19:29:04 1-00:00:00 \r\n 3348592.0 python 40 CANCELLED 19:29:13 \r\n 3349982 interactive accelerated 48 CANCELLED b+ 00:10:32 10:00:00 \r\n3349982.intera+ interactive 24 CANCELLED 00:11:02 \r\n 3350109 interactive accelerated 0 CANCELLED b+ 00:00:00 10:00:00 \r\n 3350110 interactive dev_accelerated 0 CANCELLED b+ 00:00:00 01:00:00 \r\n 3350111 interactive accelerated 48 CANCELLED b+ 00:09:37 10:00:00 \r\n3350111.intera+ interactive 24 CANCELLED 00:10:06 \r\n 3350245 interactive accelerated 48 CANCELLED b+ 00:17:59 10:00:00 \r\n3350245.intera+ interactive 24 CANCELLED 00:18:28 \r\n 3350302 interactive accelerated 48 CANCELLED b+ 00:40:22 10:00:00 \r\n3350302.intera+ interactive 24 CANCELLED 00:40:52 \r\n 3350302.0 python 40 FAILED 00:00:27 \r\n 3350302.1 python 40 FAILED 00:05:27 \r\n 3350302.2 python 40 FAILED 00:03:41 \r\n 3350302.3 python 40 CANCELLED b+ 00:03:28 \r\n 3350302.4 python 40 CANCELLED b+ 00:04:30 \r\n 3350302.5 python 40 FAILED 00:01:21 \r\n 3350302.6 python 40 FAILED 00:00:27 \r\n 3350302.7 python 40 CANCELLED 00:04:03 \r\n 3350418 interactive accelerated 48 COMPLETED 08:32:29 10:00:00 \r\n3350418.intera+ interactive 24 CANCELLED 08:32:59 \r\n 3350418.0 python 40 CANCELLED b+ 00:01:30 
\r\n 3350418.1 python 40 FAILED 00:02:41 \r\n 3350418.2 python 40 CANCELLED b+ 01:19:51 \r\n 3350418.3 python 40 FAILED 00:00:16 \r\n 3350418.4 python 40 FAILED 00:00:03 \r\n 3350418.5 python 40 FAILED 00:00:07 \r\n 3350418.6 python 40 FAILED 00:00:03 \r\n 3350418.7 python 40 FAILED 00:02:57 \r\n 3350418.8 python 40 CANCELLED b+ 00:08:48 \r\n 3350418.9 python 40 CANCELLED b+ 00:10:49 \r\n 3350418.10 python 40 CANCELLED b+ 00:05:51 \r\n 3350418.11 python 40 CANCELLED b+ 00:06:24 \r\n 3351743 train_dyn_yolorun_new_arch accelerated 48 CANCELLED b+ 01:58:23 2-00:00:00 \r\n 3351743.0 python 40 CANCELLED 01:58:29 \r\n 3352103 train_dyn_yolorun_new_arch accelerated 48 CANCELLED b+ 00:09:58 2-00:00:00 \r\n 3352103.0 python 40 CANCELLED 00:10:03 \r\n 3352115 train_dyn_yolorun_new_arch accelerated 48 CANCELLED b+ 23:14:45 2-00:00:00 \r\n 3352115.0 python 40 CANCELLED 23:15:56 \r\n 3352588 train_dyn_yolorun_new_arch accelerated 0 CANCELLED b+ 00:00:00 2-00:00:00 \r\n 3352994 interactive accelerated 48 CANCELLED b+ 05:23:39 10:00:00 \r\n3352994.intera+ interactive 24 CANCELLED 05:24:09 \r\n 3352994.0 python 40 CANCELLED b+ 00:13:38 \r\n 3352994.1 python 40 CANCELLED b+ 00:00:04 \r\n 3352994.2 python 40 COMPLETED 00:09:49 \r\n 3352996 interactive accelerated 6 CANCELLED b+ 05:23:16 10:00:00 \r\n3352996.intera+ interactive 6 CANCELLED 05:23:46 \r\n 3352996.0 python 5 CANCELLED b+ 00:00:01 \r\n 3353884 interactive accelerated 48 CANCELLED b+ 07:02:05 10:00:00 \r\n3353884.intera+ interactive 24 CANCELLED 07:02:35 \r\n 3353884.0 python 40 FAILED 00:01:30 \r\n 3353884.1 python 40 FAILED 00:02:39 \r\n 3353884.2 python 40 COMPLETED 00:10:28 \r\n 3353885 interactive dev_accelerated 0 CANCELLED b+ 00:00:00 01:00:00 \r\n 3353924 interactive accelerated 6 COMPLETED 02:30:28 10:00:00 \r\n3353924.intera+ interactive 6 CANCELLED 02:30:58 \r\n 3355596 interactive accelerated 6 COMPLETED 01:37:30 10:00:00 \r\n3355596.intera+ interactive 6 CANCELLED b+ 01:37:30 \r\n 3355871 interactive accelerated 6 COMPLETED 04:03:13 10:00:00 \r\n3355871.intera+ interactive 6 CANCELLED 04:03:42 \r\n 3357147 interactive accelerated 6 CANCELLED b+ 04:19:45 10:00:00 \r\n3357147.intera+ interactive 6 CANCELLED 04:20:14 \r\n 3357893 interactive accelerated 48 CANCELLED b+ 03:08:43 10:00:00 \r\n3357893.intera+ interactive 24 CANCELLED 03:09:12 \r\n 3357893.0 python 40 CANCELLED b+ 00:07:38 \r\n 3357893.1 python 40 CANCELLED b+ 00:04:25 \r\n 3357893.2 python 40 CANCELLED b+ 00:00:15 \r\n 3357893.3 python 40 CANCELLED b+ 00:04:41 \r\n 3357893.4 python 40 COMPLETED 00:20:40 \r\n 3357893.5 python 40 CANCELLED b+ 00:04:27 \r\n 3357893.6 python 40 CANCELLED b+ 00:02:17 \r\n 3357893.7 python 40 FAILED 00:08:51 \r\n 3357893.8 python 40 CANCELLED b+ 00:16:40 \r\n 3357893.9 python 40 COMPLETED 00:10:01 \r\n 3357894 interactive accelerated 6 CANCELLED b+ 03:45:27 10:00:00 \r\n3357894.intera+ interactive 6 CANCELLED 03:45:57 \r\n 3358457 train_dyn_yolorun_new_arch accelerated 48 FAILED 00:00:28 2-00:00:00 \r\n 3358457.0 python 40 FAILED 00:00:00 \r\n 3359231 interactive accelerated 48 CANCELLED b+ 01:47:34 10:00:00 \r\n3359231.intera+ interactive 24 CANCELLED 01:48:04 \r\n 3359231.0 python 40 CANCELLED b+ 00:01:53 \r\n 3359231.1 python 40 COMPLETED 00:10:04 \r\n 3359231.2 python 40 COMPLETED 00:09:48 \r\n 3359232 interactive accelerated 6 COMPLETED 05:31:42 10:00:00 \r\n3359232.intera+ interactive 6 CANCELLED 05:32:11 \r\n 3359275 interactive accelerated 6 COMPLETED 04:50:32 10:00:00 \r\n3359275.intera+ interactive 6 CANCELLED 04:51:02 \r\n 3359333 
wrap accelerated 6 CANCELLED b+ 00:00:18 10:00:00 \r\n 3359334 wrap accelerated 6 TIMEOUT 10:00:29 10:00:00 \r\n 3359338 wrap accelerated 6 TIMEOUT 10:00:16 10:00:00 \r\n 3359343 train_dyn_new_arch-bugfixed-s+ accelerated 48 COMPLETED 23:19:14 2-00:00:00 \r\n 3359343.0 python 40 COMPLETED 23:18:47 \r\n 3359349 train_dyn_new_arch-bugfixed-t+ accelerated 48 COMPLETED 1-01:00:55 2-00:00:00 \r\n 3359349.0 python 40 COMPLETED 1-01:00:27 \r\n 3364311 interactive accelerated 48 CANCELLED b+ 02:14:14 10:00:00 \r\n3364311.intera+ interactive 24 CANCELLED 02:14:43 \r\n 3364312 interactive accelerated 6 CANCELLED b+ 03:21:44 10:00:00 \r\n3364312.intera+ interactive 6 CANCELLED 03:22:14 \r\n 3365091 interactive accelerated 6 COMPLETED 01:55:23 10:00:00 \r\n3365091.intera+ interactive 6 CANCELLED 01:55:53 \r\n 3365092 interactive accelerated 0 CANCELLED b+ 00:00:00 10:00:00 \r\n 3365094 interactive accelerated-h100 0 CANCELLED b+ 00:00:00 10:00:00 \r\n 3365095 interactive dev_accelerated+ 6 TIMEOUT 01:00:30 01:00:00 \r\n3365095.intera+ interactive 6 CANCELLED 01:00:59 \r\n 3365333 interactive accelerated 6 COMPLETED 09:28:57 10:00:00 \r\n3365333.intera+ interactive 6 CANCELLED 09:29:26 \r\n 3365333.0 python 5 CANCELLED b+ 00:00:05 \r\n 3365333.1 python 5 COMPLETED 00:11:36 \r\n 3365333.2 python 5 FAILED 00:00:37 \r\n 3365333.3 python 5 COMPLETED 00:01:35 \r\n 3365333.4 python 5 COMPLETED 00:17:00 \r\n 3365333.5 python 5 FAILED 00:00:51 \r\n 3365333.6 python 5 COMPLETED 00:16:07 \r\n 3365333.7 python 5 COMPLETED 00:17:36 \r\n 3365333.8 python 5 COMPLETED 00:16:28 \r\n 3365333.9 python 5 COMPLETED 00:01:03 \r\n 3365333.10 python 5 COMPLETED 00:13:24 \r\n 3365333.11 python 5 CANCELLED b+ 00:03:34 \r\n 3365334 interactive dev_accelerated 6 TIMEOUT 01:00:28 01:00:00 \r\n3365334.intera+ interactive 6 FAILED 01:00:55 \r\n 3365334.0 python 5 FAILED 00:00:26 \r\n 3365334.1 python 5 COMPLETED 00:01:46 \r\n 3365334.2 python 5 CANCELLED b+ 00:03:51 \r\n 3365334.3 python 5 COMPLETED 00:03:29 \r\n 3365563 interactive dev_accelerated 6 TIMEOUT 01:00:02 01:00:00 \r\n3365563.intera+ interactive 6 CANCELLED 01:00:31 \r\n 3365563.0 python 5 COMPLETED 00:15:24 \r\n 3365563.1 python 5 COMPLETED 00:14:50 \r\n 3365872 train_dynamics_overfit_sample+ accelerated 0 CANCELLED b+ 00:00:00 2-00:00:00 \r\n 3365873 train_dynamics_overfit_sample+ accelerated 6 COMPLETED 01:26:52 2-00:00:00 \r\n 3365873.0 python 5 COMPLETED 01:26:22 \r\n 3365876 train_dynamics_overfit_sample+ accelerated 6 COMPLETED 01:40:07 2-00:00:00 \r\n 3365876.0 python 5 COMPLETED 01:39:38 \r\n 3366626 interactive accelerated 0 CANCELLED b+ 00:00:00 10:00:00 \r\n 3366628 interactive accelerated 0 CANCELLED b+ 00:00:00 10:00:00 \r\n 3366766 interactive accelerated 0 CANCELLED b+ 00:00:00 02:00:00 \r\n 3366767 interactive accelerated 0 CANCELLED b+ 00:00:00 01:00:00 \r\n 3366769 interactive accelerated 0 CANCELLED b+ 00:00:00 00:30:00 \r\n 3366770 interactive accelerated 0 CANCELLED b+ 00:00:00 10:00:00 \r\n 3366817 interactive accelerated 0 CANCELLED b+ 00:00:00 01:00:00 \r\n 3366843 train_dynamics_overfit_sample+ accelerated 0 CANCELLED b+ 00:00:00 2-00:00:00 \r\n 3366883 train_dynamics_overfit_sample+ accelerated 6 COMPLETED 01:33:31 2-00:00:00 \r\n 3366883.0 python 5 COMPLETED 01:33:02 \r\n 3367399 interactive accelerated 0 CANCELLED b+ 00:00:00 10:00:00 \r\n 3367418 interactive dev_accelerated 6 TIMEOUT 01:00:18 01:00:00 \r\n3367418.intera+ interactive 6 CANCELLED 01:00:47 \r\n 3367418.0 python 5 CANCELLED b+ 00:05:11 \r\n 3367418.1 python 5 COMPLETED 00:04:09 
\r\n 3367418.2 python 5 CANCELLED b+ 00:02:55 \r\n 3367418.3 python 5 COMPLETED 00:04:13 \r\n 3367418.4 python 5 CANCELLED b+ 00:04:23 \r\n 3367418.5 python 5 CANCELLED b+ 00:02:57 \r\n 3367418.6 python 5 COMPLETED 00:04:12 \r\n 3367418.7 python 5 COMPLETED 00:04:16 \r\n 3367418.8 python 5 CANCELLED b+ 00:02:55 \r\n 3367418.9 python 5 CANCELLED b+ 00:00:01 \r\n 3367418.10 python 5 COMPLETED 00:03:35 \r\n 3367730 interactive dev_accelerated 0 CANCELLED b+ 00:00:00 01:00:00 \r\n 3367739 interactive dev_accelerated 0 CANCELLED b+ 00:00:00 01:00:00 \r\n 3370431 interactive dev_accelerated 6 TIMEOUT 01:00:02 01:00:00 \r\n3370431.intera+ interactive 6 CANCELLED 01:00:31 \r\n 3370431.0 python 5 FAILED 00:01:53 \r\n 3370431.1 python 5 FAILED 00:01:35 \r\n 3370431.2 python 5 CANCELLED b+ 00:17:33 \r\n 3370431.3 python 5 CANCELLED b+ 00:17:11 \r\n 3370431.4 python 5 COMPLETED 00:08:21 \r\n 3370434 interactive accelerated 0 CANCELLED b+ 00:00:00 10:00:00 \r\n 3370488 interactive dev_accelerated 0 CANCELLED b+ 00:00:00 01:00:00 \r\n 3370713 interactive dev_accelerated 6 FAILED 00:00:01 01:00:00 \r\n 3370714 interactive dev_accelerated 0 CANCELLED b+ 00:00:00 01:00:00 \r\n 3370715 interactive dev_accelerated 24 RUNNING 00:14:52 01:00:00 \r\n3370715.intera+ interactive 24 RUNNING 00:14:52 \r\n 3370715.0 python 20 RUNNING 00:09:16 \r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +549,1414268,"TERMINAL",0,0,"sacct --format=""JobID%15,JobName%30,Partition%16,AllocCPUS%3,State%12,Elapsed%10,Timelimit%10"" --starttime $(date -d ""last week"" +%Y-%m-%d) | grep -vE "".batch|.extern|interactive""",,terminal_command +550,1414343,"TERMINAL",0,0,"]633;E;2025-07-23 15:10:53 sacct --format=""JobID%15,JobName%30,Partition%16,AllocCPUS%3,State%12,Elapsed%10,Timelimit%10"" --starttime $(date -d ""last week"" +%Y-%m-%d) | grep -vE "".batch|.extern|interactive"";3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C JobID JobName Partition All State Elapsed Timelimit \r\n--------------- ------------------------------ ---------------- --- ------------ ---------- ---------- \r\n 3345116 train_dynamics_modelsize_scal+ accelerated 48 TIMEOUT 2-00:00:17 2-00:00:00 \r\n 3345116.0 python 40 CANCELLED 2-00:00:25 \r\n 3348397 train_dynamics_lr_schedule_co+ accelerated 48 COMPLETED 1-03:21:05 2-00:00:00 \r\n 3348397.0 python 40 COMPLETED 1-03:20:36 \r\n 3348399 train_dynamics_lr_schedule_cos accelerated 48 COMPLETED 1-03:28:39 2-00:00:00 \r\n 3348399.0 python 40 COMPLETED 1-03:28:10 \r\n 3348400 train_dynamics_lr_schedule_wsd accelerated 48 COMPLETED 1-03:19:17 2-00:00:00 \r\n 3348400.0 python 40 COMPLETED 1-03:18:48 \r\n 3348592 train_dyn_yolorun accelerated 48 CANCELLED b+ 19:29:04 1-00:00:00 \r\n 3348592.0 python 40 CANCELLED 19:29:13 \r\n 3350302.0 python 40 FAILED 00:00:27 \r\n 3350302.1 python 40 FAILED 00:05:27 \r\n 3350302.2 python 40 FAILED 00:03:41 \r\n 3350302.3 python 40 CANCELLED b+ 00:03:28 \r\n 3350302.4 python 40 CANCELLED b+ 00:04:30 \r\n 3350302.5 python 40 FAILED 00:01:21 \r\n 3350302.6 python 40 FAILED 00:00:27 \r\n 3350302.7 python 40 CANCELLED 00:04:03 \r\n 3350418.0 python 40 CANCELLED b+ 00:01:30 \r\n 3350418.1 python 40 FAILED 00:02:41 \r\n 3350418.2 python 40 CANCELLED b+ 01:19:51 \r\n 3350418.3 python 40 FAILED 00:00:16 \r\n 3350418.4 python 40 FAILED 00:00:03 \r\n 3350418.5 python 40 FAILED 00:00:07 \r\n 3350418.6 python 40 FAILED 00:00:03 \r\n 3350418.7 python 40 FAILED 00:02:57 \r\n 3350418.8 python 40 CANCELLED b+ 00:08:48 \r\n 3350418.9 python 40 CANCELLED b+ 00:10:49 \r\n 3350418.10 python 
40 CANCELLED b+ 00:05:51 \r\n 3350418.11 python 40 CANCELLED b+ 00:06:24 \r\n 3351743 train_dyn_yolorun_new_arch accelerated 48 CANCELLED b+ 01:58:23 2-00:00:00 \r\n 3351743.0 python 40 CANCELLED 01:58:29 \r\n 3352103 train_dyn_yolorun_new_arch accelerated 48 CANCELLED b+ 00:09:58 2-00:00:00 \r\n 3352103.0 python 40 CANCELLED 00:10:03 \r\n 3352115 train_dyn_yolorun_new_arch accelerated 48 CANCELLED b+ 23:14:45 2-00:00:00 \r\n 3352115.0 python 40 CANCELLED 23:15:56 \r\n 3352588 train_dyn_yolorun_new_arch accelerated 0 CANCELLED b+ 00:00:00 2-00:00:00 \r\n 3352994.0 python 40 CANCELLED b+ 00:13:38 \r\n 3352994.1 python 40 CANCELLED b+ 00:00:04 \r\n 3352994.2 python 40 COMPLETED 00:09:49 \r\n 3352996.0 python 5 CANCELLED b+ 00:00:01 \r\n 3353884.0 python 40 FAILED 00:01:30 \r\n 3353884.1 python 40 FAILED 00:02:39 \r\n 3353884.2 python 40 COMPLETED 00:10:28 \r\n 3357893.0 python 40 CANCELLED b+ 00:07:38 \r\n 3357893.1 python 40 CANCELLED b+ 00:04:25 \r\n 3357893.2 python 40 CANCELLED b+ 00:00:15 \r\n 3357893.3 python 40 CANCELLED b+ 00:04:41 \r\n 3357893.4 python 40 COMPLETED 00:20:40 \r\n 3357893.5 python 40 CANCELLED b+ 00:04:27 \r\n 3357893.6 python 40 CANCELLED b+ 00:02:17 \r\n 3357893.7 python 40 FAILED 00:08:51 \r\n 3357893.8 python 40 CANCELLED b+ 00:16:40 \r\n 3357893.9 python 40 COMPLETED 00:10:01 \r\n 3358457 train_dyn_yolorun_new_arch accelerated 48 FAILED 00:00:28 2-00:00:00 \r\n 3358457.0 python 40 FAILED 00:00:00 \r\n 3359231.0 python 40 CANCELLED b+ 00:01:53 \r\n 3359231.1 python 40 COMPLETED 00:10:04 \r\n 3359231.2 python 40 COMPLETED 00:09:48 \r\n 3359333 wrap accelerated 6 CANCELLED b+ 00:00:18 10:00:00 \r\n 3359334 wrap accelerated 6 TIMEOUT 10:00:29 10:00:00 \r\n 3359338 wrap accelerated 6 TIMEOUT 10:00:16 10:00:00 \r\n 3359343 train_dyn_new_arch-bugfixed-s+ accelerated 48 COMPLETED 23:19:14 2-00:00:00 \r\n 3359343.0 python 40 COMPLETED 23:18:47 \r\n 3359349 train_dyn_new_arch-bugfixed-t+ accelerated 48 COMPLETED 1-01:00:55 2-00:00:00 \r\n 3359349.0 python 40 COMPLETED 1-01:00:27 \r\n 3365333.0 python 5 CANCELLED b+ 00:00:05 \r\n 3365333.1 python 5 COMPLETED 00:11:36 \r\n 3365333.2 python 5 FAILED 00:00:37 \r\n 3365333.3 python 5 COMPLETED 00:01:35 \r\n 3365333.4 python 5 COMPLETED 00:17:00 \r\n 3365333.5 python 5 FAILED 00:00:51 \r\n 3365333.6 python 5 COMPLETED 00:16:07 \r\n 3365333.7 python 5 COMPLETED 00:17:36 \r\n 3365333.8 python 5 COMPLETED 00:16:28 \r\n 3365333.9 python 5 COMPLETED 00:01:03 \r\n 3365333.10 python 5 COMPLETED 00:13:24 \r\n 3365333.11 python 5 CANCELLED b+ 00:03:34 \r\n 3365334.0 python 5 FAILED 00:00:26 \r\n 3365334.1 python 5 COMPLETED 00:01:46 \r\n 3365334.2 python 5 CANCELLED b+ 00:03:51 \r\n 3365334.3 python 5 COMPLETED 00:03:29 \r\n 3365563.0 python 5 COMPLETED 00:15:24 \r\n 3365563.1 python 5 COMPLETED 00:14:50 \r\n 3365872 train_dynamics_overfit_sample+ accelerated 0 CANCELLED b+ 00:00:00 2-00:00:00 \r\n 3365873 train_dynamics_overfit_sample+ accelerated 6 COMPLETED 01:26:52 2-00:00:00 \r\n 3365873.0 python 5 COMPLETED 01:26:22 \r\n 3365876 train_dynamics_overfit_sample+ accelerated 6 COMPLETED 01:40:07 2-00:00:00 \r\n 3365876.0 python 5 COMPLETED 01:39:38 \r\n 3366843 train_dynamics_overfit_sample+ accelerated 0 CANCELLED b+ 00:00:00 2-00:00:00 \r\n 3366883 train_dynamics_overfit_sample+ accelerated 6 COMPLETED 01:33:31 2-00:00:00 \r\n 3366883.0 python 5 COMPLETED 01:33:02 \r\n 3367418.0 python 5 CANCELLED b+ 00:05:11 \r\n 3367418.1 python 5 COMPLETED 00:04:09 \r\n 3367418.2 python 5 CANCELLED b+ 00:02:55 \r\n 3367418.3 python 5 COMPLETED 
00:04:13 \r\n 3367418.4 python 5 CANCELLED b+ 00:04:23 \r\n 3367418.5 python 5 CANCELLED b+ 00:02:57 \r\n 3367418.6 python 5 COMPLETED 00:04:12 \r\n 3367418.7 python 5 COMPLETED 00:04:16 \r\n 3367418.8 python 5 CANCELLED b+ 00:02:55 \r\n 3367418.9 python 5 CANCELLED b+ 00:00:01 \r\n 3367418.10 python 5 COMPLETED 00:03:35 \r\n 3370431.0 python 5 FAILED 00:01:53 \r\n 3370431.1 python 5 FAILED 00:01:35 \r\n 3370431.2 python 5 CANCELLED b+ 00:17:33 \r\n 3370431.3 python 5 CANCELLED b+ 00:17:11 \r\n 3370431.4 python 5 COMPLETED 00:08:21 \r\n 3370715.0 python 20 RUNNING 00:09:28 \r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +551,1453114,"TERMINAL",0,0,"ython 40 CANCELLED 19:29:13 3350302.0 python 40 FAILED 00:00:27 3350302.1 python 40 FAILED 00:05:27 3350302.2 python 40 FAILED 00:03:41 3350302.3 python 40 CANCELLED b+ 00:03:28 3350302.4 python 40 CANCELLED b+ 00:04:30 3350302.5 python 40 FAILED 00:01:21 3350302.6 python 40 FAILED 00:00:27 3350302.7 python 40 CANCELLED 00:04:03 3350418.0 python ython 40 CANCELLED 19:29:13 3350302.0 python 40 FAILED 00:00:27 3350302.1 python 40 FAILED 00:05:27 3350302.2 python 40 FAILED 00:03:41 3350302.3 python 40 CANCELLED b+ 00:03:28 3350302.4 python 40 CANCELLED b+ 00:04:30 3350302.5 python 40 FAILED 00:01:21 3350302.6 python 40 FAILED 00:00:27 3350302.7 python 40 CANCELLED 00:04:03 3350418.0 python ython 40 CANCELLED 19:29:13 3350302.0 python 40 FAILED 00:00:27 3350302.1 python 40 FAILED 00:05:27 3350302.2 python 40 FAILED 00:03:41 3350302.3 python 40 CANCELLED b+ 00:03:28 3350302.4 python 40 CANCELLED b+ 00:04:30 3350302.5 python 40 FAILED 00:01:21 3350302.6 python 40 FAILED 00:00:27 3350302.7 python 40 CANCELLED 00:04:03 3350418.0 python ",,terminal_command +552,1453166,"TERMINAL",0,0,"]633;E;2025-07-23 15:11:32 ython 40 CANCELLED 19:29:13 ;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;Cbash: ython: command not found...\r\n",,terminal_output +553,1453361,"TERMINAL",0,0,"\r\n",,terminal_output +554,1454418,"TERMINAL",0,0,"^C\r\nbash: 3350302.0: command not found...\r\n",,terminal_output +555,1454566,"TERMINAL",0,0,"bash: 3350302.1: command not found...\r\n",,terminal_output +556,1454734,"TERMINAL",0,0,"bash: 3350302.2: command not found...\r\n",,terminal_output +557,1454822,"TERMINAL",0,0,"^[[A",,terminal_output +558,1454879,"TERMINAL",0,0,"bash: 3350302.3: command not found...\r\n",,terminal_output +559,1455028,"TERMINAL",0,0,"bash: 3350302.4: command not found...\r\n",,terminal_output +560,1455186,"TERMINAL",0,0,"bash: 3350302.5: command not found...\r\n",,terminal_output +561,1455339,"TERMINAL",0,0,"bash: 3350302.6: command not found...\r\n",,terminal_output +562,1455494,"TERMINAL",0,0,"bash: 3350302.7: command not found...\r\n",,terminal_output +563,1455706,"TERMINAL",0,0,"^C\r\nbash: 3350302.0: command not found...\r\n",,terminal_output +564,1455850,"TERMINAL",0,0,"^C\r\nbash: 3350302.1: command not found...\r\n",,terminal_output +565,1456019,"TERMINAL",0,0,"^C\r\nbash: 3350302.3: command not found...\r\n",,terminal_output +566,1456149,"TERMINAL",0,0,"^C\r\nbash: 3350302.4: command not found...\r\n",,terminal_output +567,1456339,"TERMINAL",0,0,"bash: 3350302.5: command not found...\r\n^C\r\nbash: 3350302.6: command not found...\r\n",,terminal_output +568,1456483,"TERMINAL",0,0,"^C\r\nbash: 3350302.7: command not found...\r\n",,terminal_output +569,1456612,"TERMINAL",0,0,"^C\r\nbash: 3350418.0: command not found...\r\n",,terminal_output +570,1456747,"TERMINAL",0,0,"bash: 3350302.0: command not found...\r\n",,terminal_output 
+571,1456843,"TERMINAL",0,0,"^C\r\nbash: 3350302.1: command not found...\r\n",,terminal_output +572,1457038,"TERMINAL",0,0,"^C\r\nbash: 3350302.2: command not found...\r\n",,terminal_output +573,1457171,"TERMINAL",0,0,"bash: 3350302.3: command not found...\r\n",,terminal_output +574,1457341,"TERMINAL",0,0,"bash: 3350302.4: command not found...\r\n",,terminal_output +575,1457580,"TERMINAL",0,0,"bash: 3350302.5: command not found...\r\n^C\r\n^C\r\nbash: 3350302.7: command not found...\r\n^C\r\n^C\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;130",,terminal_output +576,1457591,"TERMINAL",0,0,"^C",,terminal_command +577,1457603,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +578,1457614,"TERMINAL",0,0,"^C",,terminal_command +579,1457641,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +580,1457654,"TERMINAL",0,0,"^C",,terminal_command +581,1457666,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +582,1457679,"TERMINAL",0,0,"^C",,terminal_command +583,1457704,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +584,1457716,"TERMINAL",0,0,"^C",,terminal_command +585,1457727,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +586,1457739,"TERMINAL",0,0,"^C",,terminal_command +587,1457756,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +588,1457770,"TERMINAL",0,0,"^C",,terminal_command +589,1457794,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +590,1457807,"TERMINAL",0,0,"^C",,terminal_command +591,1457820,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +592,1457831,"TERMINAL",0,0,"^C",,terminal_command +593,1457843,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +594,1457859,"TERMINAL",0,0,"^C",,terminal_command +595,1457884,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +596,1457895,"TERMINAL",0,0,"^C",,terminal_command +597,1457906,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +598,1457919,"TERMINAL",0,0,"^C",,terminal_command +599,1457943,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +600,1457955,"TERMINAL",0,0,"^C",,terminal_command +601,1457970,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +602,1457982,"TERMINAL",0,0,"^C",,terminal_command 
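Terminal captures like the ones above often contain byte-identical repeated lines, whether from a job re-emitting its log or from a repeated paste as in row 551. A minimal sketch for collapsing such verbatim repeats when post-processing a captured log (train.log is a hypothetical file name):

# keep only the first occurrence of each distinct line
awk '!seen[$0]++' train.log > train.dedup.log

This assumes repeated lines are bit-identical; genuinely new lines, such as the fresh loss steps in row 610, pass through unchanged.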
+603,1457993,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +604,1458004,"TERMINAL",0,0,"^C",,terminal_command +605,1458029,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +606,1458040,"TERMINAL",0,0,"^C",,terminal_command +607,1458060,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +608,1468711,"TERMINAL",0,0,"sacct --format=""JobID%15,JobName%30,Partition%16,AllocCPUS%3,State%12,Elapsed%10,Timelimit%10"" --starttime $(date -d ""last week"" +%Y-%m-%d) | grep -vE "".batch|.extern|interactive""",,terminal_command +609,1468757,"TERMINAL",0,0,"]633;E;2025-07-23 15:11:48 sacct --format=""JobID%15,JobName%30,Partition%16,AllocCPUS%3,State%12,Elapsed%10,Timelimit%10"" --starttime $(date -d ""last week"" +%Y-%m-%d) | grep -vE "".batch|.extern|interactive"";3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C JobID JobName Partition All State Elapsed Timelimit \r\n--------------- ------------------------------ ---------------- --- ------------ ---------- ---------- \r\n 3345116 train_dynamics_modelsize_scal+ accelerated 48 TIMEOUT 2-00:00:17 2-00:00:00 \r\n 3345116.0 python 40 CANCELLED 2-00:00:25 \r\n 3348397 train_dynamics_lr_schedule_co+ accelerated 48 COMPLETED 1-03:21:05 2-00:00:00 \r\n 3348397.0 python 40 COMPLETED 1-03:20:36 \r\n 3348399 train_dynamics_lr_schedule_cos accelerated 48 COMPLETED 1-03:28:39 2-00:00:00 \r\n 3348399.0 python 40 COMPLETED 1-03:28:10 \r\n 3348400 train_dynamics_lr_schedule_wsd accelerated 48 COMPLETED 1-03:19:17 2-00:00:00 \r\n 3348400.0 python 40 COMPLETED 1-03:18:48 \r\n 3348592 train_dyn_yolorun accelerated 48 CANCELLED b+ 19:29:04 1-00:00:00 \r\n 3348592.0 python 40 CANCELLED 19:29:13 \r\n 3350302.0 python 40 FAILED 00:00:27 \r\n 3350302.1 python 40 FAILED 00:05:27 \r\n 3350302.2 python 40 FAILED 00:03:41 \r\n 3350302.3 python 40 CANCELLED b+ 00:03:28 \r\n 3350302.4 python 40 CANCELLED b+ 00:04:30 \r\n 3350302.5 python 40 FAILED 00:01:21 \r\n 3350302.6 python 40 FAILED 00:00:27 \r\n 3350302.7 python 40 CANCELLED 00:04:03 \r\n 3350418.0 python 40 CANCELLED b+ 00:01:30 \r\n 3350418.1 python 40 FAILED 00:02:41 \r\n 3350418.2 python 40 CANCELLED b+ 01:19:51 \r\n 3350418.3 python 40 FAILED 00:00:16 \r\n 3350418.4 python 40 FAILED 00:00:03 \r\n 3350418.5 python 40 FAILED 00:00:07 \r\n 3350418.6 python 40 FAILED 00:00:03 \r\n 3350418.7 python 40 FAILED 00:02:57 \r\n 3350418.8 python 40 CANCELLED b+ 00:08:48 \r\n 3350418.9 python 40 CANCELLED b+ 00:10:49 \r\n 3350418.10 python 40 CANCELLED b+ 00:05:51 \r\n 3350418.11 python 40 CANCELLED b+ 00:06:24 \r\n 3351743 train_dyn_yolorun_new_arch accelerated 48 CANCELLED b+ 01:58:23 2-00:00:00 \r\n 3351743.0 python 40 CANCELLED 01:58:29 \r\n 3352103 train_dyn_yolorun_new_arch accelerated 48 CANCELLED b+ 00:09:58 2-00:00:00 \r\n 3352103.0 python 40 CANCELLED 00:10:03 \r\n 3352115 train_dyn_yolorun_new_arch accelerated 48 CANCELLED b+ 23:14:45 2-00:00:00 \r\n 3352115.0 python 40 CANCELLED 23:15:56 \r\n 3352588 train_dyn_yolorun_new_arch accelerated 0 CANCELLED b+ 00:00:00 2-00:00:00 \r\n 3352994.0 python 40 CANCELLED b+ 00:13:38 \r\n 3352994.1 python 40 CANCELLED b+ 00:00:04 \r\n 3352994.2 python 40 COMPLETED 00:09:49 \r\n 3352996.0 python 5 CANCELLED b+ 00:00:01 \r\n 3353884.0 python 40 FAILED 00:01:30 
\r\n 3353884.1 python 40 FAILED 00:02:39 \r\n 3353884.2 python 40 COMPLETED 00:10:28 \r\n 3357893.0 python 40 CANCELLED b+ 00:07:38 \r\n 3357893.1 python 40 CANCELLED b+ 00:04:25 \r\n 3357893.2 python 40 CANCELLED b+ 00:00:15 \r\n 3357893.3 python 40 CANCELLED b+ 00:04:41 \r\n 3357893.4 python 40 COMPLETED 00:20:40 \r\n 3357893.5 python 40 CANCELLED b+ 00:04:27 \r\n 3357893.6 python 40 CANCELLED b+ 00:02:17 \r\n 3357893.7 python 40 FAILED 00:08:51 \r\n 3357893.8 python 40 CANCELLED b+ 00:16:40 \r\n 3357893.9 python 40 COMPLETED 00:10:01 \r\n 3358457 train_dyn_yolorun_new_arch accelerated 48 FAILED 00:00:28 2-00:00:00 \r\n 3358457.0 python 40 FAILED 00:00:00 \r\n 3359231.0 python 40 CANCELLED b+ 00:01:53 \r\n 3359231.1 python 40 COMPLETED 00:10:04 \r\n 3359231.2 python 40 COMPLETED 00:09:48 \r\n 3359333 wrap accelerated 6 CANCELLED b+ 00:00:18 10:00:00 \r\n 3359334 wrap accelerated 6 TIMEOUT 10:00:29 10:00:00 \r\n 3359338 wrap accelerated 6 TIMEOUT 10:00:16 10:00:00 \r\n 3359343 train_dyn_new_arch-bugfixed-s+ accelerated 48 COMPLETED 23:19:14 2-00:00:00 \r\n 3359343.0 python 40 COMPLETED 23:18:47 \r\n 3359349 train_dyn_new_arch-bugfixed-t+ accelerated 48 COMPLETED 1-01:00:55 2-00:00:00 \r\n 3359349.0 python 40 COMPLETED 1-01:00:27 \r\n 3365333.0 python 5 CANCELLED b+ 00:00:05 \r\n 3365333.1 python 5 COMPLETED 00:11:36 \r\n 3365333.2 python 5 FAILED 00:00:37 \r\n 3365333.3 python 5 COMPLETED 00:01:35 \r\n 3365333.4 python 5 COMPLETED 00:17:00 \r\n 3365333.5 python 5 FAILED 00:00:51 \r\n 3365333.6 python 5 COMPLETED 00:16:07 \r\n 3365333.7 python 5 COMPLETED 00:17:36 \r\n 3365333.8 python 5 COMPLETED 00:16:28 \r\n 3365333.9 python 5 COMPLETED 00:01:03 \r\n 3365333.10 python 5 COMPLETED 00:13:24 \r\n 3365333.11 python 5 CANCELLED b+ 00:03:34 \r\n 3365334.0 python 5 FAILED 00:00:26 \r\n 3365334.1 python 5 COMPLETED 00:01:46 \r\n 3365334.2 python 5 CANCELLED b+ 00:03:51 \r\n 3365334.3 python 5 COMPLETED 00:03:29 \r\n 3365563.0 python 5 COMPLETED 00:15:24 \r\n 3365563.1 python 5 COMPLETED 00:14:50 \r\n 3365872 train_dynamics_overfit_sample+ accelerated 0 CANCELLED b+ 00:00:00 2-00:00:00 \r\n 3365873 train_dynamics_overfit_sample+ accelerated 6 COMPLETED 01:26:52 2-00:00:00 \r\n 3365873.0 python 5 COMPLETED 01:26:22 \r\n 3365876 train_dynamics_overfit_sample+ accelerated 6 COMPLETED 01:40:07 2-00:00:00 \r\n 3365876.0 python 5 COMPLETED 01:39:38 \r\n 3366843 train_dynamics_overfit_sample+ accelerated 0 CANCELLED b+ 00:00:00 2-00:00:00 \r\n 3366883 train_dynamics_overfit_sample+ accelerated 6 COMPLETED 01:33:31 2-00:00:00 \r\n 3366883.0 python 5 COMPLETED 01:33:02 \r\n 3367418.0 python 5 CANCELLED b+ 00:05:11 \r\n 3367418.1 python 5 COMPLETED 00:04:09 \r\n 3367418.2 python 5 CANCELLED b+ 00:02:55 \r\n 3367418.3 python 5 COMPLETED 00:04:13 \r\n 3367418.4 python 5 CANCELLED b+ 00:04:23 \r\n 3367418.5 python 5 CANCELLED b+ 00:02:57 \r\n 3367418.6 python 5 COMPLETED 00:04:12 \r\n 3367418.7 python 5 COMPLETED 00:04:16 \r\n 3367418.8 python 5 CANCELLED b+ 00:02:55 \r\n 3367418.9 python 5 CANCELLED b+ 00:00:01 \r\n 3367418.10 python 5 COMPLETED 00:03:35 \r\n 3370431.0 python 5 FAILED 00:01:53 \r\n 3370431.1 python 5 FAILED 00:01:35 \r\n 3370431.2 python 5 CANCELLED b+ 00:17:33 \r\n 3370431.3 python 5 CANCELLED b+ 00:17:11 \r\n 3370431.4 python 5 COMPLETED 00:08:21 \r\n 3370715.0 python 20 RUNNING 00:10:23 \r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +610,1472039,"TERMINAL",0,0,"Step 239, loss: 9.393319129943848\r\nStep 240, loss: 9.332643508911133\r\nStep 241, loss: 
9.279335975646973\r\nStep 242, loss: 9.48430347442627\r\nStep 243, loss: 9.444723129272461\r\nStep 244, loss: 9.225436210632324\r\nStep 245, loss: 9.21567153930664\r\nStep 246, loss: 9.34753131866455\r\nStep 247, loss: 9.248933792114258\r\nStep 248, loss: 9.256204605102539\r\nStep 249, loss: 9.148299217224121\r\nStep 250, loss: 9.169936180114746\r\nStep 251, loss: 8.963499069213867\r\nStep 252, loss: 9.141301155090332\r\nStep 253, loss: 9.143373489379883\r\nStep 254, loss: 9.256324768066406\r\nStep 255, loss: 9.21156120300293\r\nStep 256, loss: 9.121957778930664\r\nStep 257, loss: 9.145356178283691\r\nStep 258, loss: 9.179287910461426\r\nStep 259, loss: 8.951887130737305\r\nStep 260, loss: 9.172635078430176\r\nStep 261, loss: 9.005840301513672\r\nStep 262, loss: 9.113434791564941\r\nStep 263, loss: 8.805696487426758\r\nStep 264, loss: 8.820012092590332\r\nStep 265, loss: 8.868437767028809\r\nStep 266, loss: 8.873906135559082\r\nStep 267, loss: 8.917597770690918\r\nStep 268, loss: 9.005128860473633\r\nStep 269, loss: 8.811617851257324\r\nStep 270, loss: 8.84359359741211\r\nStep 271, loss: 8.960158348083496\r\nStep 272, loss: 9.073274612426758\r\nStep 273, loss: 8.800353050231934\r\nStep 274, loss: 8.86905574798584\r\nStep 275, loss: 8.783049583435059\r\nStep 276, loss: 9.049469947814941\r\nStep 277, loss: 8.742980003356934\r\nStep 278, loss: 8.820770263671875\r\nStep 279, loss: 8.74662971496582\r\nStep 280, loss: 8.780843734741211\r\nStep 281, loss: 8.7637357711792\r\nStep 282, loss: 8.939576148986816\r\nStep 283, loss: 8.840526580810547\r\nStep 284, loss: 8.902979850769043\r\nStep 285, loss: 8.834574699401855\r\nStep 286, loss: 8.63754653930664\r\nStep 287, loss: 8.65624713897705\r\nStep 288, loss: 8.498268127441406\r\nStep 289, loss: 8.662796020507812\r\nStep 290, loss: 8.78270149230957\r\nStep 291, loss: 8.698641777038574\r\nStep 292, loss: 8.700301170349121\r\nStep 293, loss: 8.656726837158203\r\nStep 294, loss: 8.404195785522461\r\nStep 295, loss: 8.71267318725586\r\nStep 296, loss: 8.580713272094727\r\nStep 297, loss: 8.696793556213379\r\nStep 298, loss: 8.622354507446289\r\nStep 299, loss: 8.547773361206055\r\nStep 300, loss: 8.511931419372559\r\nStep 301, loss: 8.62592601776123\r\nStep 302, loss: 8.557539939880371\r\nStep 303, loss: 8.584827423095703\r\nStep 304, loss: 8.375589370727539\r\nStep 305, loss: 8.413776397705078\r\nStep 306, loss: 8.20652961730957\r\nStep 307, loss: 8.37797737121582\r\nStep 308, loss: 8.527795791625977\r\nStep 309, loss: 8.349091529846191\r\nStep 310, loss: 8.539392471313477\r\nStep 311, loss: 8.61562728881836\r\nStep 312, loss: 8.283681869506836\r\nStep 313, loss: 8.407837867736816\r\nStep 314, loss: 8.361788749694824\r\nStep 315, loss: 8.317115783691406\r\nStep 316, loss: 8.216513633728027\r\nStep 317, loss: 8.435118675231934\r\nStep 318, loss: 8.263867378234863\r\nStep 319, loss: 8.336173057556152\r\nStep 320, loss: 8.248668670654297\r\nStep 321, loss: 8.13427734375\r\nStep 322, loss: 8.352275848388672\r\nStep 323, loss: 8.005666732788086\r\nStep 324, loss: 8.034747123718262\r\nStep 325, loss: 8.130173683166504\r\nStep 326, loss: 8.357478141784668\r\nStep 327, loss: 8.177257537841797\r\nStep 328, loss: 8.227829933166504\r\nStep 329, loss: 8.193879127502441\r\nStep 330, loss: 8.19284439086914\r\nStep 331, loss: 8.317316055297852\r\nStep 332, loss: 7.896291732788086\r\nStep 333, loss: 7.923079967498779\r\nStep 334, loss: 8.150965690612793\r\nStep 335, loss: 8.156876564025879\r\nStep 336, loss: 8.197476387023926\r\nStep 337, loss: 
8.104968070983887\r\nStep 338, loss: 8.170830726623535\r\nStep 339, loss: 8.083712577819824\r\nStep 340, loss: 8.002701759338379\r\nStep 341, loss: 8.275883674621582\r\nStep 342, loss: 8.123054504394531\r\nStep 343, loss: 8.061373710632324\r\nStep 344, loss: 8.354568481445312\r\nStep 345, loss: 7.749275207519531\r\nStep 346, loss: 7.865479946136475\r\nStep 347, loss: 8.149580001831055\r\nStep 348, loss: 8.063140869140625\r\nStep 349, loss: 7.9869232177734375\r\nStep 350, loss: 7.965551376342773\r\nStep 351, loss: 7.912382125854492\r\nStep 352, loss: 7.809815406799316\r\nStep 353, loss: 7.7318525314331055\r\nStep 354, loss: 7.99237585067749\r\nStep 355, loss: 7.686720371246338\r\nStep 356, loss: 7.847860336303711\r\nStep 357, loss: 8.05896282196045\r\nStep 358, loss: 7.9990739822387695\r\nStep 359, loss: 7.990407466888428\r\nStep 360, loss: 7.872879505157471\r\nStep 361, loss: 7.6615471839904785\r\nStep 362, loss: 7.864638805389404\r\nStep 363, loss: 7.581545829772949\r\nStep 364, loss: 7.800833225250244\r\nStep 365, loss: 7.945025444030762\r\nStep 366, loss: 7.67780876159668\r\nStep 367, loss: 8.081137657165527\r\nStep 368, loss: 7.838423252105713\r\nStep 369, loss: 7.849606990814209\r\nStep 370, loss: 7.930092811584473\r\nStep 371, loss: 7.8301682472229\r\nStep 372, loss: 7.931652545928955\r\nStep 373, loss: 7.795661926269531\r\nStep 374, loss: 7.660418510437012\r\nStep 375, loss: 7.695881366729736\r\nStep 376, loss: 7.70541524887085\r\nStep 377, loss: 7.812950611114502\r\nStep 378, loss: 7.891514778137207\r\nStep 379, loss: 7.972898006439209\r\nStep 380, loss: 8.09835147857666\r\nStep 381, loss: 7.664462089538574\r\nStep 382, loss: 7.562962055206299\r\nStep 383, loss: 7.750759124755859\r\nStep 384, loss: 7.660296440124512\r\nStep 385, loss: 7.515077590942383\r\nStep 386, loss: 7.824276924133301\r\nStep 387, loss: 7.6342620849609375\r\nStep 388, loss: 7.488607406616211\r\nStep 389, loss: 7.708237648010254\r\nStep 390, loss: 7.866584777832031\r\nStep 391, loss: 7.724985599517822\r\nStep 392, loss: 7.578362941741943\r\nStep 393, loss: 7.3297600746154785\r\nStep 394, loss: 7.767591953277588\r\nStep 395, loss: 7.738459587097168\r\nStep 396, loss: 7.528026580810547\r\nStep 397, loss: 7.397331237792969\r\nStep 398, loss: 7.5247907638549805\r\nStep 399, loss: 7.808922290802002\r\nStep 400, loss: 7.163333415985107\r\nStep 401, loss: 7.73933744430542\r\nStep 402, loss: 7.659040451049805\r\nStep 403, loss: 7.741009712219238\r\nStep 404, loss: 7.5835723876953125\r\nStep 405, loss: 7.593139171600342\r\nStep 406, loss: 7.641324043273926\r\nStep 407, loss: 7.747766017913818\r\nStep 408, loss: 7.458308696746826\r\nStep 409, loss: 7.582541465759277\r\nStep 410, loss: 7.466739654541016\r\nStep 411, loss: 7.498125076293945\r\nStep 412, loss: 7.440773963928223\r\nStep 413, loss: 7.42750358581543\r\nStep 414, loss: 7.630177021026611\r\nStep 415, loss: 7.3094000816345215\r\nStep 416, loss: 7.4828877449035645\r\nStep 417, loss: 7.231750011444092\r\nStep 418, loss: 7.4988579750061035\r\nStep 419, loss: 7.478680610656738\r\nStep 420, loss: 7.274184703826904\r\nStep 421, loss: 7.405371189117432\r\nStep 422, loss: 7.230210304260254\r\nStep 423, loss: 7.356727600097656\r\nStep 424, loss: 7.207230567932129\r\nStep 425, loss: 7.441095352172852\r\nStep 426, loss: 7.30772590637207\r\nStep 427, loss: 7.277124881744385\r\nStep 428, loss: 7.306359767913818\r\nStep 429, loss: 7.37355899810791\r\nStep 430, loss: 7.349822998046875\r\nStep 431, loss: 7.404427528381348\r\nStep 432, loss: 7.277266979217529\r\nStep 433, loss: 
7.30579137802124\r\nStep 434, loss: 7.491156101226807\r\nStep 435, loss: 7.135332107543945\r\nStep 436, loss: 7.230020523071289\r\nStep 437, loss: 7.382743835449219\r\nStep 438, loss: 7.358910083770752\r\nStep 439, loss: 7.307190895080566\r\nStep 440, loss: 7.108017444610596\r\nStep 441, loss: 7.09844446182251\r\nStep 442, loss: 7.44235897064209\r\nStep 443, loss: 7.3115153312683105\r\nStep 444, loss: 7.195000171661377\r\nStep 445, loss: 7.622678279876709\r\nStep 446, loss: 7.278513431549072\r\nStep 447, loss: 7.011615753173828\r\nStep 448, loss: 6.817862510681152\r\nStep 449, loss: 7.389958381652832\r\nStep 450, loss: 7.191179275512695\r\nStep 451, loss: 7.041982173919678\r\nStep 452, loss: 7.03852653503418\r\nStep 453, loss: 7.1209187507629395\r\nStep 454, loss: 7.1884589195251465\r\nStep 455, loss: 7.006282329559326\r\nStep 456, loss: 7.056117534637451\r\nStep 457, loss: 7.152760028839111\r\nStep 458, loss: 7.050671100616455\r\nStep 459, loss: 7.031022071838379\r\nStep 460, loss: 6.994027614593506\r\nStep 461, loss: 7.111380100250244\r\nStep 462, loss: 7.066397190093994\r\nStep 463, loss: 6.902746677398682\r\nStep 464, loss: 7.194156646728516\r\nStep 465, loss: 7.23063325881958\r\nStep 466, loss: 6.9680256843566895\r\nStep 467, loss: 7.116633415222168\r\nStep 468, loss: 7.112338066101074\r\nStep 469, loss: 7.140894412994385\r\nStep 470, loss: 7.096400260925293\r\nStep 471, loss: 6.985903739929199\r\nStep 472, loss: 6.770591735839844\r\nStep 473, loss: 7.095915794372559\r\nStep 474, loss: 7.0415778160095215\r\nStep 475, loss: 7.120011806488037\r\nStep 476, loss: 7.3003339767456055\r\nStep 477, loss: 6.872790336608887\r\nStep 478, 
loss: 7.1554107666015625\r\nStep 479, loss: 6.867790699005127\r\n",,terminal_output +611,1499157,"TERMINAL",0,0,"sacct --format=""JobID%15,JobName%30,Partition%16,AllocCPUS%3,State%12,Elapsed%10,Timelimit%10"" --starttime $(date -d ""last week"" +%Y-%m-%d) | grep -vE "".batch|.extern|interactive"" | grep ""accelerated""",,terminal_command +612,1499222,"TERMINAL",0,0,"]633;E;2025-07-23 15:12:18 sacct --format=""JobID%15,JobName%30,Partition%16,AllocCPUS%3,State%12,Elapsed%10,Timelimit%10"" --starttime $(date -d ""last week"" +%Y-%m-%d) | grep -vE "".batch|.extern|interactive"" | grep ""accelerated"";3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C 3345116 train_dynamics_modelsize_scal+ accelerated 48 TIMEOUT 2-00:00:17 2-00:00:00 \r\n 3348397 train_dynamics_lr_schedule_co+ accelerated 48 COMPLETED 1-03:21:05 2-00:00:00 \r\n 3348399 train_dynamics_lr_schedule_cos accelerated 48 COMPLETED 1-03:28:39 2-00:00:00 \r\n 3348400 train_dynamics_lr_schedule_wsd accelerated 48 COMPLETED 1-03:19:17 2-00:00:00 \r\n 3348592 train_dyn_yolorun accelerated 48 CANCELLED b+ 19:29:04 1-00:00:00 \r\n 3351743 train_dyn_yolorun_new_arch accelerated 48 CANCELLED b+ 01:58:23 2-00:00:00 \r\n 3352103 train_dyn_yolorun_new_arch accelerated 48 CANCELLED b+ 00:09:58 2-00:00:00 \r\n 3352115 train_dyn_yolorun_new_arch accelerated 48 CANCELLED b+ 23:14:45 2-00:00:00 \r\n 3352588 train_dyn_yolorun_new_arch accelerated 0 CANCELLED b+ 00:00:00 2-00:00:00 \r\n 3358457 train_dyn_yolorun_new_arch accelerated 48 FAILED 00:00:28 2-00:00:00 \r\n 3359333 wrap accelerated 6 CANCELLED b+ 00:00:18 10:00:00 \r\n 3359334 wrap accelerated 6 TIMEOUT 10:00:29 10:00:00 \r\n 3359338 wrap accelerated 6 TIMEOUT 10:00:16 10:00:00 \r\n 3359343 train_dyn_new_arch-bugfixed-s+ accelerated 48 COMPLETED 23:19:14 2-00:00:00 \r\n 3359349 train_dyn_new_arch-bugfixed-t+ accelerated 48 COMPLETED 1-01:00:55 2-00:00:00 \r\n 3365872 train_dynamics_overfit_sample+ accelerated 0 CANCELLED b+ 00:00:00 2-00:00:00 \r\n 3365873 train_dynamics_overfit_sample+ accelerated 6 COMPLETED 01:26:52 2-00:00:00 \r\n 3365876 train_dynamics_overfit_sample+ accelerated 6 COMPLETED 01:40:07 2-00:00:00 \r\n 3366843 train_dynamics_overfit_sample+ accelerated 0 CANCELLED b+ 00:00:00 2-00:00:00 \r\n 3366883 train_dynamics_overfit_sample+ accelerated 6 COMPLETED 01:33:31 2-00:00:00 \r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +613,1682594,"TERMINAL",0,0,"Step 480, loss: 6.983112335205078\r\nStep 481, loss: 7.115540027618408\r\nStep 482, loss: 6.859630584716797\r\nStep 483, loss: 7.162317276000977\r\nStep 484, loss: 7.2645063400268555\r\nStep 485, loss: 7.095457077026367\r\nStep 486, loss: 7.0156145095825195\r\nStep 487, loss: 6.8184332847595215\r\nStep 488, loss: 6.952903747558594\r\nStep 489, loss: 6.963713645935059\r\nStep 490, loss: 6.7667388916015625\r\nStep 491, loss: 6.931082248687744\r\nStep 492, loss: 6.961340427398682\r\nStep 493, loss: 7.058728218078613\r\nStep 494, loss: 6.897120475769043\r\nStep 495, loss: 6.869744300842285\r\nStep 496, loss: 6.865843296051025\r\nStep 497, loss: 6.935029983520508\r\nStep 498, loss: 6.947277069091797\r\nStep 499, loss: 6.898039817810059\r\nStep 500, loss: 6.908406734466553\r\nStep 501, loss: 6.652162075042725\r\nStep 502, loss: 6.679863452911377\r\nStep 503, loss: 6.796136379241943\r\nStep 504, loss: 6.567356109619141\r\nStep 505, loss: 7.073315620422363\r\nStep 506, loss: 6.8232622146606445\r\nStep 507, loss: 6.834505081176758\r\nStep 508, loss: 6.863307476043701\r\nStep 509, loss: 6.771708965301514\r\nStep 510, loss: 
6.884707927703857\r\nStep 511, loss: 6.796646595001221\r\nStep 512, loss: 6.833789825439453\r\nStep 513, loss: 6.7259440422058105\r\nStep 514, loss: 6.863130569458008\r\nStep 515, loss: 6.853940486907959\r\nStep 516, loss: 6.750757217407227\r\nStep 517, loss: 6.462499141693115\r\nStep 518, loss: 6.461126804351807\r\nStep 519, loss: 6.87819766998291\r\nStep 520, loss: 7.006588935852051\r\nStep 521, loss: 6.969130992889404\r\nStep 522, loss: 6.808895111083984\r\nStep 523, loss: 6.943464279174805\r\nStep 524, loss: 7.139779567718506\r\nStep 525, loss: 6.821377754211426\r\nStep 526, loss: 7.0501484870910645\r\nStep 527, loss: 6.7269287109375\r\nStep 528, loss: 6.502286911010742\r\nStep 529, loss: 6.991916656494141\r\nStep 530, loss: 6.809881687164307\r\nStep 531, loss: 6.804445743560791\r\nStep 532, loss: 6.871065616607666\r\nStep 533, loss: 6.632485866546631\r\nStep 534, loss: 6.804165840148926\r\nStep 535, loss: 6.6506452560424805\r\nStep 536, loss: 6.854793548583984\r\nStep 537, loss: 6.7164835929870605\r\nStep 538, loss: 6.879791736602783\r\nStep 539, loss: 6.811768054962158\r\nStep 540, loss: 6.637771129608154\r\nStep 541, loss: 6.691093921661377\r\nStep 542, loss: 6.608028411865234\r\nStep 543, loss: 6.851551055908203\r\nStep 544, loss: 6.784187316894531\r\nStep 545, loss: 6.733605861663818\r\nStep 546, loss: 6.900415897369385\r\nStep 547, loss: 6.957789897918701\r\nStep 548, loss: 6.842604637145996\r\nStep 549, loss: 6.756542682647705\r\nStep 550, loss: 6.680877685546875\r\nStep 551, loss: 6.7578535079956055\r\nStep 552, loss: 6.835852146148682\r\nStep 553, loss: 6.846309661865234\r\nStep 554, loss: 6.76045036315918\r\nStep 555, loss: 6.875919342041016\r\nStep 556, loss: 6.824545860290527\r\nStep 557, loss: 6.6092424392700195\r\nStep 558, loss: 6.697463512420654\r\nStep 559, loss: 6.92261266708374\r\nStep 560, loss: 6.7938008308410645\r\nStep 561, loss: 6.867816925048828\r\nStep 562, loss: 6.752100467681885\r\nStep 563, loss: 6.554731369018555\r\nStep 564, loss: 6.557769775390625\r\nStep 565, loss: 6.742979049682617\r\nStep 566, loss: 6.712817192077637\r\nStep 567, loss: 6.586728572845459\r\nStep 568, loss: 6.741764545440674\r\nStep 569, loss: 6.896045207977295\r\nStep 570, loss: 6.683651447296143\r\nStep 571, loss: 6.844089984893799\r\nStep 572, loss: 6.487730026245117\r\nStep 573, loss: 6.547028064727783\r\nStep 574, loss: 6.402328014373779\r\nStep 575, loss: 6.779541969299316\r\nStep 576, loss: 6.89146614074707\r\nStep 577, loss: 6.612976551055908\r\nStep 578, loss: 6.771749019622803\r\nStep 579, loss: 6.556238651275635\r\nStep 580, loss: 6.693332195281982\r\nStep 581, loss: 6.658258438110352\r\nStep 582, loss: 6.8479323387146\r\nStep 583, loss: 6.655804634094238\r\nStep 584, loss: 6.5051422119140625\r\nStep 585, loss: 6.857949256896973\r\nStep 586, loss: 6.695587635040283\r\nStep 587, loss: 6.870244026184082\r\nStep 588, loss: 6.743365287780762\r\nStep 589, loss: 6.722917556762695\r\nStep 590, loss: 6.706783771514893\r\nStep 591, loss: 6.568246364593506\r\nStep 592, loss: 6.578851222991943\r\nStep 593, loss: 6.753204822540283\r\nStep 594, loss: 6.574229717254639\r\nStep 595, loss: 6.766374588012695\r\nStep 596, loss: 6.641284942626953\r\nStep 597, loss: 6.712769031524658\r\nStep 598, loss: 6.491716384887695\r\nStep 599, loss: 6.898193836212158\r\nStep 600, loss: 6.7026472091674805\r\nStep 601, loss: 6.534832000732422\r\nStep 602, loss: 6.7139716148376465\r\nStep 603, loss: 6.692238807678223\r\nStep 604, loss: 6.685831069946289\r\nStep 605, loss: 6.719960689544678\r\nStep 606, loss: 
6.76458215713501\r\nStep 607, loss: 6.115108489990234\r\nStep 608, loss: 6.482534408569336\r\nStep 609, loss: 6.740872859954834\r\nStep 610, loss: 6.589570045471191\r\nStep 611, loss: 6.723851203918457\r\nStep 612, loss: 6.53096866607666\r\nStep 613, loss: 6.657890319824219\r\nStep 614, loss: 6.608828067779541\r\nStep 615, loss: 6.533605575561523\r\nStep 616, loss: 6.6826276779174805\r\nStep 617, loss: 6.5526909828186035\r\nStep 618, loss: 6.545164108276367\r\nStep 619, loss: 6.665560722351074\r\nStep 620, loss: 6.732226371765137\r\nStep 621, loss: 6.427579402923584\r\nStep 622, loss: 6.500489711761475\r\nStep 623, loss: 6.419438362121582\r\nStep 624, loss: 6.411679744720459\r\nStep 625, loss: 6.283699035644531\r\nStep 626, loss: 6.6168928146362305\r\nStep 627, loss: 6.747786521911621\r\nStep 628, loss: 6.7484354972839355\r\nStep 629, loss: 6.781527519226074\r\nStep 630, loss: 6.479393005371094\r\nStep 631, loss: 6.516376972198486\r\nStep 632, loss: 6.578348636627197\r\nStep 633, loss: 6.651546478271484\r\nStep 634, loss: 6.660653591156006\r\nStep 635, loss: 6.57742977142334\r\nStep 636, loss: 6.78727912902832\r\nStep 637, loss: 6.698579788208008\r\nStep 638, loss: 6.652580261230469\r\nStep 639, loss: 6.563304424285889\r\nStep 640, loss: 6.456714153289795\r\nStep 641, loss: 6.529811859130859\r\nStep 642, loss: 6.3940534591674805\r\nStep 643, loss: 6.569890975952148\r\nStep 644, loss: 6.334028720855713\r\nStep 645, loss: 6.3808159828186035\r\nStep 646, loss: 6.5262627601623535\r\nStep 647, loss: 6.602876663208008\r\nStep 648, loss: 6.516875743865967\r\nStep 649, loss: 7.000062465667725\r\nStep 650, loss: 6.422936916351318\r\nStep 651, loss: 6.553537368774414\r\nStep 652, loss: 6.638210296630859\r\nStep 653, loss: 6.648630619049072\r\nStep 654, loss: 6.63344144821167\r\nStep 655, loss: 6.304746627807617\r\nStep 656, loss: 6.512808799743652\r\nStep 657, loss: 6.818546295166016\r\nStep 658, loss: 6.52738618850708\r\nStep 659, loss: 6.410164833068848\r\nStep 660, loss: 6.61316442489624\r\nStep 661, loss: 6.527656555175781\r\nStep 662, loss: 6.427692890167236\r\nStep 663, loss: 6.674975395202637\r\nStep 664, loss: 6.385855674743652\r\nStep 665, loss: 6.429081439971924\r\nStep 666, loss: 6.429695129394531\r\nStep 667, loss: 6.673699855804443\r\nStep 668, loss: 6.5708208084106445\r\nStep 669, loss: 6.557399749755859\r\nStep 670, loss: 6.487589359283447\r\nStep 671, loss: 6.37391471862793\r\nStep 672, loss: 6.321193695068359\r\nStep 673, loss: 6.604182720184326\r\nStep 674, loss: 6.144009590148926\r\nStep 675, loss: 6.443563461303711\r\nStep 676, loss: 6.617156505584717\r\nStep 677, loss: 6.57207727432251\r\nStep 678, loss: 6.564396858215332\r\nStep 679, loss: 6.216585636138916\r\nStep 680, loss: 6.388035774230957\r\nStep 681, loss: 6.428990364074707\r\nStep 682, loss: 6.369060039520264\r\nStep 683, loss: 6.417286396026611\r\nStep 684, loss: 6.46744966506958\r\nStep 685, loss: 6.359501361846924\r\nStep 686, loss: 6.588460445404053\r\nStep 687, loss: 6.498830318450928\r\nStep 688, loss: 6.47416877746582\r\nStep 689, loss: 6.359691619873047\r\nStep 690, loss: 6.246201515197754\r\nStep 691, loss: 6.353740215301514\r\nStep 692, loss: 6.310497283935547\r\nStep 693, loss: 6.230281352996826\r\nStep 694, loss: 6.411371231079102\r\nStep 695, loss: 6.253514766693115\r\nStep 696, loss: 6.237787246704102\r\nStep 697, loss: 6.272624969482422\r\nStep 698, loss: 6.260644912719727\r\nStep 699, loss: 6.421164512634277\r\nStep 700, loss: 6.254258155822754\r\nStep 701, loss: 6.27919864654541\r\nStep 702, loss: 
6.325151443481445\r\nStep 703, loss: 6.644067764282227\r\nStep 704, loss: 6.6547932624816895\r\nStep 705, loss: 6.205949783325195\r\nStep 706, loss: 6.244408130645752\r\nStep 707, loss: 6.601984977722168\r\nStep 708, loss: 6.439548969268799\r\nStep 709, loss: 6.626750469207764\r\nStep 710, loss: 6.415212154388428\r\nStep 711, loss: 6.429360389709473\r\nStep 712, loss: 6.42447566986084\r\nStep 713, loss: 6.201557636260986\r\nStep 714, loss: 6.443551540374756\r\nStep 715, loss: 6.187041759490967\r\nStep 716, loss: 6.5973591804504395\r\nStep 717, loss: 6.139087677001953\r\nStep 718, loss: 6.271304607391357\r\nStep 719, loss: 6.3489789962768555\r\nStep 480, loss: 6.983112335205078\r\nStep 481, loss: 7.115540027618408\r\nStep 482, loss: 6.859630584716797\r\nStep 483, loss: 7.162317276000977\r\nStep 484, loss: 7.2645063400268555\r\nStep 485, loss: 7.095457077026367\r\nStep 486, loss: 7.0156145095825195\r\nStep 487, loss: 6.8184332847595215\r\nStep 488, loss: 6.952903747558594\r\nStep 489, loss: 6.963713645935059\r\nStep 490, loss: 6.7667388916015625\r\nStep 491, loss: 6.931082248687744\r\nStep 492, loss: 6.961340427398682\r\nStep 493, loss: 7.058728218078613\r\nStep 494, loss: 6.897120475769043\r\nStep 495, loss: 6.869744300842285\r\nStep 496, loss: 6.865843296051025\r\nStep 497, loss: 6.935029983520508\r\nStep 498, loss: 6.947277069091797\r\nStep 499, loss: 6.898039817810059\r\nStep 500, loss: 6.908406734466553\r\nStep 501, loss: 6.652162075042725\r\nStep 502, loss: 6.679863452911377\r\nStep 503, loss: 6.796136379241943\r\nStep 504, loss: 6.567356109619141\r\nStep 505, loss: 7.073315620422363\r\nStep 506, loss: 6.8232622146606445\r\nStep 507, loss: 6.834505081176758\r\nStep 508, loss: 6.863307476043701\r\nStep 509, loss: 6.771708965301514\r\nStep 510, 
loss: 6.884707927703857\r\nStep 511, loss: 6.796646595001221\r\nStep 512, loss: 6.833789825439453\r\nStep 513, loss: 6.7259440422058105\r\nStep 514, loss: 6.863130569458008\r\nStep 515, loss: 6.853940486907959\r\nStep 516, loss: 6.750757217407227\r\nStep 517, loss: 6.462499141693115\r\nStep 518, loss: 6.461126804351807\r\nStep 519, loss: 6.87819766998291\r\nStep 520, loss: 7.006588935852051\r\nStep 521, loss: 6.969130992889404\r\nStep 522, loss: 6.808895111083984\r\nStep 523, loss: 6.943464279174805\r\nStep 524, loss: 7.139779567718506\r\nStep 525, loss: 6.821377754211426\r\nStep 526, loss: 7.0501484870910645\r\nStep 527, loss: 6.7269287109375\r\nStep 528, loss: 6.502286911010742\r\nStep 529, loss: 6.991916656494141\r\nStep 530, loss: 6.809881687164307\r\nStep 531, loss: 6.804445743560791\r\nStep 532, loss: 6.871065616607666\r\nStep 533, loss: 6.632485866546631\r\nStep 534, loss: 6.804165840148926\r\nStep 535, loss: 6.6506452560424805\r\nStep 536, loss: 6.854793548583984\r\nStep 537, loss: 6.7164835929870605\r\nStep 538, loss: 6.879791736602783\r\nStep 539, loss: 6.811768054962158\r\nStep 540, loss: 6.637771129608154\r\nStep 541, loss: 6.691093921661377\r\nStep 542, loss: 6.608028411865234\r\nStep 543, loss: 6.851551055908203\r\nStep 544, loss: 6.784187316894531\r\nStep 545, loss: 6.733605861663818\r\nStep 546, loss: 6.900415897369385\r\nStep 547, loss: 6.957789897918701\r\nStep 548, loss: 6.842604637145996\r\nStep 549, loss: 6.756542682647705\r\nStep 550, loss: 6.680877685546875\r\nStep 551, loss: 6.7578535079956055\r\nStep 552, loss: 6.835852146148682\r\nStep 553, loss: 6.846309661865234\r\nStep 554, loss: 6.76045036315918\r\nStep 555, loss: 6.875919342041016\r\nStep 556, loss: 6.824545860290527\r\nStep 557, loss: 6.6092424392700195\r\nStep 558, loss: 6.697463512420654\r\nStep 559, loss: 6.92261266708374\r\nStep 560, loss: 6.7938008308410645\r\nStep 561, loss: 6.867816925048828\r\nStep 562, loss: 6.752100467681885\r\nStep 563, loss: 6.554731369018555\r\nStep 564, loss: 6.557769775390625\r\nStep 565, loss: 6.742979049682617\r\nStep 566, loss: 6.712817192077637\r\nStep 567, loss: 6.586728572845459\r\nStep 568, loss: 6.741764545440674\r\nStep 569, loss: 6.896045207977295\r\nStep 570, loss: 6.683651447296143\r\nStep 571, loss: 6.844089984893799\r\nStep 572, loss: 6.487730026245117\r\nStep 573, loss: 6.547028064727783\r\nStep 574, loss: 6.402328014373779\r\nStep 575, loss: 6.779541969299316\r\nStep 576, loss: 6.89146614074707\r\nStep 577, loss: 6.612976551055908\r\nStep 578, loss: 6.771749019622803\r\nStep 579, loss: 6.556238651275635\r\nStep 580, loss: 6.693332195281982\r\nStep 581, loss: 6.658258438110352\r\nStep 582, loss: 6.8479323387146\r\nStep 583, loss: 6.655804634094238\r\nStep 584, loss: 6.5051422119140625\r\nStep 585, loss: 6.857949256896973\r\nStep 586, loss: 6.695587635040283\r\nStep 587, loss: 6.870244026184082\r\nStep 588, loss: 6.743365287780762\r\nStep 589, loss: 6.722917556762695\r\nStep 590, loss: 6.706783771514893\r\nStep 591, loss: 6.568246364593506\r\nStep 592, loss: 6.578851222991943\r\nStep 593, loss: 6.753204822540283\r\nStep 594, loss: 6.574229717254639\r\nStep 595, loss: 6.766374588012695\r\nStep 596, loss: 6.641284942626953\r\nStep 597, loss: 6.712769031524658\r\nStep 598, loss: 6.491716384887695\r\nStep 599, loss: 6.898193836212158\r\nStep 600, loss: 6.7026472091674805\r\nStep 601, loss: 6.534832000732422\r\nStep 602, loss: 6.7139716148376465\r\nStep 603, loss: 6.692238807678223\r\nStep 604, loss: 6.685831069946289\r\nStep 605, loss: 6.719960689544678\r\nStep 606, 
loss: 6.76458215713501\r\nStep 607, loss: 6.115108489990234\r\nStep 608, loss: 6.482534408569336\r\nStep 609, loss: 6.740872859954834\r\nStep 610, loss: 6.589570045471191\r\nStep 611, loss: 6.723851203918457\r\nStep 612, loss: 6.53096866607666\r\nStep 613, loss: 6.657890319824219\r\nStep 614, loss: 6.608828067779541\r\nStep 615, loss: 6.533605575561523\r\nStep 616, loss: 6.6826276779174805\r\nStep 617, loss: 6.5526909828186035\r\nStep 618, loss: 6.545164108276367\r\nStep 619, loss: 6.665560722351074\r\nStep 620, loss: 6.732226371765137\r\nStep 621, loss: 6.427579402923584\r\nStep 622, loss: 6.500489711761475\r\nStep 623, loss: 6.419438362121582\r\nStep 624, loss: 6.411679744720459\r\nStep 625, loss: 6.283699035644531\r\nStep 626, loss: 6.6168928146362305\r\nStep 627, loss: 6.747786521911621\r\nStep 628, loss: 6.7484354972839355\r\nStep 629, loss: 6.781527519226074\r\nStep 630, loss: 6.479393005371094\r\nStep 631, loss: 6.516376972198486\r\nStep 632, loss: 6.578348636627197\r\nStep 633, loss: 6.651546478271484\r\nStep 634, loss: 6.660653591156006\r\nStep 635, loss: 6.57742977142334\r\nStep 636, loss: 6.78727912902832\r\nStep 637, loss: 6.698579788208008\r\nStep 638, loss: 6.652580261230469\r\nStep 639, loss: 6.563304424285889\r\nStep 640, loss: 6.456714153289795\r\nStep 641, loss: 6.529811859130859\r\nStep 642, loss: 6.3940534591674805\r\nStep 643, loss: 6.569890975952148\r\nStep 644, loss: 6.334028720855713\r\nStep 645, loss: 6.3808159828186035\r\nStep 646, loss: 6.5262627601623535\r\nStep 647, loss: 6.602876663208008\r\nStep 648, loss: 6.516875743865967\r\nStep 649, loss: 7.000062465667725\r\nStep 650, loss: 6.422936916351318\r\nStep 651, loss: 6.553537368774414\r\nStep 652, loss: 6.638210296630859\r\nStep 653, loss: 6.648630619049072\r\nStep 654, loss: 6.63344144821167\r\nStep 655, loss: 6.304746627807617\r\nStep 656, loss: 6.512808799743652\r\nStep 657, loss: 6.818546295166016\r\nStep 658, loss: 6.52738618850708\r\nStep 659, loss: 6.410164833068848\r\nStep 660, loss: 6.61316442489624\r\nStep 661, loss: 6.527656555175781\r\nStep 662, loss: 6.427692890167236\r\nStep 663, loss: 6.674975395202637\r\nStep 664, loss: 6.385855674743652\r\nStep 665, loss: 6.429081439971924\r\nStep 666, loss: 6.429695129394531\r\nStep 667, loss: 6.673699855804443\r\nStep 668, loss: 6.5708208084106445\r\nStep 669, loss: 6.557399749755859\r\nStep 670, loss: 6.487589359283447\r\nStep 671, loss: 6.37391471862793\r\nStep 672, loss: 6.321193695068359\r\nStep 673, loss: 6.604182720184326\r\nStep 674, loss: 6.144009590148926\r\nStep 675, loss: 6.443563461303711\r\nStep 676, loss: 6.617156505584717\r\nStep 677, loss: 6.57207727432251\r\nStep 678, loss: 6.564396858215332\r\nStep 679, loss: 6.216585636138916\r\nStep 680, loss: 6.388035774230957\r\nStep 681, loss: 6.428990364074707\r\nStep 682, loss: 6.369060039520264\r\nStep 683, loss: 6.417286396026611\r\nStep 684, loss: 6.46744966506958\r\nStep 685, loss: 6.359501361846924\r\nStep 686, loss: 6.588460445404053\r\nStep 687, loss: 6.498830318450928\r\nStep 688, loss: 6.47416877746582\r\nStep 689, loss: 6.359691619873047\r\nStep 690, loss: 6.246201515197754\r\nStep 691, loss: 6.353740215301514\r\nStep 692, loss: 6.310497283935547\r\nStep 693, loss: 6.230281352996826\r\nStep 694, loss: 6.411371231079102\r\nStep 695, loss: 6.253514766693115\r\nStep 696, loss: 6.237787246704102\r\nStep 697, loss: 6.272624969482422\r\nStep 698, loss: 6.260644912719727\r\nStep 699, loss: 6.421164512634277\r\nStep 700, loss: 6.254258155822754\r\nStep 701, loss: 6.27919864654541\r\nStep 702, loss: 
6.325151443481445\r\nStep 703, loss: 6.644067764282227\r\nStep 704, loss: 6.6547932624816895\r\nStep 705, loss: 6.205949783325195\r\nStep 706, loss: 6.244408130645752\r\nStep 707, loss: 6.601984977722168\r\nStep 708, loss: 6.439548969268799\r\nStep 709, loss: 6.626750469207764\r\nStep 710, loss: 6.415212154388428\r\nStep 711, loss: 6.429360389709473\r\nStep 712, loss: 6.42447566986084\r\nStep 713, loss: 6.201557636260986\r\nStep 714, loss: 6.443551540374756\r\nStep 715, loss: 6.187041759490967\r\nStep 716, loss: 6.5973591804504395\r\nStep 717, loss: 6.139087677001953\r\nStep 718, loss: 6.271304607391357\r\nStep 719, loss: 6.3489789962768555\r\nStep 480, loss: 6.983112335205078\r\nStep 481, loss: 7.115540027618408\r\nStep 482, loss: 6.859630584716797\r\nStep 483, loss: 7.162317276000977\r\nStep 484, loss: 7.2645063400268555\r\nStep 485, loss: 7.095457077026367\r\nStep 486, loss: 7.0156145095825195\r\nStep 487, loss: 6.8184332847595215\r\nStep 488, loss: 6.952903747558594\r\nStep 489, loss: 6.963713645935059\r\nStep 490, loss: 6.7667388916015625\r\nStep 491, loss: 6.931082248687744\r\nStep 492, loss: 6.961340427398682\r\nStep 493, loss: 7.058728218078613\r\nStep 494, loss: 6.897120475769043\r\nStep 495, loss: 6.869744300842285\r\nStep 496, loss: 6.865843296051025\r\nStep 497, loss: 6.935029983520508\r\nStep 498, loss: 6.947277069091797\r\nStep 499, loss: 6.898039817810059\r\nStep 500, loss: 6.908406734466553\r\nStep 501, loss: 6.652162075042725\r\nStep 502, loss: 6.679863452911377\r\nStep 503, loss: 6.796136379241943\r\nStep 504, loss: 6.567356109619141\r\nStep 505, loss: 7.073315620422363\r\nStep 506, loss: 6.8232622146606445\r\nStep 507, loss: 6.834505081176758\r\nStep 508, loss: 6.863307476043701\r\nStep 509, loss: 6.771708965301514\r\nStep 510, loss: 6.884707927703857\r\nStep 511, loss: 6.796646595001221\r\nStep 512, loss: 6.833789825439453\r\nStep 513, loss: 6.7259440422058105\r\nStep 514, loss: 6.863130569458008\r\nStep 515, loss: 6.853940486907959\r\nStep 516, loss: 6.750757217407227\r\nStep 517, loss: 6.462499141693115\r\nStep 518, loss: 6.461126804351807\r\nStep 519, loss: 6.87819766998291\r\nStep 520, loss: 7.006588935852051\r\nStep 521, loss: 6.969130992889404\r\nStep 522, loss: 6.808895111083984\r\nStep 523, loss: 6.943464279174805\r\nStep 524, loss: 7.139779567718506\r\nStep 525, loss: 6.821377754211426\r\nStep 526, loss: 7.0501484870910645\r\nStep 527, loss: 6.7269287109375\r\nStep 528, loss: 6.502286911010742\r\nStep 529, loss: 6.991916656494141\r\nStep 530, loss: 6.809881687164307\r\nStep 531, loss: 6.804445743560791\r\nStep 532, loss: 6.871065616607666\r\nStep 533, loss: 6.632485866546631\r\nStep 534, loss: 6.804165840148926\r\nStep 535, loss: 6.6506452560424805\r\nStep 536, loss: 6.854793548583984\r\nStep 537, loss: 6.7164835929870605\r\nStep 538, loss: 6.879791736602783\r\nStep 539, loss: 6.811768054962158\r\nStep 540, loss: 6.637771129608154\r\nStep 541, loss: 6.691093921661377\r\nStep 542, loss: 6.608028411865234\r\nStep 543, loss: 6.851551055908203\r\nStep 544, loss: 6.784187316894531\r\nStep 545, loss: 6.733605861663818\r\nStep 546, loss: 6.900415897369385\r\nStep 547, loss: 6.957789897918701\r\nStep 548, loss: 6.842604637145996\r\nStep 549, loss: 6.756542682647705\r\nStep 550, loss: 6.680877685546875\r\nStep 551, loss: 6.7578535079956055\r\nStep 552, loss: 6.835852146148682\r\nStep 553, loss: 6.846309661865234\r\nStep 554, loss: 6.76045036315918\r\nStep 555, loss: 6.875919342041016\r\nStep 556, loss: 6.824545860290527\r\nStep 557, loss: 6.6092424392700195\r\nStep 558, 
loss: 6.697463512420654\r\nStep 559, loss: 6.92261266708374\r\nStep 560, loss: 6.7938008308410645\r\nStep 561, loss: 6.867816925048828\r\nStep 562, loss: 6.752100467681885\r\nStep 563, loss: 6.554731369018555\r\nStep 564, loss: 6.557769775390625\r\nStep 565, loss: 6.742979049682617\r\nStep 566, loss: 6.712817192077637\r\nStep 567, loss: 6.586728572845459\r\nStep 568, loss: 6.741764545440674\r\nStep 569, loss: 6.896045207977295\r\nStep 570, loss: 6.683651447296143\r\nStep 571, loss: 6.844089984893799\r\nStep 572, loss: 6.487730026245117\r\nStep 573, loss: 6.547028064727783\r\nStep 574, loss: 6.402328014373779\r\nStep 575, loss: 6.779541969299316\r\nStep 576, loss: 6.89146614074707\r\nStep 577, loss: 6.612976551055908\r\nStep 578, loss: 6.771749019622803\r\nStep 579, loss: 6.556238651275635\r\nStep 580, loss: 6.693332195281982\r\nStep 581, loss: 6.658258438110352\r\nStep 582, loss: 6.8479323387146\r\nStep 583, loss: 6.655804634094238\r\nStep 584, loss: 6.5051422119140625\r\nStep 585, loss: 6.857949256896973\r\nStep 586, loss: 6.695587635040283\r\nStep 587, loss: 6.870244026184082\r\nStep 588, loss: 6.743365287780762\r\nStep 589, loss: 6.722917556762695\r\nStep 590, loss: 6.706783771514893\r\nStep 591, loss: 6.568246364593506\r\nStep 592, loss: 6.578851222991943\r\nStep 593, loss: 6.753204822540283\r\nStep 594, loss: 6.574229717254639\r\nStep 595, loss: 6.766374588012695\r\nStep 596, loss: 6.641284942626953\r\nStep 597, loss: 6.712769031524658\r\nStep 598, loss: 6.491716384887695\r\nStep 599, loss: 6.898193836212158\r\nStep 600, loss: 6.7026472091674805\r\nStep 601, loss: 6.534832000732422\r\nStep 602, loss: 6.7139716148376465\r\nStep 603, loss: 6.692238807678223\r\nStep 604, loss: 6.685831069946289\r\nStep 605, loss: 6.719960689544678\r\nStep 606, loss: 6.76458215713501\r\nStep 607, loss: 6.115108489990234\r\nStep 608, loss: 6.482534408569336\r\nStep 609, loss: 6.740872859954834\r\nStep 610, loss: 6.589570045471191\r\nStep 611, loss: 6.723851203918457\r\nStep 612, loss: 6.53096866607666\r\nStep 613, loss: 6.657890319824219\r\nStep 614, loss: 6.608828067779541\r\nStep 615, loss: 6.533605575561523\r\nStep 616, loss: 6.6826276779174805\r\nStep 617, loss: 6.5526909828186035\r\nStep 618, loss: 6.545164108276367\r\nStep 619, loss: 6.665560722351074\r\nStep 620, loss: 6.732226371765137\r\nStep 621, loss: 6.427579402923584\r\nStep 622, loss: 6.500489711761475\r\nStep 623, loss: 6.419438362121582\r\nStep 624, loss: 6.411679744720459\r\nStep 625, loss: 6.283699035644531\r\nStep 626, loss: 6.6168928146362305\r\nStep 627, loss: 6.747786521911621\r\nStep 628, loss: 6.7484354972839355\r\nStep 629, loss: 6.781527519226074\r\nStep 630, loss: 6.479393005371094\r\nStep 631, loss: 6.516376972198486\r\nStep 632, loss: 6.578348636627197\r\nStep 633, loss: 6.651546478271484\r\nStep 634, loss: 6.660653591156006\r\nStep 635, loss: 6.57742977142334\r\nStep 636, loss: 6.78727912902832\r\nStep 637, loss: 6.698579788208008\r\nStep 638, loss: 6.652580261230469\r\nStep 639, loss: 6.563304424285889\r\nStep 640, loss: 6.456714153289795\r\nStep 641, loss: 6.529811859130859\r\nStep 642, loss: 6.3940534591674805\r\nStep 643, loss: 6.569890975952148\r\nStep 644, loss: 6.334028720855713\r\nStep 645, loss: 6.3808159828186035\r\nStep 646, loss: 6.5262627601623535\r\nStep 647, loss: 6.602876663208008\r\nStep 648, loss: 6.516875743865967\r\nStep 649, loss: 7.000062465667725\r\nStep 650, loss: 6.422936916351318\r\nStep 651, loss: 6.553537368774414\r\nStep 652, loss: 6.638210296630859\r\nStep 653, loss: 6.648630619049072\r\nStep 654, 
loss: 6.63344144821167\r\nStep 655, loss: 6.304746627807617\r\nStep 656, loss: 6.512808799743652\r\nStep 657, loss: 6.818546295166016\r\nStep 658, loss: 6.52738618850708\r\nStep 659, loss: 6.410164833068848\r\nStep 660, loss: 6.61316442489624\r\nStep 661, loss: 6.527656555175781\r\nStep 662, loss: 6.427692890167236\r\nStep 663, loss: 6.674975395202637\r\nStep 664, loss: 6.385855674743652\r\nStep 665, loss: 6.429081439971924\r\nStep 666, loss: 6.429695129394531\r\nStep 667, loss: 6.673699855804443\r\nStep 668, loss: 6.5708208084106445\r\nStep 669, loss: 6.557399749755859\r\nStep 670, loss: 6.487589359283447\r\nStep 671, loss: 6.37391471862793\r\nStep 672, loss: 6.321193695068359\r\nStep 673, loss: 6.604182720184326\r\nStep 674, loss: 6.144009590148926\r\nStep 675, loss: 6.443563461303711\r\nStep 676, loss: 6.617156505584717\r\nStep 677, loss: 6.57207727432251\r\nStep 678, loss: 6.564396858215332\r\nStep 679, loss: 6.216585636138916\r\nStep 680, loss: 6.388035774230957\r\nStep 681, loss: 6.428990364074707\r\nStep 682, loss: 6.369060039520264\r\nStep 683, loss: 6.417286396026611\r\nStep 684, loss: 6.46744966506958\r\nStep 685, loss: 6.359501361846924\r\nStep 686, loss: 6.588460445404053\r\nStep 687, loss: 6.498830318450928\r\nStep 688, loss: 6.47416877746582\r\nStep 689, loss: 6.359691619873047\r\nStep 690, loss: 6.246201515197754\r\nStep 691, loss: 6.353740215301514\r\nStep 692, loss: 6.310497283935547\r\nStep 693, loss: 6.230281352996826\r\nStep 694, loss: 6.411371231079102\r\nStep 695, loss: 6.253514766693115\r\nStep 696, loss: 6.237787246704102\r\nStep 697, loss: 6.272624969482422\r\nStep 698, loss: 6.260644912719727\r\nStep 699, loss: 6.421164512634277\r\nStep 700, loss: 6.254258155822754\r\nStep 701, loss: 6.27919864654541\r\nStep 702, loss: 6.325151443481445\r\nStep 703, loss: 6.644067764282227\r\nStep 704, loss: 6.6547932624816895\r\nStep 705, loss: 6.205949783325195\r\nStep 706, loss: 6.244408130645752\r\nStep 707, loss: 6.601984977722168\r\nStep 708, loss: 6.439548969268799\r\nStep 709, loss: 6.626750469207764\r\nStep 710, loss: 6.415212154388428\r\nStep 711, loss: 6.429360389709473\r\nStep 712, loss: 6.42447566986084\r\nStep 713, loss: 6.201557636260986\r\nStep 714, loss: 6.443551540374756\r\nStep 715, loss: 6.187041759490967\r\nStep 716, loss: 6.5973591804504395\r\nStep 717, loss: 6.139087677001953\r\nStep 718, loss: 6.271304607391357\r\nStep 719, loss: 6.3489789962768555\r\n",,terminal_output +614,1895905,"TERMINAL",0,0,"Step 720, loss: 6.494800090789795\r\nStep 721, loss: 6.459004878997803\r\nStep 722, loss: 6.038323879241943\r\nStep 723, loss: 6.239955902099609\r\nStep 724, loss: 6.164690017700195\r\nStep 725, loss: 6.023187160491943\r\nStep 726, loss: 6.360325336456299\r\nStep 727, loss: 6.236156940460205\r\nStep 728, loss: 6.494116306304932\r\nStep 729, loss: 6.3753180503845215\r\nStep 730, loss: 6.421533107757568\r\nStep 731, loss: 6.415943145751953\r\nStep 732, loss: 6.322390556335449\r\nStep 733, loss: 6.467452049255371\r\nStep 734, loss: 6.394821643829346\r\nStep 735, loss: 6.386702060699463\r\nStep 736, loss: 6.096451282501221\r\nStep 737, loss: 6.265408515930176\r\nStep 738, loss: 6.323524475097656\r\nStep 739, loss: 6.211463451385498\r\nStep 740, loss: 6.442022323608398\r\nStep 741, loss: 6.454809188842773\r\nStep 742, loss: 6.425616264343262\r\nStep 743, loss: 6.3252410888671875\r\nStep 744, loss: 6.326277256011963\r\nStep 745, loss: 6.334142684936523\r\nStep 746, loss: 6.1762824058532715\r\nStep 747, loss: 6.280177116394043\r\nStep 748, loss: 
6.367738246917725\r\nStep 749, loss: 6.582081317901611\r\nStep 750, loss: 6.327296257019043\r\nStep 751, loss: 6.393777847290039\r\nStep 752, loss: 6.371941089630127\r\nStep 753, loss: 6.315849781036377\r\nStep 754, loss: 6.108580112457275\r\nStep 755, loss: 6.430636882781982\r\nStep 756, loss: 6.307670593261719\r\nStep 757, loss: 6.541366100311279\r\nStep 758, loss: 6.3531646728515625\r\nStep 759, loss: 6.430586338043213\r\nStep 760, loss: 6.280318260192871\r\nStep 761, loss: 6.202603816986084\r\nStep 762, loss: 6.395080089569092\r\nStep 763, loss: 6.365248203277588\r\nStep 764, loss: 6.4529008865356445\r\nStep 765, loss: 6.3356852531433105\r\nStep 766, loss: 6.305147647857666\r\nStep 767, loss: 6.277276039123535\r\nStep 768, loss: 6.354142189025879\r\nStep 769, loss: 6.301516532897949\r\nStep 770, loss: 6.12398099899292\r\nStep 771, loss: 6.372938632965088\r\nStep 772, loss: 6.411468505859375\r\nStep 773, loss: 6.192555904388428\r\nStep 774, loss: 5.948481559753418\r\nStep 775, loss: 6.330682754516602\r\nStep 776, loss: 6.2989959716796875\r\nStep 777, loss: 6.1877264976501465\r\nStep 778, loss: 6.277063846588135\r\nStep 779, loss: 6.297116279602051\r\nStep 780, loss: 6.239116668701172\r\nStep 781, loss: 6.166967391967773\r\nStep 782, loss: 6.13632345199585\r\nStep 783, loss: 6.336677551269531\r\nStep 784, loss: 6.051889419555664\r\nStep 785, loss: 6.353695869445801\r\nStep 786, loss: 6.255046367645264\r\nStep 787, loss: 6.1380510330200195\r\nStep 788, loss: 6.241057395935059\r\nStep 789, loss: 6.224755764007568\r\nStep 790, loss: 6.2249298095703125\r\nStep 791, loss: 6.107750415802002\r\nStep 792, loss: 6.3312273025512695\r\nStep 793, loss: 6.514132499694824\r\nStep 794, loss: 6.212930202484131\r\nStep 795, loss: 6.130778789520264\r\nStep 796, loss: 6.1399664878845215\r\nStep 797, loss: 6.231768608093262\r\nStep 798, loss: 6.27024507522583\r\nStep 799, loss: 6.25049352645874\r\nStep 800, loss: 6.419933795928955\r\nStep 801, loss: 6.354002952575684\r\nStep 802, loss: 6.1690449714660645\r\nStep 803, loss: 6.404770851135254\r\nStep 804, loss: 6.176255226135254\r\nStep 805, loss: 6.160345554351807\r\nStep 806, loss: 6.026020050048828\r\nStep 807, loss: 6.120425701141357\r\nStep 808, loss: 6.201480865478516\r\nStep 809, loss: 5.947427272796631\r\nStep 810, loss: 6.122364044189453\r\nStep 811, loss: 6.248709678649902\r\nStep 812, loss: 5.753745079040527\r\nStep 813, loss: 5.991898536682129\r\nStep 814, loss: 6.357680320739746\r\nStep 815, loss: 6.341106414794922\r\nStep 816, loss: 6.066565990447998\r\nStep 817, loss: 6.128784656524658\r\nStep 818, loss: 6.312588214874268\r\nStep 819, loss: 6.097206115722656\r\nStep 820, loss: 6.430177688598633\r\nStep 821, loss: 6.41364049911499\r\nStep 822, loss: 6.162365436553955\r\nStep 823, loss: 6.151688098907471\r\nStep 824, loss: 6.222545623779297\r\nStep 825, loss: 6.097970962524414\r\nStep 826, loss: 6.1611409187316895\r\nStep 827, loss: 6.074286460876465\r\nStep 828, loss: 6.186343193054199\r\nStep 829, loss: 6.176538467407227\r\nStep 830, loss: 6.377328872680664\r\nStep 831, loss: 6.027149200439453\r\nStep 832, loss: 6.137227535247803\r\nStep 833, loss: 5.964600086212158\r\nStep 834, loss: 6.055864334106445\r\nStep 835, loss: 6.2789201736450195\r\nStep 836, loss: 6.094759464263916\r\nStep 837, loss: 6.065205097198486\r\nStep 838, loss: 6.283683776855469\r\nStep 839, loss: 6.331536293029785\r\nStep 840, loss: 6.18127965927124\r\nStep 841, loss: 6.406125068664551\r\nStep 842, loss: 6.1166863441467285\r\nStep 843, loss: 6.483761310577393\r\nStep 844, 
loss: 6.2247514724731445\r\nStep 845, loss: 6.057960510253906\r\nStep 846, loss: 6.135593414306641\r\nStep 847, loss: 6.375853061676025\r\nStep 848, loss: 6.284068584442139\r\nStep 849, loss: 6.4397501945495605\r\nStep 850, loss: 6.404928207397461\r\nStep 851, loss: 6.106307506561279\r\nStep 852, loss: 6.114143371582031\r\nStep 853, loss: 6.0966901779174805\r\nStep 854, loss: 6.145266532897949\r\nStep 855, loss: 6.366427898406982\r\nStep 856, loss: 6.286569118499756\r\nStep 857, loss: 6.32176399230957\r\nStep 858, loss: 6.094587802886963\r\nStep 859, loss: 6.3647260665893555\r\nStep 860, loss: 6.1760149002075195\r\nStep 861, loss: 6.4247517585754395\r\nStep 862, loss: 5.97499418258667\r\nStep 863, loss: 6.248610496520996\r\nStep 864, loss: 6.039318084716797\r\nStep 865, loss: 5.916710376739502\r\nStep 866, loss: 6.325806617736816\r\nStep 867, loss: 6.1494221687316895\r\nStep 868, loss: 6.232687950134277\r\nStep 869, loss: 6.429934978485107\r\nStep 870, loss: 6.108285427093506\r\nStep 871, loss: 5.781312465667725\r\nStep 872, loss: 6.014883995056152\r\nStep 873, loss: 6.2820658683776855\r\nStep 874, loss: 6.351875305175781\r\nStep 875, loss: 6.195911407470703\r\nStep 876, loss: 6.172882080078125\r\nStep 877, loss: 6.110972881317139\r\nStep 878, loss: 6.278717041015625\r\nStep 879, loss: 6.230126857757568\r\nStep 880, loss: 6.232804775238037\r\nStep 881, loss: 6.269811153411865\r\nStep 882, loss: 6.088776588439941\r\nStep 883, loss: 6.010383129119873\r\nStep 884, loss: 6.178844928741455\r\nStep 885, loss: 6.003259181976318\r\nStep 886, loss: 6.166425704956055\r\nStep 887, loss: 6.099825859069824\r\nStep 888, loss: 6.068012714385986\r\nStep 889, loss: 6.143975734710693\r\nStep 890, loss: 6.1533203125\r\nStep 891, loss: 6.34451150894165\r\nStep 892, loss: 6.029151439666748\r\nStep 893, loss: 6.1852898597717285\r\nStep 894, loss: 6.0513129234313965\r\nStep 895, loss: 6.044767379760742\r\nStep 896, loss: 6.009609222412109\r\nStep 897, loss: 6.071310997009277\r\nStep 898, loss: 6.046830177307129\r\nStep 899, loss: 6.27297306060791\r\nStep 900, loss: 5.977374076843262\r\nStep 901, loss: 6.2357282638549805\r\nStep 902, loss: 6.078926086425781\r\nStep 903, loss: 6.232381343841553\r\nStep 904, loss: 6.1165056228637695\r\nStep 905, loss: 5.883611679077148\r\nStep 906, loss: 6.12237024307251\r\nStep 907, loss: 5.954657077789307\r\nStep 908, loss: 6.123085975646973\r\nStep 909, loss: 6.128693580627441\r\nStep 910, loss: 6.249812126159668\r\nStep 911, loss: 6.336694240570068\r\nStep 912, loss: 6.18726921081543\r\nStep 913, loss: 6.142055988311768\r\nStep 914, loss: 6.15005350112915\r\nStep 915, loss: 6.2247843742370605\r\nStep 916, loss: 5.9457573890686035\r\nStep 917, loss: 6.241605758666992\r\nStep 918, loss: 5.9106245040893555\r\nStep 919, loss: 6.1775126457214355\r\nStep 920, loss: 6.269512176513672\r\nStep 921, loss: 6.126793384552002\r\nStep 922, loss: 6.060258388519287\r\nStep 923, loss: 6.299481391906738\r\nStep 924, loss: 6.196365833282471\r\nStep 925, loss: 6.079230785369873\r\nStep 926, loss: 5.997030258178711\r\nStep 927, loss: 5.936086177825928\r\nStep 928, loss: 6.205533504486084\r\nStep 929, loss: 5.959074020385742\r\nStep 930, loss: 6.096640586853027\r\nStep 931, loss: 5.794227600097656\r\nStep 932, loss: 6.476027011871338\r\nStep 933, loss: 6.135471820831299\r\nStep 934, loss: 5.915626049041748\r\nStep 935, loss: 5.807375431060791\r\nStep 936, loss: 6.103608131408691\r\nStep 937, loss: 6.056851387023926\r\nStep 938, loss: 6.022923469543457\r\nStep 939, loss: 6.044239521026611\r\nStep 
940, loss: 6.298294544219971\r\nStep 941, loss: 6.026649475097656\r\nStep 942, loss: 5.9728193283081055\r\nStep 943, loss: 6.076418399810791\r\nStep 944, loss: 6.213939189910889\r\nStep 945, loss: 6.397153377532959\r\nStep 946, loss: 6.119306564331055\r\nStep 947, loss: 6.056985378265381\r\nStep 948, loss: 6.051384925842285\r\nStep 949, loss: 6.099973678588867\r\nStep 950, loss: 6.143035411834717\r\nStep 951, loss: 5.803652286529541\r\nStep 952, loss: 6.222076416015625\r\nStep 953, loss: 5.963755130767822\r\nStep 954, loss: 6.132559776306152\r\nStep 955, loss: 6.049510478973389\r\nStep 956, loss: 6.106955528259277\r\nStep 957, loss: 5.84633207321167\r\nStep 958, loss: 6.419929027557373\r\nStep 959, loss: 6.266198635101318\r\n",,terminal_output
+615,2107568,"TERMINAL",0,0,"Step 960, loss: 6.033169746398926\r\nStep 961, loss: 5.9731950759887695\r\nStep 962, loss: 6.23307991027832\r\nStep 963, loss: 6.19943904876709\r\nStep 964, loss: 6.101730823516846\r\nStep 965, loss: 5.880825042724609\r\nStep 966, loss: 6.041092395782471\r\nStep 967, loss: 5.844327926635742\r\nStep 968, loss: 6.100806713104248\r\nStep 969, loss: 5.9085588455200195\r\nStep 970, loss: 6.149028778076172\r\nStep 971, loss: 6.034543037414551\r\nStep 972, loss: 6.1568379402160645\r\nStep 973, loss: 6.1643290519714355\r\nStep 974, loss: 6.053659439086914\r\nStep 975, loss: 5.939082622528076\r\nStep 976, loss: 6.221123695373535\r\nStep 977, loss: 6.16931676864624\r\nStep 978, loss: 6.01908016204834\r\nStep 979, loss: 5.891488075256348\r\nStep 980, loss: 6.0749616622924805\r\nStep 981, loss: 5.928627014160156\r\nStep 982, loss: 5.956394672393799\r\nStep 983, loss: 6.202558517456055\r\nStep 984, loss: 
6.094108581542969\r\nStep 985, loss: 6.13236665725708\r\nStep 986, loss: 6.0032830238342285\r\nStep 987, loss: 6.115525722503662\r\nStep 988, loss: 6.120043754577637\r\nStep 989, loss: 6.080606937408447\r\nStep 990, loss: 5.95778226852417\r\nStep 991, loss: 6.070457458496094\r\nStep 992, loss: 6.171233654022217\r\nStep 993, loss: 6.293028831481934\r\nStep 994, loss: 6.380928039550781\r\nStep 995, loss: 5.713109016418457\r\nStep 996, loss: 6.084442615509033\r\nStep 997, loss: 6.261641502380371\r\nStep 998, loss: 6.047240257263184\r\nStep 999, loss: 6.079785346984863\r\nSaved checkpoint at step 1000\r\nStep 1000, loss: 6.035843372344971\r\nStep 1001, loss: 6.108051776885986\r\nStep 1002, loss: 6.116758346557617\r\nStep 1003, loss: 5.7977752685546875\r\nStep 1004, loss: 5.812051773071289\r\nStep 1005, loss: 6.0407915115356445\r\nStep 1006, loss: 5.839900016784668\r\nStep 1007, loss: 6.123900890350342\r\nStep 1008, loss: 5.853964328765869\r\nStep 1009, loss: 6.022670745849609\r\nStep 1010, loss: 5.817185878753662\r\nStep 1011, loss: 6.083367824554443\r\nStep 1012, loss: 5.918321132659912\r\nStep 1013, loss: 5.9524922370910645\r\nStep 1014, loss: 5.891664505004883\r\nStep 1015, loss: 6.013959884643555\r\nStep 1016, loss: 6.131072521209717\r\nStep 1017, loss: 6.220623016357422\r\nStep 1018, loss: 6.159714221954346\r\nStep 1019, loss: 6.052178382873535\r\nStep 1020, loss: 5.91204833984375\r\nStep 1021, loss: 6.04052209854126\r\nStep 1022, loss: 5.849857330322266\r\nStep 1023, loss: 5.954801559448242\r\nStep 1024, loss: 6.2058305740356445\r\nStep 1025, loss: 6.279372215270996\r\nStep 1026, loss: 6.238644123077393\r\nStep 1027, loss: 6.061661720275879\r\nStep 1028, loss: 5.963023662567139\r\nStep 1029, loss: 6.048671722412109\r\nStep 1030, loss: 5.897725582122803\r\nStep 1031, loss: 5.683635234832764\r\nStep 1032, loss: 6.129833221435547\r\nStep 1033, loss: 5.741326332092285\r\nStep 1034, loss: 6.160009384155273\r\nStep 1035, loss: 6.1450700759887695\r\nStep 1036, loss: 6.172642707824707\r\nStep 1037, loss: 6.058366298675537\r\nStep 1038, loss: 5.94293212890625\r\nStep 1039, loss: 6.21563720703125\r\nStep 1040, loss: 6.0814290046691895\r\nStep 1041, loss: 6.043020725250244\r\nStep 1042, loss: 6.056408882141113\r\nStep 1043, loss: 6.062867641448975\r\nStep 1044, loss: 6.200379848480225\r\nStep 1045, loss: 5.873588562011719\r\nStep 1046, loss: 5.891491889953613\r\nStep 1047, loss: 6.071397304534912\r\nStep 1048, loss: 6.056345462799072\r\nStep 1049, loss: 5.798945426940918\r\nStep 1050, loss: 6.132298469543457\r\nStep 1051, loss: 6.100583076477051\r\nStep 1052, loss: 6.139461994171143\r\nStep 1053, loss: 5.960715293884277\r\nStep 1054, loss: 5.81058406829834\r\nStep 1055, loss: 6.169018268585205\r\nStep 1056, loss: 5.877782821655273\r\nStep 1057, loss: 5.879096508026123\r\nStep 1058, loss: 6.154821395874023\r\nStep 1059, loss: 5.982166290283203\r\nStep 1060, loss: 5.943661212921143\r\nStep 1061, loss: 5.901979923248291\r\nStep 1062, loss: 6.248556137084961\r\nStep 1063, loss: 5.954067707061768\r\nStep 1064, loss: 6.096056938171387\r\nStep 1065, loss: 6.0446577072143555\r\nStep 1066, loss: 6.038807392120361\r\nStep 1067, loss: 6.111401081085205\r\nStep 1068, loss: 5.758209228515625\r\nStep 1069, loss: 6.067367076873779\r\nStep 1070, loss: 6.034627914428711\r\nStep 1071, loss: 5.70315408706665\r\nStep 1072, loss: 6.187563896179199\r\nStep 1073, loss: 6.03395414352417\r\nStep 1074, loss: 6.183241844177246\r\nStep 1075, loss: 5.988609313964844\r\nStep 1076, loss: 5.9848151206970215\r\nStep 1077, loss: 5.997825622558594\r\nStep 1078, loss: 5.9602274894714355\r\nStep 1079, loss: 6.185821533203125\r\nStep 1080, loss: 6.113000392913818\r\nStep 1081, loss: 5.813572406768799\r\nStep 1082, loss: 6.031902313232422\r\nStep 1083, loss: 6.246595859527588\r\nStep 1084, loss: 6.071300029754639\r\nStep 1085, loss: 6.011821269989014\r\nStep 1086, loss: 5.969056606292725\r\nStep 1087, loss: 5.798336029052734\r\nStep 1088, loss: 5.98215913772583\r\nStep 1089, loss: 6.03286600112915\r\nStep 1090, loss: 6.208583831787109\r\nStep 1091, loss: 6.222377300262451\r\nStep 1092, loss: 5.836613178253174\r\nStep 1093, loss: 5.803591251373291\r\nStep 1094, loss: 6.220637321472168\r\nStep 1095, loss: 5.981595516204834\r\nStep 1096, loss: 5.857076168060303\r\nStep 1097, loss: 6.277763843536377\r\nStep 1098, loss: 6.037679672241211\r\nStep 1099, loss: 5.905936241149902\r\nStep 1100, loss: 5.9843974113464355\r\nStep 1101, loss: 5.916110515594482\r\nStep 1102, loss: 6.055380344390869\r\nStep 1103, loss: 6.084897041320801\r\nStep 1104, loss: 5.830936431884766\r\nStep 1105, loss: 5.786175727844238\r\nStep 1106, loss: 6.168269157409668\r\nStep 1107, loss: 6.115348815917969\r\nStep 1108, loss: 5.961764812469482\r\nStep 1109, loss: 6.075418949127197\r\nStep 1110, loss: 6.103539943695068\r\nStep 1111, loss: 5.9397053718566895\r\nStep 1112, loss: 5.756953716278076\r\nStep 1113, loss: 6.1014862060546875\r\nStep 1114, loss: 6.234945774078369\r\nStep 1115, loss: 5.937449932098389\r\nStep 1116, loss: 5.904244422912598\r\nStep 1117, loss: 5.861159324645996\r\nStep 1118, loss: 6.074119567871094\r\nStep 1119, loss: 6.089116096496582\r\nStep 1120, loss: 6.2227935791015625\r\nStep 1121, loss: 5.813777923583984\r\nStep 1122, loss: 5.870038986206055\r\nStep 1123, loss: 5.962041854858398\r\nStep 1124, loss: 5.867313861846924\r\nStep 1125, loss: 5.885199069976807\r\nStep 1126, loss: 5.777262210845947\r\nStep 1127, loss: 5.913913726806641\r\nStep 1128, loss: 5.945501327514648\r\nStep 1129, loss: 5.744322776794434\r\nStep 1130, loss: 5.698674201965332\r\nStep 1131, loss: 5.983011245727539\r\nStep 1132, loss: 5.925379753112793\r\nStep 1133, loss: 5.851949691772461\r\nStep 1134, loss: 6.076478481292725\r\nStep 1135, loss: 5.9468793869018555\r\nStep 1136, loss: 5.812188148498535\r\nStep 1137, loss: 5.960692882537842\r\nStep 1138, loss: 5.93118143081665\r\nStep 1139, loss: 5.746397495269775\r\nStep 1140, loss: 5.487229824066162\r\nStep 1141, loss: 5.96766471862793\r\nStep 1142, loss: 6.15070915222168\r\nStep 1143, loss: 5.831969738006592\r\nStep 1144, loss: 5.911952972412109\r\nStep 1145, loss: 6.042840003967285\r\nStep 1146, loss: 
6.114328384399414\r\nStep 1147, loss: 5.988990306854248\r\nStep 1148, loss: 6.143561840057373\r\nStep 1149, loss: 5.8496809005737305\r\nStep 1150, loss: 5.917818546295166\r\nStep 1151, loss: 5.904750823974609\r\nStep 1152, loss: 5.7825026512146\r\nStep 1153, loss: 5.831714630126953\r\nStep 1154, loss: 5.84997034072876\r\nStep 1155, loss: 5.978759765625\r\nStep 1156, loss: 5.795989036560059\r\nStep 1157, loss: 5.987977981567383\r\nStep 1158, loss: 5.974612236022949\r\nStep 1159, loss: 5.713154315948486\r\nStep 1160, loss: 5.978531837463379\r\nStep 1161, loss: 6.028726100921631\r\nStep 1162, loss: 6.2134480476379395\r\nStep 1163, loss: 5.937756061553955\r\nStep 1164, loss: 5.921319484710693\r\nStep 1165, loss: 5.687232494354248\r\nStep 1166, loss: 5.6782050132751465\r\nStep 1167, loss: 5.836981773376465\r\nStep 1168, loss: 5.84519100189209\r\nStep 1169, loss: 5.64429235458374\r\nStep 1170, loss: 6.175731658935547\r\nStep 1171, loss: 5.916308403015137\r\nStep 1172, loss: 5.7501654624938965\r\nStep 1173, loss: 5.901617050170898\r\nStep 1174, loss: 5.585201740264893\r\nStep 1175, loss: 5.608188152313232\r\nStep 1176, loss: 5.832749843597412\r\nStep 1177, loss: 6.019138813018799\r\nStep 1178, loss: 6.006782054901123\r\nStep 1179, loss: 5.7725067138671875\r\nStep 1180, loss: 6.102262020111084\r\nStep 1181, loss: 6.279216766357422\r\nStep 1182, loss: 5.8809099197387695\r\nStep 1183, loss: 5.799651622772217\r\nStep 1184, loss: 5.9189887046813965\r\nStep 1185, loss: 6.031906604766846\r\nStep 1186, loss: 6.16923713684082\r\nStep 1187, loss: 5.766611099243164\r\nStep 1188, loss: 5.909252643585205\r\nStep 1189, loss: 5.908527374267578\r\nStep 1190, loss: 5.756149768829346\r\nStep 1191, loss: 6.382627010345459\r\nStep 1192, loss: 6.20557165145874\r\nStep 1193, loss: 5.904033184051514\r\n",,terminal_output
+616,2144456,"TERMINAL",0,0,"srun",,terminal_focus
+617,2145174,"TERMINAL",0,0,"^Csrun: interrupt (one more within 1 sec to abort)\r\nsrun: StepId=3370715.0 tasks 0-3: running\r\n",,terminal_output
+618,2145319,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3370715.0\r\nProcess SpawnProcess-3:\r\nProcess SpawnProcess-5:\r\nsrun: forcing job termination\r\nProcess SpawnProcess-4:\r\nProcess SpawnProcess-5:\r\nProcess SpawnProcess-1:\r\nProcess SpawnProcess-8:\r\nProcess SpawnProcess-1:\r\nProcess SpawnProcess-2:\r\nProcess SpawnProcess-8:\r\nProcess SpawnProcess-2:\r\nProcess SpawnProcess-1:\r\nProcess SpawnProcess-4:\r\nProcess SpawnProcess-2:\r\nProcess SpawnProcess-8:\r\nProcess SpawnProcess-2:\r\nProcess SpawnProcess-3:\r\nProcess SpawnProcess-3:\r\nProcess 
SpawnProcess-1:\r\nProcess SpawnProcess-4:\r\nProcess SpawnProcess-8:\r\nProcess SpawnProcess-4:\r\nsrun: Job step aborted: Waiting up to 32 seconds for job step to finish.\r\nProcess SpawnProcess-3:\r\nslurmstepd: error: *** STEP 3370715.0 ON hkn0402 CANCELLED AT 2025-07-23T15:23:04 ***\r\n",,terminal_output +619,2145495,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3370715.0\r\nsrun: job abort in progress\r\n",,terminal_output +620,2145677,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3370715.0\r\n",,terminal_output +621,2145910,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3370715.0\r\nsrun: job abort in progress\r\n",,terminal_output +622,2146164,"TERMINAL",0,0,"^Csrun: sending Ctrl-C to StepId=3370715.0\r\n",,terminal_output +623,2146423,"TERMINAL",0,0,"^C",,terminal_output +624,2146551,"TERMINAL",0,0,"]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +625,2146630,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +626,2147146,"TERMINAL",0,0,"sh slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",,terminal_output +627,2152320,"TERMINAL",0,0,"8",,terminal_output +628,2152530,"TERMINAL",0,0,"",,terminal_output +629,2153684,"TERMINAL",0,0,"",,terminal_output +630,2153823,"TERMINAL",0,0,"",,terminal_output +631,2155130,"TERMINAL",0,0,"",,terminal_output +632,2155257,"TERMINAL",0,0,"",,terminal_output +633,2155537,"TERMINAL",0,0,"",,terminal_output +634,2155640,"TERMINAL",0,0,"",,terminal_output +635,2155810,"TERMINAL",0,0,"",,terminal_output +636,2157248,"TERMINAL",0,0,"\r\n\r",,terminal_output +637,2158810,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +638,2166523,"TERMINAL",0,0,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",,terminal_output +639,2167696,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +640,2168521,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",1235,0,"",shellscript,selection_mouse +641,2168524,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",1234,0,"",shellscript,selection_command +642,2169910,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",1218,0,"",shellscript,selection_mouse +643,2171345,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",1021,0,"",shellscript,selection_mouse +644,2171349,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",1020,0,"",shellscript,selection_command +645,2173087,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",931,0,"",shellscript,selection_mouse +646,2173755,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,0,"",shellscript,selection_mouse +647,2176731,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,3,"",shellscript,content 
+648,2177259,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,1,"",shellscript,content +649,2177584,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,4,"",shellscript,content +650,2181280,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,1,"",shellscript,content +651,2181568,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,17,"",shellscript,content +652,2181769,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,1,"",shellscript,content +653,2182078,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,19,"",shellscript,content +654,2183014,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,1,"",shellscript,content +655,2183498,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,6,"",shellscript,content +656,2184294,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,1,"",shellscript,content +657,2184680,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,1,"",shellscript,content +658,2185116,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,1,"",shellscript,content +659,2188573,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,0,"a",shellscript,content +660,2188574,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",931,0,"",shellscript,selection_keyboard +661,2188682,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",931,0,"m",shellscript,content +662,2188683,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",932,0,"",shellscript,selection_keyboard +663,2189018,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",931,1,"",shellscript,content +664,2189137,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,1,"",shellscript,content +665,2189308,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,0,"m",shellscript,content +666,2189308,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",931,0,"",shellscript,selection_keyboard +667,2189389,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",931,0,"a",shellscript,content +668,2189390,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",932,0,"",shellscript,selection_keyboard +669,2189467,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",932,0,"s",shellscript,content 
+670,2189468,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",933,0,"",shellscript,selection_keyboard +671,2189524,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",933,0,"k",shellscript,content +672,2189524,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",934,0,"",shellscript,selection_keyboard +673,2189817,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",934,0,"g",shellscript,content +674,2189818,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",935,0,"",shellscript,selection_keyboard +675,2189965,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",935,0,"i",shellscript,content +676,2189966,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",936,0,"",shellscript,selection_keyboard +677,2190083,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",936,0,"t",shellscript,content +678,2190084,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",937,0,"",shellscript,selection_keyboard +679,2191014,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",937,0,"-",shellscript,content +680,2191015,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",938,0,"",shellscript,selection_keyboard +681,2195263,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",938,0,"m",shellscript,content +682,2195264,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",939,0,"",shellscript,selection_keyboard +683,2195412,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",939,0,"a",shellscript,content +684,2195413,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",940,0,"",shellscript,selection_keyboard +685,2195735,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",940,0,"s",shellscript,content +686,2195736,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",941,0,"",shellscript,selection_keyboard +687,2195899,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",941,0,"k",shellscript,content +688,2195900,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",942,0,"",shellscript,selection_keyboard +689,2196277,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",942,0,"l",shellscript,content +690,2196277,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",943,0,"",shellscript,selection_keyboard +691,2196492,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",943,0,"i",shellscript,content 
+692,2196493,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",944,0,"",shellscript,selection_keyboard +693,2196635,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",944,0,"m",shellscript,content +694,2196636,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",945,0,"",shellscript,selection_keyboard +695,2197078,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",944,1,"",shellscript,content +696,2197218,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",943,1,"",shellscript,content +697,2197350,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",942,1,"",shellscript,content +698,2197879,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",942,0,"p",shellscript,content +699,2197879,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",943,0,"",shellscript,selection_keyboard +700,2198069,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",943,0,"r",shellscript,content +701,2198069,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",944,0,"",shellscript,selection_keyboard +702,2198171,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",944,0,"o",shellscript,content +703,2198172,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",945,0,"",shellscript,selection_keyboard +704,2198455,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",945,0,"g",shellscript,content +705,2198457,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",946,0,"",shellscript,selection_keyboard +706,2198629,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",946,0,"-",shellscript,content +707,2198630,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",947,0,"",shellscript,selection_keyboard +708,2199072,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",946,1,"",shellscript,content +709,2199213,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",945,1,"",shellscript,content +710,2199645,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",945,0,"b",shellscript,content +711,2199646,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",946,0,"",shellscript,selection_keyboard +712,2199882,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",946,0,"-",shellscript,content +713,2199883,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",947,0,"",shellscript,selection_keyboard 
+714,2200128,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",947,0,"f",shellscript,content +715,2200129,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",948,0,"",shellscript,selection_keyboard +716,2200420,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",948,0,"e",shellscript,content +717,2200421,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",949,0,"",shellscript,selection_keyboard +718,2200793,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",948,1,"",shellscript,content +719,2200925,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",948,0,"i",shellscript,content +720,2200926,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",949,0,"",shellscript,selection_keyboard +721,2201270,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",949,0,"x",shellscript,content +722,2201270,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",950,0,"",shellscript,selection_keyboard +723,2203295,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",960,0,"",shellscript,selection_mouse +724,2204306,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",960,0,"/",shellscript,content +725,2204307,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",961,0,"",shellscript,selection_keyboard +726,2205103,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",961,0,"$",shellscript,content +727,2205104,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",962,0,"",shellscript,selection_keyboard +728,2205601,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",962,0,"j",shellscript,content +729,2205602,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",963,0,"",shellscript,selection_keyboard +730,2205751,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",963,0,"o",shellscript,content +731,2205752,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",964,0,"",shellscript,selection_keyboard +732,2205950,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",964,0,"b",shellscript,content +733,2205950,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",965,0,"",shellscript,selection_keyboard +734,2207594,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",962,3,"slurm_job_id",shellscript,content +735,2210629,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",1488,0,"",shellscript,selection_mouse 
+736,2212744,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",393,0,"",shellscript,selection_mouse +737,2214624,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",398,1,"l",shellscript,selection_command +738,2214808,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",398,2,"lo",shellscript,selection_command +739,2214915,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",398,3,"log",shellscript,selection_command +740,2215204,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",398,4,"logs",shellscript,selection_command +741,2215418,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",403,5,"logs_",shellscript,selection_command +742,2215663,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",403,6,"logs_f",shellscript,selection_command +743,2215833,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",403,7,"logs_fr",shellscript,selection_command +744,2216030,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",403,8,"logs_fra",shellscript,selection_command +745,2216871,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",403,9,"logs_fran",shellscript,selection_command +746,2216996,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",403,10,"logs_franz",shellscript,selection_command +747,2221989,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",403,10,"logs_mihir",shellscript,content +748,2221992,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",248,10,"logs_franz",shellscript,selection_command +749,2222688,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",248,10,"logs_mihir",shellscript,content +750,2225165,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",268,0,"",shellscript,selection_mouse +751,2226407,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",260,0,"",shellscript,selection_mouse +752,2227521,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",933,0,"",shellscript,selection_mouse +753,2227643,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,7,"maskgit",shellscript,selection_mouse +754,2227824,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,16,"maskgit-maskprob",shellscript,selection_mouse +755,2227883,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,69,"maskgit-maskprob-fix/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR",shellscript,selection_mouse 
+756,2228205,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",918,19,"checkpoints/maskgit",shellscript,selection_mouse +757,2228264,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",929,8,"/maskgit",shellscript,selection_mouse +758,2228281,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,7,"maskgit",shellscript,selection_mouse +759,2228341,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,8,"maskgit-",shellscript,selection_mouse +760,2228360,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,16,"maskgit-maskprob",shellscript,selection_mouse +761,2228459,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,17,"maskgit-maskprob-",shellscript,selection_mouse +762,2228479,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,20,"maskgit-maskprob-fix",shellscript,selection_mouse +763,2231220,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",260,0,"",shellscript,selection_mouse +764,2232424,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +765,2233912,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +766,2236038,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",272,1,"m",shellscript,selection_command +767,2236535,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",288,1,"b",shellscript,selection_command +768,2236750,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",414,2,"bi",shellscript,selection_command +769,2236886,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",414,3,"big",shellscript,selection_command +770,2239180,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",414,4,"big-",shellscript,selection_command +771,2239457,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",414,5,"big-r",shellscript,selection_command +772,2239970,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",414,6,"big-ru",shellscript,selection_command +773,2240044,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",414,7,"big-run",shellscript,selection_command +774,2240153,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",414,8,"big-runs",shellscript,selection_command +775,2243415,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",414,8,"maskgit-maskprob-fix",shellscript,content 
+776,2243419,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",259,8,"big-runs",shellscript,selection_command +777,2244049,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",259,8,"maskgit-maskprob-fix",shellscript,content +778,2245643,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",302,0,"",shellscript,selection_mouse +779,2246492,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,0,"",shellscript,selection_mouse +780,2248620,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,1,"b",shellscript,selection_command +781,2248855,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,1,"b",shellscript,selection_command +782,2249136,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,2,"ba",shellscript,selection_command +783,2249634,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,3,"bat",shellscript,selection_command +784,2249648,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,4,"batc",shellscript,selection_command +785,2249703,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,5,"batch",shellscript,selection_command +786,2249713,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,6,"batchs",shellscript,selection_command +787,2249771,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,7,"batchsi",shellscript,selection_command +788,2249777,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,8,"batchsiz",shellscript,selection_command +789,2249843,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,9,"batchsize",shellscript,selection_command +790,2249843,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,10,"batchsize-",shellscript,selection_command +791,2249856,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,11,"batchsize-s",shellscript,selection_command +792,2249919,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,12,"batchsize-sc",shellscript,selection_command +793,2249920,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,13,"batchsize-sca",shellscript,selection_command +794,2249976,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,14,"batchsize-scal",shellscript,selection_command +795,2249986,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,15,"batchsize-scali",shellscript,selection_command 
+796,2250049,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,16,"batchsize-scalin",shellscript,selection_command +797,2250053,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,17,"batchsize-scaling",shellscript,selection_command +798,2250076,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,18,"batchsize-scaling-",shellscript,selection_command +799,2250134,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,19,"batchsize-scaling-l",shellscript,selection_command +800,2250135,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,20,"batchsize-scaling-li",shellscript,selection_command +801,2250192,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,21,"batchsize-scaling-lin",shellscript,selection_command +802,2250193,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,22,"batchsize-scaling-line",shellscript,selection_command +803,2250251,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,23,"batchsize-scaling-linea",shellscript,selection_command +804,2250251,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,24,"batchsize-scaling-linear",shellscript,selection_command +805,2250386,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,25,"batchsize-scaling-linear-",shellscript,selection_command +806,2250580,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,26,"batchsize-scaling-linear-l",shellscript,selection_command +807,2251570,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",468,26,"",shellscript,content +808,2251571,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,26,"",shellscript,content +809,2251575,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,0,"",shellscript,selection_command +810,2252987,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",299,0,"",shellscript,selection_command +811,2253175,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",299,1,"",shellscript,content +812,2254146,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",299,1,"",shellscript,content +813,2254563,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",439,0,"",shellscript,selection_command +814,2254888,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",438,0,"",shellscript,selection_command +815,2255076,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",438,1,"",shellscript,content 
+816,2255224,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",438,1,"",shellscript,content +817,2261670,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",475,0,"",shellscript,selection_mouse +818,2265133,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",480,0,"",shellscript,selection_mouse +819,2266180,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,0,"",shellscript,selection_mouse +820,2266599,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,1,"_",shellscript,selection_mouse +821,2266636,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,2,"_b",shellscript,selection_mouse +822,2266702,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,3,"_ba",shellscript,selection_mouse +823,2266702,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,4,"_bat",shellscript,selection_mouse +824,2266815,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,5,"_batc",shellscript,selection_mouse +825,2266873,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,6,"_batch",shellscript,selection_mouse +826,2266893,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,7,"_batch_",shellscript,selection_mouse +827,2266910,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,8,"_batch_s",shellscript,selection_mouse +828,2266926,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,9,"_batch_si",shellscript,selection_mouse +829,2266988,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,10,"_batch_siz",shellscript,selection_mouse +830,2267368,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,11,"_batch_size",shellscript,selection_mouse +831,2267774,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,12,"_batch_size_",shellscript,selection_mouse +832,2267791,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,13,"_batch_size_s",shellscript,selection_mouse +833,2267849,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,14,"_batch_size_sc",shellscript,selection_mouse +834,2267850,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,15,"_batch_size_sca",shellscript,selection_mouse +835,2267850,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,16,"_batch_size_scal",shellscript,selection_mouse 
+836,2267907,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,17,"_batch_size_scali",shellscript,selection_mouse +837,2267908,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,18,"_batch_size_scalin",shellscript,selection_mouse +838,2267910,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,19,"_batch_size_scaling",shellscript,selection_mouse +839,2267942,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,20,"_batch_size_scaling_",shellscript,selection_mouse +840,2267999,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,21,"_batch_size_scaling_l",shellscript,selection_mouse +841,2268000,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,22,"_batch_size_scaling_li",shellscript,selection_mouse +842,2268007,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,23,"_batch_size_scaling_lin",shellscript,selection_mouse +843,2268070,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,24,"_batch_size_scaling_line",shellscript,selection_mouse +844,2268129,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,25,"_batch_size_scaling_linea",shellscript,selection_mouse +845,2268189,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,26,"_batch_size_scaling_linear",shellscript,selection_mouse +846,2268208,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,27,"_batch_size_scaling_linear_",shellscript,selection_mouse +847,2268296,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,28,"_batch_size_scaling_linear_l",shellscript,selection_mouse +848,2268460,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,29,"_batch_size_scaling_linear_lr",shellscript,selection_mouse +849,2268893,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,29,"",shellscript,content +850,2270545,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,0,"_",shellscript,content +851,2270546,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",483,0,"",shellscript,selection_keyboard +852,2270940,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",483,0,"m",shellscript,content +853,2270940,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",484,0,"",shellscript,selection_keyboard +854,2271162,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",484,0,"a",shellscript,content +855,2271163,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",485,0,"",shellscript,selection_keyboard 
+856,2271269,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",485,0,"s",shellscript,content +857,2271270,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",486,0,"",shellscript,selection_keyboard +858,2271366,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",486,0,"k",shellscript,content +859,2271367,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",487,0,"",shellscript,selection_keyboard +860,2271690,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",487,0,"p",shellscript,content +861,2271691,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",488,0,"",shellscript,selection_keyboard +862,2271973,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",488,0,"i",shellscript,content +863,2271974,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",489,0,"",shellscript,selection_keyboard +864,2272280,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",489,0,"´",shellscript,content +865,2272281,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",490,0,"",shellscript,selection_keyboard +866,2272590,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",489,1,"",shellscript,content +867,2272590,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",489,0,"",shellscript,selection_keyboard +868,2272702,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",488,1,"",shellscript,content +869,2272865,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",488,0,"r",shellscript,content +870,2272866,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",489,0,"",shellscript,selection_keyboard +871,2272926,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",489,0,"o",shellscript,content +872,2272926,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",490,0,"",shellscript,selection_keyboard +873,2273496,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",490,0,"b",shellscript,content +874,2273497,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",491,0,"",shellscript,selection_keyboard +875,2274365,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",491,0,"_",shellscript,content +876,2274366,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",492,0,"",shellscript,selection_keyboard +877,2274634,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",492,0,"f",shellscript,content 
+878,2274635,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",493,0,"",shellscript,selection_keyboard +879,2274733,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",493,0,"i",shellscript,content +880,2274733,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",494,0,"",shellscript,selection_keyboard +881,2274855,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",494,0,"x",shellscript,content +882,2274856,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",495,0,"",shellscript,selection_keyboard +883,2278334,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",494,0,"",shellscript,selection_command +884,2279751,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",1310,0,"",shellscript,selection_mouse +885,2279752,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",1309,0,"",shellscript,selection_command +886,2298243,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit-maskprob-fix/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit-maskprob-fix/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_maskprob_fix_8_node\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=384 \\n --init_lr=0 \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-batch-size-scaling-linear-lr-8-node-$slurm_job_id \\n --tags dynamics batch-size-scaling linear-lr 8-node \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir\n",shellscript,tab +887,2299648,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +888,2299942,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",1148,0,"",shellscript,selection_mouse +889,2301282,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",488,7,"",shellscript,content 
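Record 886 above opens the full mask_prob_fix sbatch script in a tab. The core pattern in that script is worth isolating: the checkpoint directory is derived from SLURM's job metadata ($SLURM_JOB_NAME and $SLURM_JOB_ID), so each submission writes to a fresh, collision-free path. A minimal sketch of that pattern follows, in the same shell dialect; the filesystem roots below are placeholders, not the recorded workspace paths.

#!/usr/bin/env bash
#SBATCH --nodes=8
#SBATCH --ntasks-per-node=4
#SBATCH --gres=gpu:4
#SBATCH --time=48:00:00
#SBATCH --partition=accelerated

# Log the submitted script itself, as the recorded script does with `cat $0`.
cat "$0"

# Derive a unique checkpoint directory from SLURM job metadata,
# mirroring the recorded $job_name/$slurm_job_id layout.
job_name=$SLURM_JOB_NAME
slurm_job_id=$SLURM_JOB_ID
CHECKPOINT_DIR=/placeholder/checkpoints/$job_name/$slurm_job_id   # placeholder root
mkdir -p "$CHECKPOINT_DIR"

# One training task per GPU across all nodes; flags mirror the recorded invocation.
srun python train_dynamics.py \
    --save_ckpt \
    --ckpt_dir "$CHECKPOINT_DIR" \
    --batch_size=384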
+890,2301785,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",488,0,"i",shellscript,content +891,2301811,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,7,"",shellscript,content +892,2301877,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",482,0,"_batch_size_scaling_linear_lr",shellscript,content +893,2301880,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",438,0,"b",shellscript,content +894,2301898,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",438,0,"-",shellscript,content +895,2301966,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",299,0,"r",shellscript,content +896,2301969,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",299,0,"-",shellscript,content +897,2301993,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",442,0,"atchsize-scaling-linear-lr",shellscript,content +898,2301993,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",300,0,"batchsize-scaling-linear-l",shellscript,content +899,2302009,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",259,20,"big-runs",shellscript,content +900,2302069,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",414,20,"big-runs",shellscript,content +901,2302074,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",248,10,"logs_franz",shellscript,content +902,2302133,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",403,10,"logs_franz",shellscript,content +903,2302138,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",962,12,"job",shellscript,content +904,2302198,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",960,5,"",shellscript,content +905,2302201,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",948,2,"",shellscript,content +906,2302225,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",948,0,"e",shellscript,content +907,2302274,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",945,4,"",shellscript,content +908,2302289,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",945,0,"g-",shellscript,content +909,2302320,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",942,5,"",shellscript,content +910,2302385,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",942,0,"lim",shellscript,content +911,2302388,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",938,7,"",shellscript,content 
+912,2302402,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,8,"",shellscript,content +913,2302466,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,0,"am",shellscript,content +914,2302469,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,2,"",shellscript,content +915,2302498,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,0,"lr",shellscript,content +916,2302559,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,0,"-",shellscript,content +917,2302560,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,0,"linear",shellscript,content +918,2302592,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,0,"/",shellscript,content +919,2302608,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,0,"dynamics_cotraining",shellscript,content +920,2302672,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,0,"/",shellscript,content +921,2302673,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,0,"batchsize_scaling",shellscript,content +922,2302733,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,0,"/",shellscript,content +923,2302742,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,0,"runs",shellscript,content +924,2302771,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,0,"-",shellscript,content +925,2302796,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,0,"big",shellscript,content +926,2303594,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,0,"z",shellscript,content +927,2303595,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",931,0,"",shellscript,selection_keyboard +928,2303616,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",931,0,"z",shellscript,content +929,2303617,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",932,0,"",shellscript,selection_keyboard +930,2303686,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",932,0,"z",shellscript,content +931,2303687,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",933,0,"",shellscript,selection_keyboard +932,2304943,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch",930,3,"",shellscript,content +933,2309864,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab 
+934,2313084,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",503,0,"",shellscript,selection_mouse +935,2350364,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +936,2351505,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +937,2360204,"TERMINAL",0,0,"s",,terminal_output +938,2360383,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +939,2360532,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +940,2360654,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +941,2360867,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +942,2360980,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +943,2361282,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",,terminal_output +944,2361997,"TERMINAL",0,0,"[?25l\rslurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch[?25h",,terminal_output +945,2364902,"TERMINAL",0,0,"[?25la[1@b[?25h",,terminal_output +946,2375483,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",0,0,"",shellscript,tab +947,2376955,"slurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch",1505,0,"",shellscript,selection_mouse +948,2379018,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +949,2380690,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1484,0,"",shellscript,selection_mouse +950,2381355,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,0,"",shellscript,selection_mouse +951,2382082,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,1,"b",shellscript,selection_command +952,2382338,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,1,"b",shellscript,selection_command +953,2382695,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,2,"ba",shellscript,selection_command +954,2383188,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,3,"bat",shellscript,selection_command +955,2383252,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,4,"batc",shellscript,selection_command +956,2383253,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,5,"batch",shellscript,selection_command +957,2383277,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,6,"batch-",shellscript,selection_command +958,2383329,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,7,"batch-s",shellscript,selection_command +959,2383345,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,8,"batch-si",shellscript,selection_command +960,2383579,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,9,"batch-siz",shellscript,selection_command +961,2388299,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,0,"",shellscript,selection_command +962,2389178,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1484,5,"",shellscript,content +963,2389179,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,5,"",shellscript,content +964,2389517,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1479,1,"",shellscript,content +965,2389517,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,1,"",shellscript,content 
+966,2389834,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1478,4,"",shellscript,content +967,2389835,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,4,"",shellscript,content +968,2390109,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1474,1,"",shellscript,content +969,2390109,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,1,"",shellscript,content +970,2390467,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1473,7,"",shellscript,content +971,2390467,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,7,"",shellscript,content +972,2391484,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1466,0,"m",shellscript,content +973,2391485,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1412,0,"m",shellscript,content +974,2391485,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1413,0,"",shellscript,selection_keyboard +975,2391567,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1468,0,"a",shellscript,content +976,2391567,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1413,0,"a",shellscript,content +977,2391568,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1414,0,"",shellscript,selection_keyboard +978,2391642,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1470,0,"s",shellscript,content +979,2391642,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1414,0,"s",shellscript,content +980,2391643,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1415,0,"",shellscript,selection_keyboard +981,2391701,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1472,0,"k",shellscript,content +982,2391702,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1415,0,"k",shellscript,content +983,2391702,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1416,0,"",shellscript,selection_keyboard +984,2392601,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1474,0,"p",shellscript,content +985,2392601,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1416,0,"p",shellscript,content +986,2392602,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1417,0,"",shellscript,selection_keyboard +987,2392727,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1476,0,"r",shellscript,content +988,2392727,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1417,0,"r",shellscript,content +989,2392728,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1418,0,"",shellscript,selection_keyboard +990,2392925,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1478,0,"o",shellscript,content +991,2392926,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1418,0,"o",shellscript,content +992,2392926,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1419,0,"",shellscript,selection_keyboard +993,2393435,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1480,0,"b",shellscript,content +994,2393435,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1419,0,"b",shellscript,content +995,2393436,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1420,0,"",shellscript,selection_keyboard 
+996,2393799,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1482,0,"-",shellscript,content +997,2393800,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1420,0,"-",shellscript,content +998,2393800,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1421,0,"",shellscript,selection_keyboard +999,2394532,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1484,0,"f",shellscript,content +1000,2394533,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1421,0,"f",shellscript,content +1001,2394533,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1422,0,"",shellscript,selection_keyboard +1002,2394592,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1486,0,"i",shellscript,content +1003,2394592,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1422,0,"i",shellscript,content +1004,2394593,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1423,0,"",shellscript,selection_keyboard +1005,2394738,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1488,0,"x",shellscript,content +1006,2394738,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1423,0,"x",shellscript,content +1007,2394739,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1424,0,"",shellscript,selection_keyboard +1008,2396863,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1425,0,"",shellscript,selection_command +1009,2398358,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1491,6,"",shellscript,content +1010,2398358,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1425,6,"",shellscript,content +1011,2398754,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1485,1,"",shellscript,content +1012,2398755,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1425,1,"",shellscript,content +1013,2399354,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1484,2,"",shellscript,content +1014,2399355,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1425,2,"",shellscript,content +1015,2399868,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1482,1,"",shellscript,content +1016,2399869,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1425,1,"",shellscript,content +1017,2408780,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1424,0,"",shellscript,selection_command +1018,2425703,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1019,2432468,"TERMINAL",0,0,"\r",,terminal_output +1020,2433204,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",,terminal_output +1021,2434079,"TERMINAL",0,0,"\rslurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch\r\n[?2004l\r",,terminal_output +1022,2434153,"TERMINAL",0,0,"Submitted batch job 3370768\r\n]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +1023,2435169,"TERMINAL",0,0,"[?25lq[?25h",,terminal_output +1024,2435235,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1025,2435301,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1026,2435422,"TERMINAL",0,0,"[?25lu[?25h[?25le[?25h",,terminal_output +1027,2435621,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0402.localdomain: Wed Jul 23 15:27:54 2025JOBID PARTITION NAME USER ST\tTIME 
NODES NODELIST(REASON)3370768 accelerat train_dy tum_cte0 PD\t0:00\t 8 (Priority)3370715 dev_accel interact tum_cte0 R32:05\t 1 hkn0402",,terminal_output +1028,2436629,"TERMINAL",0,0,"56",,terminal_output +1029,2437610,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +1030,2464237,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +1031,2466855,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1032,2466966,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1033,2467124,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +1034,2467195,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +1035,2467264,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1036,2467407,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1037,2467486,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +1038,2467598,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1039,2468493,"TERMINAL",0,0,"3370768",,terminal_output +1040,2468899,"TERMINAL",0,0,"3370768\r\n[?2004l\r]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +1041,2476149,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1042,2482713,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +1043,2482797,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1044,2482879,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +1045,2483117,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1046,2483748,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +1047,2483843,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1048,2483939,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +1049,2484083,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0402:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0402 jafar_jobs]$ ",,terminal_output +1050,2485142,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1051,2485658,"TERMINAL",0,0,"[?25ly[?25h",,terminal_output +1052,2485829,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +1053,2485900,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1054,2486079,"TERMINAL",0,0,"[?25l-[?25h",,terminal_output +1055,2486259,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +1056,2486347,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1057,2486420,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +1058,2486563,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +1059,2486669,"TERMINAL",0,0,"[?25le[?25h[?25lr[?25h",,terminal_output +1060,2487501,"TERMINAL",0,0,"\r\n[?2004l\rsending incremental file list\r\n",,terminal_output +1061,2490339,"TERMINAL",0,0,"./\r\ngeneration_1753196800.0453017.gif\r\ngenie.py\r\noverfit_dir.zip\r\n",,terminal_output +1062,2490390,"TERMINAL",0,0,"sample.py\r\ntrain_dynamics.py\r\ntrain_lam.py\r\ntrain_tokenizer.py\r\n",,terminal_output 
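Records 1019 through 1040 capture the complete submit-monitor-cancel loop: sbatch prints "Submitted batch job 3370768", a one-second watch over `squeue --me` shows the job pending with reason (Priority), and `scancel 3370768` removes it. The same loop can be scripted as below; the awk field extraction is an assumption about sbatch's default output format, which the recorded "Submitted batch job" line matches.

# Submit and capture the job id from sbatch's "Submitted batch job <id>" line.
job_id=$(sbatch slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch | awk '{print $4}')
echo "submitted job $job_id"

# Poll this user's queue once per second, as the recorded watch session does.
watch -n 1 squeue --me

# Cancel by id if the submission should not proceed.
scancel "$job_id"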
+1063,2491172,"TERMINAL",0,0,"input_pipeline/download/openai/\r\ninput_pipeline/download/openai/download_actions_files.py\r\nmodels/\r\nmodels/dynamics.py\r\nmodels/lam.py\r\nmodels/tokenizer.py\r\noverfit_dir/\r\noverfit_dir/oai_sample_seed69_0.npy\r\noverfit_dir/oai_sample_seed69_1.npy\r\noverfit_dir/oai_sample_seed69_10.npy\r\noverfit_dir/oai_sample_seed69_11.npy\r\noverfit_dir/oai_sample_seed69_2.npy\r\noverfit_dir/oai_sample_seed69_3.npy\r\noverfit_dir/oai_sample_seed69_4.npy\r\noverfit_dir/oai_sample_seed69_5.npy\r\noverfit_dir/oai_sample_seed69_6.npy\r\noverfit_dir/oai_sample_seed69_7.npy\r\noverfit_dir/oai_sample_seed69_8.npy\r\noverfit_dir/oai_sample_seed69_9.npy\r\noverfit_dir/sample_oai_dataset.npy\r\noverfit_dir/sample_oai_dataset_seed42.npy\r\nslurm/dev/\r\nslurm/dev/alfred/\r\nslurm/dev/alfred/berlin/\r\nslurm/dev/alfred/berlin/input_pipeline_ws/\r\nslurm/dev/alfred/berlin/input_pipeline_ws/download_10xx.sbatch\r\nslurm/dev/alfred/berlin/input_pipeline_ws/download_6xx.sbatch\r\nslurm/dev/alfred/berlin/input_pipeline_ws/download_7xx.sbatch\r\nslurm/dev/alfred/berlin/input_pipeline_ws/download_8xx.sbatch\r\nslurm/dev/alfred/berlin/input_pipeline_ws/download_9xx.sbatch\r\nslurm/dev/alfred/berlin/input_pipeline_ws/download_index_json.sh\r\nslurm/dev/alfred/berlin/input_pipeline_ws/mp4_to_array_record_open_ai.sbatch\r\nslurm/dev/alfred/berlin/input_pipeline_ws/mp4_to_array_record_open_ai_chunked.sbatch\r\nslurm/dev/alfred/berlin/input_pipeline_ws/mp4_to_array_record_open_ai_dev.sbatch\r\nslurm/dev/alfred/berlin/input_pipeline_ws/mp4_to_array_record_open_ai_to_fast.sbatch\r\nslurm/dev/alfred/berlin/input_pipeline_ws/mp4_to_npy_open_ai.sbatch\r\nslurm/dev/alfred/berlin/job_requeueing/\r\nslurm/dev/alfred/berlin/job_requeueing/cpu_requeue_dev.sbatch\r\nslurm/dev/alfred/berlin/job_requeueing/hello_world.py\r\nslurm/dev/alfred/berlin/job_requeueing/notes.md\r\nslurm/dev/alfred/berlin/job_requeueing/train_lam_chain_dev.sbatch\r\nslurm/dev/alfred/berlin/job_requeueing/train_lam_requeue_dev.sbatch\r\nslurm/dev/alfred/berlin/job_requeueing/train_lam_requeue_dev_gemini.sbatch\r\nslurm/dev/alfred/berlin/job_requeueing/dynamic_lr_tuning/\r\nslurm/dev/alfred/berlin/job_requeueing/dynamic_lr_tuning/lr_tuning_dynamics.sh\r\nslurm/dev/alfred/berlin/job_requeueing/dynamic_lr_tuning/train_dynacmis_lr_general.sbatch\r\nslurm/dev/alfred/berlin/job_requeueing/dynamic_validation/\r\nslurm/dev/alfred/berlin/job_requeueing/dynamic_validation/lr_tuning_dynamics.sh\r\nslurm/dev/alfred/berlin/job_requeueing/dynamic_validation/train_dynacmis_lr_general.sbatch\r\nslurm/dev/alfred/berlin/job_requeueing/lam_lr_tuning/\r\nslurm/dev/alfred/berlin/job_requeueing/lam_lr_tuning/lr_tuning_lam.sh\r\nslurm/dev/alfred/berlin/job_requeueing/lam_lr_tuning/train_lam_lr_general.sbatch\r\nslurm/dev/alfred/berlin/job_requeueing/tokenizer_lr_tuning/\r\nslurm/dev/alfred/berlin/job_requeueing/tokenizer_lr_tuning/lr_tuning_tokenizer.sh\r\nslurm/dev/alfred/berlin/job_requeueing/tokenizer_lr_tuning/train_tokenizer_lr_general.sbatch\r\nslurm/dev/alfred/berlin/validation/\r\nslurm/dev/alfred/berlin/validation/tokenizer_lr_tuning/\r\nslurm/dev/alfred/berlin/validation/tokenizer_lr_tuning/tokeinizer_val.sh\r\nslurm/dev/alfred/berlin/validation/tokenizer_lr_tuning/train_tokenizer_lr_general.sbatch\r\nslurm/dev/alfred/berlin/validation/tokenizer_without_optimizer/\r\nslurm/dev/alfred/berlin/validation/tokenizer_without_optimizer/lr_tuning_tokenizer.sh\r\nslurm/dev/alfred/berlin/validation/tokenizer_without_optimizer/train_tokenizer_lr_gen
eral.sbatch\r\nslurm/dev/alfred/horeka/\r\nslurm/dev/alfred/horeka/allocate/\r\nslurm/dev/alfred/horeka/allocate/cpu.sh\r\nslurm/dev/alfred/horeka/allocate/multigpu_gpu.sh\r\nslurm/dev/alfred/horeka/allocate/single_gpu.sh\r\nslurm/dev/alfred/horeka/batchsize_scaling/\r\nslurm/dev/alfred/horeka/batchsize_scaling/adjusted_lr/\r\nslurm/dev/alfred/horeka/batchsize_scaling/adjusted_lr/train_tokenizer_16_nodes.sbatch\r\nslurm/dev/alfred/horeka/batchsize_scaling/adjusted_lr/train_tokenizer_1_nodes.sbatch\r\nslurm/dev/alfred/horeka/batchsize_scaling/adjusted_lr/train_tokenizer_1_nodes.sh\r\nslurm/dev/alfred/horeka/batchsize_scaling/adjusted_lr/train_tokenizer_2_nodes.sbatch\r\nslurm/dev/alfred/horeka/batchsize_scaling/adjusted_lr/train_tokenizer_32_nodes.sbatch\r\nslurm/dev/alfred/horeka/batchsize_scaling/adjusted_lr/train_tokenizer_4_nodes.sbatch\r\nslurm/dev/alfred/horeka/batchsize_scaling/adjusted_lr/train_tokenizer_4_nodes.sh\r\nslurm/dev/alfred/horeka/batchsize_scaling/adjusted_lr/train_tokenizer_4_nodes_frequent_chkpt.sbatch\r\nslurm/dev/alfred/horeka/batchsize_scaling/adjusted_lr/train_tokenizer_64_nodes.sbatch\r\nslurm/dev/alfred/horeka/batchsize_scaling/adjusted_lr/train_tokenizer_8_nodes.sbatch\r\nslurm/dev/alfred/horeka/batchsize_scaling/const_lr/\r\nslurm/dev/alfred/horeka/batchsize_scaling/const_lr/train_tokenizer_16_nodes.sbatch\r\nslurm/dev/alfred/horeka/batchsize_scaling/const_lr/train_tokenizer_1_nodes.sbatch\r\nslurm/dev/alfred/horeka/batchsize_scaling/const_lr/train_tokenizer_2_nodes.sbatch\r\nslurm/dev/alfred/horeka/batchsize_scaling/const_lr/train_tokenizer_32_nodes.sbatch\r\nslurm/dev/alfred/horeka/batchsize_scaling/const_lr/train_tokenizer_4_nodes.sbatch\r\nslurm/dev/alfred/horeka/batchsize_scaling/const_lr/train_tokenizer_8_nodes.sbatch\r\nslurm/dev/alfred/horeka/batchsize_scaling/oai_subset/\r\nslurm/dev/alfred/horeka/batchsize_scaling/oai_subset/train_tokenizer_1_nodes.sh\r\nslurm/dev/alfred/horeka/batchsize_scaling/oai_subset/train_tokenizer_2_nodes.sh\r\nslurm/dev/alfred/horeka/batchsize_scaling/oai_subset/train_tokenizer_2_nodes_samples_500.sbatch\r\nslurm/dev/alfred/horeka/checkpoint_fix/\r\nslurm/dev/alfred/horeka/checkpoint_fix/train_tokenizer.sh\r\nslurm/dev/alfred/horeka/coinrun/\r\nslurm/dev/alfred/horeka/coinrun/train_tokenizer_coinrun.sbatch\r\nslurm/dev/alfred/horeka/coinrun/base/\r\nslurm/dev/alfred/horeka/coinrun/base/train_dynamics_coinrun.sbatch\r\nslurm/dev/alfred/horeka/coinrun/base/train_lam_coinrun.sbatch\r\nslurm/dev/alfred/horeka/coinrun/base/train_tokenizer_coinrun.sbatch\r\nslurm/dev/alfred/horeka/coinrun/latent_action_ablation/\r\nslurm/dev/alfred/horeka/coinrun/latent_action_ablation/train_dynamics_coinrun.sbatch\r\nslurm/dev/alfred/horeka/coinrun/latent_action_ablation/train_dynamics_coinrun.sh\r\nslurm/dev/alfred/horeka/coinrun/latent_action_ablation/train_lam_12.sbatch\r\nslurm/dev/alfred/horeka/coinrun/latent_action_ablation/train_lam_24.sbatch\r\nslurm/dev/alfred/horeka/coinrun/latent_action_ablation/train_lam_48.sbatch\r\nslurm/dev/alfred/horeka/coinrun/latent_action_ablation/train_lam_6.sbatch\r\nslurm/dev/alfred/horeka/coinrun/latent_action_ablation/train_lam_6.sh\r\nslurm/dev/alfred/horeka/coinrun/latent_action_ablation/train_tokenizer_coinrun.sbatch\r\nslurm/dev/alfred/horeka/coinrun/latent_action_ablation/train_tokenizer_coinrun.sh\r\nslurm/dev/alfred/horeka/generate_single_samples/\r\nslurm/dev/alfred/horeka/generate_single_samples/generate_samples_50k.sh\r\nslurm/dev/alfred/horeka/input_pipeline_local/\r\nslurm/dev/alfred/horeka/input
_pipeline_local/download_10xx.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_local/download_6xx.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_local/download_7xx.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_local/download_8xx.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_local/download_9xx.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_local/download_index_json.sh\r\nslurm/dev/alfred/horeka/input_pipeline_local/mp4_to_array_record_open_ai_6xx.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_local/mp4_to_array_record_open_ai_dev.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_local/mp4_to_npy_open_ai_10xx.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_local/mp4_to_npy_open_ai_6xx.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_local/mp4_to_npy_open_ai_7xx.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_local/mp4_to_npy_open_ai_8xx.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_local/mp4_to_npy_open_ai_9xx.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_ws/\r\nslurm/dev/alfred/horeka/input_pipeline_ws/actions/\r\nslurm/dev/alfred/horeka/input_pipeline_ws/actions/download_actions.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_ws/actions/download_actions_all.sh\r\nslurm/dev/alfred/horeka/input_pipeline_ws/videos/\r\nslurm/dev/alfred/horeka/input_pipeline_ws/videos/download_10xx.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_ws/videos/download_6xx.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_ws/videos/download_7xx.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_ws/videos/download_8xx.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_ws/videos/download_9xx.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_ws/videos/download_index_json.sh\r\nslurm/dev/alfred/horeka/input_pipeline_ws/videos/mp4_to_array_record_open_ai.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_ws/videos/mp4_to_array_record_open_ai_chunked.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_ws/videos/mp4_to_array_record_open_ai_dev.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_ws/videos/mp4_to_array_record_open_ai_to_fast.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_ws/videos/mp4_to_npy_open_ai.sbatch\r\nslurm/dev/alfred/horeka/job_chaining/\r\nslurm/dev/alfred/horeka/job_chaining/chain_example.sh\r\nslurm/dev/alfred/horeka/job_requeueing/\r\nslurm/dev/alfred/horeka/job_requeueing/train_lam_chain_dev.sbatch\r\nslurm/dev/alfred/horeka/job_requeueing/train_lam_requeue_dev.sbatch\r\nslurm/dev/alfred/horeka/job_requeueing/example_tokenizer_lr_tuning/\r\nslurm/dev/alfred/horeka/job_requeueing/example_tokenizer_lr_tuning/lr_tuning_tokenizer.sh\r\nslurm/dev/alfred/horeka/job_requeueing/example_tokenizer_lr_tuning/train_tokenizer_lr_general.sbatch\r\nslurm/dev/alfred/horeka/job_requeueing/lr_tuning/\r\nslurm/dev/alfred/horeka/job_requeueing/lr_tuning/tokenizer_lr_tuning.py\r\nslurm/dev/alfred/horeka/job_requeueing/lr_tuning/tokenizer/\r\nslurm/dev/alfred/horeka/job_requeueing/lr_tuning/tokenizer/lr_tuning_tokenizer.sh\r\nslurm/dev/alfred/horeka/job_requeueing/lr_tuning/tokenizer/train_tokenizer_lr_general.sbatch\r\nslurm/dev/alfred/horeka/masked_lim/\r\nslurm/dev/alfred/horeka/masked_lim/masked_lim_dev.sbatch\r\nslurm/dev/alfred/horeka/masked_lim/masked_lim_yolo.sbatch\r\nslurm/dev/alfred/horeka/masked_lim/masked_lim_yolo.sh\r\nslurm/dev/alfred/horeka/overfit_minecraft_single_sample/\r\nslurm/dev/alfred/horeka/overfit_minecraft_single_sample/train_dynamics_overfit_sample.sbatch\r\nslurm/dev/alfred/horeka/overfit_minecraft_single_sample/train_dynamics_overfit_sample.sh\r\nslurm/dev/alfred/horeka/overfit_
run_ds_oai/\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/lam/\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/lam/train_lam_dev.sh\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/lam/train_lam_init.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/lam/train_lam_samples_12.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/lam/train_lam_samples_12288.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/lam/train_lam_samples_1536.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/lam/train_lam_samples_24576.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/lam/train_lam_samples_3072.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/lam/train_lam_samples_384.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/lam/train_lam_samples_49152.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/lam/train_lam_samples_6144.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/lam/train_lam_samples_96.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/lam/from_ckpt/\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/lam/from_ckpt/train_lam_dev.sh\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/lam/from_ckpt/train_lam_samples_12.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/tokenizer/\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/tokenizer/train_tokenizer_dev.sh\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_12.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_12288.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_1536.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_24576.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_3072.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_384.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_49152.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_6144.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_96.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/tokenizer/from_ckpt/\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/tokenizer/from_ckpt/train_tokenizer_samples_12.sbatch\r\nslurm/dev/alfred/horeka/overfit_run_ds_oai/tokenizer/from_ckpt/train_tokenizer_samples_12.sh\r\nslurm/dev/alfred/horeka/overfit_sample/\r\nslurm/dev/alfred/horeka/overfit_sample/train_dynamics_overfit_sample.sbatch\r\nslurm/dev/alfred/horeka/overfit_sample/train_lam_overfit_sample.sbatch\r\nslurm/dev/alfred/horeka/overfit_sample/train_tokenizer_overfit_sample.sbatch\r\nslurm/dev/alfred/horeka/overfit_sample/train_tokenizer_overfit_sample_size_0.6_mio.sbatch\r\nslurm/dev/alfred/horeka/overfit_sample/train_tokenizer_overfit_sample_size_0_5.sh\r\nslurm/dev/alfred/horeka/overfit_sample/train_tokenizer_overfit_sample_size_21_mio.sbatch\r\nslurm/dev/alfred/horeka/overfit_sample/train_tokenizer_overfit_sample_size_2_mio.sbatch\r\nslurm/dev/alfred/horeka/overfit_sample/train_tokenizer_overfit_sample_size_9_mio.sbatch\r\nslurm/dev/alfred/horeka/overfit_sample/train_tokenizer_overfit_sample_size_small_mio.sbatch\r\nslurm/dev/alfred/horeka/preprocess/\r\nslurm/dev/alfred/horeka/preprocess/mp4_to_npy_10xx.sbatch\r\nslurm/dev/alfred/horeka/preprocess/mp4_to_npy_6xx.sbatch\r\nslurm/dev/alfred/horeka/preprocess/mp4_to_npy_7xx.sbatch\r\nslurm/dev/alfred/horeka/preprocess/mp4_to_npy_8xx.sbatch\r\nslurm/dev/alfred/horeka/preprocess/mp4_to_npy_9xx.sbatch\r\nslurm/dev/alfred/h
oreka/preprocess/mp4_to_npy_open_ai.sbatch\r\nslurm/dev/alfred/horeka/preprocess/mp4_to_npy_test.sbatch\r\nslurm/dev/alfred/horeka/preprocess/npy_to_tfrecord.sbatch\r\nslurm/dev/alfred/horeka/preprocess/npy_to_tfrecord_10xx.sbatch\r\nslurm/dev/alfred/horeka/preprocess/npy_to_tfrecord_6xx.sbatch\r\nslurm/dev/alfred/horeka/preprocess/npy_to_tfrecord_7xx.sbatch\r\nslurm/dev/alfred/horeka/preprocess/npy_to_tfrecord_8xx.sbatch\r\nslurm/dev/alfred/horeka/preprocess/npy_to_tfrecord_9xx.sbatch\r\nslurm/dev/alfred/horeka/preprocess/preprocess_video_splitter.sbatch\r\nslurm/dev/alfred/horeka/preprocess/preprocess_video_to_npy.sbatch\r\nslurm/dev/alfred/horeka/preprocess/preprocess_video_to_npy_test.sh\r\nslurm/dev/alfred/horeka/procgen/\r\nslurm/dev/alfred/horeka/procgen/cp_script.sh\r\nslurm/dev/alfred/horeka/procgen/data_gen_gym_acrobot.sbatch\r\nslurm/dev/alfred/horeka/procgen/data_gen_gym_carracing.sbatch\r\nslurm/dev/alfred/horeka/procgen/data_gen_gym_coinrun.sbatch\r\nslurm/dev/alfred/horeka/procgen/data_gen_gym_coinrun.sh\r\nslurm/dev/alfred/horeka/procgen/data_gen_gym_mountaincar copy.sbatch\r\nslurm/dev/alfred/horeka/procgen/data_gen_gym_mountaincar.sbatch\r\nslurm/dev/alfred/horeka/procgen/data_gen_gym_multi.sbatch\r\nslurm/dev/alfred/horeka/procgen/data_gen_gym_pendulum.sbatch\r\nslurm/dev/alfred/horeka/rsync/\r\nslurm/dev/alfred/horeka/rsync/rsync.sbatch\r\nslurm/dev/alfred/horeka/rsync/rsync_tf_records.sbatch\r\nslurm/dev/alfred/horeka/sample_jafar/\r\nslurm/dev/alfred/horeka/sample_jafar/sample_coinrun.sbatch\r\nslurm/dev/alfred/horeka/sampling/\r\nslurm/dev/alfred/horeka/sampling/sample_coinrun.sh\r\nslurm/dev/alfred/horeka/sampling/sample_knoms.sbatch\r\nslurm/dev/alfred/horeka/sampling/sample_knoms.sh\r\nslurm/dev/alfred/horeka/sampling/sample_knoms_mihir.sh\r\nslurm/dev/alfred/horeka/train_dyn/\r\nslurm/dev/alfred/horeka/train_dyn/train_dyn_knoms_full.sbatch\r\nslurm/dev/alfred/horeka/train_dyn_dev/\r\nslurm/dev/alfred/horeka/train_dyn_dev/train_dyn.sh\r\nslurm/dev/alfred/horeka/train_dyn_dev/train_dyn_checkpt_loading_test_dev.sbatch\r\nslurm/dev/alfred/horeka/train_dyn_dev/train_dyn_dev.sbatch\r\nslurm/dev/alfred/horeka/train_dyn_dev/train_dyn_single_batch.sh\r\nslurm/dev/alfred/horeka/train_lam/\r\nslurm/dev/alfred/horeka/train_lam/train_lam_full.sbatch\r\nslurm/dev/alfred/horeka/train_lam_dev/\r\nslurm/dev/alfred/horeka/train_lam_dev/train_lam.sh\r\nslurm/dev/alfred/horeka/train_lam_dev/train_lam_dev.sbatch\r\nslurm/dev/alfred/horeka/train_lam_dev/train_lam_full_dev.sbatch\r\nslurm/dev/alfred/horeka/train_lam_dev/train_lam_single_batch.sh\r\nslurm/dev/alfred/horeka/train_tokenizer/\r\nslurm/dev/alfred/horeka/train_tokenizer/train_lam_oai_dev 
copy.sbatch\r\nslurm/dev/alfred/horeka/train_tokenizer/train_lam_oai_dev.sbatch\r\nslurm/dev/alfred/horeka/train_tokenizer/train_tokenizer_knoms_overfit_single_batch.sbatch\r\nslurm/dev/alfred/horeka/train_tokenizer/train_tokenizer_knoms_overfit_single_sample.sbatch\r\nslurm/dev/alfred/horeka/train_tokenizer/train_tokenizer_knoms_overfit_tfrecord_10.sbatch\r\nslurm/dev/alfred/horeka/train_tokenizer/train_tokenizer_knoms_overfit_tfrecord_full.sbatch\r\nslurm/dev/alfred/horeka/train_tokenizer_dev/\r\nslurm/dev/alfred/horeka/train_tokenizer_dev/train_tokenizer.sbatch\r\nslurm/dev/alfred/horeka/train_tokenizer_dev/train_tokenizer.sh\r\nslurm/dev/alfred/horeka/train_tokenizer_dev/train_tokenizer_h100.sbatch\r\nslurm/dev/alfred/horeka/train_tokenizer_dev/train_tokenizer_overfit_tfrecord_10.sh\r\nslurm/dev/alfred/horeka/train_tokenizer_dev/train_tokenizer_single_batch.sh\r\nslurm/dev/alfred/horeka/validation/\r\nslurm/dev/alfred/horeka/validation/tokenizer_lr_tuning/\r\nslurm/dev/alfred/horeka/validation/tokenizer_lr_tuning/lr_tuning_tokenizer.sh\r\nslurm/dev/alfred/horeka/validation/tokenizer_lr_tuning/train_tokenizer_lr_general.sbatch\r\nslurm/dev/alfred/horeka/validation/tokenizer_without_optimizer/\r\nslurm/dev/alfred/horeka/validation/tokenizer_without_optimizer/lr_tuning_tokenizer.sh\r\nslurm/dev/alfred/horeka/validation/tokenizer_without_optimizer/train_tokenizer_lr_general.sbatch\r\nslurm/dev/alfred/parallel_lam_dynamics_training/\r\nslurm/dev/alfred/parallel_lam_dynamics_training/train_lam_init_params.sbatch\r\nslurm/dev/alfred/parallel_lam_dynamics_training/train_parallel_lam_dynamics.sbatch\r\nslurm/dev/alfred/parallel_lam_dynamics_training/train_parallel_lam_dynamics_lr_1e-4.sbatch\r\nslurm/dev/alfred/parallel_lam_dynamics_training/train_parallel_lam_dynamics_mock.sbatch\r\nslurm/jobs/\r\nslurm/jobs/alfred/\r\nslurm/jobs/alfred/berlin/\r\nslurm/jobs/alfred/berlin/download_actions.sbatch\r\nslurm/jobs/alfred/berlin/download_actions.sh\r\nslurm/jobs/alfred/horeka/\r\nslurm/jobs/alfred/horeka/preproc_mp4_to_npy_open_ai.sbatch\r\nslurm/jobs/alfred/horeka/preproc_npy_to_tfrecord_open_ai.sbatch\r\nslurm/jobs/alfred/horeka/train_dyn_knoms.sbatch\r\nslurm/jobs/alfred/horeka/train_lam_knoms.sbatch\r\nslurm/jobs/alfred/horeka/train_tokenizer_knoms.sbatch\r\nslurm/jobs/alfred/horeka/lr_tuning/\r\nslurm/jobs/alfred/horeka/lr_tuning/tokenizer/\r\nslurm/jobs/alfred/horeka/lr_tuning/tokenizer/lr_tuning_tokenizer.sh\r\nslurm/jobs/alfred/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4.sbatch\r\nslurm/jobs/alfred/horeka/lr_tuning/tokenizer/train_tokenizer_lr_5e-5.sbatch\r\nslurm/jobs/alfred/horeka/lr_tuning/tokenizer/train_tokenizer_lr_general.sbatch\r\nslurm/jobs/franz/\r\nslurm/jobs/franz/horeka/\r\nslurm/jobs/franz/horeka/batchsize_scaling/\r\nslurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/\r\nslurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/\r\nslurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/start_runs.sh\r\nslurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/tester.sh\r\nslurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_16_nodes.sbatch\r\nslurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_1_nodes.sbatch\r\nslurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_2_nodes.sbatch\r\nslurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/li
near_lr/train_dynamics_4_nodes.sbatch\r\nslurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/linear_lr/train_dynamics_8_nodes.sbatch\r\nslurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/sqrt_lr/\r\nslurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/sqrt_lr/start_runs.sh\r\nslurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/sqrt_lr/tester.sh\r\nslurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/sqrt_lr/train_dynamics_16_nodes.sbatch\r\nslurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/sqrt_lr/train_dynamics_1_nodes.sbatch\r\nslurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/sqrt_lr/train_dynamics_2_nodes.sbatch\r\nslurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/sqrt_lr/train_dynamics_4_nodes.sbatch\r\nslurm/jobs/franz/horeka/batchsize_scaling/dynamics_cotraining_new_arch/sqrt_lr/train_dynamics_8_nodes.sbatch\r\nslurm/jobs/mihir/horeka/\r\nslurm/jobs/mihir/horeka/batchsize_scaling/dynamics_cotraining/sqrt_lr/tester.sh\r\nslurm/jobs/mihir/horeka/mask_prob_fix/\r\nslurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch\r\nslurm/jobs/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch\r\nslurm/jobs/mihir/horeka/yolo-runs/sampling.sh\r\nslurm/utils/\r\nslurm/utils/create_dev_dir.sh\r\ntests/\r\ntests/test_dataloader.py\r\nutils/\r\nutils/dataloader.py\r\nutils/dataset_utils.py\r\nutils/lr_utils.py\r\nutils/nn.py\r\n",,terminal_output +1064,2491729,"TERMINAL",0,0,"\r\nsent 36,060,362 bytes received 5,360 bytes 8,014,604.89 bytes/sec\r\ntotal size is 185,091,063 speedup is 5.13\r\n]0;tum_cte0515@hkn0402:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0402 jafar_jobs]$ ",,terminal_output +1065,2493802,"TERMINAL",0,0,"sync-runner",,terminal_output +1066,2494125,"TERMINAL",0,0,"runner",,terminal_output +1067,2494509,"TERMINAL",0,0,"scancel 3370768",,terminal_output +1068,2495022,"TERMINAL",0,0,"queue",,terminal_output +1069,2495552,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",,terminal_output +1070,2499904,"TERMINAL",0,0,"\r\n[?2004l\rSubmitted batch job 3370769\r\n]0;tum_cte0515@hkn0402:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0402 jafar_jobs]$ ",,terminal_output +1071,2503005,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1072,2503070,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +1073,2503271,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1074,2503338,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +1075,2503503,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +1076,2504229,"TERMINAL",0,0,"[?25l/[?25h",,terminal_output +1077,2504512,"TERMINAL",0,0,"[?25lj[?25h[?25la[?25h",,terminal_output +1078,2504707,"TERMINAL",0,0,"far",,terminal_output +1079,2505092,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +1080,2561098,"TERMINAL",0,0,"bash",,terminal_focus +1081,2562190,"TERMINAL",0,0,"queue",,terminal_command +1082,2562232,"TERMINAL",0,0,"]633;E;2025-07-23 15:30:01 queue;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C",,terminal_output +1083,2562302,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Wed Jul 23 15:30:01 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3370769 accelerat train_dy tum_cte0 PD\t0:00\t 8 (Priority)3370715 dev_accel interact tum_cte0 R34:12\t 1 hkn0402",,terminal_output +1084,2563346,"TERMINAL",0,0,"23",,terminal_output 
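Records 1050 through 1064 show a `sync-runner` command producing an rsync incremental file list that mirrors the working tree into the separate ~/Projects/jafar_jobs checkout, from which the corrected job is then resubmitted (records 1069 and 1070, "Submitted batch job 3370769"). The alias itself is never expanded in the log, so the following one-liner is only a plausible reconstruction under that assumption; the source and destination paths come from the recorded shell prompts.

# Assumed expansion of the recorded `sync-runner` alias: mirror the working tree
# into the runner checkout before submitting from there.
rsync -av --exclude '.git' ~/Projects/jafar/ ~/Projects/jafar_jobs/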
(records 1085-1120, 1123, 1131, 1135-1137, 1139, 1141-1146: one-character watch-timer ticks from the squeue display, omitted)
+1121,2601183,"TERMINAL",0,0,"srun",,terminal_focus
+1122,2601872,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output
+1124,2602049,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output
+1125,2602241,"TERMINAL",0,0,"[?25la[?25h",,terminal_output
+1126,2602393,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output
+1127,2602474,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output
+1128,2602594,"TERMINAL",0,0,"[?25le[?25h",,terminal_output
+1129,2602674,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output
+1130,2602783,"TERMINAL",0,0,"[?25l [?25h",,terminal_output
+1132,2603074,"TERMINAL",0,0,"3370769",,terminal_output
+1133,2603382,"TERMINAL",0,0,"3370769\r\n[?2004l\r]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output
+1134,2604054,"TERMINAL",0,0,"\r315 dev_accel interact R34:54\t 1 hkn0402",,terminal_output
+1138,2607908,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab
+1140,2608422,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1186,0,"",shellscript,selection_mouse
(records 1147-1353: one-character watch-timer ticks from the squeue display, omitted)
(records 1354-1404: one-character watch-timer ticks from the squeue display, omitted)
+1405,2884609,"TERMINAL",0,0,"watch",,terminal_focus
+1406,2884870,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output
+1407,2889399,"TERMINAL",0,0,"python",,terminal_command
+1408,2889467,"TERMINAL",0,0,"]633;E;2025-07-23 15:35:28 python;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;CPython 3.9.18 (main, Jun 27 2025, 00:00:00) \r\n[GCC 11.4.1 20231218 (Red Hat 11.4.1-4)] on linux\r\nType ""help"", ""copyright"", ""credits"" or ""license"" for more information.\r\n>>> ",,terminal_output
+1409,2889948,"TERMINAL",0,0,"8",,terminal_output
+1410,2891014,"TERMINAL",0,0,"[?25l*[?25h",,terminal_output
+1411,2891450,"TERMINAL",0,0,"[?25l4[?25h",,terminal_output
+1412,2893024,"TERMINAL",0,0,"\r\n32\r\n>>> ",,terminal_output
+1413,2900093,"TERMINAL",0,0,"3",,terminal_output
+1414,2900220,"TERMINAL",0,0,"[?25l2[?25h",,terminal_output
+1415,2900543,"TERMINAL",0,0,"[?25l*[?25h",,terminal_output
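The scratch Python session here cross-checks the sbatch header: 8 nodes x 4 GPUs per node = 32 GPUs, and the product typed next, 32*24*30 = 23040 (result just below), reads most naturally as GPU-hours over a 30-day window; that interpretation is an assumption, not stated in the recording. The same arithmetic in shell:

    # Back-of-envelope from the #SBATCH header (--nodes=8, --gres=gpu:4).
    nodes=8; gpus_per_node=4
    gpus=$((nodes * gpus_per_node))   # 32
    echo $((gpus * 24 * 30))          # 23040, e.g. GPU-hours over 30 days (assumed reading)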
+1416,2904066,"TERMINAL",0,0,"[?25l2[?25h",,terminal_output +1417,2904207,"TERMINAL",0,0,"[?25l4[?25h",,terminal_output +1418,2904583,"TERMINAL",0,0,"[?25l*[?25h",,terminal_output +1419,2905739,"TERMINAL",0,0,"[?25l3[?25h[?25l0[?25h",,terminal_output +1420,2907689,"TERMINAL",0,0,"\r\n23040\r\n>>> ",,terminal_output +1421,3198347,"TERMINAL",0,0,"\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1422,3205079,"TERMINAL",0,0,"srun",,terminal_focus +1423,3205287,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +1424,3206831,"TERMINAL",0,0,"q",,terminal_output +1425,3206902,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1426,3207020,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1427,3207125,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1428,3207190,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1429,3207528,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0402.localdomain: Wed Jul 23 15:40:46 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3370715 dev_accel interact tum_cte0 R44:57\t 1 hkn0402",,terminal_output +1430,3208549,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +1431,3210278,"TERMINAL",0,0,"bash",,terminal_focus +1432,3212942,"TERMINAL",0,0,"cd slurm/",,terminal_command +1433,3214220,"TERMINAL",0,0,"git pull",,terminal_command +1434,3214268,"TERMINAL",0,0,"]633;E;2025-07-23 15:40:53 git pull;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C",,terminal_output +1435,3215771,"TERMINAL",0,0,"Already up to date.\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar/slurm]633;D;0",,terminal_output +1436,3221433,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1437,3339618,"TERMINAL",0,0,"srun",,terminal_focus +1438,3340187,"TERMINAL",0,0,"c",,terminal_output +1439,3340273,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +1440,3340505,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1441,3340795,"TERMINAL",0,0,"dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sh",,terminal_output +1442,3341040,"TERMINAL",0,0,"\rdev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sh\r\n[?2004l\rbash: cd: dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sh: No such file or directory\r\n]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +1443,3346692,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1444,3346740,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +1445,3347270,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1446,3347514,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1447,3347593,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +1448,3347701,"TERMINAL",0,0,"urm/",,terminal_output +1449,3348103,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0402:~/Projects/jafar/slurm[?2004h(jafar) [tum_cte0515@hkn0402 slurm]$ ",,terminal_output +1450,3348404,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +1451,3348504,"TERMINAL",0,0,"[?25li[?25h[?25lt[?25h",,terminal_output +1452,3348610,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1453,3348676,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +1454,3348846,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1455,3349001,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +1456,3349133,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +1457,3349299,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +1458,3350939,"TERMINAL",0,0,"remote: Enumerating objects: 13, done.\r\nremote: Counting objects: 7% (1/13)\rremote: Counting objects: 15% (2/13)\rremote: Counting 
objects: 23% (3/13)\rremote: Counting objects: 30% (4/13)\rremote: Counting objects: 38% (5/13)\rremote: Counting objects: 46% (6/13)\rremote: Counting objects: 53% (7/13)\rremote: Counting objects: 61% (8/13)\rremote: Counting objects: 69% (9/13)\rremote: Counting objects: 76% (10/13)\rremote: Counting objects: 84% (11/13)\rremote: Counting objects: 92% (12/13)\rremote: Counting objects: 100% (13/13)\rremote: Counting objects: 100% (13/13), done.\r\nremote: Compressing objects: 16% (1/6)\rremote: Compressing objects: 33% (2/6)\rremote: Compressing objects: 50% (3/6)\rremote: Compressing objects: 66% (4/6)\rremote: Compressing objects: 83% (5/6)\rremote: Compressing objects: 100% (6/6)\rremote: Compressing objects: 100% (6/6), done.\r\nremote: Total 9 (delta 3), reused 9 (delta 3), pack-reused 0 (from 0)\r\nUnpacking objects: 11% (1/9)\rUnpacking objects: 22% (2/9)\rUnpacking objects: 33% (3/9)\rUnpacking objects: 44% (4/9)\rUnpacking objects: 55% (5/9)\rUnpacking objects: 66% (6/9)\rUnpacking objects: 77% (7/9)\rUnpacking objects: 88% (8/9)\rUnpacking objects: 100% (9/9)\rUnpacking objects: 100% (9/9), 1.96 KiB | 43.00 KiB/s, done.\r\n",,terminal_output +1459,3351198,"TERMINAL",0,0,"From github.com:p-doom/slurm\r\n 5d7446e..657c79e main -> origin/main\r\nUpdating 5d7446e..657c79e\r\n",,terminal_output +1460,3351266,"TERMINAL",0,0,"Fast-forward\r\n dev/alfred/horeka/masked_lim_noise/masked_lim_dev.sbatch | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch | 78 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sh | 16 ++++++++++++++++\r\n 3 files changed, 178 insertions(+)\r\n create mode 100644 dev/alfred/horeka/masked_lim_noise/masked_lim_dev.sbatch\r\n create mode 100644 dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch\r\n create mode 100644 dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sh\r\n]0;tum_cte0515@hkn0402:~/Projects/jafar/slurm[?2004h(jafar) [tum_cte0515@hkn0402 slurm]$ ",,terminal_output +1461,3355222,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sh",0,0,"#!/bin/bash\n\nmask_limits=(0.0 0.5)\njob_name=""dynamics-masked_lim_guassian_noise""\n\n# job_name=""dynamics-masked_lim_guassian_noise_dev""\n# mask_limits=(0.0)\n\nfor mask_limit in ${mask_limits[@]}; do\n MASK_LIMIT=${mask_limit} \\n sbatch \\n --job-name=""${job_name}_${mask_limit}"" \\n slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch\n # slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_dev.sbatch\ndone\n\n",shellscript,tab +1462,3362901,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=24:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_alfred/dynamics-masked_lim/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_alfred/dynamics-masked_lim/%x_%j.log\n#SBATCH --job-name=masked_lim_0.5-1\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue 
$SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv_jafar/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\ntags=""masked_lim dynamics MASK_LIMIT_${MASK_LIMIT} guassian_noise""\n\nCHECKPOINT_DIR=""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/checkpoints_alfred/${job_name}_${slurm_job_id}""\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --ckpt_dir $CHECKPOINT_DIR \\n --wandb_id $SLURM_JOB_ID \\n --batch_size=96 \\n --max_lr=1e-4 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=""${job_name}_${slurm_job_id}"" \\n --tags $tags \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir \\n --mask_limit=$MASK_LIMIT &\n\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +1463,3368379,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1464,3369491,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1465,3374229,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",0,0,"",shellscript,tab +1466,3374231,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",454,0,"",shellscript,selection_mouse +1467,3374545,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",476,0,"",shellscript,selection_mouse +1468,3376540,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1111,0,"",shellscript,selection_mouse +1469,3376656,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1110,1,"\n",shellscript,selection_mouse +1470,3376657,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1073,38,"estore_ckpt_flag=""--restore-ckpt""\nfi\n\n",shellscript,selection_mouse +1471,3376657,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1024,87," restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n",shellscript,selection_mouse +1472,3376657,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",986,125,"\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n",shellscript,selection_mouse +1473,3376658,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",847,264,"\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n",shellscript,selection_mouse +1474,3376659,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",819,292,"}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart 
count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n",shellscript,selection_mouse +1475,3376672,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",808,303," exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n",shellscript,selection_mouse +1476,3376691,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",723,388," # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n",shellscript,selection_mouse +1477,3376705,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",676,435," # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n",shellscript,selection_mouse +1478,3376763,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",567,544,"requeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n",shellscript,selection_mouse +1479,3376764,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",515,596,"# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n",shellscript,selection_mouse +1480,3376771,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",514,597,"\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap 
requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n",shellscript,selection_mouse +1481,3376789,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",463,648,"#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n",shellscript,selection_mouse +1482,3376810,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",445,666,"#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n",shellscript,selection_mouse +1483,3379393,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",445,0,"",shellscript,selection_command +1484,3383336,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1485,3383337,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",453,0,"",shellscript,selection_mouse +1486,3383935,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",503,0,"",shellscript,selection_mouse +1487,3385786,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",503,0,"#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n",shellscript,content +1488,3385791,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",503,0,"",shellscript,selection_command +1489,3387680,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",506,0,"",shellscript,selection_mouse +1490,3387886,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",524,0,"",shellscript,selection_command 
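The block copied between the two sbatch files in the selections above is a timeout-requeue pattern: `#SBATCH --signal=b:usr1@300` has Slurm deliver SIGUSR1 to the batch shell (the `b:` prefix) five minutes before the walltime limit, a trap requeues the job, and on restart the job's Restarts counter tells the script whether a checkpoint exists to restore. Condensed from the recorded script:

    #SBATCH --requeue
    #SBATCH --signal=b:usr1@300   # SIGUSR1 to the batch script 5 min before timeout

    requeue_job() {
        echo "[$(date)] caught SIGUSR1, requeueing job $SLURM_JOB_ID"
        scontrol requeue "$SLURM_JOB_ID"
        exit 0
    }
    trap requeue_job SIGUSR1

    # First run: nothing to restore; any requeued run: restore the checkpoint.
    restart_count=$(scontrol show job "$SLURM_JOB_ID" | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)
    if [ "$restart_count" -eq 0 ]; then
        restore_ckpt_flag="--no-restore-ckpt"
    else
        restore_ckpt_flag="--restore-ckpt"
    fi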
+1491,3388414,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",572,0,"",shellscript,selection_command +1492,3388417,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",576,0,"",shellscript,selection_command +1493,3388467,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",628,0,"",shellscript,selection_command +1494,3388480,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",644,0,"",shellscript,selection_command +1495,3388507,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",737,0,"",shellscript,selection_command +1496,3388663,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",784,0,"",shellscript,selection_command +1497,3401009,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",0,0,"",shellscript,tab +1498,3401010,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1612,0,"",shellscript,selection_mouse +1499,3401065,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1611,1,"}",shellscript,selection_mouse +1500,3401074,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1610,2,"d}",shellscript,selection_mouse +1501,3401138,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1609,3,"id}",shellscript,selection_mouse +1502,3401139,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1608,4,"_id}",shellscript,selection_mouse +1503,3401148,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1607,5,"b_id}",shellscript,selection_mouse +1504,3401160,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1604,8,"_job_id}",shellscript,selection_mouse +1505,3401175,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1601,11,"urm_job_id}",shellscript,selection_mouse +1506,3401237,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1599,13,"slurm_job_id}",shellscript,selection_mouse +1507,3401238,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1598,14,"{slurm_job_id}",shellscript,selection_mouse +1508,3401238,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1597,15,"${slurm_job_id}",shellscript,selection_mouse +1509,3401275,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1596,16,"_${slurm_job_id}",shellscript,selection_mouse +1510,3401334,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1595,17,"}_${slurm_job_id}",shellscript,selection_mouse +1511,3401392,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1594,18,"e}_${slurm_job_id}",shellscript,selection_mouse +1512,3406108,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1513,3406109,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1591,0,"",shellscript,selection_mouse +1514,3406195,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1581,12,"slurm_job_id",shellscript,selection_mouse +1515,3406379,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1580,13,"$slurm_job_id",shellscript,selection_mouse +1516,3406398,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1579,14,"/$slurm_job_id",shellscript,selection_mouse +1517,3406461,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1571,22,"job_name/$slurm_job_id",shellscript,selection_mouse +1518,3407503,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1593,0,"",shellscript,selection_mouse 
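The masked_lim_noise scripts pulled in by the git fast-forward earlier in this session follow a one-sbatch-per-value sweep pattern: the driver passes the hyperparameter through the environment, which works because sbatch propagates the submitting shell's environment into the job by default (--export=ALL). As recorded in masked_lim_yolo.sh (job-name spelling kept verbatim):

    mask_limits=(0.0 0.5)
    job_name="dynamics-masked_lim_guassian_noise"
    for mask_limit in "${mask_limits[@]}"; do
        # MASK_LIMIT is read inside the .sbatch as $MASK_LIMIT
        MASK_LIMIT=$mask_limit sbatch \
            --job-name="${job_name}_${mask_limit}" \
            slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch
    done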
+1519,3407503,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1592,0,"",shellscript,selection_command +1520,3408199,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1618,0,"",shellscript,selection_mouse +1521,3408200,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1617,0,"",shellscript,selection_command +1522,3415239,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",0,0,"",shellscript,tab +1523,3415240,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1864,0,"",shellscript,selection_mouse +1524,3415332,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1860,17,"restore_ckpt_flag",shellscript,selection_mouse +1525,3415472,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1855,25," $restore_ckpt_flag \\n",shellscript,selection_mouse +1526,3420578,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1866,0,"",shellscript,selection_mouse +1527,3422406,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1528,3422407,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1883,0,"",shellscript,selection_mouse +1529,3422411,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1882,0,"",shellscript,selection_command +1530,3423002,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1883,0,"\n $restore_ckpt_flag \",shellscript,content +1531,3423031,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1888,0,"",shellscript,selection_command +1532,3425455,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",0,0,"",shellscript,tab +1533,3425456,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1922,0,"",shellscript,selection_mouse +1534,3427440,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1535,3427441,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1908,0,"",shellscript,selection_mouse +1536,3427457,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1907,0,"",shellscript,selection_command +1537,3428055,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1908,0,"\n --wandb_id $SLURM_JOB_ID \",shellscript,content +1538,3428057,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1913,0,"",shellscript,selection_command +1539,3431334,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",2302,0,"",shellscript,selection_mouse +1540,3431759,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",2343,0,"",shellscript,selection_mouse +1541,3431768,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",2342,0,"",shellscript,selection_command +1542,3434235,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",2343,0,"",shellscript,selection_command +1543,3435790,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",2343,0," ",shellscript,content +1544,3435792,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",2344,0,"",shellscript,selection_keyboard +1545,3437007,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",2344,0,"&",shellscript,content +1546,3437008,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",2345,0,"",shellscript,selection_keyboard +1547,3440283,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",0,0,"",shellscript,tab 
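Records 1530-1537 port two more pieces into the mask_prob_fix script: the `$restore_ckpt_flag` computed above and `--wandb_id $SLURM_JOB_ID`. Since `scontrol requeue` keeps the same job id, pinning the W&B run id to it presumably lets a requeued job resume logging to the same run; a sketch of the relevant flags, other flags as in the recorded script:

    srun python train_dynamics.py \
        --save_ckpt \
        $restore_ckpt_flag \
        --ckpt_dir "$CHECKPOINT_DIR" \
        --wandb_id "$SLURM_JOB_ID" &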
+1548,3440285,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",2294,0,"",shellscript,selection_mouse +1549,3440327,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",2294,5,"\nchil",shellscript,selection_mouse +1550,3440361,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",2294,14,"\nchild_pid=$!\n",shellscript,selection_mouse +1551,3440362,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",2294,23,"\nchild_pid=$!\n\nwait $ch",shellscript,selection_mouse +1552,3440422,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",2294,31,"\nchild_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse +1553,3442703,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",2294,0,"",shellscript,selection_command +1554,3443681,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1555,3443682,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",2346,0,"",shellscript,selection_mouse +1556,3445067,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",2346,0,"\nchild_pid=$!\n\nwait $child_pid\n",shellscript,content +1557,3445071,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",2346,0,"",shellscript,selection_command +1558,3446600,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",2346,0,"\n",shellscript,content +1559,3447834,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",2347,1,"",shellscript,content +1560,3448863,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",2360,0,"",shellscript,selection_mouse +1561,3449598,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",2377,0,"",shellscript,selection_mouse +1562,3465962,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",0,0,"",shellscript,tab +1563,3465964,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1089,0,"",shellscript,selection_mouse +1564,3466119,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1043,0,"",shellscript,selection_mouse +1565,3466173,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",1043,0,"\nchild_pid=$!\n\nwait $child_pid\n",shellscript,content +1566,3468999,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",459,0,"",shellscript,selection_mouse +1567,3469267,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",514,0,"",shellscript,selection_mouse +1568,3469644,"slurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch",486,0,"",shellscript,selection_mouse +1569,3487965,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1570,3487965,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",999,0,"",shellscript,selection_mouse +1571,3504230,"TERMINAL",0,0,"bash",,terminal_focus +1572,3505625,"TERMINAL",0,0,"idling",,terminal_command +1573,3505692,"TERMINAL",0,0,"]633;E;2025-07-23 15:45:44 idling;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1990.localdomain: Wed Jul 23 15:45:44 2025Partition dev_cpuonly: 10 nodes idle\rPartition cpuonly: 17 nodes idle\rPartition dev_accelerated:\t 1 nodes idle\rPartition accelerated:\t 3 nodes idle\rPartition dev_accelerated-h100 :\t 1 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 7 nodes idle",,terminal_output +1574,3506795,"TERMINAL",0,0,"5",,terminal_output 
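The trailing `&` plus `wait` ported in records 1545-1558 is what makes the SIGUSR1 trap effective: bash defers trap handlers while a foreground command runs, but the `wait` builtin returns as soon as a trapped signal arrives, so the handler can requeue before the walltime expires.

    srun python train_dynamics.py "$@" &   # background the training step
    child_pid=$!
    wait $child_pid                        # interrupted immediately by the trapped SIGUSR1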
(interleaved one-character sinfo_t_idle watch ticks omitted from the records below)
+1575,3507513,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1125,0,"",shellscript,selection_mouse
+1576,3507531,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1124,0,"",shellscript,selection_command
+1580,3510496,"TERMINAL",0,0,"srun",,terminal_focus
+1581,3510702,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0402 slurm]$ ",,terminal_output
+1587,3515262,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab
+1588,3515263,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1120,0,"",shellscript,selection_mouse
+1589,3515279,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1119,0,"",shellscript,selection_command
+1591,3516814,"TERMINAL",0,0,"watch",,terminal_focus
+1593,3517525,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1164,0,"",shellscript,selection_mouse
+1594,3517544,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",1163,0,"",shellscript,selection_command
+1596,3518548,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",542,0,"",shellscript,selection_mouse
+1598,3520282,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",83,0,"",shellscript,selection_mouse
+1601,3521770,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",82,0,"",shellscript,selection_command
+1602,3521949,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",82,1,"",shellscript,content
+1603,3522129,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",82,1,"",shellscript,content
+1605,3523376,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",82,0,"0",shellscript,content
+1606,3523377,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",83,0,"",shellscript,selection_keyboard
+1608,3523506,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",83,0,"0",shellscript,content
+1609,3523506,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",84,0,"",shellscript,selection_keyboard
+1610,3523633,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",83,0,"",shellscript,selection_command
+1611,3524029,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",84,0,"",shellscript,selection_command
+1612,3524231,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",85,0,"",shellscript,selection_command
+1615,3525817,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",85,1,"1",shellscript,content
+1616,3526160,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",86,0,"",shellscript,selection_command +1617,3526631,"TERMINAL",0,0,"5",,terminal_output +1618,3526925,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",86,1,"5",shellscript,content +1619,3527654,"TERMINAL",0,0,"6",,terminal_output +1620,3528688,"TERMINAL",0,0,"7",,terminal_output +1621,3529712,"TERMINAL",0,0,"8",,terminal_output +1622,3530738,"TERMINAL",0,0,"10",,terminal_output +1623,3531934,"TERMINAL",0,0,"1",,terminal_output +1624,3532824,"TERMINAL",0,0,"2",,terminal_output +1625,3533866,"TERMINAL",0,0,"3",,terminal_output +1626,3534924,"TERMINAL",0,0,"4",,terminal_output +1627,3535265,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",38,0,"",shellscript,selection_mouse +1628,3535268,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",37,0,"",shellscript,selection_command +1629,3535946,"TERMINAL",0,0,"5",,terminal_output +1630,3536220,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",86,0,"",shellscript,selection_mouse +1631,3536977,"TERMINAL",0,0,"6",,terminal_output +1632,3537974,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",86,1,"0",shellscript,content +1633,3538056,"TERMINAL",0,0,"7",,terminal_output +1634,3538499,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",85,1,"0",shellscript,content +1635,3538500,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",85,0,"",shellscript,selection_command +1636,3538913,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",82,2,"",shellscript,content +1637,3538921,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",82,0,"",shellscript,selection_command +1638,3539069,"TERMINAL",0,0,"8",,terminal_output +1639,3539475,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",82,0,"8",shellscript,content +1640,3539486,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",82,0,"",shellscript,selection_command +1641,3539918,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",82,0,"4",shellscript,content +1642,3539922,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",82,0,"",shellscript,selection_command +1643,3540101,"TERMINAL",0,0,"9",,terminal_output +1644,3541146,"TERMINAL",0,0,"20",,terminal_output +1645,3542211,"TERMINAL",0,0,"125",,terminal_output +1646,3543220,"TERMINAL",0,0,"2",,terminal_output +1647,3543459,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes copy.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit-maskprob-fix/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit-maskprob-fix/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_maskprob_fix_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart 
count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=384 \\n --init_lr=0 \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-maskprob-fix-8-node-$slurm_job_id \\n --tags dynamics maskprob-fix 8-node \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +1648,3544281,"TERMINAL",0,0,"3",,terminal_output +1649,3545409,"TERMINAL",0,0,"411",,terminal_output +1650,3546345,"TERMINAL",0,0,"5",,terminal_output +1651,3547432,"TERMINAL",0,0,"6",,terminal_output +1652,3548238,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit-maskprob-fix/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit-maskprob-fix/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_maskprob_fix_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger 
ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=384 \\n --init_lr=0 \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-maskprob-fix-8-node-$slurm_job_id \\n --tags dynamics maskprob-fix 8-node \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +1653,3548509,"TERMINAL",0,0,"7",,terminal_output +1654,3549001,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",571,0,"",shellscript,selection_mouse +1655,3549002,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",570,0,"",shellscript,selection_command +1656,3549435,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",148,0,"",shellscript,selection_mouse +1657,3549437,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",147,0,"",shellscript,selection_command +1658,3549491,"TERMINAL",0,0,"8",,terminal_output +1659,3550387,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",38,0,"",shellscript,selection_mouse +1660,3550389,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",37,0,"",shellscript,selection_command +1661,3550498,"TERMINAL",0,0,"9",,terminal_output +1662,3550722,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",38,0,"",shellscript,selection_command +1663,3550852,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",37,1,"",shellscript,content +1664,3550989,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",37,0,"1",shellscript,content +1665,3550989,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",38,0,"",shellscript,selection_keyboard +1666,3551351,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",37,0,"",shellscript,selection_command +1667,3551499,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",55,0,"",shellscript,selection_command +1668,3551566,"TERMINAL",0,0,"30",,terminal_output +1669,3552056,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",66,0,"",shellscript,selection_command +1670,3552158,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",65,1,"",shellscript,content +1671,3552592,"TERMINAL",0,0,"1",,terminal_output +1672,3552709,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",65,0,"j",shellscript,content +1673,3552709,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",66,0,"",shellscript,selection_keyboard +1674,3553164,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",65,1,"",shellscript,content +1675,3553252,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",64,0,"",shellscript,selection_command +1676,3553385,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",88,0,"",shellscript,selection_command +1677,3553559,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",115,0,"",shellscript,selection_command +1678,3553676,"TERMINAL",0,0,"2",,terminal_output 
+1679,3553932,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",88,0,"",shellscript,selection_command +1680,3554078,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",64,0,"",shellscript,selection_command +1681,3554473,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",65,0,"",shellscript,selection_command +1682,3554663,"TERMINAL",0,0,"3",,terminal_output +1683,3555162,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",65,0,"1",shellscript,content +1684,3555163,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",66,0,"",shellscript,selection_keyboard +1685,3555652,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",65,0,"",shellscript,selection_command +1686,3555702,"TERMINAL",0,0,"4",,terminal_output +1687,3555802,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",89,0,"",shellscript,selection_command +1688,3555965,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",117,0,"",shellscript,selection_command +1689,3556225,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",147,0,"",shellscript,selection_command +1690,3556839,"TERMINAL",0,0,"6",,terminal_output +1691,3556890,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",148,0,"",shellscript,selection_command +1692,3557182,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",147,1,"",shellscript,content +1693,3557791,"TERMINAL",0,0,"7",,terminal_output +1694,3558326,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",147,0,"5",shellscript,content +1695,3558327,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",148,0,"",shellscript,selection_keyboard +1696,3558778,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",147,0,"",shellscript,selection_command +1697,3558836,"TERMINAL",0,0,"8",,terminal_output +1698,3558944,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",168,0,"",shellscript,selection_command +1699,3559412,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",169,0,"",shellscript,selection_command +1700,3559617,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",168,1,"",shellscript,content +1701,3559723,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",168,0,"1",shellscript,content +1702,3559724,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",169,0,"",shellscript,selection_keyboard +1703,3559860,"TERMINAL",0,0,"9",,terminal_output +1704,3559977,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",168,0,"",shellscript,selection_command +1705,3560173,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",142,0,"",shellscript,selection_command +1706,3560333,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",110,0,"",shellscript,selection_command +1707,3560467,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",86,0,"",shellscript,selection_command +1708,3560780,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",85,0,"",shellscript,selection_command +1709,3560897,"TERMINAL",0,0,"40",,terminal_output +1710,3560938,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",84,0,"",shellscript,selection_command 
+1711,3561075,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",83,0,"",shellscript,selection_command +1712,3561291,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",82,0,"",shellscript,selection_command +1713,3561866,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",82,1,"",shellscript,content +1714,3561943,"TERMINAL",0,0,"1",,terminal_output +1715,3562061,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",82,1,"",shellscript,content +1716,3562715,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",82,0,"0",shellscript,content +1717,3562716,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",83,0,"",shellscript,selection_keyboard +1718,3562854,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",83,0,"0",shellscript,content +1719,3562855,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",84,0,"",shellscript,selection_keyboard +1720,3563019,"TERMINAL",0,0,"2",,terminal_output +1721,3563069,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",83,0,"",shellscript,selection_command +1722,3563208,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",84,0,"",shellscript,selection_command +1723,3563388,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",85,0,"",shellscript,selection_command +1724,3563551,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",86,0,"",shellscript,selection_command +1725,3563977,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",85,0,"",shellscript,selection_command +1726,3564034,"TERMINAL",0,0,"3",,terminal_output +1727,3564686,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",85,1,"1",shellscript,content +1728,3564865,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",86,0,"",shellscript,selection_command +1729,3565065,"TERMINAL",0,0,"4",,terminal_output +1730,3566093,"TERMINAL",0,0,"5",,terminal_output +1731,3566558,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",86,1,"5",shellscript,content +1732,3567028,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",110,0,"",shellscript,selection_command +1733,3567131,"TERMINAL",0,0,"6",,terminal_output +1734,3567185,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",142,0,"",shellscript,selection_command +1735,3567311,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",168,0,"",shellscript,selection_command +1736,3567445,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",189,0,"",shellscript,selection_command +1737,3567989,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",329,0,"",shellscript,selection_command +1738,3568171,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",468,0,"",shellscript,selection_command +1739,3568182,"TERMINAL",0,0,"7",,terminal_output +1740,3568357,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",519,0,"",shellscript,selection_command +1741,3568671,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",468,0,"",shellscript,selection_command +1742,3569211,"TERMINAL",0,0,"8",,terminal_output +1743,3570090,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",548,0,"",shellscript,selection_mouse +1744,3570255,"TERMINAL",0,0,"9",,terminal_output 
+1745,3571123,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",547,0,"",shellscript,selection_command +1746,3571239,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",547,1,"",shellscript,content +1747,3571294,"TERMINAL",0,0,"50",,terminal_output +1748,3571769,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",548,0,"",shellscript,selection_command +1749,3571905,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",549,0,"",shellscript,selection_command +1750,3572024,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",550,0,"",shellscript,selection_command +1751,3572212,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",551,0,"",shellscript,selection_command +1752,3572331,"TERMINAL",0,0,"1",,terminal_output +1753,3572791,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",550,1,"",shellscript,content +1754,3573368,"TERMINAL",0,0,"2",,terminal_output +1755,3574348,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",550,0,"3",shellscript,content +1756,3574349,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",551,0,"",shellscript,selection_keyboard +1757,3574392,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",551,0,"0",shellscript,content +1758,3574393,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",552,0,"",shellscript,selection_keyboard +1759,3574410,"TERMINAL",0,0,"3",,terminal_output +1760,3574587,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",552,0,"s",shellscript,content +1761,3574587,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",553,0,"",shellscript,selection_keyboard +1762,3574774,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",553,0,"e",shellscript,content +1763,3574775,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",554,0,"",shellscript,selection_keyboard +1764,3574983,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",554,0,"c",shellscript,content +1765,3574984,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",555,0,"",shellscript,selection_keyboard +1766,3575480,"TERMINAL",0,0,"4",,terminal_output +1767,3575532,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",555,4,"",shellscript,content +1768,3575988,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",554,0,"",shellscript,selection_command +1769,3576182,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",571,0,"",shellscript,selection_command +1770,3576371,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",605,0,"",shellscript,selection_command +1771,3576481,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",638,0,"",shellscript,selection_command +1772,3576497,"TERMINAL",0,0,"5",,terminal_output +1773,3577011,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",673,0,"",shellscript,selection_command +1774,3577015,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",766,0,"",shellscript,selection_command +1775,3577083,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",813,0,"",shellscript,selection_command +1776,3577089,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",863,0,"",shellscript,selection_command 
+1777,3577105,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",874,0,"",shellscript,selection_command +1778,3577162,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",876,0,"",shellscript,selection_command +1779,3577166,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",878,0,"",shellscript,selection_command +1780,3577223,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",902,0,"",shellscript,selection_command +1781,3577224,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",904,0,"",shellscript,selection_command +1782,3577284,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",938,0,"",shellscript,selection_command +1783,3577284,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",983,0,"",shellscript,selection_command +1784,3577346,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1043,0,"",shellscript,selection_command +1785,3577347,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1076,0,"",shellscript,selection_command +1786,3577406,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1111,0,"",shellscript,selection_command +1787,3577410,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1123,0,"",shellscript,selection_command +1788,3577478,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1158,0,"",shellscript,selection_command +1789,3577479,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1165,0,"",shellscript,selection_command +1790,3577499,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1167,0,"",shellscript,selection_command +1791,3577528,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1168,0,"",shellscript,selection_command +1792,3577545,"TERMINAL",0,0,"6",,terminal_output +1793,3577564,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1169,0,"",shellscript,selection_command +1794,3577626,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1192,0,"",shellscript,selection_command +1795,3577627,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1199,0,"",shellscript,selection_command +1796,3577642,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1201,0,"",shellscript,selection_command +1797,3577704,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1230,0,"",shellscript,selection_command +1798,3577706,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1260,0,"",shellscript,selection_command +1799,3577782,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1286,0,"",shellscript,selection_command +1800,3577783,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1288,0,"",shellscript,selection_command +1801,3577795,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1322,0,"",shellscript,selection_command +1802,3577831,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1411,0,"",shellscript,selection_command +1803,3577890,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1435,0,"",shellscript,selection_command +1804,3577891,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1462,0,"",shellscript,selection_command 
+1805,3577953,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1464,0,"",shellscript,selection_command +1806,3577954,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1498,0,"",shellscript,selection_command +1807,3578027,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1616,0,"",shellscript,selection_command +1808,3578028,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1618,0,"",shellscript,selection_command +1809,3578091,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1652,0,"",shellscript,selection_command +1810,3578092,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1713,0,"",shellscript,selection_command +1811,3578097,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1814,0,"",shellscript,selection_command +1812,3578130,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1830,0,"",shellscript,selection_command +1813,3578168,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1832,0,"",shellscript,selection_command +1814,3578188,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1863,0,"",shellscript,selection_command +1815,3578217,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1881,0,"",shellscript,selection_command +1816,3578280,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1906,0,"",shellscript,selection_command +1817,3578294,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1937,0,"",shellscript,selection_command +1818,3578327,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1970,0,"",shellscript,selection_command +1819,3578339,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1993,0,"",shellscript,selection_command +1820,3578395,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2011,0,"",shellscript,selection_command +1821,3578396,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2031,0,"",shellscript,selection_command +1822,3578437,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2063,0,"",shellscript,selection_command +1823,3578458,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2075,0,"",shellscript,selection_command +1824,3578518,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2110,0,"",shellscript,selection_command +1825,3578589,"TERMINAL",0,0,"7",,terminal_output +1826,3578689,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2075,0,"",shellscript,selection_command +1827,3578857,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2063,0,"",shellscript,selection_command +1828,3579008,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2031,0,"",shellscript,selection_command +1829,3579190,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2011,0,"",shellscript,selection_command +1830,3579677,"TERMINAL",0,0,"8",,terminal_output +1831,3579694,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1993,0,"",shellscript,selection_command +1832,3579932,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1992,0,"",shellscript,selection_command +1833,3580652,"TERMINAL",0,0,"9",,terminal_output 
+1834,3580720,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1991,1,"",shellscript,content +1835,3580848,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1990,1,"",shellscript,content +1836,3580974,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1989,1,"",shellscript,content +1837,3581106,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1989,0,"1",shellscript,content +1838,3581107,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1990,0,"",shellscript,selection_keyboard +1839,3581178,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1990,0,"2",shellscript,content +1840,3581179,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1991,0,"",shellscript,selection_keyboard +1841,3581594,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1990,0,"",shellscript,selection_command +1842,3581670,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2010,0,"",shellscript,selection_command +1843,3581689,"TERMINAL",0,0,"7:00",,terminal_output +1844,3581820,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2030,0,"",shellscript,selection_command +1845,3581970,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2050,0,"",shellscript,selection_command +1846,3582429,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2063,0,"",shellscript,selection_command +1847,3582748,"TERMINAL",0,0,"2",,terminal_output +1848,3582885,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2062,0,"",shellscript,selection_command +1849,3583005,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2061,0,"",shellscript,selection_command +1850,3583535,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2060,1,"",shellscript,content +1851,3583777,"TERMINAL",0,0,"3",,terminal_output +1852,3583959,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2059,0,"",shellscript,selection_command +1853,3584094,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2073,0,"",shellscript,selection_command +1854,3584250,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2102,0,"",shellscript,selection_command +1855,3584662,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2111,0,"",shellscript,selection_command +1856,3584808,"TERMINAL",0,0,"4",,terminal_output +1857,3584990,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2110,0,"",shellscript,selection_command +1858,3585106,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2109,0,"",shellscript,selection_command +1859,3585315,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2108,1,"",shellscript,content +1860,3585657,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2107,0,"",shellscript,selection_command +1861,3585849,"TERMINAL",0,0,"5",,terminal_output +1862,3586900,"TERMINAL",0,0,"6 7",,terminal_output +1863,3587970,"TERMINAL",0,0,"7",,terminal_output +1864,3588980,"TERMINAL",0,0,"8",,terminal_output +1865,3589237,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2106,0,"",shellscript,selection_command +1866,3589414,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2105,0,"",shellscript,selection_command 
+1867,3590136,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2105,4,"",shellscript,content +1868,3590166,"TERMINAL",0,0,"9",,terminal_output +1869,3591095,"TERMINAL",0,0,"10",,terminal_output +1870,3591107,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2105,0,"1",shellscript,content +1871,3591108,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2106,0,"",shellscript,selection_keyboard +1872,3591205,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2106,0,"0",shellscript,content +1873,3591206,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2107,0,"",shellscript,selection_keyboard +1874,3591304,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2107,0,"0",shellscript,content +1875,3591304,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2108,0,"",shellscript,selection_keyboard +1876,3591417,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2108,0," ",shellscript,content +1877,3591418,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2109,0,"",shellscript,selection_keyboard +1878,3591508,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2108,0,"",shellscript,selection_command +1879,3592103,"TERMINAL",0,0,"1",,terminal_output +1880,3593144,"TERMINAL",0,0,"2",,terminal_output +1881,3594184,"TERMINAL",0,0,"3",,terminal_output +1882,3594478,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2256,0,"",shellscript,selection_mouse +1883,3594513,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2255,0,"",shellscript,selection_command +1884,3595017,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2145,0,"",shellscript,selection_mouse +1885,3595298,"TERMINAL",0,0,"4",,terminal_output +1886,3595555,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2205,0,"",shellscript,selection_mouse +1887,3596045,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2150,0,"",shellscript,selection_mouse +1888,3596321,"TERMINAL",0,0,"5",,terminal_output +1889,3597308,"TERMINAL",0,0,"6",,terminal_output +1890,3597517,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2150,0,"-",shellscript,content +1891,3597518,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2151,0,"",shellscript,selection_keyboard +1892,3597595,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2151,0,"d",shellscript,content +1893,3597596,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2152,0,"",shellscript,selection_keyboard +1894,3597699,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2152,0,"e",shellscript,content +1895,3597700,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2153,0,"",shellscript,selection_keyboard +1896,3597830,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2153,0,"v",shellscript,content +1897,3597831,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2154,0,"",shellscript,selection_keyboard +1898,3598360,"TERMINAL",0,0,"7",,terminal_output +1899,3599409,"TERMINAL",0,0,"8",,terminal_output +1900,3600248,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",2153,0,"",shellscript,selection_command +1901,3600470,"TERMINAL",0,0,"9",,terminal_output 
+1902,3601479,"TERMINAL",0,0,"20",,terminal_output +1903,3602010,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar/slurm]633;D;0",,terminal_output +1904,3604016,"TERMINAL",0,0,"cd ,,",,terminal_command +1905,3604938,"TERMINAL",0,0,"cd ..",,terminal_command +1906,3604960,"TERMINAL",0,0,"]633;E;2025-07-23 15:47:24 cd ..;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1907,3608435,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command +1908,3608462,"TERMINAL",0,0,"]633;E;2025-07-23 15:47:27 source .venv/bin/activate;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1909,3614545,"TERMINAL",0,0,"srun",,terminal_focus +1910,3615523,"TERMINAL",0,0,"[?25lq[?25h",,terminal_output +1911,3615582,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1912,3615735,"TERMINAL",0,0,"[?25le[?25h[?25lu[?25h",,terminal_output +1913,3615872,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1914,3615957,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0402.localdomain: Wed Jul 23 15:47:35 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3370715 dev_accel interact tum_cte0 R51:46\t 1 hkn0402",,terminal_output +1915,3617091,"TERMINAL",0,0,"67",,terminal_output +1916,3617170,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0402:~/Projects/jafar/slurm[?2004h(jafar) [tum_cte0515@hkn0402 slurm]$ ",,terminal_output +1917,3618674,"TERMINAL",0,0,"[?25lc[?25h[?25ld[?25h",,terminal_output +1918,3618768,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +1919,3618833,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +1920,3619009,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +1921,3619125,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +1922,3619243,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +1923,3619377,"TERMINAL",0,0,"[?25ls\r\n[?2004l\rdata generation_1753196800.0453017.gif overfit_dir.zip scripts_horeka train_tokenizer.py\r\ndebug genie.py __pycache__ slurm utils\r\ndiff.diff gifs README.md slurm-3359333.out wandb\r\ndiff.log input_pipeline read_tf_record.py slurm-3359334.out weekend-job-requeuer.sh\r\nframe-knoms.png LICENSE requirements-franz.txt slurm-3359338.out weekend-job-starter.sh\r\nframe.png logs requirements.txt tests\r\nframes models sample.py train_dynamics.py\r\ngenerate_dataset.py overfit_dir scripts_cremers train_lam.py\r\n]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ [?25h",,terminal_output +1924,3632633,"TERMINAL",0,0,"idl^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +1925,3632706,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +1926,3632901,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +1927,3633383,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +1928,3633589,"TERMINAL",0,0,"[?25ld[?25h[?25ll[?25h",,terminal_output +1929,3633858,"TERMINAL",0,0,"[?25li[?25h[?25ln[?25h[?25lg[?25h",,terminal_output +1930,3633971,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +1931,3634035,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn0402.localdomain: Wed Jul 23 15:47:53 2025Partition dev_cpuonly:\t 9 nodes idle\rPartition cpuonly: 25 nodes 
idle\rPartition dev_accelerated:\t 1 nodes idle\rPartition accelerated:\t 7 nodes idle\rPartition dev_accelerated-h100 :\t 1 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 7 nodes idle",,terminal_output +1932,3635018,"TERMINAL",0,0,"4",,terminal_output +1933,3636043,"TERMINAL",0,0,"5",,terminal_output +1934,3636985,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +1935,3639349,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",,terminal_output +1936,3641155,"TERMINAL",0,0,"[?25l\rslurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch[?25h",,terminal_output +1937,3642333,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch\r",,terminal_output +1938,3642511,"TERMINAL",0,0,"[?25lblslurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch\r[?25haslurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch\r",,terminal_output +1939,3642684,"TERMINAL",0,0,"[?25ltslurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch\r[?25h",,terminal_output +1940,3642914,"TERMINAL",0,0,"[?25lclslurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch\r[?25hhslurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch\r",,terminal_output +1941,3643073,"TERMINAL",0,0,"[?25l slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch\r[?25h",,terminal_output +1942,3643220,"TERMINAL",0,0,"\r\n[?2004l\rSubmitted batch job 3370787\r\n]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +1943,3643679,"TERMINAL",0,0,"[?25lq[?25h[?25lu[?25h",,terminal_output +1944,3643834,"TERMINAL",0,0,"[?25le[?25h[?25lu[?25h",,terminal_output +1945,3643989,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1946,3644071,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0402.localdomain: Wed Jul 23 15:48:03 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3370787 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3370715 dev_accel interact tum_cte0 R52:14\t 1 hkn0402",,terminal_output +1947,3645110,"TERMINAL",0,0,"45",,terminal_output +1948,3646148,"TERMINAL",0,0,"56",,terminal_output +1949,3647162,"TERMINAL",0,0,"67",,terminal_output +1950,3648128,"TERMINAL",0,0,"78",,terminal_output +1951,3649142,"TERMINAL",0,0,"89",,terminal_output +1952,3650158,"TERMINAL",0,0,"920",,terminal_output +1953,3651187,"TERMINAL",0,0,"101",,terminal_output +1954,3652192,"TERMINAL",0,0,"12",,terminal_output +1955,3653208,"TERMINAL",0,0,"23",,terminal_output +1956,3654262,"TERMINAL",0,0,"34",,terminal_output +1957,3655245,"TERMINAL",0,0,"45",,terminal_output +1958,3656260,"TERMINAL",0,0,"56",,terminal_output +1959,3657283,"TERMINAL",0,0,"67",,terminal_output +1960,3658318,"TERMINAL",0,0,"78",,terminal_output +1961,3659314,"TERMINAL",0,0,"89",,terminal_output +1962,3659831,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",0,0,"",shellscript,tab +1963,3659832,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",111,0,"",shellscript,selection_mouse +1964,3660336,"TERMINAL",0,0,"930",,terminal_output +1965,3660824,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",111,0,"d",shellscript,content +1966,3660825,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",112,0,"",shellscript,selection_keyboard 
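The terminal rows above capture the submit-and-monitor loop used throughout the session: activate the venv, check idle nodes with the site's sinfo_t_idle helper, submit with sbatch, then run the user's "queue" alias (the captured screen shows it wraps "watch -n1 squeue --me") until the job flips from PD to R. A hedged sketch of the same loop, using sbatch's --parsable flag to capture the job id; the script path is the one from the recording:

job_id=$(sbatch --parsable slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch)
echo "submitted batch job $job_id"
watch -n1 "squeue --me"   # quit once the job shows ST=R
# if a duplicate submission slips through, cancel it by id, as the session later does:
# scancel "$job_id"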
+1967,3660931,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",112,0,"e",shellscript,content +1968,3660932,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",113,0,"",shellscript,selection_keyboard +1969,3661076,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",113,0,"v",shellscript,content +1970,3661077,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",114,0,"",shellscript,selection_keyboard +1971,3661357,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",114,0,"_",shellscript,content +1972,3661358,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",115,0,"",shellscript,selection_keyboard +1973,3661376,"TERMINAL",0,0,"201",,terminal_output +1974,3661627,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",114,0,"",shellscript,selection_command +1975,3662387,"TERMINAL",0,0,"12",,terminal_output +1976,3662794,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +1977,3663298,"TERMINAL",0,0,"queue",,terminal_output +1978,3663575,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",,terminal_output +1979,3664106,"TERMINAL",0,0,"\r\n[?2004l\rSubmitted batch job 3370788\r\n]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +1980,3664749,"TERMINAL",0,0,"[?25lq[?25h",,terminal_output +1981,3664964,"TERMINAL",0,0,"[?25le[?25h[?25lu[?25h",,terminal_output +1982,3665963,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1983,3666030,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1984,3666112,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1985,3666280,"TERMINAL",0,0,"[?25le[?25h\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0402.localdomain: Wed Jul 23 15:48:25 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3370787 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3370788 dev_accel train_dy tum_cte0 PD\t0:00\t 1 (QOSMaxJobsPerUserLimit)3370715 dev_accel interact tum_cte0 R52:36\t 1 hkn0402",,terminal_output +1986,3667288,"TERMINAL",0,0,"67",,terminal_output +1987,3668304,"TERMINAL",0,0,"78",,terminal_output +1988,3669322,"TERMINAL",0,0,"89",,terminal_output +1989,3670407,"TERMINAL",0,0,"940",,terminal_output +1990,3671353,"TERMINAL",0,0,"301",,terminal_output +1991,3672449,"TERMINAL",0,0,"12",,terminal_output +1992,3673474,"TERMINAL",0,0,"23",,terminal_output +1993,3674500,"TERMINAL",0,0,"34",,terminal_output +1994,3675425,"TERMINAL",0,0,"45",,terminal_output +1995,3676452,"TERMINAL",0,0,"56",,terminal_output +1996,3677470,"TERMINAL",0,0,"67",,terminal_output +1997,3678498,"TERMINAL",0,0,"78",,terminal_output +1998,3679514,"TERMINAL",0,0,"89",,terminal_output +1999,3680539,"TERMINAL",0,0,"950",,terminal_output +2000,3681522,"TERMINAL",0,0,"401",,terminal_output +2001,3682192,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0402 jafar]$ ",,terminal_output +2002,3682581,"TERMINAL",0,0,"[?2004l\r\r\nexit\r\nsalloc: Relinquishing job allocation 3370715\r\nsalloc: Job allocation 3370715 has been revoked.\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar",,terminal_output +2003,3687321,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command +2004,3688021,"TERMINAL",0,0,"queue",,terminal_command +2005,3688078,"TERMINAL",0,0,"]633;E;2025-07-23 
15:48:47 queue;469e5d18-6e08-4909-a55e-e2644c9abc02]633;C",,terminal_output +2006,3688142,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Wed Jul 23 15:48:47 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3370787 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3370715 dev_accel interact tum_cte0 CG52:52\t 1 hkn04023370788 dev_accel train_dy tum_cte0 PD\t0:00\t 1 (QOSMaxJobsPerUserLimit)",,terminal_output +2007,3689157,"TERMINAL",0,0,"8",,terminal_output +2008,3690207,"TERMINAL",0,0,"9 Rhkn0403",,terminal_output +2009,3691250,"TERMINAL",0,0,"501",,terminal_output +2010,3692317,"TERMINAL",0,0,"12",,terminal_output +2011,3693344,"TERMINAL",0,0,"\r288train_dy R 0:033",,terminal_output +2012,3694362,"TERMINAL",0,0,"34",,terminal_output +2013,3695492,"TERMINAL",0,0,"45",,terminal_output +2014,3696513,"TERMINAL",0,0,"56",,terminal_output +2015,3697333,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +2016,3699793,"TERMINAL",0,0,"scancel 3370787",,terminal_command +2017,3699820,"TERMINAL",0,0,"]633;E;2025-07-23 15:48:59 scancel 3370787;469e5d18-6e08-4909-a55e-e2644c9abc02]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar",,terminal_output +2018,3701835,"TERMINAL",0,0,"queue",,terminal_command +2019,3701875,"TERMINAL",0,0,"]633;E;2025-07-23 15:49:01 queue;469e5d18-6e08-4909-a55e-e2644c9abc02]633;C",,terminal_output +2020,3701940,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Wed Jul 23 15:49:01 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3370788 dev_accel train_dy tum_cte0 R\t0:12\t 1 hkn0403",,terminal_output +2021,3702948,"TERMINAL",0,0,"23",,terminal_output +2022,3704001,"TERMINAL",0,0,"34",,terminal_output +2023,3705225,"TERMINAL",0,0,"45",,terminal_output +2024,3706146,"TERMINAL",0,0,"56",,terminal_output +2025,3707381,"TERMINAL",0,0,"67",,terminal_output +2026,3708371,"TERMINAL",0,0,"78",,terminal_output +2027,3709424,"TERMINAL",0,0,"89",,terminal_output +2028,3710645,"TERMINAL",0,0,"920",,terminal_output +2029,3711671,"TERMINAL",0,0,"101",,terminal_output +2030,3712653,"TERMINAL",0,0,"12",,terminal_output +2031,3713579,"TERMINAL",0,0,"23",,terminal_output +2032,3714610,"TERMINAL",0,0,"34",,terminal_output +2033,3715663,"TERMINAL",0,0,"45",,terminal_output +2034,3716795,"TERMINAL",0,0,"57",,terminal_output +2035,3716878,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",0,0,"",shellscript,tab +2036,3716879,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",550,0,"",shellscript,selection_mouse +2037,3717729,"TERMINAL",0,0,"78",,terminal_output +2038,3718766,"TERMINAL",0,0,"89",,terminal_output +2039,3719203,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",549,0,"",shellscript,selection_mouse +2040,3719865,"TERMINAL",0,0,"930",,terminal_output +2041,3720888,"TERMINAL",0,0,"201",,terminal_output +2042,3721906,"TERMINAL",0,0,"12",,terminal_output +2043,3722947,"TERMINAL",0,0,"23",,terminal_output +2044,3724062,"TERMINAL",0,0,"34",,terminal_output +2045,3725033,"TERMINAL",0,0,"45",,terminal_output +2046,3726114,"TERMINAL",0,0,"56",,terminal_output +2047,3727131,"TERMINAL",0,0,"67",,terminal_output +2048,3728169,"TERMINAL",0,0,"78",,terminal_output +2049,3729190,"TERMINAL",0,0,"89",,terminal_output +2050,3730221,"TERMINAL",0,0,"940",,terminal_output +2051,3731435,"TERMINAL",0,0,"301",,terminal_output 
+2052,3732457,"TERMINAL",0,0,"12",,terminal_output +2053,3733481,"TERMINAL",0,0,"23",,terminal_output +2054,3734504,"TERMINAL",0,0,"34",,terminal_output +2055,3735530,"TERMINAL",0,0,"45",,terminal_output +2056,3736553,"TERMINAL",0,0,"56",,terminal_output +2057,3737680,"TERMINAL",0,0,"67",,terminal_output +2058,3738689,"TERMINAL",0,0,"78",,terminal_output +2059,3739732,"TERMINAL",0,0,"89",,terminal_output +2060,3740368,"TERMINAL",0,0,"bash",,terminal_focus +2061,3740751,"TERMINAL",0,0,"951",,terminal_output +2062,3741777,"TERMINAL",0,0,"412",,terminal_output +2063,3742800,"TERMINAL",0,0,"23",,terminal_output +2064,3743320,"TERMINAL",0,0,"watch",,terminal_focus +2065,3743853,"TERMINAL",0,0,"34",,terminal_output +2066,3744948,"TERMINAL",0,0,"45",,terminal_output +2067,3745212,"TERMINAL",0,0,"bash",,terminal_focus +2068,3745978,"TERMINAL",0,0,"56",,terminal_output +2069,3746973,"TERMINAL",0,0,"67",,terminal_output +2070,3747547,"TERMINAL",0,0,"cd $ws_dir",,terminal_command +2071,3747844,"TERMINAL",0,0,"ls",,terminal_command +2072,3747894,"TERMINAL",0,0,"]633;E;2025-07-23 15:49:47 ls;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C",,terminal_output +2073,3748022,"TERMINAL",0,0,"78",,terminal_output +2074,3748118,"TERMINAL",0,0,"checkpoints count_items.sh data data_new huggingface logs possibly_corrupt_files_in_this_workspace.txt scripts\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared]633;D;0",,terminal_output +2075,3749066,"TERMINAL",0,0,"89",,terminal_output +2076,3749310,"TERMINAL",0,0,"cd checkpoints/",,terminal_command +2077,3749542,"TERMINAL",0,0,"ls",,terminal_command +2078,3749579,"TERMINAL",0,0,"]633;E;2025-07-23 15:49:48 ls;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C",,terminal_output +2079,3750171,"TERMINAL",0,0,"0000 3290392 3292329 3292337 3296540 3297577 3297727 3299258 3300672 3307618 3311672 3316022 interactive train_dynamics_lr_schedule_const train_tokenizer_minecraft_overfit_sample\r\n3290283 3290439 3292330 3292338 3296571 3297578 3299016 3299259 3301025 3307619 3313562 big-runs lam train_dynamics_lr_schedule_cos wrap\r\n3290284 3290440 3292331 3292339 3296573 3297582 3299062 3299272 3301026 3309662 3313563 causal lam-1-action train_dynamics_lr_schedule_wsd\r\n3290295 3291405 3292332 3294600 3296574 3297586 3299063 3299579 3301027 3309663 3313564 checkpoints_alfred lam_ckpt_dir train_dyn_new_arch-bugfixed-spatial-shift\r\n3290296 3292213 3292333 3294601 3296575 3297606 3299065 3300233 3301029 3309699 3313565 coinrun lam_main_test train_dyn_new_arch-bugfixed-temporal-shift\r\n3290366 3292221 3292334 3294602 3297569 3297671 3299066 3300290 3301030 3310436 3313570 debug maskgit-maskprob-fix train_dyn_yolorun_new_arch\r\n3290367 3292258 3292335 3294603 3297575 3297693 3299068 3300658 3301031 3310437 3313571 dyn tokenizer train_lam_minecraft_overfit_sample\r\n3290391 3292328 3292336 3296502 3297576 3297706 3299069 3300663 3306801 3311671 3313572 dynamics_ckpt_dir tokenizer_ckpt_dir train_tokenizer_batch_size_scaling_16_node\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints]633;D;0",,terminal_output +2080,3750182,"TERMINAL",0,0,"91:00",,terminal_output +2081,3751160,"TERMINAL",0,0,"501",,terminal_output +2082,3752225,"TERMINAL",0,0,"12",,terminal_output +2083,3753251,"TERMINAL",0,0,"23",,terminal_output +2084,3754301,"TERMINAL",0,0,"34",,terminal_output +2085,3754457,"TERMINAL",0,0,"cd maskgit-maskprob-fix/",,terminal_command +2086,3754796,"TERMINAL",0,0,"ls",,terminal_command 
+2087,3755353,"TERMINAL",0,0,"45",,terminal_output +2088,3756407,"TERMINAL",0,0,"56",,terminal_output +2089,3757443,"TERMINAL",0,0,"67",,terminal_output +2090,3757481,"TERMINAL",0,0,"cd train_dynamics_maskprob_fix_8_node/3370788/",,terminal_command +2091,3757877,"TERMINAL",0,0,"ls",,terminal_command +2092,3758491,"TERMINAL",0,0,"78",,terminal_output +2093,3759651,"TERMINAL",0,0,"89",,terminal_output +2094,3760567,"TERMINAL",0,0,"910",,terminal_output +2095,3761012,"TERMINAL",0,0,"ls",,terminal_command +2096,3761044,"TERMINAL",0,0,"]633;E;2025-07-23 15:50:00 ls;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3370788]633;D;0",,terminal_output +2097,3761595,"TERMINAL",0,0,"50:001",,terminal_output +2098,3762674,"TERMINAL",0,0,"12",,terminal_output +2099,3763681,"TERMINAL",0,0,"23",,terminal_output +2100,3764728,"TERMINAL",0,0,"35",,terminal_output +2101,3765798,"TERMINAL",0,0,"56",,terminal_output +2102,3766040,"TERMINAL",0,0,"watch -n1 ls",,terminal_command +2103,3766095,"TERMINAL",0,0,"]633;E;2025-07-23 15:50:05 watch -n1 ls;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C[?1049h(B[?7hEvery 1.0s: lshkn1990.localdomain: Wed Jul 23 15:50:05 2025",,terminal_output +2104,3766864,"TERMINAL",0,0,"67",,terminal_output +2105,3767171,"TERMINAL",0,0,"6",,terminal_output +2106,3767839,"TERMINAL",0,0,"78",,terminal_output +2107,3768133,"TERMINAL",0,0,"7",,terminal_output +2108,3768877,"TERMINAL",0,0,"89",,terminal_output +2109,3769221,"TERMINAL",0,0,"8",,terminal_output +2110,3769942,"TERMINAL",0,0,"920",,terminal_output +2111,3770131,"TERMINAL",0,0,"9",,terminal_output +2112,3770954,"TERMINAL",0,0,"101",,terminal_output +2113,3771147,"TERMINAL",0,0,"10",,terminal_output +2114,3771992,"TERMINAL",0,0,"12",,terminal_output +2115,3772166,"TERMINAL",0,0,"1",,terminal_output +2116,3773030,"TERMINAL",0,0,"23",,terminal_output +2117,3773181,"TERMINAL",0,0,"2",,terminal_output +2118,3774136,"TERMINAL",0,0,"34",,terminal_output +2119,3774205,"TERMINAL",0,0,"3",,terminal_output +2120,3775160,"TERMINAL",0,0,"45",,terminal_output +2121,3775211,"TERMINAL",0,0,"4",,terminal_output +2122,3776145,"TERMINAL",0,0,"56",,terminal_output +2123,3776253,"TERMINAL",0,0,"5",,terminal_output +2124,3777219,"TERMINAL",0,0,"67",,terminal_output +2125,3777244,"TERMINAL",0,0,"6",,terminal_output +2126,3780169,"TERMINAL",0,0,"7889",,terminal_output +2127,3780169,"TERMINAL",0,0,"78",,terminal_output +2128,3780290,"TERMINAL",0,0,"9",,terminal_output +2129,3780424,"TERMINAL",0,0,"930",,terminal_output +2130,3781305,"TERMINAL",0,0,"20",,terminal_output +2131,3781366,"TERMINAL",0,0,"201",,terminal_output +2132,3782344,"TERMINAL",0,0,"1",,terminal_output +2133,3782400,"TERMINAL",0,0,"12",,terminal_output +2134,3783342,"TERMINAL",0,0,"2",,terminal_output +2135,3783451,"TERMINAL",0,0,"23",,terminal_output +2136,3784356,"TERMINAL",0,0,"3",,terminal_output +2137,3784485,"TERMINAL",0,0,"34",,terminal_output +2138,3785400,"TERMINAL",0,0,"4",,terminal_output +2139,3785523,"TERMINAL",0,0,"45",,terminal_output +2140,3786390,"TERMINAL",0,0,"5",,terminal_output +2141,3786562,"TERMINAL",0,0,"56",,terminal_output +2142,3787446,"TERMINAL",0,0,"6",,terminal_output +2143,3787602,"TERMINAL",0,0,"67",,terminal_output +2144,3788421,"TERMINAL",0,0,"7",,terminal_output +2145,3788673,"TERMINAL",0,0,"78",,terminal_output +2146,3789444,"TERMINAL",0,0,"8",,terminal_output +2147,3789700,"TERMINAL",0,0,"840",,terminal_output 
+2148,3790518,"TERMINAL",0,0,"9",,terminal_output +2149,3790729,"TERMINAL",0,0,"301",,terminal_output +2150,3791544,"TERMINAL",0,0,"30",,terminal_output +2151,3791851,"TERMINAL",0,0,"12",,terminal_output +2152,3792566,"TERMINAL",0,0,"1",,terminal_output +2153,3792874,"TERMINAL",0,0,"23",,terminal_output +2154,3793627,"TERMINAL",0,0,"2",,terminal_output +2155,3793846,"TERMINAL",0,0,"34",,terminal_output +2156,3794612,"TERMINAL",0,0,"3",,terminal_output +2157,3794886,"TERMINAL",0,0,"45",,terminal_output +2158,3795585,"TERMINAL",0,0,"4",,terminal_output +2159,3795946,"TERMINAL",0,0,"56",,terminal_output +2160,3796661,"TERMINAL",0,0,"5",,terminal_output +2161,3796961,"TERMINAL",0,0,"67",,terminal_output +2162,3797568,"TERMINAL",0,0,"6",,terminal_output +2163,3798003,"TERMINAL",0,0,"78",,terminal_output +2164,3798608,"TERMINAL",0,0,"7",,terminal_output +2165,3799046,"TERMINAL",0,0,"89",,terminal_output +2166,3799602,"TERMINAL",0,0,"8",,terminal_output +2167,3800145,"TERMINAL",0,0,"950",,terminal_output +2168,3800660,"TERMINAL",0,0,"9",,terminal_output +2169,3801170,"TERMINAL",0,0,"401",,terminal_output +2170,3801635,"TERMINAL",0,0,"40",,terminal_output +2171,3802194,"TERMINAL",0,0,"12",,terminal_output +2172,3802705,"TERMINAL",0,0,"1",,terminal_output +2173,3803217,"TERMINAL",0,0,"23",,terminal_output +2174,3803689,"TERMINAL",0,0,"2",,terminal_output +2175,3804246,"TERMINAL",0,0,"34",,terminal_output +2176,3804756,"TERMINAL",0,0,"3",,terminal_output +2177,3805286,"TERMINAL",0,0,"45",,terminal_output +2178,3805700,"TERMINAL",0,0,"5",,terminal_output +2179,3806337,"TERMINAL",0,0,"56",,terminal_output +2180,3806721,"TERMINAL",0,0,"6",,terminal_output +2181,3807417,"TERMINAL",0,0,"67",,terminal_output +2182,3807733,"TERMINAL",0,0,"7",,terminal_output +2183,3808440,"TERMINAL",0,0,"78",,terminal_output +2184,3808752,"TERMINAL",0,0,"8",,terminal_output +2185,3809564,"TERMINAL",0,0,"89",,terminal_output +2186,3809772,"TERMINAL",0,0,"9",,terminal_output +2187,3810496,"TERMINAL",0,0,"92:00",,terminal_output +2188,3810781,"TERMINAL",0,0,"50",,terminal_output +2189,3811537,"TERMINAL",0,0,"501",,terminal_output +2190,3811796,"TERMINAL",0,0,"1",,terminal_output +2191,3812580,"TERMINAL",0,0,"12",,terminal_output +2192,3812812,"TERMINAL",0,0,"2",,terminal_output +2193,3813673,"TERMINAL",0,0,"23",,terminal_output +2194,3813829,"TERMINAL",0,0,"3",,terminal_output +2195,3814685,"TERMINAL",0,0,"34",,terminal_output +2196,3814849,"TERMINAL",0,0,"4",,terminal_output +2197,3815697,"TERMINAL",0,0,"46",,terminal_output +2198,3815865,"TERMINAL",0,0,"5",,terminal_output +2199,3816836,"TERMINAL",0,0,"67",,terminal_output +2200,3816888,"TERMINAL",0,0,"6",,terminal_output +2201,3817860,"TERMINAL",0,0,"78",,terminal_output +2202,3817913,"TERMINAL",0,0,"7",,terminal_output +2203,3818913,"TERMINAL",0,0,"89",,terminal_output +2204,3818914,"TERMINAL",0,0,"8",,terminal_output +2205,3819875,"TERMINAL",0,0,"910",,terminal_output +2206,3819938,"TERMINAL",0,0,"9",,terminal_output +2207,3821035,"TERMINAL",0,0,"1:001",,terminal_output +2208,3821036,"TERMINAL",0,0,"1:00",,terminal_output +2209,3821961,"TERMINAL",0,0,"1",,terminal_output +2210,3821976,"TERMINAL",0,0,"12",,terminal_output +2211,3822979,"TERMINAL",0,0,"2",,terminal_output +2212,3823046,"TERMINAL",0,0,"23",,terminal_output +2213,3823995,"TERMINAL",0,0,"3",,terminal_output +2214,3824060,"TERMINAL",0,0,"34",,terminal_output +2215,3825033,"TERMINAL",0,0,"4",,terminal_output +2216,3825086,"TERMINAL",0,0,"45",,terminal_output +2217,3826051,"TERMINAL",0,0,"5",,terminal_output 
+2218,3826159,"TERMINAL",0,0,"56",,terminal_output +2219,3827046,"TERMINAL",0,0,"6",,terminal_output +2220,3827190,"TERMINAL",0,0,"67",,terminal_output +2221,3828064,"TERMINAL",0,0,"7",,terminal_output +2222,3828627,"TERMINAL",0,0,"78",,terminal_output +2223,3829137,"TERMINAL",0,0,"8",,terminal_output +2224,3829678,"TERMINAL",0,0,"89",,terminal_output +2225,3830251,"TERMINAL",0,0,"9",,terminal_output +2226,3830762,"TERMINAL",0,0,"921",,terminal_output +2227,3831114,"TERMINAL",0,0,"10",,terminal_output +2228,3831789,"TERMINAL",0,0,"112",,terminal_output +2229,3832199,"TERMINAL",0,0,"1",,terminal_output +2230,3833023,"TERMINAL",0,0,"23",,terminal_output +2231,3833262,"TERMINAL",0,0,"2",,terminal_output +2232,3833865,"TERMINAL",0,0,"34",,terminal_output +2233,3834252,"TERMINAL",0,0,"3",,terminal_output +2234,3834963,"TERMINAL",0,0,"45",,terminal_output +2235,3835185,"TERMINAL",0,0,"4",,terminal_output +2236,3835984,"TERMINAL",0,0,"56",,terminal_output +2237,3836196,"TERMINAL",0,0,"5",,terminal_output +2238,3837003,"TERMINAL",0,0,"67",,terminal_output +2239,3837217,"TERMINAL",0,0,"6",,terminal_output +2240,3838048,"TERMINAL",0,0,"78",,terminal_output +2241,3838228,"TERMINAL",0,0,"7",,terminal_output +2242,3839164,"TERMINAL",0,0,"89",,terminal_output +2243,3839293,"TERMINAL",0,0,"8",,terminal_output +2244,3840245,"TERMINAL",0,0,"930",,terminal_output +2245,3840261,"TERMINAL",0,0,"9",,terminal_output +2246,3841280,"TERMINAL",0,0,"20",,terminal_output +2247,3841303,"TERMINAL",0,0,"201",,terminal_output +2248,3842295,"TERMINAL",0,0,"1",,terminal_output +2249,3842359,"TERMINAL",0,0,"12",,terminal_output +2250,3843312,"TERMINAL",0,0,"2",,terminal_output +2251,3843377,"TERMINAL",0,0,"23",,terminal_output +2252,3844367,"TERMINAL",0,0,"3",,terminal_output +2253,3844473,"TERMINAL",0,0,"34",,terminal_output +2254,3845363,"TERMINAL",0,0,"4",,terminal_output +2255,3845488,"TERMINAL",0,0,"45",,terminal_output +2256,3846377,"TERMINAL",0,0,"5",,terminal_output +2257,3846532,"TERMINAL",0,0,"56",,terminal_output +2258,3847382,"TERMINAL",0,0,"6",,terminal_output +2259,3847567,"TERMINAL",0,0,"67",,terminal_output +2260,3848396,"TERMINAL",0,0,"7",,terminal_output +2261,3848683,"TERMINAL",0,0,"78",,terminal_output +2262,3849456,"TERMINAL",0,0,"8",,terminal_output +2263,3849656,"TERMINAL",0,0,"89",,terminal_output +2264,3850526,"TERMINAL",0,0,"9",,terminal_output +2265,3850706,"TERMINAL",0,0,"941",,terminal_output +2266,3851515,"TERMINAL",0,0,"30",,terminal_output +2267,3851779,"TERMINAL",0,0,"312",,terminal_output +2268,3852575,"TERMINAL",0,0,"1",,terminal_output +2269,3852882,"TERMINAL",0,0,"23",,terminal_output +2270,3853520,"TERMINAL",0,0,"2",,terminal_output +2271,3853830,"TERMINAL",0,0,"34",,terminal_output +2272,3854620,"TERMINAL",0,0,"3",,terminal_output +2273,3854923,"TERMINAL",0,0,"45",,terminal_output +2274,3855556,"TERMINAL",0,0,"4",,terminal_output +2275,3855953,"TERMINAL",0,0,"56",,terminal_output +2276,3856582,"TERMINAL",0,0,"5",,terminal_output +2277,3856974,"TERMINAL",0,0,"67",,terminal_output +2278,3857798,"TERMINAL",0,0,"6",,terminal_output +2279,3858010,"TERMINAL",0,0,"78",,terminal_output +2280,3858745,"TERMINAL",0,0,"8",,terminal_output +2281,3859128,"TERMINAL",0,0,"89",,terminal_output +2282,3859846,"TERMINAL",0,0,"9",,terminal_output +2283,3860155,"TERMINAL",0,0,"950",,terminal_output +2284,3860871,"TERMINAL",0,0,"40",,terminal_output +2285,3861133,"TERMINAL",0,0,"401",,terminal_output +2286,3861794,"TERMINAL",0,0,"1",,terminal_output +2287,3862169,"TERMINAL",0,0,"12",,terminal_output 
+[elided: ~1,450 terminal_output rows (sequences 2288-3749) of per-second `watch` timer ticks — single digits and mm:ss fragments with no standalone meaning; only the informative rows from this span are kept below, showing the terminal focus and the Orbax temporary checkpoint directories appearing at 100-step intervals]
+2501,3972229,"TERMINAL",0,0,"watch",,terminal_focus
+2536,3990047,"TERMINAL",0,0,"9\r000100.orbax-checkpoint-tmp-0",,terminal_output
+2706,4077502,"TERMINAL",0,0,"6\r000200.orbax-checkpoint-tmp-3",,terminal_output
+2876,4164914,"TERMINAL",0,0,"4\r000300.orbax-checkpoint-tmp-6",,terminal_output
+3043,4251286,"TERMINAL",0,0,"10\r000400.orbax-checkpoint-tmp-9",,terminal_output
+3213,4338687,"TERMINAL",0,0,"7\r000500.orbax-checkpoint-tmp-12",,terminal_output
+3381,4425018,"TERMINAL",0,0,"4\r000600.orbax-checkpoint-tmp-15",,terminal_output
+3548,4511371,"TERMINAL",0,0,"30\r000700.orbax-checkpoint-tmp-18",,terminal_output
+3750,4615174,"TERMINAL",0,0,"4",,terminal_output +3751,4616081,"TERMINAL",0,0,"5",,terminal_output +3752,4616213,"TERMINAL",0,0,"5",,terminal_output +3753,4617103,"TERMINAL",0,0,"6",,terminal_output +3754,4617265,"TERMINAL",0,0,"6",,terminal_output +3755,4618106,"TERMINAL",0,0,"7",,terminal_output +3756,4618309,"TERMINAL",0,0,"7",,terminal_output +3757,4619144,"TERMINAL",0,0,"8",,terminal_output +3758,4619358,"TERMINAL",0,0,"8",,terminal_output +3759,4620149,"TERMINAL",0,0,"9",,terminal_output +3760,4620490,"TERMINAL",0,0,"9",,terminal_output +3761,4621164,"TERMINAL",0,0,"20",,terminal_output +3762,4621502,"TERMINAL",0,0,"20",,terminal_output +3763,4622173,"TERMINAL",0,0,"1",,terminal_output +3764,4622494,"TERMINAL",0,0,"1",,terminal_output +3765,4623189,"TERMINAL",0,0,"2",,terminal_output +3766,4623546,"TERMINAL",0,0,"2",,terminal_output +3767,4624268,"TERMINAL",0,0,"3",,terminal_output +3768,4624779,"TERMINAL",0,0,"3",,terminal_output +3769,4625290,"TERMINAL",0,0,"4",,terminal_output +3770,4625639,"TERMINAL",0,0,"4",,terminal_output +3771,4626241,"TERMINAL",0,0,"5",,terminal_output +3772,4626727,"TERMINAL",0,0,"5",,terminal_output +3773,4627340,"TERMINAL",0,0,"6",,terminal_output +3774,4627725,"TERMINAL",0,0,"7",,terminal_output +3775,4628275,"TERMINAL",0,0,"7",,terminal_output +3776,4628772,"TERMINAL",0,0,"8",,terminal_output +3777,4629291,"TERMINAL",0,0,"8",,terminal_output +3778,4629811,"TERMINAL",0,0,"9",,terminal_output +3779,4630313,"TERMINAL",0,0,"9",,terminal_output +3780,4630856,"TERMINAL",0,0,"30",,terminal_output +3781,4631331,"TERMINAL",0,0,"30",,terminal_output +3782,4631903,"TERMINAL",0,0,"1",,terminal_output +3783,4632342,"TERMINAL",0,0,"1",,terminal_output +3784,4632973,"TERMINAL",0,0,"2",,terminal_output +3785,4633361,"TERMINAL",0,0,"2",,terminal_output +3786,4634109,"TERMINAL",0,0,"3",,terminal_output +3787,4634404,"TERMINAL",0,0,"3",,terminal_output +3788,4635045,"TERMINAL",0,0,"4",,terminal_output +3789,4635385,"TERMINAL",0,0,"4",,terminal_output +3790,4636147,"TERMINAL",0,0,"5",,terminal_output +3791,4636409,"TERMINAL",0,0,"5",,terminal_output +3792,4637168,"TERMINAL",0,0,"6",,terminal_output +3793,4637423,"TERMINAL",0,0,"6",,terminal_output +3794,4638193,"TERMINAL",0,0,"7",,terminal_output +3795,4638438,"TERMINAL",0,0,"7",,terminal_output +3796,4639224,"TERMINAL",0,0,"8",,terminal_output +3797,4639460,"TERMINAL",0,0,"8",,terminal_output +3798,4640275,"TERMINAL",0,0,"9",,terminal_output +3799,4640476,"TERMINAL",0,0,"9",,terminal_output +3800,4641367,"TERMINAL",0,0,"40",,terminal_output +3801,4641490,"TERMINAL",0,0,"40",,terminal_output +3802,4642373,"TERMINAL",0,0,"1",,terminal_output +3803,4642510,"TERMINAL",0,0,"1",,terminal_output +3804,4643525,"TERMINAL",0,0,"2",,terminal_output +3805,4643547,"TERMINAL",0,0,"2",,terminal_output +3806,4644468,"TERMINAL",0,0,"3",,terminal_output +3807,4644578,"TERMINAL",0,0,"3",,terminal_output +3808,4645519,"TERMINAL",0,0,"4",,terminal_output +3809,4645583,"TERMINAL",0,0,"4",,terminal_output +3810,4646594,"TERMINAL",0,0,"5",,terminal_output +3811,4646608,"TERMINAL",0,0,"5",,terminal_output +3812,4647615,"TERMINAL",0,0,"6",,terminal_output +3813,4647633,"TERMINAL",0,0,"6",,terminal_output +3814,4648611,"TERMINAL",0,0,"7",,terminal_output +3815,4648674,"TERMINAL",0,0,"7",,terminal_output +3816,4649663,"TERMINAL",0,0,"8",,terminal_output +3817,4649734,"TERMINAL",0,0,"8",,terminal_output +3818,4650794,"TERMINAL",0,0,"9",,terminal_output +3819,4650810,"TERMINAL",0,0,"50",,terminal_output 
+3820,4651712,"TERMINAL",0,0,"50",,terminal_output +3821,4651823,"TERMINAL",0,0,"1",,terminal_output +3822,4652739,"TERMINAL",0,0,"1",,terminal_output +3823,4652850,"TERMINAL",0,0,"2",,terminal_output +3824,4653717,"TERMINAL",0,0,"3",,terminal_output +3825,4653883,"TERMINAL",0,0,"3",,terminal_output +3826,4654784,"TERMINAL",0,0,"4",,terminal_output +3827,4654928,"TERMINAL",0,0,"4",,terminal_output +3828,4655735,"TERMINAL",0,0,"5",,terminal_output +3829,4656011,"TERMINAL",0,0,"5",,terminal_output +3830,4656833,"TERMINAL",0,0,"6",,terminal_output +3831,4657028,"TERMINAL",0,0,"6",,terminal_output +3832,4657770,"TERMINAL",0,0,"7",,terminal_output +3833,4658163,"TERMINAL",0,0,"7",,terminal_output +3834,4658784,"TERMINAL",0,0,"8",,terminal_output +3835,4659188,"TERMINAL",0,0,"8",,terminal_output +3836,4659905,"TERMINAL",0,0,"9",,terminal_output +3837,4660214,"TERMINAL",0,0,"9",,terminal_output +3838,4660823,"TERMINAL",0,0,"5:00",,terminal_output +3839,4661235,"TERMINAL",0,0,"5:00",,terminal_output +3840,4661854,"TERMINAL",0,0,"1",,terminal_output +3841,4662258,"TERMINAL",0,0,"1",,terminal_output +3842,4662880,"TERMINAL",0,0,"2",,terminal_output +3843,4663291,"TERMINAL",0,0,"2",,terminal_output +3844,4663903,"TERMINAL",0,0,"3",,terminal_output +3845,4664335,"TERMINAL",0,0,"3",,terminal_output +3846,4664880,"TERMINAL",0,0,"4",,terminal_output +3847,4665390,"TERMINAL",0,0,"4",,terminal_output +3848,4665950,"TERMINAL",0,0,"5",,terminal_output +3849,4666469,"TERMINAL",0,0,"5",,terminal_output +3850,4666917,"TERMINAL",0,0,"6",,terminal_output +3851,4667479,"TERMINAL",0,0,"6",,terminal_output +3852,4667927,"TERMINAL",0,0,"7",,terminal_output +3853,4668515,"TERMINAL",0,0,"7",,terminal_output +3854,4668946,"TERMINAL",0,0,"8",,terminal_output +3855,4669565,"TERMINAL",0,0,"8",,terminal_output +3856,4669960,"TERMINAL",0,0,"9",,terminal_output +3857,4670610,"TERMINAL",0,0,"9",,terminal_output +3858,4671083,"TERMINAL",0,0,"10",,terminal_output +3859,4671681,"TERMINAL",0,0,"10",,terminal_output +3860,4672090,"TERMINAL",0,0,"1",,terminal_output +3861,4672805,"TERMINAL",0,0,"1 R1hkn0403",,terminal_output +3862,4673009,"TERMINAL",0,0,"2",,terminal_output +3863,4673763,"TERMINAL",0,0,"32",,terminal_output +3864,4674034,"TERMINAL",0,0,"3",,terminal_output +3865,4674806,"TERMINAL",0,0,"43",,terminal_output +3866,4675059,"TERMINAL",0,0,"4",,terminal_output +3867,4675855,"TERMINAL",0,0,"54",,terminal_output +3868,4676064,"TERMINAL",0,0,"5",,terminal_output +3869,4676944,"TERMINAL",0,0,"65",,terminal_output +3870,4677091,"TERMINAL",0,0,"6",,terminal_output +3871,4677949,"TERMINAL",0,0,"76",,terminal_output +3872,4678094,"TERMINAL",0,0,"7",,terminal_output +3873,4679003,"TERMINAL",0,0,"87",,terminal_output +3874,4679108,"TERMINAL",0,0,"8",,terminal_output +3875,4680076,"TERMINAL",0,0,"98",,terminal_output +3876,4680130,"TERMINAL",0,0,"9",,terminal_output +3877,4681206,"TERMINAL",0,0,"209",,terminal_output +3878,4681207,"TERMINAL",0,0,"20",,terminal_output +3879,4682228,"TERMINAL",0,0,"1",,terminal_output +3880,4682229,"TERMINAL",0,0,"110",,terminal_output +3881,4683173,"TERMINAL",0,0,"2",,terminal_output +3882,4683239,"TERMINAL",0,0,"21",,terminal_output +3883,4684190,"TERMINAL",0,0,"3",,terminal_output +3884,4684256,"TERMINAL",0,0,"32",,terminal_output +3885,4685300,"TERMINAL",0,0,"4",,terminal_output +3886,4685314,"TERMINAL",0,0,"43",,terminal_output +3887,4686325,"TERMINAL",0,0,"5",,terminal_output +3888,4686342,"TERMINAL",0,0,"54",,terminal_output +3889,4687245,"TERMINAL",0,0,"6",,terminal_output 
+3890,4687388,"TERMINAL",0,0,"65",,terminal_output +3891,4688262,"TERMINAL",0,0,"7",,terminal_output +3892,4688435,"TERMINAL",0,0,"76",,terminal_output +3893,4689296,"TERMINAL",0,0,"8",,terminal_output +3894,4689470,"TERMINAL",0,0,"87",,terminal_output +3895,4690315,"TERMINAL",0,0,"9",,terminal_output +3896,4690510,"TERMINAL",0,0,"98",,terminal_output +3897,4691339,"TERMINAL",0,0,"30",,terminal_output +3898,4691562,"TERMINAL",0,0,"309",,terminal_output +3899,4692369,"TERMINAL",0,0,"1",,terminal_output +3900,4692670,"TERMINAL",0,0,"120",,terminal_output +3901,4693361,"TERMINAL",0,0,"2",,terminal_output +3902,4693681,"TERMINAL",0,0,"21",,terminal_output +3903,4694433,"TERMINAL",0,0,"3",,terminal_output +3904,4694721,"TERMINAL",0,0,"33",,terminal_output +3905,4695435,"TERMINAL",0,0,"4",,terminal_output +3906,4695848,"TERMINAL",0,0,"54",,terminal_output +3907,4696458,"TERMINAL",0,0,"5",,terminal_output +3908,4696868,"TERMINAL",0,0,"65",,terminal_output +3909,4697434,"TERMINAL",0,0,"6",,terminal_output +3910,4697869,"TERMINAL",0,0,"76",,terminal_output +3911,4698506,"TERMINAL",0,0,"7",,terminal_output +3912,4698994,"TERMINAL",0,0,"87",,terminal_output +3913,4699557,"TERMINAL",0,0,"8",,terminal_output +3914,4700247,"TERMINAL",0,0,"98",,terminal_output +3915,4700534,"TERMINAL",0,0,"9",,terminal_output +3916,4701174,"TERMINAL",0,0,"409",,terminal_output +3917,4701599,"TERMINAL",0,0,"40",,terminal_output +3918,4702506,"TERMINAL",0,0,"130",,terminal_output +3919,4702525,"TERMINAL",0,0,"1",,terminal_output +3920,4703118,"TERMINAL",0,0,"21",,terminal_output +3921,4703697,"TERMINAL",0,0,"2",,terminal_output +3922,4704141,"TERMINAL",0,0,"32",,terminal_output +3923,4704761,"TERMINAL",0,0,"3",,terminal_output +3924,4705286,"TERMINAL",0,0,"43",,terminal_output +3925,4705879,"TERMINAL",0,0,"4",,terminal_output +3926,4706394,"TERMINAL",0,0,"54",,terminal_output +3927,4706610,"TERMINAL",0,0,"5",,terminal_output +3928,4707518,"TERMINAL",0,0,"65",,terminal_output +3929,4707681,"TERMINAL",0,0,"6",,terminal_output +3930,4708446,"TERMINAL",0,0,"76",,terminal_output +3931,4708700,"TERMINAL",0,0,"7",,terminal_output +3932,4709464,"TERMINAL",0,0,"87",,terminal_output +3933,4709779,"TERMINAL",0,0,"8",,terminal_output +3934,4710489,"TERMINAL",0,0,"98",,terminal_output +3935,4711022,"TERMINAL",0,0,"9",,terminal_output +3936,4711415,"TERMINAL",0,0,"509",,terminal_output +3937,4711717,"TERMINAL",0,0,"50",,terminal_output +3938,4712444,"TERMINAL",0,0,"140",,terminal_output +3939,4712757,"TERMINAL",0,0,"1",,terminal_output +3940,4713499,"TERMINAL",0,0,"21",,terminal_output +3941,4713702,"TERMINAL",0,0,"3",,terminal_output +3942,4714526,"TERMINAL",0,0,"32",,terminal_output +3943,4714716,"TERMINAL",0,0,"4",,terminal_output +3944,4715569,"TERMINAL",0,0,"43",,terminal_output +3945,4715738,"TERMINAL",0,0,"5",,terminal_output +3946,4716636,"TERMINAL",0,0,"54",,terminal_output +3947,4716751,"TERMINAL",0,0,"6",,terminal_output +3948,4717667,"TERMINAL",0,0,"65",,terminal_output +3949,4717779,"TERMINAL",0,0,"7",,terminal_output +3950,4718703,"TERMINAL",0,0,"77",,terminal_output +3951,4718817,"TERMINAL",0,0,"8",,terminal_output +3952,4719741,"TERMINAL",0,0,"98",,terminal_output +3953,4719799,"TERMINAL",0,0,"9",,terminal_output +3954,4720787,"TERMINAL",0,0,"6:009",,terminal_output +3955,4720853,"TERMINAL",0,0,"6:00",,terminal_output +3956,4721830,"TERMINAL",0,0,"150",,terminal_output +3957,4721859,"TERMINAL",0,0,"1",,terminal_output +3958,4722886,"TERMINAL",0,0,"2",,terminal_output +3959,4722912,"TERMINAL",0,0,"21",,terminal_output 
+3960,4723946,"TERMINAL",0,0,"3",,terminal_output +3961,4723946,"TERMINAL",0,0,"32",,terminal_output +3962,4724894,"TERMINAL",0,0,"4",,terminal_output +3963,4724961,"TERMINAL",0,0,"43",,terminal_output +3964,4725954,"TERMINAL",0,0,"5",,terminal_output +3965,4726015,"TERMINAL",0,0,"54",,terminal_output +3966,4726929,"TERMINAL",0,0,"6",,terminal_output +3967,4727041,"TERMINAL",0,0,"65",,terminal_output +3968,4727962,"TERMINAL",0,0,"7",,terminal_output +3969,4728081,"TERMINAL",0,0,"76",,terminal_output +3970,4728982,"TERMINAL",0,0,"8",,terminal_output +3971,4729133,"TERMINAL",0,0,"87",,terminal_output +3972,4730064,"TERMINAL",0,0,"9",,terminal_output +3973,4730480,"TERMINAL",0,0,"98",,terminal_output +3974,4731077,"TERMINAL",0,0,"10",,terminal_output +3975,4731478,"TERMINAL",0,0,"109",,terminal_output +3976,4732016,"TERMINAL",0,0,"1",,terminal_output +3977,4732257,"TERMINAL",0,0,"11:00",,terminal_output +3978,4733124,"TERMINAL",0,0,"2",,terminal_output +3979,4733302,"TERMINAL",0,0,"21",,terminal_output +3980,4734050,"TERMINAL",0,0,"3",,terminal_output +3981,4734343,"TERMINAL",0,0,"32",,terminal_output +3982,4735170,"TERMINAL",0,0,"4",,terminal_output +3983,4735483,"TERMINAL",0,0,"43",,terminal_output +3984,4736084,"TERMINAL",0,0,"5",,terminal_output +3985,4736439,"TERMINAL",0,0,"54",,terminal_output +3986,4737101,"TERMINAL",0,0,"6",,terminal_output +3987,4737460,"TERMINAL",0,0,"65",,terminal_output +3988,4738140,"TERMINAL",0,0,"7",,terminal_output +3989,4738566,"TERMINAL",0,0,"76",,terminal_output +3990,4739134,"TERMINAL",0,0,"8",,terminal_output +3991,4739551,"TERMINAL",0,0,"87",,terminal_output +3992,4740207,"TERMINAL",0,0,"9",,terminal_output +3993,4740656,"TERMINAL",0,0,"98",,terminal_output +3994,4741223,"TERMINAL",0,0,"20",,terminal_output +3995,4741642,"TERMINAL",0,0,"209",,terminal_output +3996,4742187,"TERMINAL",0,0,"1",,terminal_output +3997,4742676,"TERMINAL",0,0,"110",,terminal_output +3998,4743241,"TERMINAL",0,0,"2",,terminal_output +3999,4743729,"TERMINAL",0,0,"22",,terminal_output +4000,4744217,"TERMINAL",0,0,"3",,terminal_output +4001,4744759,"TERMINAL",0,0,"43",,terminal_output +4002,4745307,"TERMINAL",0,0,"4",,terminal_output +4003,4745823,"TERMINAL",0,0,"54",,terminal_output +4004,4746334,"TERMINAL",0,0,"5",,terminal_output +4005,4746841,"TERMINAL",0,0,"65",,terminal_output +4006,4747357,"TERMINAL",0,0,"6",,terminal_output +4007,4747970,"TERMINAL",0,0,"76",,terminal_output +4008,4748383,"TERMINAL",0,0,"7",,terminal_output +4009,4748924,"TERMINAL",0,0,"87",,terminal_output +4010,4749320,"TERMINAL",0,0,"8",,terminal_output +4011,4750023,"TERMINAL",0,0,"98",,terminal_output +4012,4750326,"TERMINAL",0,0,"9",,terminal_output +4013,4751008,"TERMINAL",0,0,"309",,terminal_output +4014,4751354,"TERMINAL",0,0,"30",,terminal_output +4015,4752054,"TERMINAL",0,0,"120",,terminal_output +4016,4752377,"TERMINAL",0,0,"1",,terminal_output +4017,4753197,"TERMINAL",0,0,"21",,terminal_output +4018,4753380,"TERMINAL",0,0,"2",,terminal_output +4019,4754165,"TERMINAL",0,0,"32",,terminal_output +4020,4754387,"TERMINAL",0,0,"3",,terminal_output +4021,4755192,"TERMINAL",0,0,"43",,terminal_output +4022,4755401,"TERMINAL",0,0,"4",,terminal_output +4023,4756266,"TERMINAL",0,0,"54",,terminal_output +4024,4756417,"TERMINAL",0,0,"5",,terminal_output +4025,4757396,"TERMINAL",0,0,"65",,terminal_output +4026,4757443,"TERMINAL",0,0,"6",,terminal_output +4027,4758418,"TERMINAL",0,0,"76",,terminal_output +4028,4758483,"TERMINAL",0,0,"7",,terminal_output +4029,4759370,"TERMINAL",0,0,"87",,terminal_output 
+4030,4759487,"TERMINAL",0,0,"8",,terminal_output +4031,4760413,"TERMINAL",0,0,"98",,terminal_output +4032,4760496,"TERMINAL",0,0,"9",,terminal_output +4033,4761520,"TERMINAL",0,0,"409",,terminal_output +4034,4761520,"TERMINAL",0,0,"40",,terminal_output +4035,4762512,"TERMINAL",0,0,"130",,terminal_output +4036,4762512,"TERMINAL",0,0,"1",,terminal_output +4037,4763529,"TERMINAL",0,0,"2",,terminal_output +4038,4763545,"TERMINAL",0,0,"21",,terminal_output +4039,4764555,"TERMINAL",0,0,"3",,terminal_output +4040,4764575,"TERMINAL",0,0,"32",,terminal_output +4041,4765567,"TERMINAL",0,0,"4",,terminal_output +4042,4765624,"TERMINAL",0,0,"43",,terminal_output +4043,4766575,"TERMINAL",0,0,"5",,terminal_output +4044,4766686,"TERMINAL",0,0,"54",,terminal_output +4045,4767597,"TERMINAL",0,0,"6",,terminal_output +4046,4767715,"TERMINAL",0,0,"66",,terminal_output +4047,4768669,"TERMINAL",0,0,"7",,terminal_output +4048,4768783,"TERMINAL",0,0,"87",,terminal_output +4049,4769686,"TERMINAL",0,0,"8",,terminal_output +4050,4769798,"TERMINAL",0,0,"98",,terminal_output +4051,4770652,"TERMINAL",0,0,"9",,terminal_output +4052,4770821,"TERMINAL",0,0,"509",,terminal_output +4053,4771660,"TERMINAL",0,0,"50",,terminal_output +4054,4771876,"TERMINAL",0,0,"140",,terminal_output +4055,4772683,"TERMINAL",0,0,"1",,terminal_output +4056,4772921,"TERMINAL",0,0,"21",,terminal_output +4057,4773704,"TERMINAL",0,0,"3",,terminal_output +4058,4773964,"TERMINAL",0,0,"32",,terminal_output +4059,4774802,"TERMINAL",0,0,"4",,terminal_output +4060,4775004,"TERMINAL",0,0,"43",,terminal_output +4061,4775735,"TERMINAL",0,0,"5",,terminal_output +4062,4776127,"TERMINAL",0,0,"54",,terminal_output +4063,4776863,"TERMINAL",0,0,"6",,terminal_output +4064,4777152,"TERMINAL",0,0,"65",,terminal_output +4065,4777874,"TERMINAL",0,0,"7",,terminal_output +4066,4778180,"TERMINAL",0,0,"76",,terminal_output +4067,4778779,"TERMINAL",0,0,"8",,terminal_output +4068,4779231,"TERMINAL",0,0,"87",,terminal_output +4069,4779795,"TERMINAL",0,0,"9",,terminal_output +4070,4780249,"TERMINAL",0,0,"98",,terminal_output +4071,4780824,"TERMINAL",0,0,"7:00",,terminal_output +4072,4781317,"TERMINAL",0,0,"7:009",,terminal_output +4073,4781831,"TERMINAL",0,0,"1",,terminal_output +4074,4782342,"TERMINAL",0,0,"150",,terminal_output +4075,4782887,"TERMINAL",0,0,"2",,terminal_output +4076,4783408,"TERMINAL",0,0,"21",,terminal_output +4077,4783917,"TERMINAL",0,0,"3",,terminal_output +4078,4784435,"TERMINAL",0,0,"32",,terminal_output +4079,4784936,"TERMINAL",0,0,"4",,terminal_output +4080,4785553,"TERMINAL",0,0,"43",,terminal_output +4081,4785960,"TERMINAL",0,0,"5",,terminal_output +4082,4786528,"TERMINAL",0,0,"54",,terminal_output +4083,4786908,"TERMINAL",0,0,"6",,terminal_output +4084,4787562,"TERMINAL",0,0,"65",,terminal_output +4085,4788012,"TERMINAL",0,0,"7",,terminal_output +4086,4788615,"TERMINAL",0,0,"76",,terminal_output +4087,4788983,"TERMINAL",0,0,"8",,terminal_output +4088,4789655,"TERMINAL",0,0,"87",,terminal_output +4089,4789957,"TERMINAL",0,0,"9",,terminal_output +4090,4790700,"TERMINAL",0,0,"99",,terminal_output +4091,4790980,"TERMINAL",0,0,"10",,terminal_output +4092,4791802,"TERMINAL",0,0,"112:00",,terminal_output +4093,4791990,"TERMINAL",0,0,"1",,terminal_output +4094,4793031,"TERMINAL",0,0,"2",,terminal_output +4095,4793406,"TERMINAL",0,0,"21",,terminal_output +4096,4794023,"TERMINAL",0,0,"3",,terminal_output +4097,4794457,"TERMINAL",0,0,"32",,terminal_output +4098,4795079,"TERMINAL",0,0,"4",,terminal_output +4099,4795502,"TERMINAL",0,0,"43",,terminal_output 
+4100,4796105,"TERMINAL",0,0,"5",,terminal_output +4101,4796556,"TERMINAL",0,0,"54",,terminal_output +4102,4797071,"TERMINAL",0,0,"6",,terminal_output +4103,4797606,"TERMINAL",0,0,"65",,terminal_output +4104,4798085,"TERMINAL",0,0,"7",,terminal_output +4105,4798666,"TERMINAL",0,0,"76",,terminal_output +4106,4799100,"TERMINAL",0,0,"8",,terminal_output +4107,4799698,"TERMINAL",0,0,"88",,terminal_output +4108,4800116,"TERMINAL",0,0,"9",,terminal_output +4109,4800821,"TERMINAL",0,0,"209",,terminal_output +4110,4801135,"TERMINAL",0,0,"20",,terminal_output +4111,4801792,"TERMINAL",0,0,"110",,terminal_output +4112,4802239,"TERMINAL",0,0,"1",,terminal_output +4113,4802854,"TERMINAL",0,0,"21",,terminal_output +4114,4803175,"TERMINAL",0,0,"2",,terminal_output +4115,4803921,"TERMINAL",0,0,"32",,terminal_output +4116,4804181,"TERMINAL",0,0,"3",,terminal_output +4117,4805013,"TERMINAL",0,0,"43",,terminal_output +4118,4805195,"TERMINAL",0,0,"4",,terminal_output +4119,4806028,"TERMINAL",0,0,"54",,terminal_output +4120,4806213,"TERMINAL",0,0,"5",,terminal_output +4121,4807009,"TERMINAL",0,0,"65",,terminal_output +4122,4807259,"TERMINAL",0,0,"6",,terminal_output +4123,4808081,"TERMINAL",0,0,"76",,terminal_output +4124,4808244,"TERMINAL",0,0,"7",,terminal_output +4125,4809210,"TERMINAL",0,0,"87",,terminal_output +4126,4809257,"TERMINAL",0,0,"8",,terminal_output +4127,4810236,"TERMINAL",0,0,"98",,terminal_output +4128,4810283,"TERMINAL",0,0,"9",,terminal_output +4129,4811252,"TERMINAL",0,0,"309",,terminal_output +4130,4811306,"TERMINAL",0,0,"30",,terminal_output +4131,4812251,"TERMINAL",0,0,"120",,terminal_output +4132,4812325,"TERMINAL",0,0,"1",,terminal_output +4133,4813308,"TERMINAL",0,0,"21",,terminal_output +4134,4813324,"TERMINAL",0,0,"2",,terminal_output +4135,4814338,"TERMINAL",0,0,"32",,terminal_output +4136,4814355,"TERMINAL",0,0,"3",,terminal_output +4137,4815465,"TERMINAL",0,0,"4",,terminal_output +4138,4815466,"TERMINAL",0,0,"43",,terminal_output +4139,4816372,"TERMINAL",0,0,"5",,terminal_output +4140,4816431,"TERMINAL",0,0,"54",,terminal_output +4141,4817395,"TERMINAL",0,0,"6",,terminal_output +4142,4817467,"TERMINAL",0,0,"65",,terminal_output +4143,4818436,"TERMINAL",0,0,"7",,terminal_output +4144,4818491,"TERMINAL",0,0,"76",,terminal_output +4145,4819472,"TERMINAL",0,0,"8",,terminal_output +4146,4819528,"TERMINAL",0,0,"87",,terminal_output +4147,4820436,"TERMINAL",0,0,"9",,terminal_output +4148,4820579,"TERMINAL",0,0,"98",,terminal_output +4149,4821494,"TERMINAL",0,0,"40",,terminal_output +4150,4821615,"TERMINAL",0,0,"409",,terminal_output +4151,4822493,"TERMINAL",0,0,"1",,terminal_output +4152,4822658,"TERMINAL",0,0,"130",,terminal_output +4153,4823551,"TERMINAL",0,0,"2",,terminal_output +4154,4823702,"TERMINAL",0,0,"22",,terminal_output +4155,4824495,"TERMINAL",0,0,"3",,terminal_output +4156,4824783,"TERMINAL",0,0,"43",,terminal_output +4157,4825520,"TERMINAL",0,0,"4",,terminal_output +4158,4825772,"TERMINAL",0,0,"54",,terminal_output +4159,4826536,"TERMINAL",0,0,"5",,terminal_output +4160,4826823,"TERMINAL",0,0,"65",,terminal_output +4161,4827545,"TERMINAL",0,0,"6",,terminal_output +4162,4827943,"TERMINAL",0,0,"76",,terminal_output +4163,4828560,"TERMINAL",0,0,"7",,terminal_output +4164,4828919,"TERMINAL",0,0,"87",,terminal_output +4165,4829584,"TERMINAL",0,0,"8",,terminal_output +4166,4829925,"TERMINAL",0,0,"98",,terminal_output +4167,4830591,"TERMINAL",0,0,"9",,terminal_output +4168,4830964,"TERMINAL",0,0,"509",,terminal_output +4169,4831613,"TERMINAL",0,0,"50",,terminal_output 
+4170,4832005,"TERMINAL",0,0,"140",,terminal_output +4171,4832637,"TERMINAL",0,0,"1",,terminal_output +4172,4833048,"TERMINAL",0,0,"21",,terminal_output +4173,4833670,"TERMINAL",0,0,"2",,terminal_output +4174,4834194,"TERMINAL",0,0,"32",,terminal_output +4175,4834707,"TERMINAL",0,0,"3",,terminal_output +4176,4835220,"TERMINAL",0,0,"43",,terminal_output +4177,4835677,"TERMINAL",0,0,"4",,terminal_output +4178,4836238,"TERMINAL",0,0,"54",,terminal_output +4179,4836707,"TERMINAL",0,0,"5",,terminal_output +4180,4837265,"TERMINAL",0,0,"65",,terminal_output +4181,4837779,"TERMINAL",0,0,"7",,terminal_output +4182,4838284,"TERMINAL",0,0,"76",,terminal_output +4183,4838722,"TERMINAL",0,0,"8",,terminal_output +4184,4839311,"TERMINAL",0,0,"87",,terminal_output +4185,4839822,"TERMINAL",0,0,"9",,terminal_output +4186,4840344,"TERMINAL",0,0,"98",,terminal_output +4187,4840849,"TERMINAL",0,0,"8:00",,terminal_output +4188,4841466,"TERMINAL",0,0,"8:009",,terminal_output +4189,4841870,"TERMINAL",0,0,"1",,terminal_output +4190,4842902,"TERMINAL",0,0,"2",,terminal_output +4191,4842962,"TERMINAL",0,0,"151",,terminal_output +4192,4843819,"TERMINAL",0,0,"3",,terminal_output +4193,4843974,"TERMINAL",0,0,"32",,terminal_output +4194,4844866,"TERMINAL",0,0,"4",,terminal_output +4195,4845015,"TERMINAL",0,0,"43",,terminal_output +4196,4845863,"TERMINAL",0,0,"5",,terminal_output +4197,4846061,"TERMINAL",0,0,"54",,terminal_output +4198,4846901,"TERMINAL",0,0,"6",,terminal_output +4199,4847101,"TERMINAL",0,0,"65",,terminal_output +4200,4847867,"TERMINAL",0,0,"7",,terminal_output +4201,4848137,"TERMINAL",0,0,"76",,terminal_output +4202,4848909,"TERMINAL",0,0,"8",,terminal_output +4203,4849180,"TERMINAL",0,0,"87",,terminal_output +4204,4849958,"TERMINAL",0,0,"9",,terminal_output +4205,4850284,"TERMINAL",0,0,"98",,terminal_output +4206,4850912,"TERMINAL",0,0,"10",,terminal_output +4207,4851257,"TERMINAL",0,0,"109",,terminal_output +4208,4851948,"TERMINAL",0,0,"1",,terminal_output +4209,4852293,"TERMINAL",0,0,"13:00",,terminal_output +4210,4852949,"TERMINAL",0,0,"2",,terminal_output +4211,4853352,"TERMINAL",0,0,"21",,terminal_output +4212,4853965,"TERMINAL",0,0,"3",,terminal_output +4213,4854464,"TERMINAL",0,0,"32",,terminal_output +4214,4854991,"TERMINAL",0,0,"4",,terminal_output +4215,4855436,"TERMINAL",0,0,"43",,terminal_output +4216,4856101,"TERMINAL",0,0,"5",,terminal_output +4217,4856515,"TERMINAL",0,0,"54",,terminal_output +4218,4857021,"TERMINAL",0,0,"6",,terminal_output +4219,4857531,"TERMINAL",0,0,"65",,terminal_output +4220,4858050,"TERMINAL",0,0,"7",,terminal_output +4221,4858583,"TERMINAL",0,0,"76",,terminal_output +4222,4859057,"TERMINAL",0,0,"8",,terminal_output +4223,4859629,"TERMINAL",0,0,"87",,terminal_output +4224,4860101,"TERMINAL",0,0,"9",,terminal_output +4225,4860674,"TERMINAL",0,0,"98",,terminal_output +4226,4861120,"TERMINAL",0,0,"20",,terminal_output +4227,4861758,"TERMINAL",0,0,"2010",,terminal_output +4228,4862152,"TERMINAL",0,0,"1",,terminal_output +4229,4862780,"TERMINAL",0,0,"21",,terminal_output +4230,4863124,"TERMINAL",0,0,"2",,terminal_output +4231,4863889,"TERMINAL",0,0,"32",,terminal_output +4232,4864138,"TERMINAL",0,0,"3",,terminal_output +4233,4864918,"TERMINAL",0,0,"43",,terminal_output +4234,4865222,"TERMINAL",0,0,"4",,terminal_output +4235,4865935,"TERMINAL",0,0,"54",,terminal_output +4236,4866246,"TERMINAL",0,0,"5",,terminal_output +4237,4866951,"TERMINAL",0,0,"65",,terminal_output +4238,4867187,"TERMINAL",0,0,"6",,terminal_output +4239,4867999,"TERMINAL",0,0,"76",,terminal_output 
+4240,4868203,"TERMINAL",0,0,"7",,terminal_output +4241,4869111,"TERMINAL",0,0,"87",,terminal_output +4242,4869231,"TERMINAL",0,0,"8",,terminal_output +4243,4870138,"TERMINAL",0,0,"98",,terminal_output +4244,4870259,"TERMINAL",0,0,"9",,terminal_output +4245,4871172,"TERMINAL",0,0,"309",,terminal_output +4246,4871293,"TERMINAL",0,0,"30",,terminal_output +4247,4872196,"TERMINAL",0,0,"120",,terminal_output +4248,4872319,"TERMINAL",0,0,"1",,terminal_output +4249,4873306,"TERMINAL",0,0,"21",,terminal_output +4250,4873323,"TERMINAL",0,0,"2",,terminal_output +4251,4874259,"TERMINAL",0,0,"32",,terminal_output +4252,4874320,"TERMINAL",0,0,"3",,terminal_output +4253,4875292,"TERMINAL",0,0,"43",,terminal_output +4254,4875361,"TERMINAL",0,0,"4",,terminal_output +4255,4876385,"TERMINAL",0,0,"54",,terminal_output +4256,4876386,"TERMINAL",0,0,"5",,terminal_output +4257,4877420,"TERMINAL",0,0,"6",,terminal_output +4258,4877420,"TERMINAL",0,0,"65",,terminal_output +4259,4878406,"TERMINAL",0,0,"7",,terminal_output +4260,4878441,"TERMINAL",0,0,"76",,terminal_output +4261,4879458,"TERMINAL",0,0,"8",,terminal_output +4262,4879470,"TERMINAL",0,0,"87",,terminal_output +4263,4880483,"TERMINAL",0,0,"9",,terminal_output +4264,4880529,"TERMINAL",0,0,"98",,terminal_output +4265,4881500,"TERMINAL",0,0,"40",,terminal_output +4266,4881554,"TERMINAL",0,0,"409",,terminal_output +4267,4882523,"TERMINAL",0,0,"1",,terminal_output +4268,4882634,"TERMINAL",0,0,"130",,terminal_output +4269,4883456,"TERMINAL",0,0,"2",,terminal_output +4270,4883683,"TERMINAL",0,0,"21",,terminal_output +4271,4884569,"TERMINAL",0,0,"3",,terminal_output +4272,4884683,"TERMINAL",0,0,"33",,terminal_output +4273,4885598,"TERMINAL",0,0,"4",,terminal_output +4274,4885721,"TERMINAL",0,0,"54",,terminal_output +4275,4886530,"TERMINAL",0,0,"5",,terminal_output +4276,4886762,"TERMINAL",0,0,"65",,terminal_output +4277,4887524,"TERMINAL",0,0,"6",,terminal_output +4278,4887846,"TERMINAL",0,0,"76",,terminal_output +4279,4888573,"TERMINAL",0,0,"7",,terminal_output +4280,4888872,"TERMINAL",0,0,"87",,terminal_output +4281,4889559,"TERMINAL",0,0,"8",,terminal_output +4282,4889894,"TERMINAL",0,0,"98",,terminal_output +4283,4890615,"TERMINAL",0,0,"9",,terminal_output +4284,4890934,"TERMINAL",0,0,"509",,terminal_output +4285,4891590,"TERMINAL",0,0,"50",,terminal_output +4286,4892050,"TERMINAL",0,0,"140",,terminal_output +4287,4892614,"TERMINAL",0,0,"1",,terminal_output +4288,4893022,"TERMINAL",0,0,"21",,terminal_output +4289,4893665,"TERMINAL",0,0,"2",,terminal_output +4290,4894102,"TERMINAL",0,0,"32",,terminal_output +4291,4894720,"TERMINAL",0,0,"3",,terminal_output +4292,4895119,"TERMINAL",0,0,"43",,terminal_output +4293,4895689,"TERMINAL",0,0,"4",,terminal_output +4294,4896150,"TERMINAL",0,0,"54",,terminal_output +4295,4896674,"TERMINAL",0,0,"5",,terminal_output +4296,4897277,"TERMINAL",0,0,"65",,terminal_output +4297,4897796,"TERMINAL",0,0,"6",,terminal_output +4298,4898300,"TERMINAL",0,0,"76",,terminal_output +4299,4898712,"TERMINAL",0,0,"8",,terminal_output +4300,4899255,"TERMINAL",0,0,"87",,terminal_output +4301,4899834,"TERMINAL",0,0,"9",,terminal_output +4302,4900340,"TERMINAL",0,0,"98",,terminal_output +4303,4900755,"TERMINAL",0,0,"9:00",,terminal_output +4304,4901375,"TERMINAL",0,0,"9:009",,terminal_output +4305,4901777,"TERMINAL",0,0,"1",,terminal_output +4306,4902517,"TERMINAL",0,0,"150",,terminal_output +4307,4902806,"TERMINAL",0,0,"2",,terminal_output +4308,4903521,"TERMINAL",0,0,"21",,terminal_output +4309,4903791,"TERMINAL",0,0,"3",,terminal_output 
+4310,4904544,"TERMINAL",0,0,"32",,terminal_output +4311,4904864,"TERMINAL",0,0,"4",,terminal_output +4312,4905526,"TERMINAL",0,0,"43",,terminal_output +4313,4905823,"TERMINAL",0,0,"5",,terminal_output +4314,4906594,"TERMINAL",0,0,"54",,terminal_output +4315,4906848,"TERMINAL",0,0,"6",,terminal_output +4316,4907614,"TERMINAL",0,0,"65",,terminal_output +4317,4907925,"TERMINAL",0,0,"7",,terminal_output +4318,4908669,"TERMINAL",0,0,"76",,terminal_output +4319,4908875,"TERMINAL",0,0,"8",,terminal_output +4320,4909709,"TERMINAL",0,0,"88",,terminal_output +4321,4909895,"TERMINAL",0,0,"9",,terminal_output +4322,4910741,"TERMINAL",0,0,"109",,terminal_output +4323,4910913,"TERMINAL",0,0,"10",,terminal_output +4324,4911812,"TERMINAL",0,0,"14:00",,terminal_output +4325,4911919,"TERMINAL",0,0,"1",,terminal_output +4326,4912845,"TERMINAL",0,0,"21",,terminal_output +4327,4912955,"TERMINAL",0,0,"2",,terminal_output +4328,4914011,"TERMINAL",0,0,"32",,terminal_output +4329,4914012,"TERMINAL",0,0,"3",,terminal_output +4330,4914906,"TERMINAL",0,0,"43",,terminal_output +4331,4914971,"TERMINAL",0,0,"4",,terminal_output +4332,4916008,"TERMINAL",0,0,"54",,terminal_output +4333,4916023,"TERMINAL",0,0,"5",,terminal_output +4334,4916978,"TERMINAL",0,0,"65",,terminal_output +4335,4917001,"TERMINAL",0,0,"6",,terminal_output +4336,4918020,"TERMINAL",0,0,"76",,terminal_output +4337,4918040,"TERMINAL",0,0,"7",,terminal_output +4338,4919082,"TERMINAL",0,0,"8",,terminal_output +4339,4919083,"TERMINAL",0,0,"87",,terminal_output +4340,4920102,"TERMINAL",0,0,"9",,terminal_output +4341,4920103,"TERMINAL",0,0,"98",,terminal_output +4342,4921068,"TERMINAL",0,0,"20",,terminal_output +4343,4921182,"TERMINAL",0,0,"209",,terminal_output +4344,4922086,"TERMINAL",0,0,"1",,terminal_output +4345,4922356,"TERMINAL",0,0,"110",,terminal_output +4346,4923100,"TERMINAL",0,0,"2",,terminal_output +4347,4923484,"TERMINAL",0,0,"21",,terminal_output +4348,4924201,"TERMINAL",0,0,"3",,terminal_output +4349,4924518,"TERMINAL",0,0,"32",,terminal_output +4350,4925133,"TERMINAL",0,0,"4",,terminal_output +4351,4925459,"TERMINAL",0,0,"43",,terminal_output +4352,4926250,"TERMINAL",0,0,"5",,terminal_output +4353,4926532,"TERMINAL",0,0,"54",,terminal_output +4354,4927174,"TERMINAL",0,0,"6",,terminal_output +4355,4927579,"TERMINAL",0,0,"65",,terminal_output +4356,4928197,"TERMINAL",0,0,"7",,terminal_output +4357,4928599,"TERMINAL",0,0,"76",,terminal_output +4358,4929216,"TERMINAL",0,0,"8",,terminal_output +4359,4929623,"TERMINAL",0,0,"87",,terminal_output +4360,4930241,"TERMINAL",0,0,"9",,terminal_output +4361,4930657,"TERMINAL",0,0,"98",,terminal_output +4362,4931233,"TERMINAL",0,0,"30",,terminal_output +4363,4931707,"TERMINAL",0,0,"3020",,terminal_output +4364,4932289,"TERMINAL",0,0,"1",,terminal_output +4365,4932753,"TERMINAL",0,0,"21",,terminal_output +4366,4933314,"TERMINAL",0,0,"2",,terminal_output +4367,4933791,"TERMINAL",0,0,"32",,terminal_output +4368,4934338,"TERMINAL",0,0,"3",,terminal_output +4369,4934851,"TERMINAL",0,0,"43",,terminal_output +4370,4935363,"TERMINAL",0,0,"4",,terminal_output +4371,4935862,"TERMINAL",0,0,"54",,terminal_output +4372,4936385,"TERMINAL",0,0,"5",,terminal_output +4373,4936912,"TERMINAL",0,0,"65",,terminal_output +4374,4937408,"TERMINAL",0,0,"6",,terminal_output +4375,4938024,"TERMINAL",0,0,"76",,terminal_output +4376,4938433,"TERMINAL",0,0,"7",,terminal_output +4377,4939006,"TERMINAL",0,0,"87",,terminal_output +4378,4939457,"TERMINAL",0,0,"8",,terminal_output +4379,4940040,"TERMINAL",0,0,"98",,terminal_output 
+4380,4940382,"TERMINAL",0,0,"9",,terminal_output +4381,4941084,"TERMINAL",0,0,"409",,terminal_output +4382,4941451,"TERMINAL",0,0,"40",,terminal_output +4383,4942186,"TERMINAL",0,0,"130",,terminal_output +4384,4942432,"TERMINAL",0,0,"1",,terminal_output +4385,4943251,"TERMINAL",0,0,"21",,terminal_output +4386,4943431,"TERMINAL",0,0,"2",,terminal_output +4387,4944271,"TERMINAL",0,0,"32",,terminal_output +4388,4944446,"TERMINAL",0,0,"3",,terminal_output +4389,4945250,"TERMINAL",0,0,"43",,terminal_output +4390,4945500,"TERMINAL",0,0,"4",,terminal_output +4391,4946320,"TERMINAL",0,0,"54",,terminal_output +4392,4946484,"TERMINAL",0,0,"5",,terminal_output +4393,4947450,"TERMINAL",0,0,"65",,terminal_output +4394,4947504,"TERMINAL",0,0,"6",,terminal_output +4395,4948472,"TERMINAL",0,0,"76",,terminal_output +4396,4948547,"TERMINAL",0,0,"7",,terminal_output +4397,4949442,"TERMINAL",0,0,"87",,terminal_output +4398,4949559,"TERMINAL",0,0,"8",,terminal_output +4399,4950517,"TERMINAL",0,0,"98",,terminal_output +4400,4950574,"TERMINAL",0,0,"9",,terminal_output +4401,4951547,"TERMINAL",0,0,"509",,terminal_output +4402,4951565,"TERMINAL",0,0,"50",,terminal_output +4403,4952562,"TERMINAL",0,0,"140",,terminal_output +4404,4952584,"TERMINAL",0,0,"1",,terminal_output +4405,4953601,"TERMINAL",0,0,"2",,terminal_output +4406,4953616,"TERMINAL",0,0,"21",,terminal_output +4407,4954619,"TERMINAL",0,0,"3",,terminal_output +4408,4954692,"TERMINAL",0,0,"32",,terminal_output +4409,4955647,"TERMINAL",0,0,"4\r000800.orbax-checkpoint-tmp-0",,terminal_output +4410,4955718,"TERMINAL",0,0,"44",,terminal_output +4411,4956658,"TERMINAL",0,0,"5",,terminal_output +4412,4956774,"TERMINAL",0,0,"65",,terminal_output +4413,4957675,"TERMINAL",0,0,"6",,terminal_output +4414,4957786,"TERMINAL",0,0,"76",,terminal_output +4415,4958697,"TERMINAL",0,0,"\r7",,terminal_output +4416,4958809,"TERMINAL",0,0,"87",,terminal_output +4417,4959714,"TERMINAL",0,0,"9",,terminal_output +4418,4959858,"TERMINAL",0,0,"98",,terminal_output +4419,4960759,"TERMINAL",0,0,"10:00",,terminal_output +4420,4960899,"TERMINAL",0,0,"10:009",,terminal_output +4421,4961782,"TERMINAL",0,0,"1",,terminal_output +4422,4961946,"TERMINAL",0,0,"150",,terminal_output +4423,4962815,"TERMINAL",0,0,"2",,terminal_output +4424,4963119,"TERMINAL",0,0,"21",,terminal_output +4425,4963789,"TERMINAL",0,0,"3",,terminal_output +4426,4964096,"TERMINAL",0,0,"32",,terminal_output +4427,4964794,"TERMINAL",0,0,"4",,terminal_output +4428,4965131,"TERMINAL",0,0,"43",,terminal_output +4429,4965880,"TERMINAL",0,0,"5",,terminal_output +4430,4966189,"TERMINAL",0,0,"54",,terminal_output +4431,4966906,"TERMINAL",0,0,"6",,terminal_output +4432,4967212,"TERMINAL",0,0,"65",,terminal_output +4433,4967927,"TERMINAL",0,0,"7",,terminal_output +4434,4968336,"TERMINAL",0,0,"76",,terminal_output +4435,4968960,"TERMINAL",0,0,"8",,terminal_output +4436,4969307,"TERMINAL",0,0,"87",,terminal_output +4437,4969972,"TERMINAL",0,0,"9",,terminal_output +4438,4970356,"TERMINAL",0,0,"98",,terminal_output +4439,4970913,"TERMINAL",0,0,"10",,terminal_output +4440,4971409,"TERMINAL",0,0,"109",,terminal_output +4441,4971908,"TERMINAL",0,0,"1",,terminal_output +4442,4972447,"TERMINAL",0,0,"15:00",,terminal_output +4443,4972939,"TERMINAL",0,0,"2",,terminal_output +4444,4973488,"TERMINAL",0,0,"21",,terminal_output +4445,4973941,"TERMINAL",0,0,"3",,terminal_output +4446,4974538,"TERMINAL",0,0,"32",,terminal_output +4447,4974958,"TERMINAL",0,0,"4",,terminal_output +4448,4975572,"TERMINAL",0,0,"43",,terminal_output 
+4449,4976013,"TERMINAL",0,0,"5",,terminal_output +4450,4976620,"TERMINAL",0,0,"54",,terminal_output +4451,4977006,"TERMINAL",0,0,"6",,terminal_output +4452,4977661,"TERMINAL",0,0,"65",,terminal_output +4453,4978167,"TERMINAL",0,0,"7",,terminal_output +4454,4978698,"TERMINAL",0,0,"77",,terminal_output +4455,4979088,"TERMINAL",0,0,"8",,terminal_output +4456,4979737,"TERMINAL",0,0,"98",,terminal_output +4457,4980115,"TERMINAL",0,0,"9",,terminal_output +4458,4980825,"TERMINAL",0,0,"209",,terminal_output +4459,4981138,"TERMINAL",0,0,"20",,terminal_output +4460,4981832,"TERMINAL",0,0,"110",,terminal_output +4461,4982073,"TERMINAL",0,0,"1",,terminal_output +4462,4982886,"TERMINAL",0,0,"21",,terminal_output +4463,4983094,"TERMINAL",0,0,"2",,terminal_output +4464,4983943,"TERMINAL",0,0,"32",,terminal_output +4465,4984110,"TERMINAL",0,0,"3",,terminal_output +4466,4984934,"TERMINAL",0,0,"43",,terminal_output +4467,4985119,"TERMINAL",0,0,"4",,terminal_output +4468,4986008,"TERMINAL",0,0,"54",,terminal_output +4469,4986139,"TERMINAL",0,0,"5",,terminal_output +4470,4987075,"TERMINAL",0,0,"65",,terminal_output +4471,4987183,"TERMINAL",0,0,"6",,terminal_output +4472,4988100,"TERMINAL",0,0,"76",,terminal_output +4473,4988211,"TERMINAL",0,0,"7",,terminal_output +4474,4989122,"TERMINAL",0,0,"87",,terminal_output +4475,4989187,"TERMINAL",0,0,"8",,terminal_output +4476,4990147,"TERMINAL",0,0,"98",,terminal_output +4477,4990218,"TERMINAL",0,0,"9",,terminal_output +4478,4991181,"TERMINAL",0,0,"309",,terminal_output +4479,4991240,"TERMINAL",0,0,"30",,terminal_output +4480,4992266,"TERMINAL",0,0,"120",,terminal_output +4481,4992267,"TERMINAL",0,0,"1",,terminal_output +4482,4993249,"TERMINAL",0,0,"2",,terminal_output +4483,4993262,"TERMINAL",0,0,"21",,terminal_output +4484,4994307,"TERMINAL",0,0,"3",,terminal_output +4485,4994308,"TERMINAL",0,0,"32",,terminal_output +4486,4995368,"TERMINAL",0,0,"4",,terminal_output +4487,4995381,"TERMINAL",0,0,"43",,terminal_output +4488,4996330,"TERMINAL",0,0,"5",,terminal_output +4489,4996423,"TERMINAL",0,0,"54",,terminal_output +4490,4997416,"TERMINAL",0,0,"6",,terminal_output +4491,4997431,"TERMINAL",0,0,"65",,terminal_output +4492,4998425,"TERMINAL",0,0,"7",,terminal_output +4493,4998476,"TERMINAL",0,0,"76",,terminal_output +4494,4999463,"TERMINAL",0,0,"8",,terminal_output +4495,4999519,"TERMINAL",0,0,"87",,terminal_output +4496,5000384,"TERMINAL",0,0,"9",,terminal_output +4497,5000555,"TERMINAL",0,0,"98",,terminal_output +4498,5001418,"TERMINAL",0,0,"40",,terminal_output +4499,5001601,"TERMINAL",0,0,"409",,terminal_output +4500,5002438,"TERMINAL",0,0,"1",,terminal_output +4501,5002627,"TERMINAL",0,0,"130",,terminal_output +4502,5003461,"TERMINAL",0,0,"2",,terminal_output +4503,5003693,"TERMINAL",0,0,"21",,terminal_output +4504,5004468,"TERMINAL",0,0,"3",,terminal_output +4505,5004727,"TERMINAL",0,0,"33",,terminal_output +4506,5005462,"TERMINAL",0,0,"4",,terminal_output +4507,5005753,"TERMINAL",0,0,"54",,terminal_output +4508,5006487,"TERMINAL",0,0,"5",,terminal_output +4509,5006841,"TERMINAL",0,0,"65",,terminal_output +4510,5007491,"TERMINAL",0,0,"6",,terminal_output +4511,5007835,"TERMINAL",0,0,"76",,terminal_output +4512,5008510,"TERMINAL",0,0,"7",,terminal_output +4513,5008928,"TERMINAL",0,0,"87",,terminal_output +4514,5009600,"TERMINAL",0,0,"8",,terminal_output +4515,5010012,"TERMINAL",0,0,"98",,terminal_output +4516,5010544,"TERMINAL",0,0,"9",,terminal_output +4517,5010963,"TERMINAL",0,0,"509",,terminal_output +4518,5011557,"TERMINAL",0,0,"50",,terminal_output 
+4519,5012040,"TERMINAL",0,0,"140",,terminal_output +4520,5012591,"TERMINAL",0,0,"1",,terminal_output +4521,5013050,"TERMINAL",0,0,"21",,terminal_output +4522,5013598,"TERMINAL",0,0,"2",,terminal_output +4523,5014093,"TERMINAL",0,0,"32",,terminal_output +4524,5014605,"TERMINAL",0,0,"3",,terminal_output +4525,5015132,"TERMINAL",0,0,"43",,terminal_output +4526,5015619,"TERMINAL",0,0,"4",,terminal_output +4527,5016179,"TERMINAL",0,0,"54",,terminal_output +4528,5016638,"TERMINAL",0,0,"5",,terminal_output +4529,5017236,"TERMINAL",0,0,"65",,terminal_output +4530,5017659,"TERMINAL",0,0,"6",,terminal_output +4531,5018307,"TERMINAL",0,0,"76",,terminal_output +4532,5018670,"TERMINAL",0,0,"7",,terminal_output +4533,5019317,"TERMINAL",0,0,"87",,terminal_output +4534,5019704,"TERMINAL",0,0,"8",,terminal_output +4535,5020360,"TERMINAL",0,0,"98",,terminal_output +4536,5020713,"TERMINAL",0,0,"1:00",,terminal_output +4537,5021401,"TERMINAL",0,0,"1:009",,terminal_output +4538,5021755,"TERMINAL",0,0,"1",,terminal_output +4539,5022475,"TERMINAL",0,0,"150",,terminal_output +4540,5022736,"TERMINAL",0,0,"2",,terminal_output +4541,5023543,"TERMINAL",0,0,"21",,terminal_output +4542,5023755,"TERMINAL",0,0,"3",,terminal_output +4543,5024438,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +4544,5024774,"TERMINAL",0,0,"4",,terminal_output +4545,5025805,"TERMINAL",0,0,"5",,terminal_output +4546,5026116,"TERMINAL",0,0,"scancel 3370788",,terminal_command +4547,5026814,"TERMINAL",0,0,"6",,terminal_output +4548,5027815,"TERMINAL",0,0,"7",,terminal_output +4549,5028832,"TERMINAL",0,0,"8",,terminal_output +4550,5029328,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +4551,5029847,"TERMINAL",0,0,"9",,terminal_output +4552,5030866,"TERMINAL",0,0,"10",,terminal_output +4553,5031927,"TERMINAL",0,0,"1",,terminal_output +4554,5032954,"TERMINAL",0,0,"2",,terminal_output +4555,5033916,"TERMINAL",0,0,"3",,terminal_output +4556,5034929,"TERMINAL",0,0,"4",,terminal_output +4557,5035954,"TERMINAL",0,0,"5",,terminal_output +4558,5036959,"TERMINAL",0,0,"6",,terminal_output +4559,5038073,"TERMINAL",0,0,"7",,terminal_output +4560,5039010,"TERMINAL",0,0,"8",,terminal_output +4561,5040016,"TERMINAL",0,0,"9",,terminal_output +4562,5041041,"TERMINAL",0,0,"20",,terminal_output +4563,5042076,"TERMINAL",0,0,"1",,terminal_output +4564,5042947,"TERMINAL",0,0,"watch",,terminal_focus +4565,5043331,"TERMINAL",0,0,"2",,terminal_output +4566,5043537,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3370788]633;D;0",,terminal_output +4567,5047686,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",0,0,"",shellscript,tab +4568,5050965,"TERMINAL",0,0,"diff slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",,terminal_command +4569,5050968,"TERMINAL",0,0,"]633;E;2025-07-23 16:11:30 diff slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;Cdiff: slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch: No such file or directory\r\ndiff: slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch: No such file or 
directory\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3370788]633;D;2",,terminal_output +4570,5057307,"TERMINAL",0,0,"bash",,terminal_focus +4571,5061339,"TERMINAL",0,0,"bash",,terminal_focus +4572,5063324,"TERMINAL",0,0,"bash",,terminal_focus +4573,5063699,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch^C",,terminal_command +4574,5065050,"TERMINAL",0,0,"diff slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",,terminal_command +4575,5065067,"TERMINAL",0,0,"]633;E;2025-07-23 16:11:44 diff slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch;469e5d18-6e08-4909-a55e-e2644c9abc02]633;C3,6c3,6\r\n< #SBATCH --nodes=8\r\n< #SBATCH --ntasks-per-node=4\r\n< #SBATCH --time=48:00:00\r\n< #SBATCH --partition=accelerated\r\n---\r\n> #SBATCH --nodes=1\r\n> #SBATCH --ntasks-per-node=1\r\n> #SBATCH --time=00:15:00\r\n> #SBATCH --partition=dev_accelerated\r\n8c8\r\n< #SBATCH --gres=gpu:4\r\n---\r\n> #SBATCH --gres=gpu:1\r\n13c13\r\n< #SBATCH --signal=b:usr1@300 # 5 min before timeout\r\n---\r\n> #SBATCH --signal=b:usr1@30 # 30sec before timeout\r\n62c62\r\n< --batch_size=384 \\r\n---\r\n> --batch_size=12 \\r\n65c65\r\n< --log_image_interval=1000 \\r\n---\r\n> --log_image_interval=100 \\r\n67,68c67,68\r\n< --log_checkpoint_interval=1000 \\r\n< --name=dynamics-maskprob-fix-8-node-$slurm_job_id \\r\n---\r\n> --log_checkpoint_interval=100 \\r\n> --name=dynamics-maskprob-fix-8-node-dev-$slurm_job_id \\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;1",,terminal_output +4576,5095099,"TERMINAL",0,0,"bash",,terminal_focus +4577,5097770,"TERMINAL",0,0,"bash",,terminal_focus +4578,5101329,"TERMINAL",0,0,"runner",,terminal_command +4579,5104648,"TERMINAL",0,0,"sync-runner",,terminal_command +4580,5104663,"TERMINAL",0,0,"]633;E;2025-07-23 16:12:23 sync-runner;469e5d18-6e08-4909-a55e-e2644c9abc02]633;Csending incremental file list\r\n",,terminal_output +4581,5109330,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +4582,5109505,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +4583,5109559,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +4584,5109708,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +4585,5109867,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +4586,5109944,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +4587,5110273,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +4588,5110685,"TERMINAL",0,0,"[?25lslurm/jobs/mihir/horeka/mask_prob_fix/train_dynami[?25h[?25lcs_8_nodes.sbatch[?25h",,terminal_output +4589,5110873,"TERMINAL",0,0,"slurm/dev/alfred/horeka/\r\nslurm/dev/alfred/horeka/masked_lim_noise/\r\nslurm/dev/alfred/horeka/masked_lim_noise/masked_lim_dev.sbatch\r\nslurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sbatch\r\nslurm/dev/alfred/horeka/masked_lim_noise/masked_lim_yolo.sh\r\nslurm/jobs/mihir/horeka/mask_prob_fix/\r\nslurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch\r\nslurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch\r\n\r\nsent 33,895 bytes received 249 bytes 4,552.53 bytes/sec\r\ntotal size is 185,099,322 speedup is 5,421.14\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +4590,5116896,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch",,terminal_command 
+4591,5116923,"TERMINAL",0,0,"]633;E;2025-07-23 16:12:36 sbatch slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes.sbatch;469e5d18-6e08-4909-a55e-e2644c9abc02]633;CSubmitted batch job 3370822\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar_jobs]633;D;0",,terminal_output +4592,5122014,"TERMINAL",0,0,"cd ../jafar",,terminal_command +4593,5134089,"TERMINAL",0,0,"bash",,terminal_focus +4594,5336624,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",0,0,"",shellscript,tab +4595,5336626,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",750,0,"",shellscript,selection_mouse +4596,5336695,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",743,8,"optional",shellscript,selection_mouse +4597,5336918,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",743,17,"optional: trigger",shellscript,selection_mouse +4598,5336991,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",743,28,"optional: trigger checkpoint",shellscript,selection_mouse +4599,5337066,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",743,29,"optional: trigger checkpoint ",shellscript,selection_mouse +4600,5337083,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",743,35,"optional: trigger checkpoint saving",shellscript,selection_mouse +4601,5337180,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",743,36,"optional: trigger checkpoint saving ",shellscript,selection_mouse +4602,5337242,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",743,40,"optional: trigger checkpoint saving here",shellscript,selection_mouse +4603,5337607,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",781,0,"",shellscript,selection_mouse +4604,5338502,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",792,0,"",shellscript,selection_mouse +4605,5338661,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",792,1,"g",shellscript,selection_mouse +4606,5338846,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",792,9,"g., touch",shellscript,selection_mouse +4607,5338865,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",792,11,"g., touch $",shellscript,selection_mouse +4608,5338886,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",792,25,"g., touch $checkpoint_dir",shellscript,selection_mouse +4609,5338948,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",792,41,"g., touch $checkpoint_dir/requeue_trigger",shellscript,selection_mouse +4610,5339415,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",825,0,"",shellscript,selection_mouse +4611,5367522,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1171,0,"",shellscript,selection_mouse +4612,5368225,"TERMINAL",0,0,"bash",,terminal_focus +4613,5369307,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",0,0,"",shellscript,tab +4614,5370565,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",0,0,"",shellscript,tab +4615,5370582,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1167,0,"",shellscript,selection_mouse +4616,5370583,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",1166,0,"",shellscript,selection_command +4617,5371728,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",0,0,"",shellscript,tab 
+4618,5371729,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",833,0,"",shellscript,selection_mouse +4619,5371736,"slurm/jobs/mihir/horeka/mask_prob_fix/train_dynamics_8_nodes_dev.sbatch",832,0,"",shellscript,selection_command +4620,5388210,"TERMINAL",0,0,"queue",,terminal_command +4621,5388256,"TERMINAL",0,0,"]633;E;2025-07-23 16:17:07 queue;469e5d18-6e08-4909-a55e-e2644c9abc02]633;C",,terminal_output +4622,5388321,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Wed Jul 23 16:17:07 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3370822 accelerat train_dy tum_cte0 PD\t0:00\t 8 (Priority)",,terminal_output +4623,5389372,"TERMINAL",0,0,"8[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +4624,5424055,"TERMINAL",0,0,"bash",,terminal_focus +4625,5453947,"TERMINAL",0,0,"bash",,terminal_focus +4626,5642925,"slurm/jobs/mihir/horeka/modelsize_scaling/dynamics/5_train_dyn_500M.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=24\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/big-runs/dynamics-cotraining-modelsize-scaling/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/big-runs/dynamics-cotraining-modelsize-scaling/%x_%j.log\n#SBATCH --job-name=train_dynamics_modelsize_scaling_500M_32_node\n#SBATCH --mem=400G\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/dynamics-cotraining-modelsize-scaling/$job_name\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/train_tokenizer_batch_size_scaling_16_node/3321526/tokenizer_22000/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.5e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-modelsize-scaling-500M-$slurm_job_id \\n --tags dynamics modelsize-scaling 500M \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir \\n --dyna_dim=1536 \\n --dyna_num_blocks=24 \\n --dyna_num_heads=24\n",shellscript,tab +4627,5687587,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5",,terminal_command +4628,5687637,"TERMINAL",0,0,"]633;E;2025-07-23 16:22:06 salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5;469e5d18-6e08-4909-a55e-e2644c9abc02]633;Csalloc: Pending job allocation 3370903\r\nsalloc: job 3370903 queued and waiting for resources\r\n",,terminal_output +4629,5691048,"TERMINAL",0,0,"^Csalloc: Job allocation 3370903 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;1",,terminal_output +4630,5696476,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=4 --gres=gpu:4 
--cpus-per-task=5",,terminal_command +4631,5696528,"TERMINAL",0,0,"]633;E;2025-07-23 16:22:15 salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5;469e5d18-6e08-4909-a55e-e2644c9abc02]633;Csalloc: Pending job allocation 3370904\r\nsalloc: job 3370904 queued and waiting for resources\r\n",,terminal_output +4632,5697264,"TERMINAL",0,0,"bash",,terminal_focus +4633,5698310,"TERMINAL",0,0,"idling",,terminal_command +4634,5698393,"TERMINAL",0,0,"]633;E;2025-07-23 16:22:17 idling;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1990.localdomain: Wed Jul 23 16:22:17 2025Partition dev_cpuonly: 11 nodes idle\rPartition cpuonly: 42 nodes idle\rPartition dev_accelerated:\t 0 nodes idle\rPartition accelerated:\t 2 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 7 nodes idle",,terminal_output +4635,5699483,"TERMINAL",0,0,"8",,terminal_output +4636,5700001,"TERMINAL",0,0,"salloc",,terminal_focus +4637,5700385,"TERMINAL",0,0,"^Csalloc: Job allocation 3370904 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;1",,terminal_output +4638,5700479,"TERMINAL",0,0,"9",,terminal_output +4639,5701502,"TERMINAL",0,0,"20",,terminal_output +4640,5702548,"TERMINAL",0,0,"1",,terminal_output +4641,5703593,"TERMINAL",0,0,"2",,terminal_output +4642,5704626,"TERMINAL",0,0,"3",,terminal_output +4643,5704667,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5",,terminal_command +4644,5704732,"TERMINAL",0,0,"]633;E;2025-07-23 16:22:23 salloc --time=01:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5;469e5d18-6e08-4909-a55e-e2644c9abc02]633;Csalloc: Pending job allocation 3370905\r\nsalloc: job 3370905 queued and waiting for resources\r\n",,terminal_output +4645,5705727,"TERMINAL",0,0,"4",,terminal_output +4646,5706708,"TERMINAL",0,0,"5",,terminal_output +4647,5707776,"TERMINAL",0,0,"7",,terminal_output +4648,5708822,"TERMINAL",0,0,"8",,terminal_output +4649,5709922,"TERMINAL",0,0,"9",,terminal_output +4650,5710973,"TERMINAL",0,0,"30",,terminal_output +4651,5711904,"TERMINAL",0,0,"1",,terminal_output +4652,5712996,"TERMINAL",0,0,"2",,terminal_output +4653,5714011,"TERMINAL",0,0,"^Csalloc: Job allocation 3370905 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;1",,terminal_output +4654,5714026,"TERMINAL",0,0,"3",,terminal_output +4655,5715017,"TERMINAL",0,0,"4",,terminal_output +4656,5716068,"TERMINAL",0,0,"5",,terminal_output +4657,5717134,"TERMINAL",0,0,"6",,terminal_output +4658,5718220,"TERMINAL",0,0,"7",,terminal_output +4659,5719178,"TERMINAL",0,0,"8",,terminal_output +4660,5720266,"TERMINAL",0,0,"9",,terminal_output +4661,5721268,"TERMINAL",0,0,"40",,terminal_output +4662,5721878,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5",,terminal_command +4663,5721935,"TERMINAL",0,0,"]633;E;2025-07-23 16:22:41 salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5;469e5d18-6e08-4909-a55e-e2644c9abc02]633;Csalloc: Pending job allocation 3370906\r\nsalloc: job 3370906 queued and waiting for resources\r\n",,terminal_output +4664,5722298,"TERMINAL",0,0,"1",,terminal_output 
+4665,5723332,"TERMINAL",0,0,"2",,terminal_output +4666,5724464,"TERMINAL",0,0,"3",,terminal_output +4667,5725426,"TERMINAL",0,0,"4",,terminal_output +4668,5726512,"TERMINAL",0,0,"5",,terminal_output +4669,5727537,"TERMINAL",0,0,"6",,terminal_output +4670,5728561,"TERMINAL",0,0,"7",,terminal_output +4671,5729595,"TERMINAL",0,0,"8",,terminal_output +4672,5730711,"TERMINAL",0,0,"90",,terminal_output +4673,5731701,"TERMINAL",0,0,"50",,terminal_output +4674,5732758,"TERMINAL",0,0,"1",,terminal_output +4675,5733752,"TERMINAL",0,0,"3",,terminal_output +4676,5734792,"TERMINAL",0,0,"4",,terminal_output +4677,5735833,"TERMINAL",0,0,"5",,terminal_output +4678,5736886,"TERMINAL",0,0,"6",,terminal_output +4679,5737993,"TERMINAL",0,0,"7",,terminal_output +4680,5738959,"TERMINAL",0,0,"8",,terminal_output +4681,5740000,"TERMINAL",0,0,"9",,terminal_output +4682,5741056,"TERMINAL",0,0,"3:00",,terminal_output +4683,5742076,"TERMINAL",0,0,"1",,terminal_output +4684,5743121,"TERMINAL",0,0,"2",,terminal_output +4685,5744237,"TERMINAL",0,0,"3",,terminal_output +4686,5745256,"TERMINAL",0,0,"4",,terminal_output +4687,5746276,"TERMINAL",0,0,"5",,terminal_output +4688,5747278,"TERMINAL",0,0,"6",,terminal_output +4689,5748427,"TERMINAL",0,0,"7",,terminal_output +4690,5749453,"TERMINAL",0,0,"8",,terminal_output +4691,5750398,"TERMINAL",0,0,"9",,terminal_output +4692,5751498,"TERMINAL",0,0,"10",,terminal_output +4693,5752525,"TERMINAL",0,0,"1",,terminal_output +4694,5753550,"TERMINAL",0,0,"2",,terminal_output +4695,5754563,"TERMINAL",0,0,"3",,terminal_output +4696,5755598,"TERMINAL",0,0,"4",,terminal_output +4697,5756721,"TERMINAL",0,0,"5",,terminal_output +4698,5758024,"TERMINAL",0,0,"61",,terminal_output +4699,5759052,"TERMINAL",0,0,"82",,terminal_output +4700,5760095,"TERMINAL",0,0,"9",,terminal_output +4701,5761136,"TERMINAL",0,0,"20",,terminal_output +4702,5762248,"TERMINAL",0,0,"1",,terminal_output +4703,5763228,"TERMINAL",0,0,"2",,terminal_output +4704,5764286,"TERMINAL",0,0,"3",,terminal_output +4705,5765325,"TERMINAL",0,0,"4",,terminal_output +4706,5766351,"TERMINAL",0,0,"5",,terminal_output +4707,5767366,"TERMINAL",0,0,"6",,terminal_output +4708,5768408,"TERMINAL",0,0,"7",,terminal_output +4709,5769451,"TERMINAL",0,0,"8",,terminal_output +4710,5770547,"TERMINAL",0,0,"9",,terminal_output +4711,5771572,"TERMINAL",0,0,"30",,terminal_output +4712,5772595,"TERMINAL",0,0,"1",,terminal_output +4713,5773621,"TERMINAL",0,0,"2",,terminal_output +4714,5774748,"TERMINAL",0,0,"3",,terminal_output +4715,5775774,"TERMINAL",0,0,"4",,terminal_output +4716,5776742,"TERMINAL",0,0,"6",,terminal_output +4717,5777825,"TERMINAL",0,0,"7",,terminal_output +4718,5778840,"TERMINAL",0,0,"8",,terminal_output +4719,5779968,"TERMINAL",0,0,"9",,terminal_output +4720,5780898,"TERMINAL",0,0,"40",,terminal_output +4721,5782013,"TERMINAL",0,0,"1",,terminal_output +4722,5783010,"TERMINAL",0,0,"2",,terminal_output +4723,5784029,"TERMINAL",0,0,"3",,terminal_output +4724,5785061,"TERMINAL",0,0,"4",,terminal_output +4725,5786099,"TERMINAL",0,0,"5",,terminal_output +4726,5787150,"TERMINAL",0,0,"6",,terminal_output +4727,5788183,"TERMINAL",0,0,"7",,terminal_output +4728,5789286,"TERMINAL",0,0,"8",,terminal_output +4729,5790309,"TERMINAL",0,0,"9",,terminal_output +4730,5791330,"TERMINAL",0,0,"50",,terminal_output +4731,5792461,"TERMINAL",0,0,"11",,terminal_output +4732,5793434,"TERMINAL",0,0,"2",,terminal_output +4733,5794466,"TERMINAL",0,0,"3",,terminal_output +4734,5795536,"TERMINAL",0,0,"4",,terminal_output 
+4735,5796548,"TERMINAL",0,0,"5",,terminal_output +4736,5797587,"TERMINAL",0,0,"6",,terminal_output +4737,5798667,"TERMINAL",0,0,"7",,terminal_output +4738,5799730,"TERMINAL",0,0,"8",,terminal_output +4739,5800759,"TERMINAL",0,0,"9",,terminal_output +4740,5801778,"TERMINAL",0,0,"4:01",,terminal_output +4741,5802904,"TERMINAL",0,0,"2",,terminal_output +4742,5803833,"TERMINAL",0,0,"3",,terminal_output +4743,5804959,"TERMINAL",0,0,"4",,terminal_output +4744,5805977,"TERMINAL",0,0,"5",,terminal_output +4745,5806999,"TERMINAL",0,0,"6",,terminal_output +4746,5808027,"TERMINAL",0,0,"7",,terminal_output +4747,5809041,"TERMINAL",0,0,"8",,terminal_output +4748,5810092,"TERMINAL",0,0,"9",,terminal_output +4749,5811120,"TERMINAL",0,0,"10",,terminal_output +4750,5812176,"TERMINAL",0,0,"1",,terminal_output +4751,5813245,"TERMINAL",0,0,"2",,terminal_output +4752,5814271,"TERMINAL",0,0,"3",,terminal_output +4753,5815276,"TERMINAL",0,0,"4",,terminal_output +4754,5816424,"TERMINAL",0,0,"5",,terminal_output +4755,5817366,"TERMINAL",0,0,"6",,terminal_output +4756,5818468,"TERMINAL",0,0,"7",,terminal_output +4757,5819501,"TERMINAL",0,0,"8",,terminal_output +4758,5821759,"TERMINAL",0,0,"920",,terminal_output +4759,5822552,"TERMINAL",0,0,"1",,terminal_output +4760,5823674,"TERMINAL",0,0,"2",,terminal_output +4761,5824633,"TERMINAL",0,0,"33",,terminal_output +4762,5825742,"TERMINAL",0,0,"4",,terminal_output +4763,5826715,"TERMINAL",0,0,"5",,terminal_output +4764,5827791,"TERMINAL",0,0,"7",,terminal_output +4765,5828792,"TERMINAL",0,0,"8",,terminal_output +4766,5829847,"TERMINAL",0,0,"9",,terminal_output +4767,5830960,"TERMINAL",0,0,"30",,terminal_output +4768,5831983,"TERMINAL",0,0,"1",,terminal_output +4769,5833023,"TERMINAL",0,0,"2",,terminal_output +4770,5833980,"TERMINAL",0,0,"3",,terminal_output +4771,5835070,"TERMINAL",0,0,"45",,terminal_output +4772,5836080,"TERMINAL",0,0,"56",,terminal_output +4773,5837111,"TERMINAL",0,0,"6",,terminal_output +4774,5838150,"TERMINAL",0,0,"714",,terminal_output +4775,5839254,"TERMINAL",0,0,"8",,terminal_output +4776,5840279,"TERMINAL",0,0,"9",,terminal_output +4777,5841301,"TERMINAL",0,0,"40",,terminal_output +4778,5842332,"TERMINAL",0,0,"1",,terminal_output +4779,5843349,"TERMINAL",0,0,"2",,terminal_output +4780,5844395,"TERMINAL",0,0,"3",,terminal_output +4781,5845501,"TERMINAL",0,0,"4",,terminal_output +4782,5846528,"TERMINAL",0,0,"50",,terminal_output +4783,5847510,"TERMINAL",0,0,"66",,terminal_output +4784,5848579,"TERMINAL",0,0,"7",,terminal_output +4785,5849598,"TERMINAL",0,0,"8",,terminal_output +4786,5850724,"TERMINAL",0,0,"9",,terminal_output +4787,5851679,"TERMINAL",0,0,"50",,terminal_output +4788,5852719,"TERMINAL",0,0,"2",,terminal_output +4789,5853758,"TERMINAL",0,0,"3",,terminal_output +4790,5854822,"TERMINAL",0,0,"4",,terminal_output +4791,5855949,"TERMINAL",0,0,"5",,terminal_output +4792,5856876,"TERMINAL",0,0,"6",,terminal_output +4793,5857994,"TERMINAL",0,0,"7",,terminal_output +4794,5858960,"TERMINAL",0,0,"8",,terminal_output +4795,5860043,"TERMINAL",0,0,"9",,terminal_output +4796,5861040,"TERMINAL",0,0,"5:00",,terminal_output +4797,5862082,"TERMINAL",0,0,"1",,terminal_output +4798,5863121,"TERMINAL",0,0,"2",,terminal_output +4799,5864162,"TERMINAL",0,0,"3",,terminal_output +4800,5865200,"TERMINAL",0,0,"4",,terminal_output +4801,5866238,"TERMINAL",0,0,"5",,terminal_output +4802,5867313,"TERMINAL",0,0,"620",,terminal_output +4803,5868339,"TERMINAL",0,0,"7",,terminal_output +4804,5869363,"TERMINAL",0,0,"8",,terminal_output 
+4805,5870404,"TERMINAL",0,0,"9",,terminal_output +4806,5871512,"TERMINAL",0,0,"10",,terminal_output +4807,5872537,"TERMINAL",0,0,"1",,terminal_output +4808,5873561,"TERMINAL",0,0,"2",,terminal_output +4809,5874584,"TERMINAL",0,0,"3",,terminal_output +4810,5875595,"TERMINAL",0,0,"46",,terminal_output +4811,5876734,"TERMINAL",0,0,"5",,terminal_output +4812,5877687,"TERMINAL",0,0,"6",,terminal_output +4813,5878734,"TERMINAL",0,0,"8115",,terminal_output +4814,5879768,"TERMINAL",0,0,"9",,terminal_output +4815,5880806,"TERMINAL",0,0,"20",,terminal_output +4816,5881858,"TERMINAL",0,0,"1",,terminal_output +4817,5882885,"TERMINAL",0,0,"2",,terminal_output +4818,5883952,"TERMINAL",0,0,"3",,terminal_output +4819,5884961,"TERMINAL",0,0,"4",,terminal_output +4820,5886070,"TERMINAL",0,0,"5",,terminal_output +4821,5887043,"TERMINAL",0,0,"6",,terminal_output +4822,5888077,"TERMINAL",0,0,"7",,terminal_output +4823,5889115,"TERMINAL",0,0,"8",,terminal_output +4824,5890162,"TERMINAL",0,0,"9",,terminal_output +4825,5891201,"TERMINAL",0,0,"30",,terminal_output +4826,5892300,"TERMINAL",0,0,"1",,terminal_output +4827,5893327,"TERMINAL",0,0,"2",,terminal_output +4828,5894349,"TERMINAL",0,0,"3",,terminal_output +4829,5895575,"TERMINAL",0,0,"4",,terminal_output +4830,5896407,"TERMINAL",0,0,"5",,terminal_output +4831,5897526,"TERMINAL",0,0,"6",,terminal_output +4832,5898493,"TERMINAL",0,0,"7",,terminal_output +4833,5899531,"TERMINAL",0,0,"8",,terminal_output +4834,5900591,"TERMINAL",0,0,"9",,terminal_output +4835,5901622,"TERMINAL",0,0,"40",,terminal_output +4836,5902745,"TERMINAL",0,0,"1",,terminal_output +4837,5903693,"TERMINAL",0,0,"2",,terminal_output +4838,5904797,"TERMINAL",0,0,"4",,terminal_output +4839,5905815,"TERMINAL",0,0,"5",,terminal_output +4840,5906849,"TERMINAL",0,0,"6",,terminal_output +4841,5907845,"TERMINAL",0,0,"7",,terminal_output +4842,5908890,"TERMINAL",0,0,"8",,terminal_output +4843,5910017,"TERMINAL",0,0,"9",,terminal_output +4844,5911041,"TERMINAL",0,0,"50",,terminal_output +4845,5912005,"TERMINAL",0,0,"1",,terminal_output +4846,5913077,"TERMINAL",0,0,"2",,terminal_output +4847,5914089,"TERMINAL",0,0,"3",,terminal_output +4848,5915135,"TERMINAL",0,0,"4",,terminal_output +4849,5916159,"TERMINAL",0,0,"5",,terminal_output +4850,5917215,"TERMINAL",0,0,"6",,terminal_output +4851,5918247,"TERMINAL",0,0,"7",,terminal_output +4852,5919334,"TERMINAL",0,0,"8",,terminal_output +4853,5920319,"TERMINAL",0,0,"9",,terminal_output +4854,5921383,"TERMINAL",0,0,"6:002",,terminal_output +4855,5922408,"TERMINAL",0,0,"1",,terminal_output +4856,5923537,"TERMINAL",0,0,"2",,terminal_output +4857,5924482,"TERMINAL",0,0,"3",,terminal_output +4858,5925580,"TERMINAL",0,0,"4",,terminal_output +4859,5926609,"TERMINAL",0,0,"5",,terminal_output +4860,5927635,"TERMINAL",0,0,"6",,terminal_output +4861,5928724,"TERMINAL",0,0,"7",,terminal_output +4862,5929780,"TERMINAL",0,0,"8",,terminal_output +4863,5930721,"TERMINAL",0,0,"10",,terminal_output +4864,5931829,"TERMINAL",0,0,"1",,terminal_output +4865,5932850,"TERMINAL",0,0,"2",,terminal_output +4866,5933838,"TERMINAL",0,0,"3",,terminal_output +4867,5934901,"TERMINAL",0,0,"4",,terminal_output +4868,5936026,"TERMINAL",0,0,"5",,terminal_output +4869,5936962,"TERMINAL",0,0,"6",,terminal_output +4870,5938083,"TERMINAL",0,0,"7",,terminal_output +4871,5939048,"TERMINAL",0,0,"8",,terminal_output +4872,5940124,"TERMINAL",0,0,"9",,terminal_output +4873,5941147,"TERMINAL",0,0,"20",,terminal_output +4874,5942158,"TERMINAL",0,0,"1",,terminal_output 
+4875,5943198,"TERMINAL",0,0,"2",,terminal_output +4876,5944238,"TERMINAL",0,0,"3",,terminal_output +4877,5945347,"TERMINAL",0,0,"4",,terminal_output +4878,5946373,"TERMINAL",0,0,"5",,terminal_output +4879,5947391,"TERMINAL",0,0,"6",,terminal_output +4880,5948582,"TERMINAL",0,0,"7",,terminal_output +4881,5949658,"TERMINAL",0,0,"8",,terminal_output +4882,5950567,"TERMINAL",0,0,"9",,terminal_output +4883,5951521,"TERMINAL",0,0,"30",,terminal_output +4884,5952614,"TERMINAL",0,0,"1",,terminal_output +4885,5953667,"TERMINAL",0,0,"2",,terminal_output +4886,5954662,"TERMINAL",0,0,"3",,terminal_output +4887,5955794,"TERMINAL",0,0,"4",,terminal_output +4888,5956726,"TERMINAL",0,0,"6",,terminal_output +4889,5957837,"TERMINAL",0,0,"7",,terminal_output +4890,5958862,"TERMINAL",0,0,"8",,terminal_output +4891,5959883,"TERMINAL",0,0,"9",,terminal_output +4892,5960907,"TERMINAL",0,0,"40",,terminal_output +4893,5961921,"TERMINAL",0,0,"1",,terminal_output +4894,5962960,"TERMINAL",0,0,"2",,terminal_output +4895,5964002,"TERMINAL",0,0,"3",,terminal_output +4896,5965109,"TERMINAL",0,0,"4",,terminal_output +4897,5966135,"TERMINAL",0,0,"5",,terminal_output +4898,5967124,"TERMINAL",0,0,"6",,terminal_output +4899,5968186,"TERMINAL",0,0,"7",,terminal_output +4900,5969210,"TERMINAL",0,0,"8",,terminal_output +4901,5970332,"TERMINAL",0,0,"9",,terminal_output +4902,5971358,"TERMINAL",0,0,"50",,terminal_output +4903,5972378,"TERMINAL",0,0,"1",,terminal_output +4904,5973410,"TERMINAL",0,0,"2",,terminal_output +4905,5974425,"TERMINAL",0,0,"3",,terminal_output +4906,5975554,"TERMINAL",0,0,"4",,terminal_output +4907,5976577,"TERMINAL",0,0,"5",,terminal_output +4908,5977602,"TERMINAL",0,0,"6",,terminal_output +4909,5978669,"TERMINAL",0,0,"7",,terminal_output +4910,5979648,"TERMINAL",0,0,"8",,terminal_output +4911,5980672,"TERMINAL",0,0,"9",,terminal_output +4912,5981703,"TERMINAL",0,0,"7:00",,terminal_output +4913,5982824,"TERMINAL",0,0,"2",,terminal_output +4914,5983768,"TERMINAL",0,0,"3",,terminal_output +4915,5984871,"TERMINAL",0,0,"4",,terminal_output +4916,5985894,"TERMINAL",0,0,"5",,terminal_output +4917,5986888,"TERMINAL",0,0,"6",,terminal_output +4918,5987942,"TERMINAL",0,0,"7",,terminal_output +4919,5988968,"TERMINAL",0,0,"84",,terminal_output +4920,5990003,"TERMINAL",0,0,"9",,terminal_output +4921,5991043,"TERMINAL",0,0,"10",,terminal_output +4922,5992088,"TERMINAL",0,0,"1",,terminal_output +4923,5993123,"TERMINAL",0,0,"2",,terminal_output +4924,5994169,"TERMINAL",0,0,"3",,terminal_output +4925,5995318,"TERMINAL",0,0,"4",,terminal_output +4926,5996345,"TERMINAL",0,0,"5",,terminal_output +4927,5997288,"TERMINAL",0,0,"6",,terminal_output +4928,5998337,"TERMINAL",0,0,"7",,terminal_output +4929,5999822,"TERMINAL",0,0,"801",,terminal_output +4930,6000954,"TERMINAL",0,0,"20",,terminal_output +4931,6001880,"TERMINAL",0,0,"1",,terminal_output +4932,6002999,"TERMINAL",0,0,"2",,terminal_output +4933,6004082,"TERMINAL",0,0,"3",,terminal_output +4934,6005043,"TERMINAL",0,0,"4",,terminal_output +4935,6006084,"TERMINAL",0,0,"5",,terminal_output +4936,6007093,"TERMINAL",0,0,"6",,terminal_output +4937,6008225,"TERMINAL",0,0,"7",,terminal_output +4938,6009146,"TERMINAL",0,0,"8",,terminal_output +4939,6010188,"TERMINAL",0,0,"9",,terminal_output +4940,6011248,"TERMINAL",0,0,"30",,terminal_output +4941,6012315,"TERMINAL",0,0,"1",,terminal_output +4942,6013336,"TERMINAL",0,0,"2",,terminal_output +4943,6014350,"TERMINAL",0,0,"3",,terminal_output +4944,6015487,"TERMINAL",0,0,"4",,terminal_output 
+4945,6016516,"TERMINAL",0,0,"5",,terminal_output +4946,6017460,"TERMINAL",0,0,"6",,terminal_output +4947,6018564,"TERMINAL",0,0,"7",,terminal_output +4948,6019586,"TERMINAL",0,0,"8",,terminal_output +4949,6020611,"TERMINAL",0,0,"9",,terminal_output +4950,6021620,"TERMINAL",0,0,"40",,terminal_output +4951,6022760,"TERMINAL",0,0,"1",,terminal_output +4952,6023701,"TERMINAL",0,0,"2",,terminal_output +4953,6024742,"TERMINAL",0,0,"4",,terminal_output +4954,6025836,"TERMINAL",0,0,"5",,terminal_output +4955,6026818,"TERMINAL",0,0,"6",,terminal_output +4956,6027878,"TERMINAL",0,0,"7",,terminal_output +4957,6028918,"TERMINAL",0,0,"8",,terminal_output +4958,6029724,"TERMINAL",0,0,"salloc: job 3370906 has been allocated resources\r\nsalloc: Granted job allocation 3370906\r\n",,terminal_output +4959,6029831,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output +4960,6029939,"TERMINAL",0,0,"9",,terminal_output +4961,6031054,"TERMINAL",0,0,"50",,terminal_output +4962,6032024,"TERMINAL",0,0,"1",,terminal_output +4963,6033052,"TERMINAL",0,0,"256",,terminal_output +4964,6034092,"TERMINAL",0,0,"3",,terminal_output +4965,6035151,"TERMINAL",0,0,"4",,terminal_output +4966,6036174,"TERMINAL",0,0,"5",,terminal_output +4967,6037210,"TERMINAL",0,0,"6",,terminal_output +4968,6038246,"TERMINAL",0,0,"77",,terminal_output +4969,6039351,"TERMINAL",0,0,"8",,terminal_output +4970,6040323,"TERMINAL",0,0,"9",,terminal_output +4971,6041371,"TERMINAL",0,0,"8:00",,terminal_output +4972,6042418,"TERMINAL",0,0,"1",,terminal_output +4973,6043450,"TERMINAL",0,0,"2",,terminal_output +4974,6044570,"TERMINAL",0,0,"3",,terminal_output +4975,6045593,"TERMINAL",0,0,"45",,terminal_output +4976,6046619,"TERMINAL",0,0,"5",,terminal_output +4977,6047646,"TERMINAL",0,0,"6",,terminal_output +4978,6048667,"TERMINAL",0,0,"7",,terminal_output +4979,6049793,"TERMINAL",0,0,"8",,terminal_output +4980,6050725,"TERMINAL",0,0,"10",,terminal_output +4981,6051839,"TERMINAL",0,0,"1",,terminal_output +4982,6052863,"TERMINAL",0,0,"2",,terminal_output +4983,6053888,"TERMINAL",0,0,"3",,terminal_output +4984,6054915,"TERMINAL",0,0,"4",,terminal_output +4985,6055928,"TERMINAL",0,0,"5",,terminal_output +4986,6056880,"TERMINAL",0,0,"salloc: Nodes hkn0403 are ready for job\r\n",,terminal_output +4987,6056992,"TERMINAL",0,0,"6",,terminal_output +4988,6057830,"TERMINAL",0,0,"]0;tum_cte0515@hkn0403:~/Projects/jafar[?2004h[tum_cte0515@hkn0403 jafar]$ ",,terminal_output +4989,6058013,"TERMINAL",0,0,"7",,terminal_output +4990,6059060,"TERMINAL",0,0,"8",,terminal_output +4991,6060150,"TERMINAL",0,0,"9",,terminal_output +4992,6061160,"TERMINAL",0,0,"206",,terminal_output +4993,6062189,"TERMINAL",0,0,"1",,terminal_output +4994,6063229,"TERMINAL",0,0,"2",,terminal_output +4995,6064333,"TERMINAL",0,0,"3",,terminal_output +4996,6065356,"TERMINAL",0,0,"4",,terminal_output +4997,6066381,"TERMINAL",0,0,"5",,terminal_output +4998,6067409,"TERMINAL",0,0,"6",,terminal_output +4999,6068431,"TERMINAL",0,0,"7",,terminal_output +5000,6069556,"TERMINAL",0,0,"8",,terminal_output +5001,6070583,"TERMINAL",0,0,"9",,terminal_output +5002,6071603,"TERMINAL",0,0,"30",,terminal_output +5003,6072633,"TERMINAL",0,0,"1",,terminal_output +5004,6073667,"TERMINAL",0,0,"2",,terminal_output +5005,6074779,"TERMINAL",0,0,"3",,terminal_output +5006,6075719,"TERMINAL",0,0,"4",,terminal_output +5007,6076827,"TERMINAL",0,0,"6",,terminal_output +5008,6077804,"TERMINAL",0,0,"7",,terminal_output +5009,6078841,"TERMINAL",0,0,"8",,terminal_output 
+5010,6079899,"TERMINAL",0,0,"9",,terminal_output +5011,6081028,"TERMINAL",0,0,"40",,terminal_output +5012,6082047,"TERMINAL",0,0,"1",,terminal_output +5013,6083005,"TERMINAL",0,0,"2",,terminal_output +5014,6084051,"TERMINAL",0,0,"3",,terminal_output +5015,6085089,"TERMINAL",0,0,"4",,terminal_output +5016,6086160,"TERMINAL",0,0,"5",,terminal_output +5017,6087175,"TERMINAL",0,0,"6",,terminal_output +5018,6088217,"TERMINAL",0,0,"7",,terminal_output +5019,6089256,"TERMINAL",0,0,"8",,terminal_output +5020,6090343,"TERMINAL",0,0,"9",,terminal_output +5021,6091339,"TERMINAL",0,0,"50",,terminal_output +5022,6092393,"TERMINAL",0,0,"1",,terminal_output +5023,6093517,"TERMINAL",0,0,"2",,terminal_output +5024,6094549,"TERMINAL",0,0,"3",,terminal_output +5025,6095572,"TERMINAL",0,0,"4",,terminal_output +5026,6096556,"TERMINAL",0,0,"5",,terminal_output +5027,6097611,"TERMINAL",0,0,"6",,terminal_output +5028,6098679,"TERMINAL",0,0,"7",,terminal_output +5029,6099764,"TERMINAL",0,0,"8",,terminal_output +5030,6100787,"TERMINAL",0,0,"9:00",,terminal_output +5031,6101813,"TERMINAL",0,0,"1",,terminal_output +5032,6102805,"TERMINAL",0,0,"2",,terminal_output +5033,6103863,"TERMINAL",0,0,"3",,terminal_output +5034,6104985,"TERMINAL",0,0,"4",,terminal_output +5035,6106011,"TERMINAL",0,0,"5",,terminal_output +5036,6107016,"TERMINAL",0,0,"6",,terminal_output +5037,6108014,"TERMINAL",0,0,"7",,terminal_output +5038,6109084,"TERMINAL",0,0,"8",,terminal_output +5039,6110135,"TERMINAL",0,0,"9",,terminal_output +5040,6111143,"TERMINAL",0,0,"10",,terminal_output +5041,6112198,"TERMINAL",0,0,"1",,terminal_output +5042,6113232,"TERMINAL",0,0,"2",,terminal_output +5043,6114273,"TERMINAL",0,0,"3",,terminal_output +5044,6115318,"TERMINAL",0,0,"4",,terminal_output +5045,6116458,"TERMINAL",0,0,"5",,terminal_output +5046,6117401,"TERMINAL",0,0,"6",,terminal_output +5047,6118436,"TERMINAL",0,0,"7",,terminal_output +5048,6119527,"TERMINAL",0,0,"8",,terminal_output +5049,6121240,"TERMINAL",0,0,"963",,terminal_output +5050,6122284,"TERMINAL",0,0,"21",,terminal_output +5051,6123419,"TERMINAL",0,0,"2",,terminal_output +5052,6124445,"TERMINAL",0,0,"3",,terminal_output +5053,6125467,"TERMINAL",0,0,"4",,terminal_output +5054,6126494,"TERMINAL",0,0,"5",,terminal_output +5055,6127513,"TERMINAL",0,0,"6",,terminal_output +5056,6128665,"TERMINAL",0,0,"7",,terminal_output +5057,6129667,"TERMINAL",0,0,"8",,terminal_output +5058,6130698,"TERMINAL",0,0,"9",,terminal_output +5059,6131712,"TERMINAL",0,0,"30",,terminal_output +5060,6132704,"TERMINAL",0,0,"1",,terminal_output +5061,6133743,"TERMINAL",0,0,"3",,terminal_output +5062,6134886,"TERMINAL",0,0,"4",,terminal_output +5063,6135824,"TERMINAL",0,0,"5",,terminal_output +5064,6136936,"TERMINAL",0,0,"6",,terminal_output +5065,6137963,"TERMINAL",0,0,"7",,terminal_output +5066,6138982,"TERMINAL",0,0,"8",,terminal_output +5067,6140014,"TERMINAL",0,0,"9",,terminal_output +5068,6141151,"TERMINAL",0,0,"40",,terminal_output +5069,6142092,"TERMINAL",0,0,"1",,terminal_output +5070,6143183,"TERMINAL",0,0,"2",,terminal_output +5071,6144159,"TERMINAL",0,0,"3",,terminal_output +5072,6145233,"TERMINAL",0,0,"42",,terminal_output +5073,6146239,"TERMINAL",0,0,"5",,terminal_output +5074,6147381,"TERMINAL",0,0,"6",,terminal_output +5075,6148318,"TERMINAL",0,0,"7",,terminal_output +5076,6149428,"TERMINAL",0,0,"8",,terminal_output +5077,6150413,"TERMINAL",0,0,"9",,terminal_output +5078,6151479,"TERMINAL",0,0,"50",,terminal_output +5079,6152491,"TERMINAL",0,0,"1",,terminal_output 
+5080,6153669,"TERMINAL",0,0,"2",,terminal_output +5081,6154569,"TERMINAL",0,0,"3",,terminal_output +5082,6155682,"TERMINAL",0,0,"4",,terminal_output +5083,6156649,"TERMINAL",0,0,"5",,terminal_output +5084,6157722,"TERMINAL",0,0,"6",,terminal_output +5085,6158730,"TERMINAL",0,0,"8",,terminal_output +5086,6159873,"TERMINAL",0,0,"9",,terminal_output +5087,6160810,"TERMINAL",0,0,"30:00",,terminal_output +5088,6161920,"TERMINAL",0,0,"1",,terminal_output +5089,6162944,"TERMINAL",0,0,"2",,terminal_output +5090,6163960,"TERMINAL",0,0,"3",,terminal_output +5091,6164991,"TERMINAL",0,0,"4",,terminal_output +5092,6166022,"TERMINAL",0,0,"5",,terminal_output +5093,6167059,"TERMINAL",0,0,"67",,terminal_output +5094,6168166,"TERMINAL",0,0,"7",,terminal_output +5095,6169194,"TERMINAL",0,0,"8",,terminal_output +5096,6170216,"TERMINAL",0,0,"9",,terminal_output +5097,6171218,"TERMINAL",0,0,"10",,terminal_output +5098,6172262,"TERMINAL",0,0,"1",,terminal_output +5099,6173306,"TERMINAL",0,0,"2",,terminal_output +5100,6174343,"TERMINAL",0,0,"3",,terminal_output +5101,6175444,"TERMINAL",0,0,"4",,terminal_output +5102,6176421,"TERMINAL",0,0,"5",,terminal_output +5103,6177460,"TERMINAL",0,0,"6",,terminal_output +5104,6178509,"TERMINAL",0,0,"7",,terminal_output +5105,6179647,"TERMINAL",0,0,"8",,terminal_output +5106,6180667,"TERMINAL",0,0,"9",,terminal_output +5107,6181685,"TERMINAL",0,0,"20",,terminal_output +5108,6182711,"TERMINAL",0,0,"1",,terminal_output +5109,6183679,"TERMINAL",0,0,"2",,terminal_output +5110,6184721,"TERMINAL",0,0,"3",,terminal_output +5111,6185761,"TERMINAL",0,0,"5",,terminal_output +5112,6186909,"TERMINAL",0,0,"6",,terminal_output +5113,6187932,"TERMINAL",0,0,"7",,terminal_output +5114,6188956,"TERMINAL",0,0,"8",,terminal_output +5115,6189980,"TERMINAL",0,0,"93",,terminal_output +5116,6191005,"TERMINAL",0,0,"30",,terminal_output +5117,6192010,"TERMINAL",0,0,"1",,terminal_output +5118,6193050,"TERMINAL",0,0,"2",,terminal_output +5119,6194176,"TERMINAL",0,0,"3",,terminal_output +5120,6195206,"TERMINAL",0,0,"4",,terminal_output +5121,6196176,"TERMINAL",0,0,"5",,terminal_output +5122,6197248,"TERMINAL",0,0,"6",,terminal_output +5123,6198237,"TERMINAL",0,0,"7",,terminal_output +5124,6199275,"TERMINAL",0,0,"8",,terminal_output +5125,6200336,"TERMINAL",0,0,"9",,terminal_output +5126,6201353,"TERMINAL",0,0,"40",,terminal_output +5127,6202392,"TERMINAL",0,0,"1",,terminal_output +5128,6203497,"TERMINAL",0,0,"2",,terminal_output +5129,6204475,"TERMINAL",0,0,"3",,terminal_output +5130,6205512,"TERMINAL",0,0,"4",,terminal_output +5131,6206579,"TERMINAL",0,0,"5",,terminal_output +5132,6207697,"TERMINAL",0,0,"6",,terminal_output +5133,6208683,"TERMINAL",0,0,"7",,terminal_output +5134,6209749,"TERMINAL",0,0,"8",,terminal_output +5135,6210768,"TERMINAL",0,0,"9",,terminal_output +5136,6211788,"TERMINAL",0,0,"51",,terminal_output +5137,6212819,"TERMINAL",0,0,"2",,terminal_output +5138,6213836,"TERMINAL",0,0,"3",,terminal_output +5139,6214965,"TERMINAL",0,0,"4",,terminal_output +5140,6215995,"TERMINAL",0,0,"5",,terminal_output +5141,6217015,"TERMINAL",0,0,"6",,terminal_output +5142,6218039,"TERMINAL",0,0,"7",,terminal_output +5143,6219033,"TERMINAL",0,0,"8",,terminal_output +5144,6220084,"TERMINAL",0,0,"9",,terminal_output +5145,6221209,"TERMINAL",0,0,"1:00",,terminal_output +5146,6222147,"TERMINAL",0,0,"1",,terminal_output +5147,6223258,"TERMINAL",0,0,"2",,terminal_output +5148,6224231,"TERMINAL",0,0,"3",,terminal_output +5149,6225262,"TERMINAL",0,0,"4",,terminal_output 
+5150,6226330,"TERMINAL",0,0,"5",,terminal_output +5151,6227343,"TERMINAL",0,0,"6",,terminal_output +5152,6228385,"TERMINAL",0,0,"7",,terminal_output +5153,6229506,"TERMINAL",0,0,"8",,terminal_output +5154,6230540,"TERMINAL",0,0,"9",,terminal_output +5155,6231504,"TERMINAL",0,0,"10",,terminal_output +5156,6232578,"TERMINAL",0,0,"1",,terminal_output +5157,6233609,"TERMINAL",0,0,"2",,terminal_output +5158,6234626,"TERMINAL",0,0,"3",,terminal_output +5159,6235755,"TERMINAL",0,0,"4",,terminal_output +5160,6236708,"TERMINAL",0,0,"5",,terminal_output +5161,6237806,"TERMINAL",0,0,"7",,terminal_output +5162,6238793,"TERMINAL",0,0,"8",,terminal_output +5163,6239846,"TERMINAL",0,0,"9",,terminal_output +5164,6240887,"TERMINAL",0,0,"20",,terminal_output +5165,6241921,"TERMINAL",0,0,"1",,terminal_output +5166,6243025,"TERMINAL",0,0,"2",,terminal_output +5167,6244020,"TERMINAL",0,0,"3",,terminal_output +5168,6245072,"TERMINAL",0,0,"4",,terminal_output +5169,6246088,"TERMINAL",0,0,"5",,terminal_output +5170,6247132,"TERMINAL",0,0,"6",,terminal_output +5171,6248245,"TERMINAL",0,0,"7",,terminal_output +5172,6249272,"TERMINAL",0,0,"8",,terminal_output +5173,6250254,"TERMINAL",0,0,"9",,terminal_output +5174,6251294,"TERMINAL",0,0,"30",,terminal_output +5175,6252345,"TERMINAL",0,0,"1",,terminal_output +5176,6253376,"TERMINAL",0,0,"2",,terminal_output +5177,6254495,"TERMINAL",0,0,"3",,terminal_output +5178,6255516,"TERMINAL",0,0,"4",,terminal_output +5179,6256495,"TERMINAL",0,0,"5",,terminal_output +5180,6257570,"TERMINAL",0,0,"6",,terminal_output +5181,6258586,"TERMINAL",0,0,"7",,terminal_output +5182,6259715,"TERMINAL",0,0,"8",,terminal_output +5183,6260745,"TERMINAL",0,0,"9",,terminal_output +5184,6261762,"TERMINAL",0,0,"40",,terminal_output +5185,6262802,"TERMINAL",0,0,"2",,terminal_output +5186,6263784,"TERMINAL",0,0,"3",,terminal_output +5187,6264834,"TERMINAL",0,0,"45",,terminal_output +5188,6265962,"TERMINAL",0,0,"5",,terminal_output +5189,6266918,"TERMINAL",0,0,"6",,terminal_output +5190,6268008,"TERMINAL",0,0,"7",,terminal_output +5191,6268979,"TERMINAL",0,0,"8",,terminal_output +5192,6270014,"TERMINAL",0,0,"9",,terminal_output +5193,6271081,"TERMINAL",0,0,"50",,terminal_output +5194,6272098,"TERMINAL",0,0,"1",,terminal_output +5195,6273231,"TERMINAL",0,0,"2",,terminal_output +5196,6274254,"TERMINAL",0,0,"3",,terminal_output +5197,6275279,"TERMINAL",0,0,"4",,terminal_output +5198,6276302,"TERMINAL",0,0,"5",,terminal_output +5199,6277300,"TERMINAL",0,0,"6",,terminal_output +5200,6278345,"TERMINAL",0,0,"7",,terminal_output +5201,6279388,"TERMINAL",0,0,"8",,terminal_output +5202,6280428,"TERMINAL",0,0,"9",,terminal_output +5203,6281476,"TERMINAL",0,0,"2:00",,terminal_output +5204,6282552,"TERMINAL",0,0,"1",,terminal_output +5205,6283567,"TERMINAL",0,0,"2",,terminal_output +5206,6284604,"TERMINAL",0,0,"3",,terminal_output +5207,6285726,"TERMINAL",0,0,"4",,terminal_output +5208,6286755,"TERMINAL",0,0,"5",,terminal_output +5209,6287721,"TERMINAL",0,0,"6",,terminal_output +5210,6288759,"TERMINAL",0,0,"8",,terminal_output +5211,6289827,"TERMINAL",0,0,"9",,terminal_output +5212,6290845,"TERMINAL",0,0,"10",,terminal_output +5213,6291883,"TERMINAL",0,0,"1",,terminal_output +5214,6292993,"TERMINAL",0,0,"2",,terminal_output +5215,6294070,"TERMINAL",0,0,"3",,terminal_output +5216,6295002,"TERMINAL",0,0,"4",,terminal_output +5217,6296040,"TERMINAL",0,0,"5",,terminal_output +5218,6297090,"TERMINAL",0,0,"6",,terminal_output +5219,6298123,"TERMINAL",0,0,"7",,terminal_output 
+5220,6299165,"TERMINAL",0,0,"8",,terminal_output +5221,6300269,"TERMINAL",0,0,"9",,terminal_output +5222,6301295,"TERMINAL",0,0,"20",,terminal_output +5223,6302283,"TERMINAL",0,0,"1",,terminal_output +5224,6303326,"TERMINAL",0,0,"2",,terminal_output +5225,6304365,"TERMINAL",0,0,"3",,terminal_output +5226,6305410,"TERMINAL",0,0,"4",,terminal_output +5227,6306447,"TERMINAL",0,0,"5",,terminal_output +5228,6307540,"TERMINAL",0,0,"6",,terminal_output +5229,6308558,"TERMINAL",0,0,"7",,terminal_output +5230,6309584,"TERMINAL",0,0,"8",,terminal_output +5231,6310709,"TERMINAL",0,0,"9",,terminal_output +5232,6311653,"TERMINAL",0,0,"30",,terminal_output +5233,6312697,"TERMINAL",0,0,"1",,terminal_output +5234,6313735,"TERMINAL",0,0,"3",,terminal_output +5235,6314807,"TERMINAL",0,0,"4",,terminal_output +5236,6315815,"TERMINAL",0,0,"5",,terminal_output +5237,6316959,"TERMINAL",0,0,"6",,terminal_output +5238,6317982,"TERMINAL",0,0,"7",,terminal_output +5239,6319004,"TERMINAL",0,0,"8",,terminal_output +5240,6320032,"TERMINAL",0,0,"9",,terminal_output +5241,6321053,"TERMINAL",0,0,"40",,terminal_output +5242,6322055,"TERMINAL",0,0,"1",,terminal_output +5243,6323202,"TERMINAL",0,0,"2",,terminal_output +5244,6324137,"TERMINAL",0,0,"3",,terminal_output +5245,6325252,"TERMINAL",0,0,"4",,terminal_output +5246,6326224,"TERMINAL",0,0,"5",,terminal_output +5247,6327315,"TERMINAL",0,0,"6",,terminal_output +5248,6328293,"TERMINAL",0,0,"7",,terminal_output +5249,6329331,"TERMINAL",0,0,"8",,terminal_output +5250,6330370,"TERMINAL",0,0,"9",,terminal_output +5251,6331416,"TERMINAL",0,0,"50",,terminal_output +5252,6332453,"TERMINAL",0,0,"1",,terminal_output +5253,6333548,"TERMINAL",0,0,"2",,terminal_output +5254,6334534,"TERMINAL",0,0,"3",,terminal_output +5255,6335593,"TERMINAL",0,0,"4",,terminal_output +5256,6336613,"TERMINAL",0,0,"5",,terminal_output +5257,6337653,"TERMINAL",0,0,"6",,terminal_output +5258,6338694,"TERMINAL",0,0,"7",,terminal_output +5259,6339796,"TERMINAL",0,0,"9",,terminal_output +5260,6340769,"TERMINAL",0,0,"3:00",,terminal_output +5261,6341840,"TERMINAL",0,0,"1",,terminal_output +5262,6342870,"TERMINAL",0,0,"2",,terminal_output +5263,6343903,"TERMINAL",0,0,"3",,terminal_output +5264,6345015,"TERMINAL",0,0,"4",,terminal_output +5265,6345976,"TERMINAL",0,0,"5",,terminal_output +5266,6347119,"TERMINAL",0,0,"6",,terminal_output +5267,6348085,"TERMINAL",0,0,"7",,terminal_output +5268,6349118,"TERMINAL",0,0,"8",,terminal_output +5269,6350251,"TERMINAL",0,0,"9",,terminal_output +5270,6351195,"TERMINAL",0,0,"10",,terminal_output +5271,6352284,"TERMINAL",0,0,"1",,terminal_output +5272,6353313,"TERMINAL",0,0,"2",,terminal_output +5273,6354334,"TERMINAL",0,0,"3",,terminal_output +5274,6355357,"TERMINAL",0,0,"4",,terminal_output +5275,6356396,"TERMINAL",0,0,"5",,terminal_output +5276,6357437,"TERMINAL",0,0,"6",,terminal_output +5277,6358488,"TERMINAL",0,0,"7",,terminal_output +5278,6359554,"TERMINAL",0,0,"8",,terminal_output +5279,6360560,"TERMINAL",0,0,"9",,terminal_output +5280,6361705,"TERMINAL",0,0,"20",,terminal_output +5281,6362646,"TERMINAL",0,0,"1",,terminal_output +5282,6363688,"TERMINAL",0,0,"2",,terminal_output +5283,6364781,"TERMINAL",0,0,"4",,terminal_output +5284,6365758,"TERMINAL",0,0,"5",,terminal_output +5285,6366833,"TERMINAL",0,0,"6",,terminal_output +5286,6367851,"TERMINAL",0,0,"7",,terminal_output +5287,6368977,"TERMINAL",0,0,"8",,terminal_output +5288,6370005,"TERMINAL",0,0,"9",,terminal_output +5289,6371024,"TERMINAL",0,0,"30",,terminal_output 
+5290,6372021,"TERMINAL",0,0,"1",,terminal_output +5291,6373079,"TERMINAL",0,0,"2",,terminal_output +5292,6374089,"TERMINAL",0,0,"3",,terminal_output +5293,6375160,"TERMINAL",0,0,"4",,terminal_output +5294,6376168,"TERMINAL",0,0,"5",,terminal_output +5295,6377273,"TERMINAL",0,0,"6",,terminal_output +5296,6378258,"TERMINAL",0,0,"7",,terminal_output +5297,6379321,"TERMINAL",0,0,"8",,terminal_output +5298,6380335,"TERMINAL",0,0,"9",,terminal_output +5299,6381373,"TERMINAL",0,0,"40",,terminal_output +5300,6382492,"TERMINAL",0,0,"1",,terminal_output +5301,6383523,"TERMINAL",0,0,"2",,terminal_output +5302,6384494,"TERMINAL",0,0,"3",,terminal_output +5303,6385564,"TERMINAL",0,0,"4",,terminal_output +5304,6386575,"TERMINAL",0,0,"5",,terminal_output +5305,6387717,"TERMINAL",0,0,"6",,terminal_output +5306,6388688,"TERMINAL",0,0,"7",,terminal_output +5307,6389698,"TERMINAL",0,0,"8",,terminal_output +5308,6390737,"TERMINAL",0,0,"50",,terminal_output +5309,6391813,"TERMINAL",0,0,"1",,terminal_output +5310,6392829,"TERMINAL",0,0,"2",,terminal_output +5311,6393927,"TERMINAL",0,0,"3",,terminal_output +5312,6394895,"TERMINAL",0,0,"4",,terminal_output +5313,6395948,"TERMINAL",0,0,"5",,terminal_output +5314,6397050,"TERMINAL",0,0,"6",,terminal_output +5315,6398060,"TERMINAL",0,0,"7",,terminal_output +5316,6399053,"TERMINAL",0,0,"8",,terminal_output +5317,6400107,"TERMINAL",0,0,"9",,terminal_output +5318,6401245,"TERMINAL",0,0,"4:00",,terminal_output +5319,6402175,"TERMINAL",0,0,"1",,terminal_output +5320,6403217,"TERMINAL",0,0,"2",,terminal_output +5321,6404254,"TERMINAL",0,0,"3",,terminal_output +5322,6405298,"TERMINAL",0,0,"4",,terminal_output +5323,6406340,"TERMINAL",0,0,"5",,terminal_output +5324,6407478,"TERMINAL",0,0,"6",,terminal_output +5325,6408422,"TERMINAL",0,0,"7",,terminal_output +5326,6409468,"TERMINAL",0,0,"8",,terminal_output +5327,6410507,"TERMINAL",0,0,"9",,terminal_output +5328,6411576,"TERMINAL",0,0,"10",,terminal_output +5329,6412584,"TERMINAL",0,0,"1",,terminal_output +5330,6413671,"TERMINAL",0,0,"2",,terminal_output +5331,6414670,"TERMINAL",0,0,"3",,terminal_output +5332,6415775,"TERMINAL",0,0,"4",,terminal_output +5333,6416767,"TERMINAL",0,0,"6",,terminal_output +5334,6417794,"TERMINAL",0,0,"7",,terminal_output +5335,6418946,"TERMINAL",0,0,"8",,terminal_output +5336,6419872,"TERMINAL",0,0,"9",,terminal_output +5337,6420996,"TERMINAL",0,0,"20",,terminal_output +5338,6422121,"TERMINAL",0,0,"1",,terminal_output +5339,6423055,"TERMINAL",0,0,"2",,terminal_output +5340,6424038,"TERMINAL",0,0,"3",,terminal_output +5341,6425092,"TERMINAL",0,0,"4",,terminal_output +5342,6426132,"TERMINAL",0,0,"5",,terminal_output +5343,6427168,"TERMINAL",0,0,"6",,terminal_output +5344,6428212,"TERMINAL",0,0,"7",,terminal_output +5345,6429251,"TERMINAL",0,0,"8",,terminal_output +5346,6430312,"TERMINAL",0,0,"9",,terminal_output +5347,6431330,"TERMINAL",0,0,"30",,terminal_output +5348,6432379,"TERMINAL",0,0,"1",,terminal_output +5349,6433419,"TERMINAL",0,0,"2",,terminal_output +5350,6434514,"TERMINAL",0,0,"3",,terminal_output +5351,6435537,"TERMINAL",0,0,"4",,terminal_output +5352,6436561,"TERMINAL",0,0,"5",,terminal_output +5353,6437577,"TERMINAL",0,0,"6",,terminal_output +5354,6438682,"TERMINAL",0,0,"7",,terminal_output +5355,6439735,"TERMINAL",0,0,"8",,terminal_output +5356,6440714,"TERMINAL",0,0,"9",,terminal_output +5357,6442412,"TERMINAL",0,0,"416",,terminal_output +5358,6443456,"TERMINAL",0,0,"2",,terminal_output +5359,6444497,"TERMINAL",0,0,"3",,terminal_output 
+5360,6445535,"TERMINAL",0,0,"4",,terminal_output +5361,6446581,"TERMINAL",0,0,"5",,terminal_output +5362,6447722,"TERMINAL",0,0,"6",,terminal_output +5363,6448662,"TERMINAL",0,0,"7",,terminal_output +5364,6449700,"TERMINAL",0,0,"8",,terminal_output +5365,6450793,"TERMINAL",0,0,"50",,terminal_output +5366,6451824,"TERMINAL",0,0,"1",,terminal_output +5367,6452846,"TERMINAL",0,0,"2",,terminal_output +5368,6453967,"TERMINAL",0,0,"3",,terminal_output +5369,6454993,"TERMINAL",0,0,"4",,terminal_output +5370,6456016,"TERMINAL",0,0,"5",,terminal_output +5371,6456969,"TERMINAL",0,0,"6",,terminal_output +5372,6458068,"TERMINAL",0,0,"7",,terminal_output +5373,6459052,"TERMINAL",0,0,"8",,terminal_output +5374,6460116,"TERMINAL",0,0,"9",,terminal_output +5375,6461240,"TERMINAL",0,0,"5:00113",,terminal_output +5376,6462163,"TERMINAL",0,0,"1",,terminal_output +5377,6463196,"TERMINAL",0,0,"2",,terminal_output +5378,6464244,"TERMINAL",0,0,"3",,terminal_output +5379,6465285,"TERMINAL",0,0,"4",,terminal_output +5380,6466362,"TERMINAL",0,0,"5",,terminal_output +5381,6467364,"TERMINAL",0,0,"6",,terminal_output +5382,6468403,"TERMINAL",0,0,"7",,terminal_output +5383,6469442,"TERMINAL",0,0,"8",,terminal_output +5384,6470483,"TERMINAL",0,0,"9",,terminal_output +5385,6471580,"TERMINAL",0,0,"10",,terminal_output +5386,6472607,"TERMINAL",0,0,"1",,terminal_output +5387,6473603,"TERMINAL",0,0,"2",,terminal_output +5388,6474657,"TERMINAL",0,0,"3",,terminal_output +5389,6475782,"TERMINAL",0,0,"48",,terminal_output +5390,6476804,"TERMINAL",0,0,"6",,terminal_output +5391,6477754,"TERMINAL",0,0,"7",,terminal_output +5392,6478804,"TERMINAL",0,0,"8",,terminal_output +5393,6479876,"TERMINAL",0,0,"9",,terminal_output +5394,6480906,"TERMINAL",0,0,"20",,terminal_output +5395,6481910,"TERMINAL",0,0,"1",,terminal_output +5396,6483061,"TERMINAL",0,0,"2 21",,terminal_output +5397,6484102,"TERMINAL",0,0,"3",,terminal_output +5398,6485202,"TERMINAL",0,0,"4",,terminal_output +5399,6486224,"TERMINAL",0,0,"5",,terminal_output +5400,6487248,"TERMINAL",0,0,"6",,terminal_output +5401,6488297,"TERMINAL",0,0,"7",,terminal_output +5402,6489297,"TERMINAL",0,0,"8",,terminal_output +5403,6490337,"TERMINAL",0,0,"9",,terminal_output +5404,6491375,"TERMINAL",0,0,"30",,terminal_output +5405,6492413,"TERMINAL",0,0,"1",,terminal_output +5406,6493496,"TERMINAL",0,0,"2",,terminal_output +5407,6494519,"TERMINAL",0,0,"3",,terminal_output +5408,6495543,"TERMINAL",0,0,"4",,terminal_output +5409,6496577,"TERMINAL",0,0,"5",,terminal_output +5410,6497698,"TERMINAL",0,0,"6",,terminal_output +5411,6498683,"TERMINAL",0,0,"7",,terminal_output +5412,6499741,"TERMINAL",0,0,"8",,terminal_output +5413,6500767,"TERMINAL",0,0,"40",,terminal_output +5414,6501798,"TERMINAL",0,0,"1",,terminal_output +5415,6502831,"TERMINAL",0,0,"2",,terminal_output +5416,6503873,"TERMINAL",0,0,"3",,terminal_output +5417,6504966,"TERMINAL",0,0,"4",,terminal_output +5418,6505988,"TERMINAL",0,0,"5",,terminal_output +5419,6507013,"TERMINAL",0,0,"6",,terminal_output +5420,6508138,"TERMINAL",0,0,"7",,terminal_output +5421,6509072,"TERMINAL",0,0,"8",,terminal_output +5422,6510187,"TERMINAL",0,0,"9",,terminal_output +5423,6511212,"TERMINAL",0,0,"50",,terminal_output +5424,6512242,"TERMINAL",0,0,"1",,terminal_output +5425,6513243,"TERMINAL",0,0,"2",,terminal_output +5426,6514385,"TERMINAL",0,0,"3",,terminal_output +5427,6515323,"TERMINAL",0,0,"4",,terminal_output +5428,6516367,"TERMINAL",0,0,"5",,terminal_output +5429,6517411,"TERMINAL",0,0,"6",,terminal_output 
+5430,6518452,"TERMINAL",0,0,"7",,terminal_output +5431,6519508,"TERMINAL",0,0,"8",,terminal_output +5432,6520631,"TERMINAL",0,0,"93",,terminal_output +5433,6521657,"TERMINAL",0,0,"6:00",,terminal_output +5434,6522681,"TERMINAL",0,0,"1",,terminal_output +5435,6523685,"TERMINAL",0,0,"2",,terminal_output +5436,6524730,"TERMINAL",0,0,"34",,terminal_output +5437,6525729,"TERMINAL",0,0,"5",,terminal_output +5438,6526779,"TERMINAL",0,0,"6",,terminal_output +5439,6527903,"TERMINAL",0,0,"7",,terminal_output +5440,6528926,"TERMINAL",0,0,"8",,terminal_output +5441,6529956,"TERMINAL",0,0,"9",,terminal_output +5442,6530925,"TERMINAL",0,0,"10",,terminal_output +5443,6532005,"TERMINAL",0,0,"1",,terminal_output +5444,6533026,"TERMINAL",0,0,"2",,terminal_output +5445,6534040,"TERMINAL",0,0,"3",,terminal_output +5446,6535185,"TERMINAL",0,0,"4",,terminal_output +5447,6536196,"TERMINAL",0,0,"5",,terminal_output +5448,6537155,"TERMINAL",0,0,"6",,terminal_output +5449,6538246,"TERMINAL",0,0,"7",,terminal_output +5450,6539268,"TERMINAL",0,0,"8",,terminal_output +5451,6540304,"TERMINAL",0,0,"9",,terminal_output +5452,6541322,"TERMINAL",0,0,"20",,terminal_output +5453,6542343,"TERMINAL",0,0,"1",,terminal_output +5454,6543399,"TERMINAL",0,0,"2",,terminal_output +5455,6544427,"TERMINAL",0,0,"3",,terminal_output +5456,6545463,"TERMINAL",0,0,"4",,terminal_output +5457,6546541,"TERMINAL",0,0,"5",,terminal_output +5458,6547570,"TERMINAL",0,0,"6",,terminal_output +5459,6548587,"TERMINAL",0,0,"79",,terminal_output +5460,6549719,"TERMINAL",0,0,"820",,terminal_output +5461,6550750,"TERMINAL",0,0,"9",,terminal_output +5462,6551764,"TERMINAL",0,0,"30",,terminal_output +5463,6552907,"TERMINAL",0,0,"2",,terminal_output +5464,6553825,"TERMINAL",0,0,"3",,terminal_output +5465,6554833,"TERMINAL",0,0,"4",,terminal_output +5466,6555861,"TERMINAL",0,0,"5",,terminal_output +5467,6556987,"TERMINAL",0,0,"6",,terminal_output +5468,6557926,"TERMINAL",0,0,"7",,terminal_output +5469,6559034,"TERMINAL",0,0,"8",,terminal_output +5470,6560007,"TERMINAL",0,0,"9",,terminal_output +5471,6561045,"TERMINAL",0,0,"40",,terminal_output +5472,6562079,"TERMINAL",0,0,"1",,terminal_output +5473,6563130,"TERMINAL",0,0,"2",,terminal_output +5474,6564256,"TERMINAL",0,0,"3",,terminal_output +5475,6565286,"TERMINAL",0,0,"4",,terminal_output +5476,6566304,"TERMINAL",0,0,"5",,terminal_output +5477,6567282,"TERMINAL",0,0,"6",,terminal_output +5478,6568353,"TERMINAL",0,0,"7",,terminal_output +5479,6569374,"TERMINAL",0,0,"8",,terminal_output +5480,6570391,"TERMINAL",0,0,"9",,terminal_output +5481,6571431,"TERMINAL",0,0,"50",,terminal_output +5482,6572467,"TERMINAL",0,0,"1",,terminal_output +5483,6573510,"TERMINAL",0,0,"2",,terminal_output +5484,6574546,"TERMINAL",0,0,"3",,terminal_output +5485,6575620,"TERMINAL",0,0,"4",,terminal_output +5486,6576646,"TERMINAL",0,0,"5",,terminal_output +5487,6577668,"TERMINAL",0,0,"6",,terminal_output +5488,6578709,"TERMINAL",0,0,"7",,terminal_output +5489,6579820,"TERMINAL",0,0,"9",,terminal_output +5490,6580844,"TERMINAL",0,0,"7:00",,terminal_output +5491,6581872,"TERMINAL",0,0,"1",,terminal_output +5492,6582893,"TERMINAL",0,0,"2",,terminal_output +5493,6583918,"TERMINAL",0,0,"3",,terminal_output +5494,6585047,"TERMINAL",0,0,"4",,terminal_output +5495,6585975,"TERMINAL",0,0,"5",,terminal_output +5496,6587089,"TERMINAL",0,0,"6",,terminal_output +5497,6588113,"TERMINAL",0,0,"7",,terminal_output +5498,6589089,"TERMINAL",0,0,"8",,terminal_output +5499,6590137,"TERMINAL",0,0,"9",,terminal_output 
+5500,6591188,"TERMINAL",0,0,"10",,terminal_output +5501,6592209,"TERMINAL",0,0,"1",,terminal_output +5502,6593242,"TERMINAL",0,0,"2",,terminal_output +5503,6594360,"TERMINAL",0,0,"3",,terminal_output +5504,6595321,"TERMINAL",0,0,"4",,terminal_output +5505,6596411,"TERMINAL",0,0,"5",,terminal_output +5506,6597412,"TERMINAL",0,0,"6",,terminal_output +5507,6598443,"TERMINAL",0,0,"7",,terminal_output +5508,6599486,"TERMINAL",0,0,"8",,terminal_output +5509,6600530,"TERMINAL",0,0,"9",,terminal_output +5510,6601577,"TERMINAL",0,0,"20",,terminal_output +5511,6602655,"TERMINAL",0,0,"1",,terminal_output +5512,6603682,"TERMINAL",0,0,"2 8",,terminal_output +5513,6604703,"TERMINAL",0,0,"3",,terminal_output +5514,6605737,"TERMINAL",0,0,"5",,terminal_output +5515,6606764,"TERMINAL",0,0,"6",,terminal_output +5516,6607882,"TERMINAL",0,0,"7",,terminal_output +5517,6608903,"TERMINAL",0,0,"8",,terminal_output +5518,6609925,"TERMINAL",0,0,"9",,terminal_output +5519,6610951,"TERMINAL",0,0,"30",,terminal_output +5520,6612076,"TERMINAL",0,0,"1",,terminal_output +5521,6613111,"TERMINAL",0,0,"2",,terminal_output +5522,6614073,"TERMINAL",0,0,"3",,terminal_output +5523,6615148,"TERMINAL",0,0,"4",,terminal_output +5524,6616126,"TERMINAL",0,0,"5",,terminal_output +5525,6617164,"TERMINAL",0,0,"6",,terminal_output +5526,6618221,"TERMINAL",0,0,"7",,terminal_output +5527,6619347,"TERMINAL",0,0,"8",,terminal_output +5528,6620374,"TERMINAL",0,0,"9",,terminal_output +5529,6621399,"TERMINAL",0,0,"40",,terminal_output +5530,6622418,"TERMINAL",0,0,"1",,terminal_output +5531,6623447,"TERMINAL",0,0,"2",,terminal_output +5532,6624428,"TERMINAL",0,0,"3",,terminal_output +5533,6625469,"TERMINAL",0,0,"4",,terminal_output +5534,6626515,"TERMINAL",0,0,"5",,terminal_output +5535,6627545,"TERMINAL",0,0,"6",,terminal_output +5536,6628578,"TERMINAL",0,0,"7",,terminal_output +5537,6629690,"TERMINAL",0,0,"8",,terminal_output +5538,6630725,"TERMINAL",0,0,"9",,terminal_output +5539,6631736,"TERMINAL",0,0,"50",,terminal_output +5540,6632762,"TERMINAL",0,0,"2",,terminal_output +5541,6633795,"TERMINAL",0,0,"3",,terminal_output +5542,6634922,"TERMINAL",0,0,"4",,terminal_output +5543,6635934,"TERMINAL",0,0,"5",,terminal_output +5544,6636963,"TERMINAL",0,0,"6",,terminal_output +5545,6637940,"TERMINAL",0,0,"7",,terminal_output +5546,6639006,"TERMINAL",0,0,"8",,terminal_output +5547,6640032,"TERMINAL",0,0,"9",,terminal_output +5548,6641055,"TERMINAL",0,0,"8:00",,terminal_output +5549,6642123,"TERMINAL",0,0,"1",,terminal_output +5550,6643205,"TERMINAL",0,0,"2",,terminal_output +5551,6644229,"TERMINAL",0,0,"3",,terminal_output +5552,6645256,"TERMINAL",0,0,"4",,terminal_output +5553,6646248,"TERMINAL",0,0,"5",,terminal_output +5554,6647302,"TERMINAL",0,0,"6",,terminal_output +5555,6648441,"TERMINAL",0,0,"7",,terminal_output +5556,6649455,"TERMINAL",0,0,"8",,terminal_output +5557,6650416,"TERMINAL",0,0,"9",,terminal_output +5558,6651453,"TERMINAL",0,0,"10",,terminal_output +5559,6652489,"TERMINAL",0,0,"1",,terminal_output +5560,6653525,"TERMINAL",0,0,"2",,terminal_output +5561,6654576,"TERMINAL",0,0,"3",,terminal_output +5562,6655701,"TERMINAL",0,0,"4",,terminal_output +5563,6656723,"TERMINAL",0,0,"5",,terminal_output +5564,6657749,"TERMINAL",0,0,"6",,terminal_output +5565,6658726,"TERMINAL",0,0,"8",,terminal_output +5566,6659799,"TERMINAL",0,0,"9",,terminal_output +5567,6660811,"TERMINAL",0,0,"20",,terminal_output +5568,6661950,"TERMINAL",0,0,"1",,terminal_output +5569,6662884,"TERMINAL",0,0,"2",,terminal_output 
+5570,6663993,"TERMINAL",0,0,"3",,terminal_output +5571,6665021,"TERMINAL",0,0,"49",,terminal_output +5572,6666007,"TERMINAL",0,0,"5",,terminal_output +5573,6667065,"TERMINAL",0,0,"6",,terminal_output +5574,6668197,"TERMINAL",0,0,"7",,terminal_output +5575,6669126,"TERMINAL",0,0,"8",,terminal_output +5576,6670242,"TERMINAL",0,0,"9",,terminal_output +5577,6671268,"TERMINAL",0,0,"30",,terminal_output +5578,6672242,"TERMINAL",0,0,"1",,terminal_output +5579,6673311,"TERMINAL",0,0,"2",,terminal_output +5580,6674369,"TERMINAL",0,0,"3",,terminal_output +5581,6675462,"TERMINAL",0,0,"4",,terminal_output +5582,6676411,"TERMINAL",0,0,"5",,terminal_output +5583,6677451,"TERMINAL",0,0,"6",,terminal_output +5584,6678490,"TERMINAL",0,0,"7",,terminal_output +5585,6679534,"TERMINAL",0,0,"8",,terminal_output +5586,6680573,"TERMINAL",0,0,"9",,terminal_output +5587,6681708,"TERMINAL",0,0,"40",,terminal_output +5588,6682731,"TERMINAL",0,0,"1",,terminal_output +5589,6683693,"TERMINAL",0,0,"2",,terminal_output +5590,6684793,"TERMINAL",0,0,"4",,terminal_output +5591,6685803,"TERMINAL",0,0,"5",,terminal_output +5592,6686840,"TERMINAL",0,0,"6",,terminal_output +5593,6687867,"TERMINAL",0,0,"7",,terminal_output +5594,6688985,"TERMINAL",0,0,"8",,terminal_output +5595,6690005,"TERMINAL",0,0,"9",,terminal_output +5596,6691033,"TERMINAL",0,0,"50",,terminal_output +5597,6692053,"TERMINAL",0,0,"1",,terminal_output +5598,6693071,"TERMINAL",0,0,"2",,terminal_output +5599,6694112,"TERMINAL",0,0,"3",,terminal_output +5600,6695225,"TERMINAL",0,0,"4",,terminal_output +5601,6696253,"TERMINAL",0,0,"5",,terminal_output +5602,6697237,"TERMINAL",0,0,"6",,terminal_output +5603,6698298,"TERMINAL",0,0,"7",,terminal_output +5604,6699425,"TERMINAL",0,0,"8",,terminal_output +5605,6700449,"TERMINAL",0,0,"9",,terminal_output +5606,6701403,"TERMINAL",0,0,"9:00",,terminal_output +5607,6702442,"TERMINAL",0,0,"1",,terminal_output +5608,6703481,"TERMINAL",0,0,"2",,terminal_output +5609,6704534,"TERMINAL",0,0,"3",,terminal_output +5610,6705562,"TERMINAL",0,0,"4",,terminal_output +5611,6706599,"TERMINAL",0,0,"5",,terminal_output +5612,6707637,"TERMINAL",0,0,"6",,terminal_output +5613,6708689,"TERMINAL",0,0,"7",,terminal_output +5614,6709772,"TERMINAL",0,0,"8",,terminal_output +5615,6710758,"TERMINAL",0,0,"10",,terminal_output +5616,6711816,"TERMINAL",0,0,"1",,terminal_output +5617,6712844,"TERMINAL",0,0,"2",,terminal_output +5618,6713972,"TERMINAL",0,0,"3",,terminal_output +5619,6714993,"TERMINAL",0,0,"4",,terminal_output +5620,6715960,"TERMINAL",0,0,"5",,terminal_output +5621,6717024,"TERMINAL",0,0,"6",,terminal_output +5622,6718039,"TERMINAL",0,0,"7",,terminal_output +5623,6719088,"TERMINAL",0,0,"8",,terminal_output +5624,6720211,"TERMINAL",0,0,"9",,terminal_output +5625,6721239,"TERMINAL",0,0,"20",,terminal_output +5626,6722197,"TERMINAL",0,0,"1",,terminal_output +5627,6723285,"TERMINAL",0,0,"2",,terminal_output +5628,6724309,"TERMINAL",0,0,"38",,terminal_output +5629,6725323,"TERMINAL",0,0,"4",,terminal_output +5630,6726457,"TERMINAL",0,0,"5",,terminal_output +5631,6727401,"TERMINAL",0,0,"6",,terminal_output +5632,6728510,"TERMINAL",0,0,"7",,terminal_output +5633,6729478,"TERMINAL",0,0,"8",,terminal_output +5634,6730523,"TERMINAL",0,0,"9",,terminal_output +5635,6731557,"TERMINAL",0,0,"30",,terminal_output +5636,6732591,"TERMINAL",0,0,"1",,terminal_output +5637,6733683,"TERMINAL",0,0,"2",,terminal_output +5638,6734672,"TERMINAL",0,0,"3",,terminal_output +5639,6735776,"TERMINAL",0,0,"4",,terminal_output 
+5640,6736799,"TERMINAL",0,0,"6",,terminal_output +5641,6737853,"TERMINAL",0,0,"7",,terminal_output +5642,6738850,"TERMINAL",0,0,"8",,terminal_output +5643,6739985,"TERMINAL",0,0,"9",,terminal_output +5644,6741002,"TERMINAL",0,0,"40",,terminal_output +5645,6742024,"TERMINAL",0,0,"1",,terminal_output +5646,6743050,"TERMINAL",0,0,"2",,terminal_output +5647,6744053,"TERMINAL",0,0,"3",,terminal_output +5648,6745100,"TERMINAL",0,0,"4",,terminal_output +5649,6746118,"TERMINAL",0,0,"5",,terminal_output +5650,6747249,"TERMINAL",0,0,"6",,terminal_output +5651,6748270,"TERMINAL",0,0,"7",,terminal_output +5652,6749295,"TERMINAL",0,0,"8",,terminal_output +5653,6750277,"TERMINAL",0,0,"9",,terminal_output +5654,6751862,"TERMINAL",0,0,"50",,terminal_output +5655,6752877,"TERMINAL",0,0,"2",,terminal_output +5656,6754009,"TERMINAL",0,0,"3",,terminal_output +5657,6755033,"TERMINAL",0,0,"4",,terminal_output +5658,6756055,"TERMINAL",0,0,"58",,terminal_output +5659,6757125,"TERMINAL",0,0,"6",,terminal_output +5660,6758104,"TERMINAL",0,0,"7",,terminal_output +5661,6759236,"TERMINAL",0,0,"8",,terminal_output +5662,6760162,"TERMINAL",0,0,"9",,terminal_output +5663,6761193,"TERMINAL",0,0,"40:00",,terminal_output +5664,6762236,"TERMINAL",0,0,"1",,terminal_output +5665,6763323,"TERMINAL",0,0,"2",,terminal_output +5666,6764320,"TERMINAL",0,0,"3",,terminal_output +5667,6765371,"TERMINAL",0,0,"4",,terminal_output +5668,6766391,"TERMINAL",0,0,"5",,terminal_output +5669,6767435,"TERMINAL",0,0,"69",,terminal_output +5670,6768467,"TERMINAL",0,0,"7",,terminal_output +5671,6769502,"TERMINAL",0,0,"8",,terminal_output +5672,6770541,"TERMINAL",0,0,"9",,terminal_output +5673,6771583,"TERMINAL",0,0,"10",,terminal_output +5674,6772620,"TERMINAL",0,0,"1",,terminal_output +5675,6773685,"TERMINAL",0,0,"2",,terminal_output +5676,6774800,"TERMINAL",0,0,"3",,terminal_output +5677,6775824,"TERMINAL",0,0,"51",,terminal_output +5678,6776781,"TERMINAL",0,0,"6",,terminal_output +5679,6777814,"TERMINAL",0,0,"7",,terminal_output +5680,6778887,"TERMINAL",0,0,"8",,terminal_output +5681,6779885,"TERMINAL",0,0,"9",,terminal_output +5682,6780936,"TERMINAL",0,0,"20",,terminal_output +5683,6782065,"TERMINAL",0,0,"1",,terminal_output +5684,6783000,"TERMINAL",0,0,"2",,terminal_output +5685,6784059,"TERMINAL",0,0,"3",,terminal_output +5686,6785139,"TERMINAL",0,0,"4",,terminal_output +5687,6786164,"TERMINAL",0,0,"5",,terminal_output +5688,6787166,"TERMINAL",0,0,"6",,terminal_output +5689,6788211,"TERMINAL",0,0,"7",,terminal_output +5690,6789235,"TERMINAL",0,0,"8",,terminal_output +5691,6790356,"TERMINAL",0,0,"9",,terminal_output +5692,6791382,"TERMINAL",0,0,"30",,terminal_output +5693,6792354,"TERMINAL",0,0,"1",,terminal_output +5694,6793430,"TERMINAL",0,0,"2",,terminal_output +5695,6794422,"TERMINAL",0,0,"3",,terminal_output +5696,6795478,"TERMINAL",0,0,"4",,terminal_output +5697,6796508,"TERMINAL",0,0,"5",,terminal_output +5698,6797545,"TERMINAL",0,0,"6",,terminal_output +5699,6798580,"TERMINAL",0,0,"7",,terminal_output +5700,6799674,"TERMINAL",0,0,"8",,terminal_output +5701,6800658,"TERMINAL",0,0,"9",,terminal_output +5702,6801724,"TERMINAL",0,0,"40",,terminal_output +5703,6802851,"TERMINAL",0,0,"2",,terminal_output +5704,6803791,"TERMINAL",0,0,"3",,terminal_output +5705,6804899,"TERMINAL",0,0,"4",,terminal_output +5706,6805851,"TERMINAL",0,0,"5",,terminal_output +5707,6806887,"TERMINAL",0,0,"6",,terminal_output +5708,6807982,"TERMINAL",0,0,"7",,terminal_output +5709,6808990,"TERMINAL",0,0,"8",,terminal_output 
+5710,6810036,"TERMINAL",0,0,"9",,terminal_output +5711,6811148,"TERMINAL",0,0,"50 9",,terminal_output +5712,6812116,"TERMINAL",0,0,"1",,terminal_output +5713,6813196,"TERMINAL",0,0,"2",,terminal_output +5714,6814221,"TERMINAL",0,0,"3",,terminal_output +5715,6815209,"TERMINAL",0,0,"4",,terminal_output +5716,6816265,"TERMINAL",0,0,"5",,terminal_output +5717,6817287,"TERMINAL",0,0,"6",,terminal_output +5718,6818322,"TERMINAL",0,0,"7",,terminal_output +5719,6819358,"TERMINAL",0,0,"8",,terminal_output +5720,6820477,"TERMINAL",0,0,"9",,terminal_output +5721,6821487,"TERMINAL",0,0,"1:00",,terminal_output +5722,6822494,"TERMINAL",0,0,"1",,terminal_output +5723,6823535,"TERMINAL",0,0,"2",,terminal_output +5724,6824664,"TERMINAL",0,0,"3",,terminal_output +5725,6825605,"TERMINAL",0,0,"4",,terminal_output +5726,6826707,"TERMINAL",0,0,"5",,terminal_output +5727,6827685,"TERMINAL",0,0,"6",,terminal_output +5728,6828719,"TERMINAL",0,0,"7",,terminal_output +5729,6829780,"TERMINAL",0,0,"9",,terminal_output +5730,6830789,"TERMINAL",0,0,"10",,terminal_output +5731,6831926,"TERMINAL",0,0,"1",,terminal_output +5732,6832957,"TERMINAL",0,0,"2",,terminal_output +5733,6833982,"TERMINAL",0,0,"3",,terminal_output +5734,6835005,"TERMINAL",0,0,"4",,terminal_output +5735,6835981,"TERMINAL",0,0,"5",,terminal_output +5736,6837022,"TERMINAL",0,0,"6",,terminal_output +5737,6838074,"TERMINAL",0,0,"7",,terminal_output +5738,6839089,"TERMINAL",0,0,"8",,terminal_output +5739,6840131,"TERMINAL",0,0,"9",,terminal_output +5740,6841251,"TERMINAL",0,0,"20",,terminal_output +5741,6842205,"TERMINAL",0,0,"1",,terminal_output +5742,6843243,"TERMINAL",0,0,"2",,terminal_output +5743,6844520,"TERMINAL",0,0,"3",,terminal_output +5744,6845560,"TERMINAL",0,0,"4",,terminal_output +5745,6846600,"TERMINAL",0,0,"5",,terminal_output +5746,6847653,"TERMINAL",0,0,"6",,terminal_output +5747,6848691,"TERMINAL",0,0,"7",,terminal_output +5748,6849717,"TERMINAL",0,0,"8",,terminal_output +5749,6850779,"TERMINAL",0,0,"30",,terminal_output +5750,6851802,"TERMINAL",0,0,"1",,terminal_output +5751,6852841,"TERMINAL",0,0,"2",,terminal_output +5752,6853949,"TERMINAL",0,0,"3",,terminal_output +5753,6854968,"TERMINAL",0,0,"4",,terminal_output +5754,6856094,"TERMINAL",0,0,"5",,terminal_output +5755,6857035,"TERMINAL",0,0,"6",,terminal_output +5756,6858150,"TERMINAL",0,0,"7",,terminal_output +5757,6859084,"TERMINAL",0,0,"8",,terminal_output +5758,6860196,"TERMINAL",0,0,"9",,terminal_output +5759,6861224,"TERMINAL",0,0,"40",,terminal_output +5760,6862243,"TERMINAL",0,0,"1",,terminal_output +5761,6863266,"TERMINAL",0,0,"2",,terminal_output +5762,6864392,"TERMINAL",0,0,"3",,terminal_output +5763,6865395,"TERMINAL",0,0,"4",,terminal_output +5764,6866449,"TERMINAL",0,0,"5",,terminal_output +5765,6867471,"TERMINAL",0,0,"6",,terminal_output +5766,6868492,"TERMINAL",0,0,"7",,terminal_output +5767,6869569,"TERMINAL",0,0,"8",,terminal_output +5768,6870544,"TERMINAL",0,0,"97",,terminal_output +5769,6871563,"TERMINAL",0,0,"50",,terminal_output +5770,6872604,"TERMINAL",0,0,"1",,terminal_output +5771,6873675,"TERMINAL",0,0,"2",,terminal_output +5772,6874685,"TERMINAL",0,0,"3",,terminal_output +5773,6875724,"TERMINAL",0,0,"5",,terminal_output +5774,6876760,"TERMINAL",0,0,"6",,terminal_output +5775,6877808,"TERMINAL",0,0,"7",,terminal_output +5776,6878840,"TERMINAL",0,0,"8",,terminal_output +5777,6879974,"TERMINAL",0,0,"9",,terminal_output +5778,6880982,"TERMINAL",0,0,"2:00",,terminal_output +5779,6881956,"TERMINAL",0,0,"1",,terminal_output 
+5780,6883033,"TERMINAL",0,0,"2",,terminal_output +5781,6884041,"TERMINAL",0,0,"3",,terminal_output +5782,6885091,"TERMINAL",0,0,"4",,terminal_output +5783,6886204,"TERMINAL",0,0,"5",,terminal_output +5784,6887169,"TERMINAL",0,0,"611",,terminal_output +5785,6888252,"TERMINAL",0,0,"7",,terminal_output +5786,6889289,"TERMINAL",0,0,"8",,terminal_output +5787,6890290,"TERMINAL",0,0,"9",,terminal_output +5788,6891326,"TERMINAL",0,0,"10",,terminal_output +5789,6892462,"TERMINAL",0,0,"1",,terminal_output +5790,6893474,"TERMINAL",0,0,"2",,terminal_output +5791,6894445,"TERMINAL",0,0,"3",,terminal_output +5792,6895484,"TERMINAL",0,0,"4",,terminal_output +5793,6896522,"TERMINAL",0,0,"5",,terminal_output +5794,6897568,"TERMINAL",0,0,"6",,terminal_output +5795,6898614,"TERMINAL",0,0,"7",,terminal_output +5796,6899724,"TERMINAL",0,0,"8",,terminal_output +5797,6900695,"TERMINAL",0,0,"9",,terminal_output +5798,6901770,"TERMINAL",0,0,"21",,terminal_output +5799,6902775,"TERMINAL",0,0,"2",,terminal_output +5800,6903921,"TERMINAL",0,0,"3",,terminal_output +5801,6904944,"TERMINAL",0,0,"4",,terminal_output +5802,6905970,"TERMINAL",0,0,"5",,terminal_output +5803,6906938,"TERMINAL",0,0,"6",,terminal_output +5804,6907979,"TERMINAL",0,0,"7",,terminal_output +5805,6909024,"TERMINAL",0,0,"8",,terminal_output +5806,6910165,"TERMINAL",0,0,"9",,terminal_output +5807,6911092,"TERMINAL",0,0,"30",,terminal_output +5808,6912164,"TERMINAL",0,0,"1",,terminal_output +5809,6913242,"TERMINAL",0,0,"2",,terminal_output +5810,6914265,"TERMINAL",0,0,"3",,terminal_output +5811,6915287,"TERMINAL",0,0,"4",,terminal_output +5812,6916294,"TERMINAL",0,0,"5",,terminal_output +5813,6917436,"TERMINAL",0,0,"6",,terminal_output +5814,6918477,"TERMINAL",0,0,"7",,terminal_output +5815,6919486,"TERMINAL",0,0,"8",,terminal_output +5816,6920459,"TERMINAL",0,0,"9",,terminal_output +5817,6921505,"TERMINAL",0,0,"401",,terminal_output +5818,6922560,"TERMINAL",0,0,"1",,terminal_output +5819,6923561,"TERMINAL",0,0,"2",,terminal_output +5820,6924595,"TERMINAL",0,0,"3",,terminal_output +5821,6925635,"TERMINAL",0,0,"4",,terminal_output +5822,6926671,"TERMINAL",0,0,"5",,terminal_output +5823,6927709,"TERMINAL",0,0,"6",,terminal_output +5824,6928750,"TERMINAL",0,0,"8",,terminal_output +5825,6929786,"TERMINAL",0,0,"9",,terminal_output +5826,6930850,"TERMINAL",0,0,"50",,terminal_output +5827,6931868,"TERMINAL",0,0,"1",,terminal_output +5828,6932907,"TERMINAL",0,0,"2",,terminal_output +5829,6934025,"TERMINAL",0,0,"3",,terminal_output +5830,6935040,"TERMINAL",0,0,"4",,terminal_output +5831,6936077,"TERMINAL",0,0,"5",,terminal_output +5832,6937097,"TERMINAL",0,0,"6",,terminal_output +5833,6938123,"TERMINAL",0,0,"7",,terminal_output +5834,6939247,"TERMINAL",0,0,"8",,terminal_output +5835,6940192,"TERMINAL",0,0,"9",,terminal_output +5836,6941295,"TERMINAL",0,0,"3:00",,terminal_output +5837,6942280,"TERMINAL",0,0,"1",,terminal_output +5838,6943353,"TERMINAL",0,0,"2",,terminal_output +5839,6944382,"TERMINAL",0,0,"3",,terminal_output +5840,6945399,"TERMINAL",0,0,"4",,terminal_output +5841,6946518,"TERMINAL",0,0,"5",,terminal_output +5842,6947515,"TERMINAL",0,0,"6",,terminal_output +5843,6948575,"TERMINAL",0,0,"7",,terminal_output +5844,6949552,"TERMINAL",0,0,"8",,terminal_output +5845,6950593,"TERMINAL",0,0,"9",,terminal_output +5846,6951627,"TERMINAL",0,0,"10",,terminal_output +5847,6952769,"TERMINAL",0,0,"1",,terminal_output +5848,6953717,"TERMINAL",0,0,"20",,terminal_output +5849,6954744,"TERMINAL",0,0,"4",,terminal_output 
+5850,6955841,"TERMINAL",0,0,"5",,terminal_output +5851,6956863,"TERMINAL",0,0,"6",,terminal_output +5852,6957893,"TERMINAL",0,0,"7",,terminal_output +5853,6959021,"TERMINAL",0,0,"8",,terminal_output +5854,6959962,"TERMINAL",0,0,"91",,terminal_output +5855,6961071,"TERMINAL",0,0,"20",,terminal_output +5856,6962083,"TERMINAL",0,0,"1",,terminal_output +5857,6963076,"TERMINAL",0,0,"2",,terminal_output +5858,6964131,"TERMINAL",0,0,"3",,terminal_output +5859,6965257,"TERMINAL",0,0,"4 3",,terminal_output +5860,6966210,"TERMINAL",0,0,"5",,terminal_output +5861,6967304,"TERMINAL",0,0,"6",,terminal_output +5862,6968329,"TERMINAL",0,0,"7",,terminal_output +5863,6969354,"TERMINAL",0,0,"8",,terminal_output +5864,6970374,"TERMINAL",0,0,"9",,terminal_output +5865,6971503,"TERMINAL",0,0,"30",,terminal_output +5866,6972532,"TERMINAL",0,0,"1",,terminal_output +5867,6973553,"TERMINAL",0,0,"2",,terminal_output +5868,6974577,"TERMINAL",0,0,"3",,terminal_output +5869,6975575,"TERMINAL",0,0,"4",,terminal_output +5870,6976646,"TERMINAL",0,0,"5",,terminal_output +5871,6977653,"TERMINAL",0,0,"6",,terminal_output +5872,6978682,"TERMINAL",0,0,"7",,terminal_output +5873,6979729,"TERMINAL",0,0,"9",,terminal_output +5874,6980832,"TERMINAL",0,0,"40",,terminal_output +5875,6981805,"TERMINAL",0,0,"1",,terminal_output +5876,6982872,"TERMINAL",0,0,"2",,terminal_output +5877,6983890,"TERMINAL",0,0,"3",,terminal_output +5878,6985022,"TERMINAL",0,0,"4",,terminal_output +5879,6986030,"TERMINAL",0,0,"5",,terminal_output +5880,6987025,"TERMINAL",0,0,"61",,terminal_output +5881,6988092,"TERMINAL",0,0,"7",,terminal_output +5882,6989097,"TERMINAL",0,0,"8",,terminal_output +5883,6990140,"TERMINAL",0,0,"9",,terminal_output +5884,6991171,"TERMINAL",0,0,"50",,terminal_output +5885,6992216,"TERMINAL",0,0,"1",,terminal_output +5886,6993245,"TERMINAL",0,0,"2",,terminal_output +5887,6994343,"TERMINAL",0,0,"3",,terminal_output +5888,6995367,"TERMINAL",0,0,"4",,terminal_output +5889,6996389,"TERMINAL",0,0,"5",,terminal_output +5890,6997411,"TERMINAL",0,0,"6",,terminal_output +5891,6998554,"TERMINAL",0,0,"7",,terminal_output +5892,6999564,"TERMINAL",0,0,"8",,terminal_output +5893,7000515,"TERMINAL",0,0,"9",,terminal_output +5894,7001609,"TERMINAL",0,0,"4:00",,terminal_output +5895,7002641,"TERMINAL",0,0,"1",,terminal_output +5896,7003677,"TERMINAL",0,0,"2",,terminal_output +5897,7004676,"TERMINAL",0,0,"3",,terminal_output +5898,7005716,"TERMINAL",0,0,"4",,terminal_output +5899,7006839,"TERMINAL",0,0,"6",,terminal_output +5900,7007797,"TERMINAL",0,0,"7",,terminal_output +5901,7008839,"TERMINAL",0,0,"8",,terminal_output +5902,7009905,"TERMINAL",0,0,"9",,terminal_output +5903,7010919,"TERMINAL",0,0,"10",,terminal_output +5904,7011961,"TERMINAL",0,0,"1",,terminal_output +5905,7012999,"TERMINAL",0,0,"2",,terminal_output +5906,7014052,"TERMINAL",0,0,"35",,terminal_output +5907,7015132,"TERMINAL",0,0,"4",,terminal_output +5908,7016124,"TERMINAL",0,0,"5",,terminal_output +5909,7017167,"TERMINAL",0,0,"6",,terminal_output +5910,7018304,"TERMINAL",0,0,"7",,terminal_output +5911,7019328,"TERMINAL",0,0,"8",,terminal_output +5912,7020354,"TERMINAL",0,0,"9",,terminal_output +5913,7021373,"TERMINAL",0,0,"20",,terminal_output +5914,7022403,"TERMINAL",0,0,"12",,terminal_output +5915,7023529,"TERMINAL",0,0,"2",,terminal_output +5916,7024468,"TERMINAL",0,0,"3",,terminal_output +5917,7025589,"TERMINAL",0,0,"4",,terminal_output +5918,7026562,"TERMINAL",0,0,"5",,terminal_output +5919,7027618,"TERMINAL",0,0,"6",,terminal_output 
+5920,7028658,"TERMINAL",0,0,"7",,terminal_output +5921,7029687,"TERMINAL",0,0,"8",,terminal_output +5922,7030723,"TERMINAL",0,0,"30",,terminal_output +5923,7031770,"TERMINAL",0,0,"1",,terminal_output +5924,7032844,"TERMINAL",0,0,"2",,terminal_output +5925,7033869,"TERMINAL",0,0,"3",,terminal_output +5926,7034909,"TERMINAL",0,0,"4",,terminal_output +5927,7036018,"TERMINAL",0,0,"5",,terminal_output +5928,7036979,"TERMINAL",0,0,"6",,terminal_output +5929,7038176,"TERMINAL",0,0,"7",,terminal_output +5930,7039067,"TERMINAL",0,0,"8",,terminal_output +5931,7040215,"TERMINAL",0,0,"9",,terminal_output +5932,7041148,"TERMINAL",0,0,"40",,terminal_output +5933,7042194,"TERMINAL",0,0,"1",,terminal_output +5934,7043238,"TERMINAL",0,0,"2",,terminal_output +5935,7044311,"TERMINAL",0,0,"3",,terminal_output +5936,7045416,"TERMINAL",0,0,"4",,terminal_output +5937,7046466,"TERMINAL",0,0,"5",,terminal_output +5938,7047485,"TERMINAL",0,0,"6",,terminal_output +5939,7048510,"TERMINAL",0,0,"7",,terminal_output +5940,7049536,"TERMINAL",0,0,"8",,terminal_output +5941,7050564,"TERMINAL",0,0,"9",,terminal_output +5942,7051586,"TERMINAL",0,0,"50",,terminal_output +5943,7052608,"TERMINAL",0,0,"1",,terminal_output +5944,7053681,"TERMINAL",0,0,"2",,terminal_output +5945,7054759,"TERMINAL",0,0,"3",,terminal_output +5946,7055779,"TERMINAL",0,0,"5",,terminal_output +5947,7056787,"TERMINAL",0,0,"6",,terminal_output +5948,7057825,"TERMINAL",0,0,"7",,terminal_output +5949,7058868,"TERMINAL",0,0,"8",,terminal_output +5950,7059981,"TERMINAL",0,0,"9",,terminal_output +5951,7061003,"TERMINAL",0,0,"5:00",,terminal_output +5952,7061994,"TERMINAL",0,0,"1",,terminal_output +5953,7063153,"TERMINAL",0,0,"2",,terminal_output +5954,7064087,"TERMINAL",0,0,"3",,terminal_output +5955,7065213,"TERMINAL",0,0,"4",,terminal_output +5956,7066234,"TERMINAL",0,0,"5",,terminal_output +5957,7067257,"TERMINAL",0,0,"6",,terminal_output +5958,7068253,"TERMINAL",0,0,"7",,terminal_output +5959,7069404,"TERMINAL",0,0,"8",,terminal_output +5960,7070338,"TERMINAL",0,0,"9",,terminal_output +5961,7071372,"TERMINAL",0,0,"10",,terminal_output +5962,7072411,"TERMINAL",0,0,"1",,terminal_output +5963,7073495,"TERMINAL",0,0,"2",,terminal_output +5964,7074499,"TERMINAL",0,0,"3",,terminal_output +5965,7075661,"TERMINAL",0,0,"4",,terminal_output +5966,7076672,"TERMINAL",0,0,"5",,terminal_output +5967,7077624,"TERMINAL",0,0,"6",,terminal_output +5968,7078679,"TERMINAL",0,0,"7",,terminal_output +5969,7079698,"TERMINAL",0,0,"8",,terminal_output +5970,7080748,"TERMINAL",0,0,"20",,terminal_output +5971,7081780,"TERMINAL",0,0,"1",,terminal_output +5972,7083431,"TERMINAL",0,0,"2",,terminal_output +5973,7084484,"TERMINAL",0,0,"3",,terminal_output +5974,7085578,"TERMINAL",0,0,"4200",,terminal_output +5975,7086605,"TERMINAL",0,0,"5",,terminal_output +5976,7087633,"TERMINAL",0,0,"6",,terminal_output +5977,7088675,"TERMINAL",0,0,"713",,terminal_output +5978,7089727,"TERMINAL",0,0,"8",,terminal_output +5979,7090752,"TERMINAL",0,0,"30",,terminal_output +5980,7091827,"TERMINAL",0,0,"1",,terminal_output +5981,7092857,"TERMINAL",0,0,"2",,terminal_output +5982,7093974,"TERMINAL",0,0,"3",,terminal_output +5983,7095003,"TERMINAL",0,0,"4",,terminal_output +5984,7095943,"TERMINAL",0,0,"5",,terminal_output +5985,7096994,"TERMINAL",0,0,"6",,terminal_output +5986,7098075,"TERMINAL",0,0,"7",,terminal_output +5987,7099081,"TERMINAL",0,0,"8",,terminal_output +5988,7100123,"TERMINAL",0,0,"9",,terminal_output +5989,7101363,"TERMINAL",0,0,"40",,terminal_output 
+5990,7102183,"TERMINAL",0,0,"1",,terminal_output +5991,7103300,"TERMINAL",0,0,"2",,terminal_output +5992,7104265,"TERMINAL",0,0,"3",,terminal_output +5993,7105350,"TERMINAL",0,0,"4",,terminal_output +5994,7106367,"TERMINAL",0,0,"5",,terminal_output +5995,7107502,"TERMINAL",0,0,"6",,terminal_output +5996,7108425,"TERMINAL",0,0,"7",,terminal_output +5997,7109458,"TERMINAL",0,0,"8",,terminal_output +5998,7110566,"TERMINAL",0,0,"9",,terminal_output +5999,7111550,"TERMINAL",0,0,"50",,terminal_output +6000,7112580,"TERMINAL",0,0,"1",,terminal_output +6001,7113621,"TERMINAL",0,0,"2",,terminal_output +6002,7114667,"TERMINAL",0,0,"3",,terminal_output +6003,7115695,"TERMINAL",0,0,"4",,terminal_output +6004,7116736,"TERMINAL",0,0,"6",,terminal_output +6005,7117784,"TERMINAL",0,0,"7",,terminal_output +6006,7118828,"TERMINAL",0,0,"8",,terminal_output +6007,7119857,"TERMINAL",0,0,"9",,terminal_output +6008,7120910,"TERMINAL",0,0,"6:00",,terminal_output +6009,7122149,"TERMINAL",0,0,"1",,terminal_output +6010,7122983,"TERMINAL",0,0,"2",,terminal_output +6011,7124032,"TERMINAL",0,0,"3",,terminal_output +6012,7125070,"TERMINAL",0,0,"4",,terminal_output +6013,7126114,"TERMINAL",0,0,"5",,terminal_output +6014,7127155,"TERMINAL",0,0,"6",,terminal_output +6015,7128281,"TERMINAL",0,0,"7",,terminal_output +6016,7129303,"TERMINAL",0,0,"8",,terminal_output +6017,7130304,"TERMINAL",0,0,"9",,terminal_output +6018,7131352,"TERMINAL",0,0,"10",,terminal_output +6019,7132377,"TERMINAL",0,0,"1",,terminal_output +6020,7133504,"TERMINAL",0,0,"2",,terminal_output +6021,7134444,"TERMINAL",0,0,"3",,terminal_output +6022,7135490,"TERMINAL",0,0,"4",,terminal_output +6023,7136526,"TERMINAL",0,0,"5",,terminal_output +6024,7137603,"TERMINAL",0,0,"6",,terminal_output +6025,7138623,"TERMINAL",0,0,"7",,terminal_output +6026,7139649,"TERMINAL",0,0,"8",,terminal_output +6027,7140681,"TERMINAL",0,0,"9",,terminal_output +6028,7141730,"TERMINAL",0,0,"21",,terminal_output +6029,7142769,"TERMINAL",0,0,"2",,terminal_output +6030,7143804,"TERMINAL",0,0,"3",,terminal_output +6031,7144870,"TERMINAL",0,0,"4",,terminal_output +6032,7145892,"TERMINAL",0,0,"5",,terminal_output +6033,7147027,"TERMINAL",0,0,"6",,terminal_output +6034,7147969,"TERMINAL",0,0,"7",,terminal_output +6035,7149028,"TERMINAL",0,0,"8",,terminal_output +6036,7150091,"TERMINAL",0,0,"9",,terminal_output +6037,7151126,"TERMINAL",0,0,"30",,terminal_output +6038,7152116,"TERMINAL",0,0,"1",,terminal_output +6039,7153158,"TERMINAL",0,0,"2",,terminal_output +6040,7154289,"TERMINAL",0,0,"3",,terminal_output +6041,7155312,"TERMINAL",0,0,"4",,terminal_output +6042,7156578,"TERMINAL",0,0,"5",,terminal_output +6043,7157370,"TERMINAL",0,0,"6",,terminal_output +6044,7158393,"TERMINAL",0,0,"7",,terminal_output +6045,7159412,"TERMINAL",0,0,"8",,terminal_output +6046,7160536,"TERMINAL",0,0,"9",,terminal_output +6047,7161561,"TERMINAL",0,0,"40",,terminal_output +6048,7162598,"TERMINAL",0,0,"1",,terminal_output +6049,7163613,"TERMINAL",0,0,"2",,terminal_output +6050,7164638,"TERMINAL",0,0,"3",,terminal_output +6051,7165621,"TERMINAL",0,0,"4",,terminal_output +6052,7166692,"TERMINAL",0,0,"5",,terminal_output +6053,7167722,"TERMINAL",0,0,"6",,terminal_output +6054,7168753,"TERMINAL",0,0,"8",,terminal_output +6055,7169797,"TERMINAL",0,0,"9",,terminal_output +6056,7170886,"TERMINAL",0,0,"50",,terminal_output +6057,7171865,"TERMINAL",0,0,"1",,terminal_output +6058,7172909,"TERMINAL",0,0,"2",,terminal_output +6059,7173969,"TERMINAL",0,0,"3",,terminal_output 
+[sequences 6060-6106 ticker elided]
+6107,7223099,"TERMINAL",0,0,"bash",,terminal_focus
+6116,7231677,"TERMINAL",0,0,"salloc --time=10:00:00 --partition=accelerated --nodes=2 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5",,terminal_command
+6117,7231688,"TERMINAL",0,0,"salloc: Pending job allocation 3370952\r\nsalloc: job 3370952 queued and waiting for resources\r\n",,terminal_output
+[sequences 6108-6115 and 6118-6208 ticker elided while the job waits in the queue; shell-integration escape codes stripped from the kept rows]
+6209,7326739,"TERMINAL",0,0,"salloc: job 3370952 has been allocated resources\r\nsalloc: Granted job allocation 3370952\r\n",,terminal_output
+6210,7326907,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output
+6237,7353987,"TERMINAL",0,0,"salloc: Nodes hkn[0717,0735] are ready for job\r\n",,terminal_output
+6239,7355012,"TERMINAL",0,0,"[tum_cte0515@hkn0717 jafar]$ ",,terminal_output
+[sequences 6211-6236 and 6240-6257 ticker elided]
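The sequence above is ordinary interactive SLURM usage: request an allocation, wait in the queue, and get a shell on the first granted node. The sketch below replays the recorded `salloc` invocation and adds two verification steps (`echo` of the `SLURM_*` variables and `squeue`) that are standard SLURM but not part of the recording:

```sh
# Minimal sketch of the interactive-allocation flow recorded above.
# Request 2 nodes for 10 hours, 4 tasks and 4 GPUs per node, 5 CPUs per task:
salloc --time=10:00:00 --partition=accelerated \
       --nodes=2 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5

# Once salloc prints "Nodes ... are ready for job", the shell runs inside
# the allocation. These checks are illustrative, not from the recording:
echo "job $SLURM_JOB_ID on $SLURM_JOB_NODELIST"
squeue -u "$USER"   # the job should now show state R (running)
```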
+[sequences 6258-6958 elided: ~700 one-character ticker events (the elapsed clock passes 7:00:00) with no other terminal activity]
+6959,8104901,"TERMINAL",0,0,"4",,terminal_output +6960,8105909,"TERMINAL",0,0,"5",,terminal_output +6961,8106959,"TERMINAL",0,0,"6",,terminal_output +6962,8108005,"TERMINAL",0,0,"7",,terminal_output +6963,8109081,"TERMINAL",0,0,"8",,terminal_output +6964,8110077,"TERMINAL",0,0,"9",,terminal_output +6965,8111129,"TERMINAL",0,0,"30",,terminal_output +6966,8112157,"TERMINAL",0,0,"1",,terminal_output +6967,8113206,"TERMINAL",0,0,"2",,terminal_output +6968,8114249,"TERMINAL",0,0,"3",,terminal_output +6969,8115290,"TERMINAL",0,0,"4",,terminal_output +6970,8116330,"TERMINAL",0,0,"5",,terminal_output +6971,8117477,"TERMINAL",0,0,"6",,terminal_output +6972,8118413,"TERMINAL",0,0,"7",,terminal_output +6973,8119544,"TERMINAL",0,0,"8",,terminal_output +6974,8120553,"TERMINAL",0,0,"9",,terminal_output +6975,8121528,"TERMINAL",0,0,"40",,terminal_output +6976,8122596,"TERMINAL",0,0,"11",,terminal_output +6977,8123682,"TERMINAL",0,0,"2",,terminal_output +6978,8124665,"TERMINAL",0,0,"3",,terminal_output +6979,8125777,"TERMINAL",0,0,"4",,terminal_output +6980,8126794,"TERMINAL",0,0,"6",,terminal_output +6981,8127818,"TERMINAL",0,0,"7",,terminal_output +6982,8128845,"TERMINAL",0,0,"8",,terminal_output +6983,8129975,"TERMINAL",0,0,"9",,terminal_output +6984,8130994,"TERMINAL",0,0,"50",,terminal_output +6985,8325901,"TERMINAL",0,0,"1",,terminal_output +6986,8326068,"TERMINAL",0,0,"2342567893:00123456789112134567892012345678921301245678940123456789050123456794:0012345678910123456789201345678930123451678940123456895001234567895:001234567891013456789201123456789300123456289401234567895011234567896:001345",,terminal_output +6987,8326849,"TERMINAL",0,0,"6",,terminal_output +6988,8327884,"TERMINAL",0,0,"7",,terminal_output +6989,8328943,"TERMINAL",0,0,"8",,terminal_output +6990,8329975,"TERMINAL",0,0,"9",,terminal_output +6991,8331005,"TERMINAL",0,0,"10",,terminal_output +6992,8332064,"TERMINAL",0,0,"1",,terminal_output +6993,8333094,"TERMINAL",0,0,"2",,terminal_output +6994,8334134,"TERMINAL",0,0,"3",,terminal_output +6995,8335192,"TERMINAL",0,0,"4",,terminal_output +6996,8336218,"TERMINAL",0,0,"5",,terminal_output +6997,8337264,"TERMINAL",0,0,"6",,terminal_output +6998,8338305,"TERMINAL",0,0,"7",,terminal_output +6999,8339341,"TERMINAL",0,0,"8",,terminal_output +7000,8340402,"TERMINAL",0,0,"9",,terminal_output +7001,8341432,"TERMINAL",0,0,"20",,terminal_output +7002,8342455,"TERMINAL",0,0,"1",,terminal_output +7003,8343497,"TERMINAL",0,0,"2",,terminal_output +7004,8344234,"TERMINAL",0,0,"s",,terminal_output +7005,8344348,"TERMINAL",0,0,"o",,terminal_output +7006,8344475,"TERMINAL",0,0,"r",,terminal_output +7007,8344537,"TERMINAL",0,0,"3",,terminal_output +7008,8344892,"TERMINAL",0,0,"",,terminal_output +7009,8345040,"TERMINAL",0,0,"[?25lu[?25h[?25lr[?25h",,terminal_output +7010,8345430,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +7011,8345576,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +7012,8345577,"TERMINAL",0,0,"4",,terminal_output +7013,8345642,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +7014,8345988,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +7015,8346055,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +7016,8346239,"TERMINAL",0,0,"env/",,terminal_output +7017,8346402,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +7018,8346525,"TERMINAL",0,0,"in/",,terminal_output +7019,8346617,"TERMINAL",0,0,"5",,terminal_output +7020,8346736,"TERMINAL",0,0,"[?25la[?25h[?25lc[?25h",,terminal_output +7021,8347017,"TERMINAL",0,0,"tivate",,terminal_output 
+7022,8347570,"TERMINAL",0,0,"[?25l[?2004l\r[?25h]0;tum_cte0515@hkn0717:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0717 jafar]$ ",,terminal_output +7023,8347634,"TERMINAL",0,0,"l",,terminal_output +7024,8347692,"TERMINAL",0,0,"6",,terminal_output +7025,8347839,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +7026,8347936,"TERMINAL",0,0,"[?25l[?2004l\r[?25h",,terminal_output +7027,8347997,"TERMINAL",0,0,"data frame-knoms.png generation_1753196800.0453017.gif LICENSE overfit_dir.zip requirements-franz.txt scripts_horeka slurm-3359338.out train_tokenizer.py weekend-job-starter.sh\r\ndebug frame.png genie.py logs __pycache__ requirements.txt slurm tests utils\r\ndiff.diff frames gifs models README.md sample.py slurm-3359333.out train_dynamics.py wandb\r\ndiff.log generate_dataset.py input_pipeline overfit_dir read_tf_record.py scripts_cremers slurm-3359334.out train_lam.py weekend-job-requeuer.sh\r\n]0;tum_cte0515@hkn0717:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0717 jafar]$ ",,terminal_output +7028,8348837,"TERMINAL",0,0,"7",,terminal_output +7029,8349741,"TERMINAL",0,0,"9",,terminal_output +7030,8350781,"TERMINAL",0,0,"30",,terminal_output +7031,8351870,"TERMINAL",0,0,"1",,terminal_output +7032,8352893,"TERMINAL",0,0,"2",,terminal_output +7033,8353939,"TERMINAL",0,0,"3",,terminal_output +7034,8354219,"TERMINAL",0,0,"srun",,terminal_focus +7035,8354951,"TERMINAL",0,0,"4",,terminal_output +7036,8355986,"TERMINAL",0,0,"5",,terminal_output +7037,8357093,"TERMINAL",0,0,"6",,terminal_output +7038,8358010,"TERMINAL",0,0,"[?2004l\r\r\nexit\r\nsalloc: Relinquishing job allocation 3370906\r\nsalloc: Job allocation 3370906 has been revoked.\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +7039,8358074,"TERMINAL",0,0,"7",,terminal_output +7040,8359106,"TERMINAL",0,0,"8",,terminal_output +7041,8360153,"TERMINAL",0,0,"9",,terminal_output +7042,8361210,"TERMINAL",0,0,"40",,terminal_output +7043,8362230,"TERMINAL",0,0,"1",,terminal_output +7044,8362981,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5",,terminal_command +7045,8363026,"TERMINAL",0,0,"]633;E;2025-07-23 17:06:42 salloc --time=01:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5;469e5d18-6e08-4909-a55e-e2644c9abc02]633;Csalloc: Pending job allocation 3371004\r\nsalloc: job 3371004 queued and waiting for resources\r\n",,terminal_output +7046,8363266,"TERMINAL",0,0,"2",,terminal_output +7047,8364320,"TERMINAL",0,0,"3",,terminal_output +7048,8365352,"TERMINAL",0,0,"4",,terminal_output +7049,8366078,"TERMINAL",0,0,"^Csalloc: Job allocation 3371004 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;1",,terminal_output +7050,8366413,"TERMINAL",0,0,"5",,terminal_output +7051,8367441,"TERMINAL",0,0,"6",,terminal_output +7052,8368485,"TERMINAL",0,0,"7",,terminal_output +7053,8369552,"TERMINAL",0,0,"8",,terminal_output +7054,8370423,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5",,terminal_command +7055,8370484,"TERMINAL",0,0,"]633;E;2025-07-23 17:06:49 salloc --time=01:00:00 --partition=dev_accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5;469e5d18-6e08-4909-a55e-e2644c9abc02]633;Csalloc: Granted job allocation 3371005\r\n",,terminal_output +7056,8370610,"TERMINAL",0,0,"90",,terminal_output +7057,8370616,"TERMINAL",0,0,"salloc: Waiting for resource 
configuration\r\n",,terminal_output +7058,8371600,"TERMINAL",0,0,"50",,terminal_output +7059,8372660,"TERMINAL",0,0,"1",,terminal_output +7060,8373679,"TERMINAL",0,0,"2",,terminal_output +7061,8374836,"TERMINAL",0,0,"36",,terminal_output +7062,8375836,"TERMINAL",0,0,"5",,terminal_output +7063,8376860,"TERMINAL",0,0,"6",,terminal_output +7064,8377843,"TERMINAL",0,0,"7",,terminal_output +7065,8379008,"TERMINAL",0,0,"8",,terminal_output +7066,8379932,"TERMINAL",0,0,"9",,terminal_output +7067,8381057,"TERMINAL",0,0,"7:00",,terminal_output +7068,8382045,"TERMINAL",0,0,"1",,terminal_output +7069,8383103,"TERMINAL",0,0,"2",,terminal_output +7070,8384188,"TERMINAL",0,0,"3",,terminal_output +7071,8385151,"TERMINAL",0,0,"4",,terminal_output +7072,8386209,"TERMINAL",0,0,"5",,terminal_output +7073,8387233,"TERMINAL",0,0,"6",,terminal_output +7074,8387322,"TERMINAL",0,0,"watch",,terminal_focus +7075,8387619,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3370788]633;D;0",,terminal_output +7076,8389282,"TERMINAL",0,0,"ls",,terminal_command +7077,8389296,"TERMINAL",0,0,"]633;E;2025-07-23 17:07:08 ls ;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C000600 000700 000800\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3370788]633;D;0",,terminal_output +7078,8389438,"TERMINAL",0,0,"",,terminal_command +7079,8389457,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3370788]633;D",,terminal_output +7080,8391758,"TERMINAL",0,0,"cd",,terminal_command +7081,8393825,"TERMINAL",0,0,"cd Projects/jafar",,terminal_command +7082,8394124,"TERMINAL",0,0,"la",,terminal_command +7083,8394173,"TERMINAL",0,0,"]633;E;2025-07-23 17:07:13 la;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;Cbash: la: command not found...\r\n",,terminal_output +7084,8394293,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;127",,terminal_output +7085,8394993,"TERMINAL",0,0,"ls",,terminal_command +7086,8395041,"TERMINAL",0,0,"]633;E;2025-07-23 17:07:14 ls;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;C",,terminal_output +7087,8395135,"TERMINAL",0,0,"data frame-knoms.png generation_1753196800.0453017.gif LICENSE overfit_dir.zip requirements-franz.txt scripts_horeka slurm-3359338.out train_tokenizer.py weekend-job-starter.sh\r\ndebug frame.png genie.py logs __pycache__ requirements.txt slurm tests utils\r\ndiff.diff frames gifs models README.md sample.py slurm-3359333.out train_dynamics.py wandb\r\ndiff.log generate_dataset.py input_pipeline overfit_dir read_tf_record.py scripts_cremers slurm-3359334.out train_lam.py weekend-job-requeuer.sh\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +7088,8397390,"TERMINAL",0,0,"cd ..",,terminal_command +7089,8397591,"TERMINAL",0,0,"ls",,terminal_command +7090,8397617,"TERMINAL",0,0,"]633;E;2025-07-23 17:07:16 ls;3d19b9bf-55ca-4a1a-89a2-d3473be0b612]633;Ccheckpoints jafar jafar_jobs tmp\r\n]0;tum_cte0515@hkn1990:~/Projects]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects",,terminal_output +7091,8397662,"TERMINAL",0,0,"salloc: Nodes hkn0402 are ready for job\r\n",,terminal_output +7092,8398433,"TERMINAL",0,0,"]0;tum_cte0515@hkn0402:~/Projects/jafar[?2004h[tum_cte0515@hkn0402 jafar]$ 
",,terminal_output diff --git a/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-72258fa0-c9fe-4d68-a250-2e65d061e9bb1754920210264-2025_08_11-15.50.35.855/source.csv b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-72258fa0-c9fe-4d68-a250-2e65d061e9bb1754920210264-2025_08_11-15.50.35.855/source.csv new file mode 100644 index 0000000000000000000000000000000000000000..d410cf757d4e4f6ddd320e794d358926f01dd9ce --- /dev/null +++ b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-72258fa0-c9fe-4d68-a250-2e65d061e9bb1754920210264-2025_08_11-15.50.35.855/source.csv @@ -0,0 +1,6295 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +2,170,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"3:50:35 PM [info] Activating crowd-code\n3:50:35 PM [info] Recording started\n3:50:35 PM [info] Initializing git provider using file system watchers...\n",Log,tab +3,287,"extension-output-pdoom-org.crowd-code-#1-crowd-code",150,0,"3:50:36 PM [info] Git repository found\n3:50:36 PM [info] Git provider initialized successfully\n3:50:36 PM [info] Initial git state: [object Object]\n",Log,content +4,273616,"TERMINAL",0,0,"bash",,terminal_focus +5,294403,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_type: str = ""maskgit"" # supported options: maskgit, causal\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n 
log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(\n model: Genie, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n """"""Compute masked dynamics loss""""""\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@nnx.jit\ndef train_step(\n model: Genie, optimizer: nnx.Optimizer, inputs: dict\n) -> tuple[jax.Array, jax.Array, dict]:\n """"""Update state and compute metrics""""""\n\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n return dynamics_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(model)\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n 
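`dynamics_loss_fn` above trains only on masked positions: per-token cross-entropy and accuracy are weighted by the MaskGIT mask and normalized by the number of masked tokens. Isolated as a sketch (the function name is illustrative; the logic mirrors the lines above):

```python
# The masked objective from dynamics_loss_fn above, isolated: average
# cross-entropy and accuracy over masked token positions only.
import optax

def masked_ce_and_acc(token_logits, video_tokens, mask):
    ce = optax.softmax_cross_entropy_with_integer_labels(token_logits, video_tokens)
    acc = token_logits.argmax(-1) == video_tokens
    denom = mask.sum()
    return (mask * ce).sum() / denom, (mask * acc).sum() / denom
```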
num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n decode=False,\n rngs=rngs,\n )\n\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n optimizer = nnx.Optimizer(genie, tx)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in 
os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n # NOTE: We have to remove the (unused) tokenizer vq dropout due flax.nnx lazily initializing modules.\n # Specifically, the first dynamics model checkpoint will contain the vq dropout module,\n # but the first full restore will fail due to nnx not initializing the module when\n # dropout is set to 0.0.\n del optimizer.model.tokenizer.vq.drop\n\n # --- TRAIN LOOP ---\n dataloader = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in grain_iterator\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +6,298734,"train_tokenizer.py",0,0,"from dataclasses import dataclass, 
field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n init_lr: float = 0.0\n max_lr: float = 3e-4\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 20000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n ffn_dim: int = 2048\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 4\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(\n model: TokenizerVQVAE, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n # --- Compute loss ---\n # FIXME (f.srambical): Can we even do native int8 training without casting the video at all?\n # FIXME (f.srambical): If the tokenizer is the reason for the dynamics model being memory-bound,\n # should we at least train the tokenizer natively in int8?\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n outputs[""recon""] = outputs[""recon""].astype(jnp.float32)\n mse = jnp.square(gt - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@nnx.jit\ndef train_step(\n tokenizer: TokenizerVQVAE, optimizer: nnx.Optimizer, inputs: dict\n) -> 
tuple[jax.Array, jax.Array, dict]:\n def loss_fn(model: TokenizerVQVAE) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n return tokenizer_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(\n tokenizer\n )\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n ffn_dim=args.ffn_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n\n _, params, _ = nnx.split(tokenizer, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n optimizer = nnx.Optimizer(tokenizer, tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n 
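The mesh block above implements plain data parallelism: model and optimizer state are constrained to a fully replicated sharding, while each batch is split along its leading axis across the `"data"` mesh dimension. A condensed sketch of the same layout:

```python
# The data-parallel layout used by all three training scripts: replicated
# parameters, batch sharded along axis 0 of the "data" mesh dimension.
import jax
from jax.experimental.mesh_utils import create_device_mesh
from jax.sharding import Mesh, NamedSharding, PartitionSpec

devices = create_device_mesh((jax.device_count(),))
mesh = Mesh(devices=devices, axis_names=("data",))
replicated_sharding = NamedSharding(mesh, PartitionSpec())    # full copy per device
videos_sharding = NamedSharding(mesh, PartitionSpec("data"))  # shard batch axis
```

The scripts spell out a five-entry `PartitionSpec` because video batches are rank-5 (B, T, H, W, C); only the batch axis is given a mesh name.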
handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n\n # --- TRAIN LOOP ---\n dataloader = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in grain_iterator\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n inputs = dict(videos=videos)\n loss, recon, metrics = train_step(tokenizer, optimizer, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n 
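The handler registry above lets a single Orbax `CheckpointManager` snapshot heterogeneous state: the optimizer PyTree and the grain dataloader iterator are saved and restored together under one step number, which is what makes the exact resume ("Restored dataloader and model state from step ...") possible. The save path, condensed:

```python
# Composite save as used above: model PyTree plus grain iterator state
# checkpointed together under one step number.
import grain
import orbax.checkpoint as ocp

def save_step(checkpoint_manager, step, optimizer_state, grain_iterator):
    checkpoint_manager.save(
        step,
        args=ocp.args.Composite(
            model_state=ocp.args.PyTreeSave(optimizer_state),
            dataloader_state=grain.checkpoint.CheckpointSave(grain_iterator),
        ),
    )
```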
recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +7,299955,"train_dynamics.py",0,0,"",python,tab +8,301139,"train_lam.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n vq_beta: float = 0.25\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n vq_reset_thresh: int = 50\n # LAM\n model_dim: int = 512\n ffn_dim: int = 2048\n latent_dim: int = 32\n num_latents: int = 6\n patch_size: int = 16\n num_blocks: int = 4\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.0\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_lam""\n tags: list[str] = field(default_factory=lambda: [""lam""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_checkpoint_keep_period: int = 20000\n wandb_id: str = """"\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\n\n\ndef lam_loss_fn(\n model: LatentActionModel, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, jax.Array, dict]]:\n # --- Compute loss ---\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n outputs[""recon""] = outputs[""recon""].astype(jnp.float32)\n gt_future_frames = gt[:, 1:]\n mse = jnp.square(gt_future_frames - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = gt_future_frames.clip(0, 1).reshape(-1, *gt_future_frames.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n 
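Both `tokenizer_loss_fn` and `lam_loss_fn` use the same three-term VQ-VAE objective: reconstruction MSE, a quantization term whose gradient reaches only `z` (the other side is stop-gradiented), and a `vq_beta`-weighted commitment term whose gradient reaches only `emb`. Isolated:

```python
# The shared VQ-VAE objective from tokenizer_loss_fn / lam_loss_fn above.
import jax
import jax.numpy as jnp

def vq_vae_loss(gt, recon, z, emb, vq_beta=0.25):
    mse = jnp.square(gt - recon).mean()
    # gradient of this term flows into z only (emb is stop-gradiented)
    q_loss = jnp.square(jax.lax.stop_gradient(emb) - z).mean()
    # gradient of this term flows into emb only
    commitment_loss = jnp.square(emb - jax.lax.stop_gradient(z)).mean()
    return mse + q_loss + vq_beta * commitment_loss
```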
psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n count_fn = jax.vmap(lambda i: (outputs[""indices""] == i).sum())\n index_counts = count_fn(jnp.arange(args.num_latents))\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=(index_counts != 0).mean(),\n )\n return loss, (outputs[""recon""], index_counts, metrics)\n\n\n@nnx.jit\ndef train_step(\n lam: LatentActionModel,\n optimizer: nnx.Optimizer,\n inputs: dict,\n action_last_active: jax.Array,\n rng: jax.Array,\n) -> tuple[jax.Array, jax.Array, jax.Array, dict]:\n def loss_fn(\n model: LatentActionModel,\n ) -> tuple[jax.Array, tuple[jax.Array, jax.Array, dict]]:\n return lam_loss_fn(model, inputs)\n\n # --- Update model ---\n (loss, (recon, idx_counts, metrics)), grads = nnx.value_and_grad(\n loss_fn, has_aux=True\n )(lam)\n optimizer.update(grads)\n\n # --- Reset inactive latent actions ---\n codebook = lam.vq.codebook\n num_codes = len(codebook)\n active_codes = idx_counts != 0.0\n action_last_active = jnp.where(active_codes, 0, action_last_active + 1)\n p_code = active_codes / active_codes.sum()\n reset_idxs = jax.random.choice(rng, num_codes, shape=(num_codes,), p=p_code)\n do_reset = action_last_active >= args.vq_reset_thresh\n new_codebook = jnp.where(\n jnp.expand_dims(do_reset, -1), codebook[reset_idxs], codebook.value\n )\n lam.vq.codebook.value = new_codebook\n action_last_active = jnp.where(do_reset, 0, action_last_active)\n return loss, recon, action_last_active, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n ffn_dim=args.ffn_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n\n # Count parameters\n _, params, _ = nnx.split(lam, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n 
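The latent-action `train_step` above adds a dead-code revival pass after the gradient update: a per-code inactivity counter grows whenever a code receives no assignments in a step, and any code idle for `vq_reset_thresh` consecutive steps is overwritten with a code drawn uniformly from the currently active ones. Isolated sketch:

```python
# Dead-code reset from train_lam.py's train_step, isolated: codes unused
# for `thresh` consecutive steps are re-drawn from active codebook rows.
import jax
import jax.numpy as jnp

def reset_inactive_codes(rng, codebook, index_counts, last_active, thresh=50):
    active = index_counts != 0
    last_active = jnp.where(active, 0, last_active + 1)
    donor_probs = active / active.sum()  # uniform over active codes
    donors = jax.random.choice(
        rng, codebook.shape[0], shape=(codebook.shape[0],), p=donor_probs
    )
    dead = last_active >= thresh
    codebook = jnp.where(dead[:, None], codebook[donors], codebook)
    return codebook, jnp.where(dead, 0, last_active)
```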
optimizer = nnx.Optimizer(lam, tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n\n # --- TRAIN LOOP ---\n dataloader = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in grain_iterator\n )\n print(f""Starting training from step {step}..."")\n action_last_active = jnp.zeros(args.num_latents, dtype=jnp.int32)\n while step < args.num_steps:\n for videos in 
dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n inputs = dict(videos=videos, rng=_rng)\n rng, _rng = jax.random.split(rng)\n loss, recon, action_last_active, metrics = train_step(\n lam, optimizer, inputs, action_last_active, _rng\n )\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0, 1:].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +9,307477,"models/tokenizer.py",0,0,"from typing import Dict, Tuple\n\nimport flax.nnx as nnx\nimport jax.numpy as jnp\nimport jax\n\nfrom utils.preprocess import patchify, unpatchify\nfrom utils.nn import STTransformer, VectorQuantizer\n\n\nclass TokenizerVQVAE(nnx.Module):\n """"""\n ST-ViVit VQ-VAE\n\n Dimension keys:\n B: batch size\n T: sequence length\n N: number of patches per frame\n L: latent dimension\n D: B * T * N\n H: height\n W: width\n C: number of channels\n P: patch token dimension (patch_size^2 * C)\n """"""\n\n def __init__(\n self,\n in_dim: int,\n model_dim: int,\n ffn_dim: int,\n latent_dim: int,\n num_latents: int,\n patch_size: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n codebook_dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.in_dim = in_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.latent_dim = latent_dim\n self.num_latents = num_latents\n self.patch_size = patch_size\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.codebook_dropout = codebook_dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.encoder = STTransformer(\n self.in_dim * self.patch_size**2,\n self.model_dim,\n self.ffn_dim,\n self.latent_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.vq = VectorQuantizer(\n self.latent_dim,\n self.num_latents,\n self.codebook_dropout,\n rngs=rngs,\n )\n self.out_dim = self.in_dim * self.patch_size**2\n self.decoder = STTransformer(\n self.latent_dim,\n self.model_dim,\n self.ffn_dim,\n self.out_dim,\n 
self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n\n def __call__(\n self, batch: Dict[str, jax.Array], training: bool = True\n ) -> Dict[str, jax.Array]:\n H, W = batch[""videos""].shape[2:4]\n videos_BTHWC = batch[""videos""]\n outputs = self.vq_encode(videos_BTHWC, training)\n z_q_BTNL = outputs[""z_q""]\n recon_BTHWC = self.decoder(z_q_BTNL)\n recon_BTHWC = recon_BTHWC.astype(jnp.float32)\n recon_BTHWC = nnx.sigmoid(recon_BTHWC)\n recon_BTHWC = recon_BTHWC.astype(self.dtype)\n recon_BTHWC = unpatchify(recon_BTHWC, self.patch_size, H, W)\n outputs[""recon""] = recon_BTHWC\n return outputs\n\n def vq_encode(\n self, videos: jax.Array, training: bool = True\n ) -> Dict[str, jax.Array]:\n # --- Preprocess + encode ---\n B, T = videos.shape[:2]\n patch_BTNP = patchify(videos, self.patch_size)\n N = patch_BTNP.shape[2]\n x_BTNL = self.encoder(patch_BTNP)\n\n # --- Vector quantize ---\n x_DL = x_BTNL.reshape(B * T * N, self.latent_dim)\n z_q_DL, z_DL, emb_DL, indices_D = self.vq(x_DL, training)\n z_q_BTNL = z_q_DL.reshape(B, T, N, self.latent_dim)\n indices_BTN = indices_D.reshape(B, T, N)\n return dict(z_q=z_q_BTNL, z=z_DL, emb=emb_DL, indices=indices_BTN)\n\n def decode(self, indices_BTN: jax.Array, video_hw: Tuple[int, int]) -> jax.Array:\n z_BTNL = self.vq.codebook[indices_BTN]\n recon_BTNP = self.decoder(z_BTNL)\n recon_BTNP = recon_BTNP.astype(jnp.float32)\n recon_BTNP = nnx.sigmoid(recon_BTNP)\n recon_BTNP = recon_BTNP.astype(self.dtype)\n return unpatchify(recon_BTNP, self.patch_size, *video_hw)\n",python,tab +10,308164,"models/lam.py",0,0,"from typing import Dict\n\nimport jax\nimport jax.numpy as jnp\nimport flax.nnx as nnx\n\nfrom utils.preprocess import patchify, unpatchify\nfrom utils.nn import STTransformer, VectorQuantizer\n\n\nclass LatentActionModel(nnx.Module):\n """"""Latent Action ST-ViVit VQ-VAE\n \n Dimension keys:\n B: batch size\n T: sequence length\n N: number of patches per frame\n M: model dimension\n L: latent dimension\n E: B * (T - 1)\n H: height\n W: width\n C: number of channels (n_dim)\n P: patch token dimension (patch_size^2 * C)\n\n Tm1: T - 1\n Np1: N + 1\n """"""\n\n def __init__(\n self,\n in_dim: int,\n model_dim: int,\n ffn_dim: int,\n latent_dim: int,\n num_latents: int,\n patch_size: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n codebook_dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.in_dim = in_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.latent_dim = latent_dim\n self.num_latents = num_latents\n self.patch_size = patch_size\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.codebook_dropout = codebook_dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.patch_token_dim = self.in_dim * self.patch_size**2\n self.encoder = STTransformer(\n self.patch_token_dim,\n self.model_dim,\n self.ffn_dim,\n self.latent_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.action_in = nnx.Param(\n nnx.initializers.lecun_uniform()(\n rngs.params(), (1, 1, 1, self.patch_token_dim)\n )\n )\n self.vq = VectorQuantizer(\n self.latent_dim,\n self.num_latents,\n self.codebook_dropout,\n rngs=rngs,\n )\n self.patch_up = nnx.Linear(\n 
self.patch_token_dim,\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.action_up = nnx.Linear(\n self.latent_dim,\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.decoder = STTransformer(\n self.model_dim,\n self.model_dim,\n self.ffn_dim,\n self.patch_token_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n\n def __call__(\n self, batch: Dict[str, jax.Array], training: bool = True\n ) -> Dict[str, jax.Array]:\n # --- Encode + VQ ---\n H, W = batch[""videos""].shape[2:4]\n videos_BTHWC = batch[""videos""]\n outputs = self.vq_encode(videos_BTHWC, training)\n patch_BTNP = outputs[""patches""]\n z_q_BTm11L = outputs[""z_q""]\n action_BTm11M = self.action_up(z_q_BTm11L)\n patch_BTm1NM = self.patch_up(patch_BTNP[:, :-1])\n action_BTm1NM = jnp.broadcast_to(action_BTm11M, patch_BTm1NM.shape)\n video_action_patches_BTm1NM = action_BTm1NM + patch_BTm1NM\n del outputs[""patches""], patch_BTNP, patch_BTm1NM\n\n # --- Decode ---\n video_recon_BTm1P = self.decoder(video_action_patches_BTm1NM)\n video_recon_BTm1P = video_recon_BTm1P.astype(jnp.float32)\n video_recon_BTm1P = nnx.sigmoid(video_recon_BTm1P)\n video_recon_BTm1P = video_recon_BTm1P.astype(self.dtype)\n video_recon_BTHWC = unpatchify(video_recon_BTm1P, self.patch_size, H, W)\n outputs[""recon""] = video_recon_BTHWC\n return outputs\n\n def vq_encode(\n self, videos_BTHWC: jax.Array, training: bool = True\n ) -> Dict[str, jax.Array]:\n # --- Preprocess videos ---\n B, T = videos_BTHWC.shape[:2]\n patch_BTNP = patchify(videos_BTHWC, self.patch_size)\n action_pad_BT1P = jnp.broadcast_to(\n self.action_in.value, (B, T, 1, self.patch_token_dim)\n )\n padded_patch_BTNp1P = jnp.concatenate((action_pad_BT1P, patch_BTNP), axis=2)\n\n # --- Encode ---\n z_BTNp1L = self.encoder(padded_patch_BTNp1P)\n # Get latent action for all future frames\n z_BTm1L = z_BTNp1L[:, 1:, 0]\n\n # --- Vector quantize ---\n z_EL = z_BTm1L.reshape(B * (T - 1), self.latent_dim)\n z_q_EL, z_EL, emb_EL, indices_E = self.vq(z_EL, training)\n z_q_BTm11L = z_q_EL.reshape(B, T - 1, 1, self.latent_dim)\n return dict(patches=patch_BTNP, z_q=z_q_BTm11L, z=z_EL, emb=emb_EL, indices=indices_E)\n",python,tab +11,309377,"train_dynamics.py",0,0,"",python,tab +12,312208,"train_dynamics.py",2484,0,"",python,selection_mouse +13,313935,"train_dynamics.py",2612,0,"",python,selection_mouse +14,314072,"train_dynamics.py",2611,2,"gt",python,selection_mouse +15,314277,"train_dynamics.py",2611,72,"gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs",python,selection_mouse +16,314347,"train_dynamics.py",2611,116,"gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model",python,selection_mouse +17,314357,"train_dynamics.py",2611,136,"gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs",python,selection_mouse +18,314411,"train_dynamics.py",2611,176,"gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n mask",python,selection_mouse +19,314515,"train_dynamics.py",2611,206,"gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n 
mask = outputs[""mask""]\n outputs",python,selection_mouse +20,314662,"train_dynamics.py",2611,280,"gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss",python,selection_mouse +21,315256,"train_dynamics.py",2885,0,"",python,selection_mouse +22,315381,"train_dynamics.py",2884,7,"ce_loss",python,selection_mouse +23,315694,"train_dynamics.py",2884,67,"ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n ",python,selection_mouse +24,315713,"train_dynamics.py",2884,74,"ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs",python,selection_mouse +25,316090,"train_dynamics.py",2884,121,"ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )",python,selection_mouse +26,316481,"train_dynamics.py",3005,0,"",python,selection_mouse +27,316489,"train_dynamics.py",3004,0,"",python,selection_command +28,319781,"train_dynamics.py",2752,0,"",python,selection_mouse +29,319908,"train_dynamics.py",2750,5,"model",python,selection_mouse +30,327434,"genie.py",0,0,"from typing import Dict\n\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.nnx as nnx\nimport orbax.checkpoint as ocp\n\nfrom models.dynamics import DynamicsMaskGIT, DynamicsCausal\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\n\nclass Genie(nnx.Module):\n """"""Genie model""""""\n\n def __init__(\n self,\n in_dim: int,\n tokenizer_dim: int,\n tokenizer_ffn_dim: int,\n latent_patch_dim: int,\n num_patch_latents: int,\n patch_size: int,\n tokenizer_num_blocks: int,\n tokenizer_num_heads: int,\n lam_dim: int,\n lam_ffn_dim: int,\n latent_action_dim: int,\n num_latent_actions: int,\n lam_patch_size: int,\n lam_num_blocks: int,\n lam_num_heads: int,\n lam_co_train: bool,\n dyna_type: str,\n dyna_dim: int,\n dyna_ffn_dim: int,\n dyna_num_blocks: int,\n dyna_num_heads: int,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n dropout: float = 0.0,\n mask_limit: float = 0.0,\n ):\n # --- Tokenizer ---\n self.in_dim = in_dim\n self.tokenizer_dim = tokenizer_dim\n self.tokenizer_ffn_dim = tokenizer_ffn_dim\n self.latent_patch_dim = latent_patch_dim\n self.num_patch_latents = num_patch_latents\n self.patch_size = patch_size\n self.tokenizer_num_blocks = tokenizer_num_blocks\n self.tokenizer_num_heads = tokenizer_num_heads\n # --- LAM ---\n self.lam_dim = lam_dim\n self.lam_ffn_dim = lam_ffn_dim\n self.latent_action_dim = latent_action_dim\n self.num_latent_actions = num_latent_actions\n self.lam_patch_size = lam_patch_size\n self.lam_num_blocks = lam_num_blocks\n self.lam_num_heads = lam_num_heads\n self.lam_co_train = lam_co_train\n # --- Dynamics ---\n self.dyna_type = dyna_type\n self.dyna_dim = dyna_dim\n self.dyna_ffn_dim = dyna_ffn_dim\n self.dyna_num_blocks = dyna_num_blocks\n self.dyna_num_heads = dyna_num_heads\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.dropout = dropout\n self.mask_limit = mask_limit\n\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n ffn_dim=self.tokenizer_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n 
num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n ffn_dim=self.lam_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n if self.dyna_type == ""maskgit"":\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n latent_action_dim=self.latent_action_dim,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n elif self.dyna_type == ""causal"":\n self.dynamics = DynamicsCausal(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n latent_action_dim=self.latent_action_dim,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=decode,\n rngs=rngs,\n )\n else:\n raise ValueError(f""Invalid dynamics type: {self.dyna_type}"")\n\n def __call__(\n self, batch: Dict[str, jax.Array], training: bool = True\n ) -> Dict[str, jax.Array]:\n videos_BTHWC = batch[""videos""]\n tokenizer_outputs = self.tokenizer.vq_encode(videos_BTHWC, training=False)\n token_indices_BTN = tokenizer_outputs[""indices""]\n lam_outputs = self.lam.vq_encode(videos_BTHWC, training=False)\n z_q_BTm11L = lam_outputs[""z_q""]\n action_indices_E = lam_outputs[""indices""]\n latent_actions_BTm11L = jax.lax.cond(\n self.lam_co_train,\n lambda: z_q_BTm11L,\n lambda: jax.lax.stop_gradient(z_q_BTm11L),\n )\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(token_indices_BTN),\n latent_actions=latent_actions_BTm11L,\n )\n outputs[""mask_rng""] = batch[""mask_rng""]\n dyna_logits_BTNV, dyna_mask = self.dynamics(outputs, training)\n outputs[""token_logits""] = dyna_logits_BTNV\n if dyna_mask is not None:\n outputs[""mask""] = dyna_mask\n mle_indices_BTN = jnp.argmax(outputs[""token_logits""], axis=-1)\n H, W = batch[""videos""].shape[2:4]\n outputs[""recon""] = self.tokenizer.decode(mle_indices_BTN, (H, W))\n outputs[""lam_indices""] = action_indices_E\n return outputs\n\n def sample(\n self,\n batch: Dict[str, jax.Array],\n seq_len: int,\n steps: int = 25,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> jax.Array:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by\n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer 
block.\n\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n M: model dimension\n S: sequence length\n H: height\n W: width\n E: B * (S - 1)\n P: S * N\n """"""\n # --- Encode videos and actions ---\n videos_BTHWC = batch[""videos""]\n latent_actions_E = batch[""latent_actions""]\n tokenizer_out = self.tokenizer.vq_encode(videos_BTHWC, training=False)\n token_idxs_BTN = tokenizer_out[""indices""]\n B, T, N = token_idxs_BTN.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs_BTN.dtype)\n token_idxs_BSN = jnp.concatenate([token_idxs_BTN, pad], axis=1)\n action_tokens_EL = self.lam.vq.get_codes(latent_actions_E)\n\n def maskgit_step_fn(\n carry: tuple[jax.Array, jax.Array, jax.Array, jax.Array], step: jax.Array\n ) -> tuple[tuple[jax.Array, jax.Array, jax.Array, jax.Array], None]:\n rng, token_idxs_BSN, mask_BSN, action_tokens_EL = carry\n S, N = token_idxs_BSN.shape[1:]\n L = action_tokens_EL.shape[-1]\n\n # --- Construct + encode video ---\n vid_embed_BSNM = self.dynamics.patch_embed(token_idxs_BSN)\n mask_token_111M = self.dynamics.mask_token.value\n mask_expanded_BSN1 = mask_BSN[..., None]\n vid_embed_BSNM = jnp.where(mask_expanded_BSN1, mask_token_111M, vid_embed_BSNM)\n\n # --- Predict transition ---\n action_tokens_BSm1L = jnp.reshape(action_tokens_EL, (B, S - 1, L))\n act_embed_BSm1M = self.dynamics.action_up(action_tokens_BSm1L)\n act_embed_BSM = jnp.pad(act_embed_BSm1M, ((0, 0), (1, 0), (0, 0)))\n act_embed_BS1M = jnp.reshape(act_embed_BSM, (B, S, 1, act_embed_BSM.shape[-1]))\n vid_embed_BSNM += act_embed_BS1M\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (steps * 2))\n step_temp = temperature * (1.0 - unmasked_ratio)\n final_logits_BSNV = self.dynamics.transformer(vid_embed_BSNM) / step_temp\n\n # --- Sample new tokens for final frame ---\n if sample_argmax:\n sampled_token_idxs_BSN = jnp.argmax(final_logits_BSNV, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs_BSN = jax.random.categorical(_rng, final_logits_BSNV)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs_BSN = gather_fn(\n jax.nn.softmax(final_logits_BSNV), sampled_token_idxs_BSN\n )\n final_token_probs_BSN += ~mask_BSN\n # Update masked tokens only\n token_idxs_BSN = jnp.where(mask_BSN, sampled_token_idxs_BSN, token_idxs_BSN)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n final_token_probs_flat_BP = einops.rearrange(final_token_probs_BSN, ""b s n -> b (s n)"")\n idx_mask_P = jnp.arange(final_token_probs_flat_BP.shape[-1]) <= N - num_unmasked_tokens\n sorted_idxs_BP = jnp.argsort(final_token_probs_flat_BP, axis=-1)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask_P))\n mask_flat_BP = einops.rearrange(mask_BSN, ""b s n -> b (s n)"")\n new_mask_flat_BP = mask_update_fn(mask_flat_BP, sorted_idxs_BP)\n new_mask_BSN = einops.rearrange(new_mask_flat_BP, ""b (s n) -> b s n"", n=N)\n\n new_carry = (rng, token_idxs_BSN, new_mask_BSN, action_tokens_EL)\n return new_carry, None\n\n def generation_step_fn(\n carry: tuple[jax.Array, jax.Array], step_t: jax.Array\n ) -> tuple[tuple[jax.Array, jax.Array], None]:\n rng, current_token_idxs_BSN = carry\n rng, step_rng = jax.random.split(rng)\n\n # Mask current frame (i.e., t == step_t)\n mask_S = jnp.arange(seq_len) == step_t\n mask_BSN = jnp.broadcast_to(mask_S[None, :, None], (B, seq_len, N)).astype(\n bool\n )\n masked_token_idxs_BSN = current_token_idxs_BSN * 
~mask_BSN\n\n # --- Initialize and run MaskGIT loop ---\n init_carry_maskgit = (\n step_rng,\n masked_token_idxs_BSN,\n mask_BSN,\n action_tokens_EL,\n )\n final_carry_maskgit, _ = jax.lax.scan(\n maskgit_step_fn, init_carry_maskgit, jnp.arange(steps)\n )\n updated_token_idxs_BSN = final_carry_maskgit[1]\n new_carry = (rng, updated_token_idxs_BSN)\n return new_carry, None\n\n # --- Run the autoregressive generation using jax.lax.scan ---\n initial_carry = (batch[""rng""], token_idxs_BSN)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry, _ = jax.lax.scan(\n generation_step_fn, initial_carry, timesteps_to_scan\n )\n final_token_idxs_BSN = final_carry[1]\n\n # --- Decode all tokens at once at the end ---\n H, W = batch[""videos""].shape[2:4]\n final_frames_BSHWC = self.tokenizer.decode(\n final_token_idxs_BSN,\n video_hw=(H, W),\n )\n return final_frames_BSHWC\n\n def sample_causal(\n self,\n batch: Dict[str, jax.Array],\n seq_len: int,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> jax.Array:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by\n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n M: model dimension\n S: sequence length\n H: height\n W: width\n E: B * (S - 1)\n """"""\n assert isinstance(self.dynamics, DynamicsCausal)\n # --- Encode videos and actions ---\n videos_BTHWC = batch[""videos""]\n latent_actions_E = batch[""latent_actions""]\n tokenizer_out = self.tokenizer.vq_encode(videos_BTHWC, training=False)\n token_idxs_BTN = tokenizer_out[""indices""]\n B, T, N = token_idxs_BTN.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs_BTN.dtype)\n token_idxs_BSN = jnp.concatenate([token_idxs_BTN, pad], axis=1)\n action_tokens_EL = self.lam.vq.get_codes(latent_actions_E)\n dynamics_causal: DynamicsCausal = self.dynamics\n\n def causal_step_fn(\n carry: tuple[jax.Array, jax.Array, jax.Array, jax.Array], step_n: jax.Array\n ) -> tuple[tuple[jax.Array, jax.Array, jax.Array, jax.Array], None]:\n rng, token_idxs_BSN, action_tokens_EL, step_t = carry\n S, N = token_idxs_BSN.shape[1:]\n L = action_tokens_EL.shape[-1]\n\n # --- Construct + encode video ---\n vid_embed_BSNM = dynamics_causal.patch_embed(token_idxs_BSN)\n\n # --- Predict transition ---\n action_tokens_BSm1L = jnp.reshape(action_tokens_EL, (B, S - 1, L))\n act_embed_BSm1M = dynamics_causal.action_up(action_tokens_BSm1L)\n act_embed_BSM = jnp.pad(act_embed_BSm1M, ((0, 0), (1, 0), (0, 0)))\n act_embed_BS1M = jnp.reshape(act_embed_BSM, (B, S, 1, act_embed_BSM.shape[-1]))\n vid_embed_BSNp1M = jnp.concatenate([act_embed_BS1M, vid_embed_BSNM], axis=2)\n final_logits_BTNp1V = dynamics_causal.transformer(vid_embed_BSNp1M, (step_t, step_n)) / temperature\n final_logits_BV = final_logits_BTNp1V[:, step_t, step_n, :]\n\n # --- Sample new tokens for final frame ---\n if sample_argmax:\n sampled_token_idxs_B = jnp.argmax(final_logits_BV, 
axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs_B = jax.random.categorical(_rng, final_logits_BV)\n # Update next tokens only\n token_idxs_BSN = token_idxs_BSN.at[:, step_t, step_n].set(sampled_token_idxs_B)\n\n new_carry = (rng, token_idxs_BSN, action_tokens_EL, step_t)\n return new_carry, None\n\n def generation_step_fn(\n carry: tuple[jax.Array, jax.Array], step_t: jax.Array\n ) -> tuple[tuple[jax.Array, jax.Array], None]:\n rng, current_token_idxs_BSN = carry\n rng, step_rng = jax.random.split(rng)\n\n # --- Initialize and run causal loop ---\n init_carry_causal = (\n step_rng,\n current_token_idxs_BSN,\n action_tokens_EL,\n step_t,\n )\n final_carry_causal, _ = jax.lax.scan(\n causal_step_fn, init_carry_causal, jnp.arange(N)\n )\n updated_token_idxs_BSN = final_carry_causal[1]\n new_carry = (rng, updated_token_idxs_BSN)\n return new_carry, None\n\n # --- Run the autoregressive generation using jax.lax.scan ---\n initial_carry = (batch[""rng""], token_idxs_BSN)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry, _ = jax.lax.scan(\n generation_step_fn, initial_carry, timesteps_to_scan\n )\n final_token_idxs_BSN = final_carry[1]\n\n # --- Decode all tokens at once at the end ---\n H, W = batch[""videos""].shape[2:4]\n final_frames_BSHWC = self.tokenizer.decode(\n final_token_idxs_BSN,\n video_hw=(H, W),\n )\n return final_frames_BSHWC\n\n def vq_encode(self, batch: Dict[str, jax.Array], training: bool) -> jax.Array:\n # --- Preprocess videos ---\n video_BTHWC = batch[""videos""]\n lam_output = self.lam.vq_encode(video_BTHWC, training=training)\n lam_indices_E = lam_output[""indices""]\n return lam_indices_E\n\n# FIXME (f.srambical): add conversion script for old checkpoints\ndef restore_genie_components(\n optimizer: nnx.Optimizer,\n sharding: jax.sharding.NamedSharding,\n rng: jax.Array,\n args,\n) -> nnx.Optimizer:\n """"""Restore pre-trained Genie components""""""\n rngs = nnx.Rngs(rng)\n\n tx = optimizer.tx\n model = optimizer.model\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n tokenizer_checkpoint_manager = ocp.CheckpointManager(\n directory=args.tokenizer_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n ffn_dim=args.tokenizer_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n num_blocks=args.tokenizer_num_blocks,\n num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n dummy_tokenizer_optimizer = nnx.Optimizer(dummy_tokenizer, tx)\n dummy_tokenizer_optimizer_state = nnx.state(dummy_tokenizer_optimizer)\n abstract_sharded_tokenizer_optimizer_state = _create_abstract_sharded_pytree(\n dummy_tokenizer_optimizer_state, sharding\n )\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n step=tokenizer_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore( # type: ignore\n abstract_sharded_tokenizer_optimizer_state # type: ignore\n ),\n ),\n )[""model_state""]\n nnx.update(dummy_tokenizer_optimizer.model, restored_tokenizer.model)\n 
model.tokenizer = dummy_tokenizer_optimizer.model\n tokenizer_checkpoint_manager.close()\n\n if args.lam_checkpoint:\n lam_checkpoint_manager = ocp.CheckpointManager(\n directory=args.lam_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n ffn_dim=args.lam_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n dummy_lam_optimizer = nnx.Optimizer(dummy_lam, tx)\n dummy_lam_optimizer_state = nnx.state(dummy_lam_optimizer)\n abstract_sharded_lam_optimizer_state = _create_abstract_sharded_pytree(\n dummy_lam_optimizer_state, sharding\n )\n restored_lam_optimizer = lam_checkpoint_manager.restore(\n step=lam_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore( # type: ignore\n abstract_sharded_lam_optimizer_state # type: ignore\n ),\n ),\n )[""model_state""]\n nnx.update(dummy_lam_optimizer.model, restored_lam_optimizer.model)\n model.lam = dummy_lam_optimizer.model\n # Remove the LAM decoder to save memory and avoid unnecessary computation.\n del model.lam.decoder\n lam_checkpoint_manager.close()\n \n # Reinitialize the optimizer states\n optimizer = nnx.Optimizer(model, tx)\n return optimizer\n\n\ndef _create_abstract_sharded_pytree(\n pytree_template: nnx.GraphState, sharding_spec: jax.sharding.NamedSharding\n) -> jax.Array:\n """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, ""shape"") and hasattr(leaf_template, ""dtype""):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)\n",python,tab +31,337889,"genie.py",5038,0,"",python,selection_mouse +32,338024,"genie.py",5032,12,"videos_BTHWC",python,selection_mouse +33,339411,"genie.py",5082,0,"",python,selection_mouse +34,339559,"genie.py",5071,17,"tokenizer_outputs",python,selection_mouse +35,340193,"genie.py",5161,0,"",python,selection_mouse +36,340289,"genie.py",5154,17,"token_indices_BTN",python,selection_mouse +37,342087,"genie.py",5164,0,"",python,selection_mouse +38,342087,"genie.py",5154,17,"token_indices_BTN",python,selection_mouse +39,342823,"genie.py",5288,0,"",python,selection_mouse +40,343361,"genie.py",5219,0,"",python,selection_mouse +41,343504,"genie.py",5211,11,"lam_outputs",python,selection_mouse +42,344799,"genie.py",5290,0,"",python,selection_mouse +43,344935,"genie.py",5282,10,"z_q_BTm11L",python,selection_mouse +44,346071,"genie.py",5313,0,"",python,selection_mouse +45,346079,"genie.py",5312,0,"",python,selection_command +46,346276,"genie.py",5311,1,"""",python,selection_mouse +47,346278,"genie.py",5311,2,"""]",python,selection_command +48,346294,"genie.py",5309,4,"_q""]",python,selection_mouse +49,346354,"genie.py",5307,6,"""z_q""]",python,selection_mouse +50,346355,"genie.py",5305,8,"s[""z_q""]",python,selection_mouse +51,346355,"genie.py",5304,9,"ts[""z_q""]",python,selection_mouse +52,346356,"genie.py",5302,11,"puts[""z_q""]",python,selection_mouse +53,346366,"genie.py",5301,12,"tputs[""z_q""]",python,selection_mouse 
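For reference, a minimal, self-contained sketch of the cosine unmasking schedule that genie.py's `sample` loop applies (`unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (steps * 2))`). Since the kept-token count is computed as `N * (1.0 - unmasked_ratio)`, that variable in the recorded code actually tracks the fraction of tokens that stays masked, falling from roughly 1 to 0 over the loop. This sketch is illustrative only, not the recorded implementation; the helper name `tokens_to_keep` and the example values of `N` and `steps` are assumptions.

import jax.numpy as jnp

def tokens_to_keep(step: int, steps: int, num_tokens: int) -> jnp.ndarray:
    # Fraction still masked after this step: cos runs from ~1 down to 0.
    masked_ratio = jnp.cos(jnp.pi * (step + 1) / (steps * 2))
    # The unmasked count grows monotonically and reaches num_tokens at the
    # final step, since cos(pi / 2) == 0.
    return jnp.round(num_tokens * (1.0 - masked_ratio)).astype(jnp.int32)

N, steps = 256, 25  # example sizes only
schedule = [int(tokens_to_keep(k, steps, N)) for k in range(steps)]
assert schedule[-1] == N  # everything is unmasked after the last step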
+54,346382,"genie.py",5299,14,"outputs[""z_q""]",python,selection_mouse +55,346400,"genie.py",5298,15,"_outputs[""z_q""]",python,selection_mouse +56,346417,"genie.py",5297,16,"m_outputs[""z_q""]",python,selection_mouse +57,346477,"genie.py",5296,17,"am_outputs[""z_q""]",python,selection_mouse +58,346591,"genie.py",5295,18,"lam_outputs[""z_q""]",python,selection_mouse +59,346688,"genie.py",5294,19," lam_outputs[""z_q""]",python,selection_mouse +60,347171,"genie.py",5294,0,"",python,selection_mouse +61,347575,"genie.py",5296,0,"",python,selection_mouse +62,347733,"genie.py",5295,11,"lam_outputs",python,selection_mouse +63,347990,"genie.py",5295,13,"lam_outputs[""",python,selection_mouse +64,347991,"genie.py",5295,16,"lam_outputs[""z_q",python,selection_mouse +65,348000,"genie.py",5295,18,"lam_outputs[""z_q""]",python,selection_mouse +66,349874,"genie.py",5335,0,"",python,selection_mouse +67,350039,"genie.py",5322,16,"action_indices_E",python,selection_mouse +68,350899,"genie.py",5347,0,"",python,selection_mouse +69,351036,"genie.py",5341,11,"lam_outputs",python,selection_mouse +70,351618,"genie.py",5334,0,"",python,selection_mouse +71,351790,"genie.py",5322,16,"action_indices_E",python,selection_mouse +72,360125,"genie.py",5288,0,"",python,selection_mouse +73,360285,"genie.py",5282,10,"z_q_BTm11L",python,selection_mouse +74,365353,"genie.py",5285,0,"",python,selection_mouse +75,367539,"genie.py",5280,0,"",python,selection_mouse +76,369963,"genie.py",5292,0,"",python,selection_mouse +77,370649,"genie.py",5286,0,"",python,selection_mouse +78,370806,"genie.py",5282,10,"z_q_BTm11L",python,selection_mouse +79,371403,"genie.py",5281,0,"",python,selection_mouse +80,374434,"genie.py",5286,0,"",python,selection_mouse +81,374585,"genie.py",5282,10,"z_q_BTm11L",python,selection_mouse +82,378570,"models/lam.py",0,0,"",python,tab +83,381730,"models/lam.py",4970,0,"",python,selection_mouse +84,381924,"models/lam.py",4965,10,"z_q_BTm11L",python,selection_mouse +85,384315,"models/lam.py",4971,0,"",python,selection_mouse +86,384316,"models/lam.py",4965,10,"z_q_BTm11L",python,selection_mouse +87,385289,"models/lam.py",4968,0,"",python,selection_mouse +88,385778,"models/lam.py",4965,10,"z_q_BTm11L",python,selection_mouse +89,387450,"models/lam.py",4716,0,"",python,selection_mouse +90,387578,"models/lam.py",4716,1,"#",python,selection_mouse +91,387596,"models/lam.py",4716,3,"# G",python,selection_mouse +92,387612,"models/lam.py",4716,6,"# Get ",python,selection_mouse +93,387667,"models/lam.py",4716,9,"# Get lat",python,selection_mouse +94,387668,"models/lam.py",4716,12,"# Get latent",python,selection_mouse +95,387668,"models/lam.py",4716,17,"# Get latent acti",python,selection_mouse +96,387683,"models/lam.py",4716,21,"# Get latent action f",python,selection_mouse +97,387741,"models/lam.py",4716,23,"# Get latent action for",python,selection_mouse +98,387742,"models/lam.py",4716,25,"# Get latent action for a",python,selection_mouse +99,387742,"models/lam.py",4716,27,"# Get latent action for all",python,selection_mouse +100,387749,"models/lam.py",4716,28,"# Get latent action for all ",python,selection_mouse +101,387770,"models/lam.py",4716,30,"# Get latent action for all fu",python,selection_mouse +102,387785,"models/lam.py",4716,31,"# Get latent action for all fut",python,selection_mouse +103,387837,"models/lam.py",4716,33,"# Get latent action for all futur",python,selection_mouse +104,387848,"models/lam.py",4716,78,"# Get latent action for all future frames\n z_BTm1L = z_BTNp1L[:, 1:, 0]",python,selection_mouse 
+105,388748,"models/lam.py",4794,0,"",python,selection_mouse +106,388750,"models/lam.py",4793,0,"",python,selection_command +107,388889,"models/lam.py",4794,0,"",python,selection_mouse +108,388890,"models/lam.py",4793,0,"",python,selection_command +109,389018,"models/lam.py",4793,1,"]",python,selection_mouse +110,389035,"models/lam.py",4794,0,"",python,selection_command +111,389048,"models/lam.py",4793,1,"]",python,selection_mouse +112,389105,"models/lam.py",4790,4,", 0]",python,selection_mouse +113,389106,"models/lam.py",4785,9,":, 1:, 0]",python,selection_mouse +114,389107,"models/lam.py",4776,18,"z_BTNp1L[:, 1:, 0]",python,selection_mouse +115,389160,"models/lam.py",4766,28,"z_BTm1L = z_BTNp1L[:, 1:, 0]",python,selection_mouse +116,389177,"models/lam.py",4717,77," Get latent action for all future frames\n z_BTm1L = z_BTNp1L[:, 1:, 0]",python,selection_mouse +117,389231,"models/lam.py",4716,78,"# Get latent action for all future frames\n z_BTm1L = z_BTNp1L[:, 1:, 0]",python,selection_mouse +118,389232,"models/lam.py",4715,79," # Get latent action for all future frames\n z_BTm1L = z_BTNp1L[:, 1:, 0]",python,selection_mouse +119,389232,"models/lam.py",4714,80," # Get latent action for all future frames\n z_BTm1L = z_BTNp1L[:, 1:, 0]",python,selection_mouse +120,389240,"models/lam.py",4713,81," # Get latent action for all future frames\n z_BTm1L = z_BTNp1L[:, 1:, 0]",python,selection_mouse +121,389294,"models/lam.py",4712,82," # Get latent action for all future frames\n z_BTm1L = z_BTNp1L[:, 1:, 0]",python,selection_mouse +122,389295,"models/lam.py",4711,83," # Get latent action for all future frames\n z_BTm1L = z_BTNp1L[:, 1:, 0]",python,selection_mouse +123,389296,"models/lam.py",4710,84," # Get latent action for all future frames\n z_BTm1L = z_BTNp1L[:, 1:, 0]",python,selection_mouse +124,389326,"models/lam.py",4709,85," # Get latent action for all future frames\n z_BTm1L = z_BTNp1L[:, 1:, 0]",python,selection_mouse +125,389395,"models/lam.py",4708,86," # Get latent action for all future frames\n z_BTm1L = z_BTNp1L[:, 1:, 0]",python,selection_mouse +126,390421,"models/lam.py",4723,0,"",python,selection_mouse +127,390926,"models/lam.py",4667,0,"",python,selection_mouse +128,391289,"models/lam.py",4663,8,"z_BTNp1L",python,selection_mouse +129,392625,"models/lam.py",4669,0,"",python,selection_mouse +130,393151,"models/lam.py",4721,0,"",python,selection_mouse +131,393362,"models/lam.py",4718,3,"Get",python,selection_mouse +132,394513,"models/lam.py",4752,0,"",python,selection_mouse +133,394668,"models/lam.py",4751,6,"frames",python,selection_mouse +134,394837,"models/lam.py",4744,13,"future frames",python,selection_mouse +135,394897,"models/lam.py",4687,70,"padded_patch_BTNp1P)\n # Get latent action for all future frames",python,selection_mouse +136,394962,"models/lam.py",4686,71,"(padded_patch_BTNp1P)\n # Get latent action for all future frames",python,selection_mouse +137,394963,"models/lam.py",4679,78,"encoder(padded_patch_BTNp1P)\n # Get latent action for all future frames",python,selection_mouse +138,394993,"models/lam.py",4735,22," for all future frames",python,selection_mouse +139,395050,"models/lam.py",4729,28,"action for all future frames",python,selection_mouse +140,395409,"models/lam.py",4734,0,"",python,selection_mouse +141,396523,"models/lam.py",4789,0,"",python,selection_mouse +142,413454,"genie.py",0,0,"",python,tab +143,421493,"genie.py",5784,0,"",python,selection_mouse +144,422814,"genie.py",0,0,"",python,tab +145,422815,"genie.py",4271,0,"",python,selection_mouse 
+146,423054,"models/dynamics.py",0,0,"from typing import Dict\n\nimport jax\nimport jax.numpy as jnp\nimport flax.nnx as nnx\nimport einops\n\nfrom utils.nn import STTransformer, Transformer\n\n\nclass DynamicsMaskGIT(nnx.Module):\n """"""\n MaskGIT dynamics model\n \n Dimension keys:\n B: batch size\n T: sequence length\n N: number of patches per frame\n L: latent dimension\n V: vocabulary size (number of latents)\n """"""\n\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_latents: int,\n latent_action_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n mask_limit: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_latents = num_latents\n self.latent_action_dim = latent_action_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.mask_limit = mask_limit\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.transformer = STTransformer(\n self.model_dim,\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=rngs)\n self.mask_token = nnx.Param(\n nnx.initializers.lecun_uniform()(rngs.params(), (1, 1, 1, self.model_dim))\n )\n self.action_up = nnx.Linear(\n self.latent_action_dim,\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(\n self, batch: Dict[str, jax.Array], training: bool = True\n ) -> tuple[jax.Array, jax.Array | None]:\n # --- Mask videos ---\n video_tokens_BTN = batch[""video_tokens""]\n latent_actions_BTm11L = batch[""latent_actions""]\n vid_embed_BTNM = self.patch_embed(video_tokens_BTN)\n if training:\n batch_size = vid_embed_BTNM.shape[0]\n _rng_prob, *_rngs_mask = jax.random.split(batch[""mask_rng""], batch_size + 1)\n mask_prob = jax.random.uniform(\n _rng_prob, shape=(batch_size,), minval=self.mask_limit\n )\n per_sample_shape = vid_embed_BTNM.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(jnp.asarray(_rngs_mask), mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed_BTNM = jnp.where(\n jnp.expand_dims(mask, -1), self.mask_token.value, vid_embed_BTNM\n )\n else:\n mask = None\n\n # --- Predict transition ---\n act_embed_BTm11M = self.action_up(latent_actions_BTm11L)\n padded_act_embed_BT1M = jnp.pad(act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0)))\n padded_act_embed_BTNM = jnp.broadcast_to(padded_act_embed_BT1M, vid_embed_BTNM.shape)\n vid_embed_BTNM += padded_act_embed_BTNM\n logits_BTNV = self.transformer(vid_embed_BTNM)\n return logits_BTNV, mask\n\nclass DynamicsCausal(nnx.Module):\n """"""Causal dynamics model""""""\n\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_latents: int,\n latent_action_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_latents = num_latents\n self.latent_action_dim = latent_action_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n 
self.use_flash_attention = use_flash_attention\n\n self.transformer = Transformer(\n self.model_dim,\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=decode,\n rngs=rngs,\n )\n self.patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=rngs)\n self.action_up = nnx.Linear(\n self.latent_action_dim,\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(\n self, batch: Dict[str, jax.Array], training: bool = True\n ) -> tuple[jax.Array, jax.Array | None]:\n video_tokens_BTN = batch[""video_tokens""]\n latent_actions_BTm11L = batch[""latent_actions""]\n\n vid_embed_BTNM = self.patch_embed(video_tokens_BTN)\n act_embed_BTm11M = self.action_up(latent_actions_BTm11L)\n padded_act_embed_BT1M = jnp.pad(act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0)))\n vid_embed_BTNp1M = jnp.concatenate([padded_act_embed_BT1M, vid_embed_BTNM], axis=2)\n\n logits_BTNp1V = self.transformer(vid_embed_BTNp1M)\n logits_BTNV = logits_BTNp1V[:, :, :-1]\n\n return logits_BTNV, jnp.ones_like(video_tokens_BTN)\n",python,tab +147,426480,"models/dynamics.py",5380,0,"",python,selection_mouse +148,426633,"models/dynamics.py",5372,21,"padded_act_embed_BT1M",python,selection_mouse +149,441511,"models/dynamics.py",5363,0,"",python,selection_mouse +150,441513,"models/dynamics.py",5362,0,"",python,selection_command +151,442057,"models/dynamics.py",5432,0,"",python,selection_mouse +152,443275,"genie.py",0,0,"",python,tab +153,446072,"train_dynamics.py",0,0,"",python,tab +154,447891,"genie.py",0,0,"",python,tab +155,449534,"models/dynamics.py",0,0,"",python,tab +156,451238,"genie.py",0,0,"",python,tab +157,453399,"models/lam.py",0,0,"",python,tab +158,454925,"models/lam.py",4794,0,"",python,selection_mouse +159,454926,"models/lam.py",4793,0,"",python,selection_command +160,456596,"models/lam.py",4684,0,"",python,selection_mouse +161,458093,"models/lam.py",4692,0,"",python,selection_mouse +162,467426,"models/lam.py",4693,0,"",python,selection_mouse +163,467775,"models/lam.py",4687,19,"padded_patch_BTNp1P",python,selection_mouse +164,468548,"models/lam.py",4654,0,"",python,selection_mouse +165,468550,"models/lam.py",4653,0,"",python,selection_command +166,469125,"models/lam.py",4694,0,"",python,selection_mouse +167,469287,"models/lam.py",4687,19,"padded_patch_BTNp1P",python,selection_mouse +168,470077,"models/lam.py",4568,0,"",python,selection_mouse +169,470847,"models/lam.py",4681,0,"",python,selection_mouse +170,471005,"models/lam.py",4679,7,"encoder",python,selection_mouse +171,563510,"models/lam.py",2092,0,"",python,selection_mouse +172,569393,"models/lam.py",4649,0,"",python,selection_mouse +173,570377,"models/lam.py",4858,0,"",python,selection_mouse +174,574538,"models/lam.py",4332,0,"",python,selection_mouse +175,574882,"models/lam.py",4332,2," B",python,selection_mouse +176,574913,"models/lam.py",4332,3," B,",python,selection_mouse +177,574927,"models/lam.py",4332,4," B, ",python,selection_mouse +178,574983,"models/lam.py",4332,5," B, T",python,selection_mouse +179,574984,"models/lam.py",4332,6," B, T ",python,selection_mouse +180,574987,"models/lam.py",4332,7," B, T =",python,selection_mouse +181,575004,"models/lam.py",4332,8," B, T = ",python,selection_mouse +182,575063,"models/lam.py",4332,9," B, T = v",python,selection_mouse +183,575068,"models/lam.py",4332,10," B, T = vi",python,selection_mouse 
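The DynamicsCausal `__call__` recorded above concatenates the time-shifted action embedding as an extra position 0 along the patch axis, then drops the last of the N+1 output positions (`logits_BTNp1V[:, :, :-1]`), so that a causal transformer's output at position i, which can attend to the action and tokens before i, serves as the prediction for video token i. A minimal index-alignment sketch of that shift, with a zero array standing in for the transformer and all sizes illustrative:

import jax.numpy as jnp

B, T, N, M, V = 1, 2, 4, 8, 16
vid_embed_BTNM = jnp.zeros((B, T, N, M))
act_embed_BT1M = jnp.zeros((B, T, 1, M))

# Input positions along axis 2: 0 = action, 1 = token 0, ..., N = token N-1.
inputs_BTNp1M = jnp.concatenate([act_embed_BT1M, vid_embed_BTNM], axis=2)
assert inputs_BTNp1M.shape == (B, T, N + 1, M)

# Causal output i sees inputs 0..i (action + tokens < i) and predicts token i;
# output position N has no target, so the final position is dropped.
logits_BTNp1V = jnp.zeros((B, T, N + 1, V))  # stand-in for the transformer
logits_BTNV = logits_BTNp1V[:, :, :-1]
assert logits_BTNV.shape == (B, T, N, V)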
+184,575538,"models/lam.py",4342,0,"",python,selection_mouse +185,575592,"models/lam.py",4340,12,"videos_BTHWC",python,selection_mouse +186,575810,"models/lam.py",4338,14,"= videos_BTHWC",python,selection_mouse +187,575867,"models/lam.py",4337,15," = videos_BTHWC",python,selection_mouse +188,575868,"models/lam.py",4336,16,"T = videos_BTHWC",python,selection_mouse +189,575927,"models/lam.py",4335,17," T = videos_BTHWC",python,selection_mouse +190,575935,"models/lam.py",4334,18,", T = videos_BTHWC",python,selection_mouse +191,576319,"models/lam.py",4334,0,"",python,selection_mouse +192,577217,"models/lam.py",4463,0,"",python,selection_mouse +193,578181,"models/lam.py",4375,0,"",python,selection_mouse +194,578949,"models/lam.py",4436,0,"",python,selection_mouse +195,588969,"models/lam.py",4603,0,"",python,selection_mouse +196,589559,"models/lam.py",4615,0,"",python,selection_mouse +197,589731,"models/lam.py",4608,10,"patch_BTNP",python,selection_mouse +198,590602,"models/lam.py",4607,0,"",python,selection_mouse +199,591866,"models/lam.py",4372,0,"",python,selection_mouse +200,592010,"models/lam.py",4371,10,"patch_BTNP",python,selection_mouse +201,603571,"models/lam.py",4452,0,"",python,selection_mouse +202,604558,"models/lam.py",4435,0,"",python,selection_mouse +203,624230,"models/lam.py",4510,0,"",python,selection_mouse +204,624909,"models/lam.py",4511,0,"",python,selection_mouse +205,625802,"models/lam.py",4788,0,"",python,selection_mouse +206,626685,"models/lam.py",4508,0,"",python,selection_mouse +207,627265,"models/lam.py",4510,0,"",python,selection_mouse +208,628134,"models/lam.py",4564,0,"",python,selection_mouse +209,628288,"models/lam.py",4552,19,"padded_patch_BTNp1P",python,selection_mouse +210,630655,"models/lam.py",4443,0,"",python,selection_mouse +211,630817,"models/lam.py",4432,15,"action_pad_BT1P",python,selection_mouse +212,631946,"models/lam.py",4629,0,"",python,selection_mouse +213,632395,"models/lam.py",4543,0,"",python,selection_mouse +214,632399,"models/lam.py",4542,0,"",python,selection_command +215,632543,"models/lam.py",4543,0,"",python,selection_mouse +216,632545,"models/lam.py",4542,0,"",python,selection_command +217,633657,"models/lam.py",4561,0,"",python,selection_mouse +218,633804,"models/lam.py",4552,19,"padded_patch_BTNp1P",python,selection_mouse +219,635073,"models/lam.py",4666,0,"",python,selection_mouse +220,635380,"models/lam.py",4663,8,"z_BTNp1L",python,selection_mouse +221,642664,"models/lam.py",4622,0,"",python,selection_mouse +222,642842,"models/lam.py",4622,1,"x",python,selection_mouse +223,642865,"models/lam.py",4622,2,"xi",python,selection_mouse +224,642876,"models/lam.py",4622,3,"xis",python,selection_mouse +225,642891,"models/lam.py",4622,4,"xis=",python,selection_mouse +226,642908,"models/lam.py",4622,5,"xis=2",python,selection_mouse +227,642963,"models/lam.py",4622,6,"xis=2)",python,selection_mouse +228,643773,"models/lam.py",4625,0,"",python,selection_mouse +229,643774,"models/lam.py",4621,4,"axis",python,selection_mouse +230,644041,"models/lam.py",4544,85," padded_patch_BTNp1P = jnp.concatenate((action_pad_BT1P, patch_BTNP), axis=2)\n",python,selection_mouse +231,645896,"models/lam.py",4565,0,"",python,selection_mouse +232,646317,"models/lam.py",4567,0,"",python,selection_mouse +233,646958,"models/lam.py",4568,0,"",python,selection_mouse +234,647965,"models/lam.py",4567,0,"",python,selection_mouse +235,654018,"models/lam.py",4566,0,"",python,selection_command +236,654785,"models/lam.py",4565,0,"",python,selection_command 
+237,655185,"models/lam.py",4566,0,"",python,selection_command +238,655365,"models/lam.py",4567,0,"",python,selection_command +239,667582,"models/lam.py",4490,0,"",python,selection_mouse +240,667747,"models/lam.py",4485,9,"action_in",python,selection_mouse +241,669213,"models/lam.py",4501,0,"",python,selection_mouse +242,671112,"models/lam.py",4509,0,"",python,selection_mouse +243,681574,"models/lam.py",4742,0,"",python,selection_mouse +244,681983,"models/lam.py",4792,0,"",python,selection_mouse +245,683825,"models/lam.py",4774,0,"",python,selection_mouse +246,685456,"models/lam.py",4787,0,"",python,selection_mouse +247,685644,"models/lam.py",4787,1," ",python,selection_mouse +248,685664,"models/lam.py",4787,2," 1",python,selection_mouse +249,685821,"models/lam.py",4787,3," 1:",python,selection_mouse +250,689140,"models/lam.py",4791,0,"",python,selection_mouse +251,689312,"models/lam.py",4791,1," ",python,selection_mouse +252,689361,"models/lam.py",4791,2," 0",python,selection_mouse +253,689439,"models/lam.py",4791,3," 0]",python,selection_mouse +254,692710,"models/lam.py",4794,0,"",python,selection_mouse +255,692717,"models/lam.py",4793,0,"",python,selection_command +256,693325,"models/lam.py",4794,0,"",python,selection_mouse +257,693327,"models/lam.py",4793,0,"",python,selection_command +258,694332,"models/lam.py",4795,0,"",python,selection_mouse +259,976424,"models/lam.py",4794,0,"",python,selection_mouse +260,976425,"models/lam.py",4793,0,"",python,selection_command +261,985165,"models/lam.py",5118,0,"",python,selection_mouse +262,985886,"models/lam.py",5022,0,"",python,selection_mouse +263,985887,"models/lam.py",5021,0,"",python,selection_command +264,986486,"models/lam.py",5117,0,"",python,selection_mouse +265,986487,"models/lam.py",5116,0,"",python,selection_command +266,995683,"models/lam.py",5118,0,"",python,selection_mouse +267,1018068,"TERMINAL",0,0,"bash",,terminal_focus +268,1021354,"TERMINAL",0,0,"queue",,terminal_command +269,1021404,"TERMINAL",0,0,"]633;E;2025-08-11 16:07:37 queue;361f455a-15ad-4916-88b6-b9e49434d7af]633;C",,terminal_output +270,1021459,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1993.localdomain: Mon Aug 11 16:07:37 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3414046 accelerat train_to tum_cte0 R 4:16:07\t 1 hkn07283412401 accelerat train_to tum_cte0 R 1-00:25:47\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]3414284 cpuonly wrap tum_cte0 R 2:27:45\t 1 hkn03103414283 cpuonly wrap tum_cte0 R 2:27:57\t 1 hkn03093414282 cpuonly wrap tum_cte0 R 2:28:07\t 1 hkn16653414281 cpuonly wrap tum_cte0 R 2:28:15\t 1 hkn16623414280 cpuonly wrap tum_cte0 R 2:28:24\t 1 hkn02383413833 cpuonly wrap tum_cte0 R 5:45:46\t 1 hkn0364",,terminal_output +271,1022511,"TERMINAL",0,0,"888688657",,terminal_output +272,1023559,"TERMINAL",0,0,"999799768",,terminal_output +273,1024019,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +274,1066938,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_2_nodes.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=3\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_2_node\n#SBATCH --requeue\n#SBATCH 
--signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=3373407\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n --restore_ckpt \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --init_lr=0 \\n --max_lr=2e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-2-node-$slurm_job_id \\n --tags dynamics causal 2-node \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +275,1073455,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_2_nodes.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=3\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_2_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource 
.venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=3373407\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n --restore_ckpt \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --init_lr=0 \\n --max_lr=2e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-2-node-$slurm_job_id \\n --tags dynamics causal 2-node \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +276,1080374,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=3\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_2_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=3373407\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n --restore_ckpt \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --init_lr=0 \\n --max_lr=2e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-2-node-$slurm_job_id \\n --tags dynamics causal 2-node \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n 
--tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +277,1081529,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",38,0,"",shellscript,selection_mouse +278,1081530,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",37,0,"",shellscript,selection_command +279,1081980,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",38,0,"",shellscript,selection_command +280,1082350,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",37,1,"",shellscript,content +281,1082494,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",37,0,"1",shellscript,content +282,1082495,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",38,0,"",shellscript,selection_keyboard +283,1082822,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",37,0,"",shellscript,selection_command +284,1083166,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",20,0,"",shellscript,selection_command +285,1083247,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",37,0,"",shellscript,selection_command +286,1083478,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",55,0,"",shellscript,selection_command +287,1084155,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",66,0,"",shellscript,selection_command +288,1084669,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",65,0,"",shellscript,selection_command +289,1084855,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",89,0,"",shellscript,selection_command +290,1085354,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",117,0,"",shellscript,selection_command +291,1085390,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",147,0,"",shellscript,selection_command +292,1085440,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",168,0,"",shellscript,selection_command +293,1085493,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",196,0,"",shellscript,selection_command +294,1085578,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",322,0,"",shellscript,selection_command +295,1087647,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",323,0,"",shellscript,selection_command +296,1088139,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",324,0,"",shellscript,selection_command +297,1088194,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",325,0,"",shellscript,selection_command +298,1088199,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",326,0,"",shellscript,selection_command +299,1088257,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",327,0,"",shellscript,selection_command +300,1088258,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",328,0,"",shellscript,selection_command +301,1088313,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",329,0,"",shellscript,selection_command +302,1088315,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",330,0,"",shellscript,selection_command +303,1088373,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",331,0,"",shellscript,selection_command +304,1088378,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",332,0,"",shellscript,selection_command +305,1088429,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",333,0,"",shellscript,selection_command +306,1088436,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",334,0,"",shellscript,selection_command 
+307,1088490,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",335,0,"",shellscript,selection_command +308,1088497,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",336,0,"",shellscript,selection_command +309,1088550,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",337,0,"",shellscript,selection_command +310,1088557,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",338,0,"",shellscript,selection_command +311,1088617,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",339,0,"",shellscript,selection_command +312,1088619,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",340,0,"",shellscript,selection_command +313,1088673,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",341,0,"",shellscript,selection_command +314,1088682,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",342,0,"",shellscript,selection_command +315,1088740,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",343,0,"",shellscript,selection_command +316,1088741,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",344,0,"",shellscript,selection_command +317,1088810,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",345,0,"",shellscript,selection_command +318,1088811,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",346,0,"",shellscript,selection_command +319,1088829,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",347,0,"",shellscript,selection_command +320,1088897,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",348,0,"",shellscript,selection_command +321,1088898,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",349,0,"",shellscript,selection_command +322,1088951,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",350,0,"",shellscript,selection_command +323,1088956,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",351,0,"",shellscript,selection_command +324,1089013,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",352,0,"",shellscript,selection_command +325,1089014,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",353,0,"",shellscript,selection_command +326,1089077,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",354,0,"",shellscript,selection_command +327,1089077,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",355,0,"",shellscript,selection_command +328,1089134,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",356,0,"",shellscript,selection_command +329,1089134,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",357,0,"",shellscript,selection_command +330,1089190,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",358,0,"",shellscript,selection_command +331,1089191,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",359,0,"",shellscript,selection_command +332,1089256,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",360,0,"",shellscript,selection_command +333,1089258,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",361,0,"",shellscript,selection_command +334,1089324,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",362,0,"",shellscript,selection_command +335,1089324,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",363,0,"",shellscript,selection_command +336,1089384,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",364,0,"",shellscript,selection_command 
+337,1089384,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",365,0,"",shellscript,selection_command +338,1089441,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",366,0,"",shellscript,selection_command +339,1089442,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",367,0,"",shellscript,selection_command +340,1089494,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",368,0,"",shellscript,selection_command +341,1089497,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",369,0,"",shellscript,selection_command +342,1089557,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",370,0,"",shellscript,selection_command +343,1089559,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",371,0,"",shellscript,selection_command +344,1089613,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",372,0,"",shellscript,selection_command +345,1089620,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",373,0,"",shellscript,selection_command +346,1089674,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",374,0,"",shellscript,selection_command +347,1089674,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",375,0,"",shellscript,selection_command +348,1089733,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",376,0,"",shellscript,selection_command +349,1089734,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",377,0,"",shellscript,selection_command +350,1089787,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",378,0,"",shellscript,selection_command +351,1089799,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",379,0,"",shellscript,selection_command +352,1089853,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",380,0,"",shellscript,selection_command +353,1089883,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",381,0,"",shellscript,selection_command +354,1089897,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",382,0,"",shellscript,selection_command +355,1089953,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",383,0,"",shellscript,selection_command +356,1089954,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",384,0,"",shellscript,selection_command +357,1090011,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",385,0,"",shellscript,selection_command +358,1090252,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",259,0,"",shellscript,selection_command +359,1094295,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",259,0,"c",shellscript,content +360,1094297,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",260,0,"",shellscript,selection_keyboard +361,1094404,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",260,0,"o",shellscript,content +362,1094405,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",261,0,"",shellscript,selection_keyboard +363,1095023,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",261,0,"i",shellscript,content +364,1095024,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",262,0,"",shellscript,selection_keyboard +365,1095118,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",262,0,"n",shellscript,content +366,1095119,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",263,0,"",shellscript,selection_keyboard +367,1095543,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",263,0,"r",shellscript,content 
+368,1095544,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",264,0,"",shellscript,selection_keyboard +369,1095626,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",264,0,"u",shellscript,content +370,1095627,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",265,0,"",shellscript,selection_keyboard +371,1095685,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",265,0,"n",shellscript,content +372,1095686,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",266,0,"",shellscript,selection_keyboard +373,1096678,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",266,0,"/",shellscript,content +374,1096679,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",267,0,"",shellscript,selection_keyboard +375,1097413,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",401,0,"",shellscript,selection_command +376,1098962,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",392,0,"c",shellscript,content +377,1098965,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",393,0,"",shellscript,selection_keyboard +378,1099053,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",393,0,"o",shellscript,content +379,1099055,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",394,0,"",shellscript,selection_keyboard +380,1099270,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",394,0,"i",shellscript,content +381,1099271,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",395,0,"",shellscript,selection_keyboard +382,1099359,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",395,0,"n",shellscript,content +383,1099360,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",396,0,"",shellscript,selection_keyboard +384,1099549,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",396,0,"t",shellscript,content +385,1099550,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",397,0,"",shellscript,selection_keyboard +386,1099659,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",397,0,"u",shellscript,content +387,1099660,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",398,0,"",shellscript,selection_keyboard +388,1099961,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",397,1,"",shellscript,content +389,1100088,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",396,1,"",shellscript,content +390,1100293,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",396,0,"r",shellscript,content +391,1100294,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",397,0,"",shellscript,selection_keyboard +392,1100367,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",397,0,"u",shellscript,content +393,1100368,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",398,0,"",shellscript,selection_keyboard +394,1100369,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",398,0,"i",shellscript,content +395,1100370,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",399,0,"",shellscript,selection_keyboard +396,1100436,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",399,0,"n",shellscript,content +397,1100437,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",400,0,"",shellscript,selection_keyboard +398,1101053,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",399,1,"",shellscript,content +399,1101179,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",398,1,"",shellscript,content 
+400,1101444,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",398,0,"n",shellscript,content +401,1101445,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",399,0,"",shellscript,selection_keyboard +402,1103280,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",399,0,"/",shellscript,content +403,1103281,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",400,0,"",shellscript,selection_keyboard +404,1105078,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_2_nodes.sbatch",0,0,"",shellscript,tab +405,1110051,"slurm/jobs/mihir/horeka/coinrun/train_tokenizer_lr_1e-4.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/tokenizer/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/tokenizer/%x_%j.log\n#SBATCH --job-name=train_tokenizer_1e-4\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/array_records\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --save_ckpt \\n --image_height=64 \\n --image_width=64 \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=48 \\n --init_lr=0 \\n --max_lr=1e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=1000 \\n --log \\n --name=coinrun-tokenizer-1e-4-$slurm_job_id \\n --tags tokenizer coinrun 1e-4 \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n\n",shellscript,tab +406,1112667,"TERMINAL",0,0,"bash",,terminal_focus +407,1113987,"utils/nn.py",0,0,"import math\nfrom typing import Tuple, Callable, List\n\nfrom flax import nnx\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\nclass SpatioTemporalPositionalEncoding(nnx.Module):\n """"""\n Applies separate sinusoidal positional encodings to the temporal and spatial dimensions.\n """"""\n def __init__(self, d_model: int, max_len: int = 5000):\n self.d_model = d_model\n self.max_len = max_len\n\n pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n pe = pe.at[:, 0::2].set(jnp.sin(position * div_term))\n pe = pe.at[:, 1::2].set(jnp.cos(position * 
div_term))\n self.pe = nnx.Variable(pe)\n\n def __call__(self, x: jax.Array) -> jax.Array:\n """"""\n Args:\n x: The input tensor of shape (Batch, Time, Space, Dimension).\n\n Returns:\n The input tensor with positional encodings added.\n """"""\n assert x.ndim == 4, f""Input must be 4-dimensional, but got shape {x.shape}""\n\n num_timesteps = x.shape[1]\n num_spatial_patches = x.shape[2]\n\n # Temporal positional encoding: (1, T, 1, D)\n temporal_pe = self.pe.value[None, :num_timesteps, None, :]\n x = x + temporal_pe\n\n # Spatial positional encoding: (1, 1, S, D)\n spatial_pe = self.pe.value[None, None, :num_spatial_patches, :]\n x = x + spatial_pe\n\n return x\n\n\nclass STBlock(nnx.Module):\n def __init__(\n self,\n dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.dim = dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=False\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n @nnx.remat\n def __call__(self, x_BTNM: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z_BTNM = self.spatial_norm(x_BTNM)\n z_BTNM = self.spatial_attention(z_BTNM)\n x_BTNM = x_BTNM + z_BTNM\n\n # --- Temporal attention ---\n x_BNTM = x_BTNM.swapaxes(1, 2)\n z_BNTM = self.temporal_norm(x_BNTM)\n z_BNTM = self.temporal_attention(z_BNTM)\n x_BNTM = x_BNTM + z_BNTM\n x_BTNM = x_BNTM.swapaxes(1, 2)\n\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM\n\n\nclass STTransformer(nnx.Module):\n """"""\n Dimension keys:\n B: batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n O: number of output features\n """"""\n def __init__(\n self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: 
nnx.Rngs,\n max_len: int = 5000,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.out_dim = out_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n self.pos_enc = SpatioTemporalPositionalEncoding(self.model_dim, max_len=max_len)\n\n self.blocks = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n STBlock(\n dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n )\n\n self.output_dense = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(self, x_BTNI: jax.Array) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n x_BTNM = self.pos_enc(x_BTNM)\n for block in self.blocks:\n x_BTNM = block(x_BTNM)\n\n x_BTNO = self.output_dense(x_BTNM)\n return x_BTNO\n\nclass TransformerBlock(nnx.Module):\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.decode = decode\n\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n 
@nnx.remat\n def __call__(self, x_BTNM: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None) -> jax.Array:\n # --- Spatial attention ---\n B, T, N, M = x_BTNM.shape\n z_FNM = einops.rearrange(x_BTNM, ""b t n m -> (b t) n m"")\n z_FNM = self.spatial_norm(z_FNM)\n z_FNM = self.spatial_attention(z_FNM)\n z_BTNM = einops.rearrange(z_FNM, ""(b t) n m -> b t n m"", t=T)\n x_BTNM = x_BTNM + z_BTNM\n # --- Temporal attention ---\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> (b n) t m"")\n z_PTM = self.temporal_norm(z_PTM)\n z_PTM = self.temporal_attention(z_PTM)\n z_BTNM = einops.rearrange(z_PTM, ""(b n) t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM\n\nclass Transformer(nnx.Module):\n """"""\n Dimension keys:\n B: batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n O: number of output features\n F: number of frames in batch\n P: number of patch positions in batch\n """"""\n def __init__(\n self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n max_len: int = 5000,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.out_dim = out_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n self.pos_enc = SpatioTemporalPositionalEncoding(self.model_dim, max_len=max_len)\n\n self.blocks: List[TransformerBlock] = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n TransformerBlock(\n model_dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=decode,\n rngs=rngs,\n )\n )\n self.output_dense = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(self, x_BTNI: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n x_BTNM = self.pos_enc(x_BTNM)\n for block in self.blocks:\n x_BTNM = block(x_BTNM, pos_index)\n\n x_BTNV = self.output_dense(x_BTNM)\n return x_BTNV\n\ndef normalize(x: jax.Array) -> jax.Array:\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nnx.Module):\n """"""\n Dimension keys:\n D: B * T * N\n K: number of latents\n L: latent dimension\n """"""\n def __init__(\n self, latent_dim: int, num_latents: int, dropout: float, rngs: nnx.Rngs\n ):\n self.latent_dim = 
latent_dim\n self.num_latents = num_latents\n self.dropout = dropout\n\n self.codebook = nnx.Param(\n normalize(\n nnx.initializers.lecun_uniform()(\n rngs.params(), (self.num_latents, self.latent_dim)\n )\n )\n )\n self.drop = nnx.Dropout(self.dropout, rngs=rngs)\n\n def __call__(\n self, x_DL: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x_DL = normalize(x_DL)\n normalized_codebook_KL = normalize(self.codebook.value)\n distance_DK = -jnp.matmul(x_DL, normalized_codebook_KL.T)\n if training:\n distance_DK = self.drop(distance_DK)\n\n # --- Get indices and embeddings ---\n indices_D = jnp.argmin(distance_DK, axis=-1)\n z_DL = self.codebook[indices_D]\n\n # --- Straight through estimator ---\n z_q_DL = x_DL + jax.lax.stop_gradient(z_DL - x_DL)\n return z_q_DL, z_DL, x_DL, indices_D\n\n def get_codes(self, indices_E: jax.Array) -> jax.Array:\n return self.codebook[indices_E]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool) -> Callable:\n """"""\n Create an attention function that uses flash attention if enabled.\n\n flax.nnx.MultiHeadAttention provides tensors with shape (batch..., length, num_heads, head_dim),\n but jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim). We reshape to\n ensure compatibility. cuDNN's flash attention additionally requires a sequence length that\n is a multiple of 4. We pad the sequence length to the nearest multiple of 4 and mask\n accordingly. Note that cuDNN requires the mask to be broadcast before calling the attention\n function due to strict shape checking.\n """"""\n\n def attention_fn(query_BTHD, key_BSHD, value_BSHD, bias=None, mask_B111=None, **kwargs):\n implementation = ""cudnn"" if use_flash_attention else None\n\n def _merge_batch_dims(x):\n return einops.rearrange(x, ""... l h k -> (...) 
l h k"")\n\n def _pad(x, pad_size):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n\n original_shape = query_BTHD.shape\n T = query_BTHD.shape[-3]\n S = key_BSHD.shape[-3]\n\n # Pad to nearest multiple of 4\n Q = ((T + 3) // 4) * 4\n pad_size_Q = Q - T\n K = ((S + 3) // 4) * 4\n pad_size_K = K - S\n\n query_BQHD = _pad(_merge_batch_dims(query_BTHD), pad_size_Q)\n key_BKHD = _pad(_merge_batch_dims(key_BSHD), pad_size_K)\n value_BKHD = _pad(_merge_batch_dims(value_BSHD), pad_size_K)\n\n attention_mask = jnp.ones((Q, K), dtype=jnp.bool_)\n attention_mask = attention_mask.at[T:, :].set(False)\n attention_mask = attention_mask.at[:, S:].set(False)\n\n mask_11TS = attention_mask[jnp.newaxis, jnp.newaxis, :, :]\n\n bias_4d = jnp.pad(_merge_batch_dims(bias), ((0, 0), (0, 0), (0, pad_size_Q), (0, pad_size_K))) if bias is not None else None\n\n # NOTE: jax.nn.dot_product_attention does not support dropout\n output_4d = jax.nn.dot_product_attention(\n query=query_BQHD,\n key=key_BKHD,\n value=value_BKHD,\n bias=bias_4d,\n mask=mask_11TS,\n implementation=implementation,\n is_causal=is_causal,\n )\n return output_4d[..., :T, :, :].reshape(original_shape)\n\n return attention_fn\n",python,tab +408,1115653,"slurm/jobs/mihir/horeka/coinrun/train_tokenizer_lr_1e-4.sbatch",0,0,"",shellscript,tab +409,1116391,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=256 \\n --init_lr=0 \\n --dyna_type=causal \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n 
--name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +410,1118019,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",0,0,"",shellscript,tab +411,1121881,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",274,0,"",shellscript,selection_mouse +412,1122092,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",274,1,"d",shellscript,selection_mouse +413,1122109,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",274,2,"dy",shellscript,selection_mouse +414,1122167,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",274,3,"dyn",shellscript,selection_mouse +415,1122168,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",274,4,"dyna",shellscript,selection_mouse +416,1122171,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",274,5,"dynam",shellscript,selection_mouse +417,1122187,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",274,6,"dynami",shellscript,selection_mouse +418,1122244,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",274,7,"dynamic",shellscript,selection_mouse +419,1122319,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",274,8,"dynamics",shellscript,selection_mouse +420,1122907,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",281,0,"",shellscript,selection_mouse +421,1123543,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",270,0,"",shellscript,selection_mouse +422,1123711,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",267,6,"causal",shellscript,selection_mouse +423,1124452,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",267,6,"",shellscript,content +424,1124875,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",267,1,"",shellscript,content +425,1125503,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",394,0,"",shellscript,selection_command +426,1125751,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",393,0,"",shellscript,selection_command +427,1126033,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",393,6,"",shellscript,content +428,1126370,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",393,1,"",shellscript,content +429,1127836,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",411,1,"",shellscript,content +430,1128334,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",410,1,"",shellscript,content +431,1128346,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",409,1,"",shellscript,content +432,1128403,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",408,1,"",shellscript,content +433,1128420,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",407,1,"",shellscript,content +434,1128458,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",406,1,"",shellscript,content +435,1128474,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",405,1,"",shellscript,content +436,1128529,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",404,1,"",shellscript,content +437,1128531,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",403,1,"",shellscript,content 
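The utils/nn.py snapshot captured in record 407 above applies a single shared sinusoidal table to both the temporal and the spatial axis of a (B, T, N, D) activation. A minimal standalone sketch of that broadcasting scheme, assuming even d_model (the function names here are illustrative, not from the recorded file):

```python
# Minimal sketch of the separable spatio-temporal positional encoding
# recorded in utils/nn.py (record 407). Only the broadcasting scheme
# mirrors the captured code; names are illustrative.
import math
import jax.numpy as jnp

def sinusoidal_table(max_len: int, d_model: int) -> jnp.ndarray:
    """Standard sinusoidal table of shape (max_len, d_model); assumes even d_model."""
    position = jnp.arange(max_len, dtype=jnp.float32)[:, None]
    div_term = jnp.exp(jnp.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
    pe = jnp.zeros((max_len, d_model))
    pe = pe.at[:, 0::2].set(jnp.sin(position * div_term))
    pe = pe.at[:, 1::2].set(jnp.cos(position * div_term))
    return pe

def add_st_pos_enc(x: jnp.ndarray, pe: jnp.ndarray) -> jnp.ndarray:
    """x: (B, T, N, D). The same table indexes time (axis 1) and space (axis 2)."""
    B, T, N, D = x.shape
    x = x + pe[None, :T, None, :]   # temporal encoding, broadcast over patches
    x = x + pe[None, None, :N, :]   # spatial encoding, broadcast over frames
    return x
```

Sharing one table across both axes adds no learned parameters, which matches the recorded module storing `pe` as a plain `nnx.Variable` and asserting strictly 4-dimensional inputs.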
+438,1128594,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",402,1,"",shellscript,content +439,1128987,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",401,1,"",shellscript,content +440,1129252,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",274,0,"",shellscript,selection_command +441,1130313,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",285,1,"",shellscript,content +442,1130814,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",284,1,"",shellscript,content +443,1130834,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",283,1,"",shellscript,content +444,1130887,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",282,1,"",shellscript,content +445,1130912,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",281,1,"",shellscript,content +446,1130925,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",280,1,"",shellscript,content +447,1130976,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",279,1,"",shellscript,content +448,1130980,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",278,1,"",shellscript,content +449,1131037,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",277,1,"",shellscript,content +450,1131147,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",276,1,"",shellscript,content +451,1131295,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",275,1,"",shellscript,content +452,1131770,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",276,0,"",shellscript,selection_command +453,1132256,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",276,0,"c",shellscript,content +454,1132257,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",277,0,"",shellscript,selection_keyboard +455,1132464,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",277,0,"o",shellscript,content +456,1132465,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",278,0,"",shellscript,selection_keyboard +457,1132772,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",277,1,"",shellscript,content +458,1132924,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",277,0,"a",shellscript,content +459,1132925,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",278,0,"",shellscript,selection_keyboard +460,1133006,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",278,0,"u",shellscript,content +461,1133007,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",279,0,"",shellscript,selection_keyboard +462,1133119,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",279,0,"s",shellscript,content +463,1133120,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",280,0,"",shellscript,selection_keyboard +464,1133263,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",280,0,"a",shellscript,content +465,1133264,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",281,0,"",shellscript,selection_keyboard +466,1133316,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",281,0,"l",shellscript,content +467,1133317,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",282,0,"",shellscript,selection_keyboard +468,1134081,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",282,0,"/",shellscript,content +469,1134082,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",283,0,"",shellscript,selection_keyboard 
+470,1134613,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",406,0,"",shellscript,selection_command +471,1134677,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",455,0,"",shellscript,selection_command +472,1135226,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",340,0,"",shellscript,selection_command +473,1135558,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",339,0,"",shellscript,selection_command +474,1137600,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",398,0,"",shellscript,selection_command +475,1138317,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",398,0,"c",shellscript,content +476,1138318,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",399,0,"",shellscript,selection_keyboard +477,1138477,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",399,0,"a",shellscript,content +478,1138478,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",400,0,"",shellscript,selection_keyboard +479,1138572,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",400,0,"u",shellscript,content +480,1138573,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",401,0,"",shellscript,selection_keyboard +481,1138689,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",401,0,"s",shellscript,content +482,1138690,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",402,0,"",shellscript,selection_keyboard +483,1138836,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",402,0,"a",shellscript,content +484,1138837,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",403,0,"",shellscript,selection_keyboard +485,1138907,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",403,0,"l",shellscript,content +486,1138908,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",404,0,"",shellscript,selection_keyboard +487,1139421,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",404,0,"/",shellscript,content +488,1139422,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",405,0,"",shellscript,selection_keyboard +489,1140439,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",532,0,"",shellscript,selection_mouse +490,1141106,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",531,0,"",shellscript,selection_mouse +491,1141688,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",480,0,"",shellscript,selection_mouse +492,1141967,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",459,21,"ode\n#SBATCH --requeue",shellscript,selection_mouse +493,1142032,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",458,22,"node\n#SBATCH --requeue",shellscript,selection_mouse +494,1142177,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",457,23,"_node\n#SBATCH --requeue",shellscript,selection_mouse +495,1142411,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",457,0,"",shellscript,selection_mouse +496,1143147,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",456,1,"",shellscript,content +497,1143320,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",456,0,"1",shellscript,content +498,1143321,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",457,0,"",shellscript,selection_keyboard +499,1153197,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1503,0,"",shellscript,selection_mouse +500,1154697,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1503,0,"c",shellscript,content 
+501,1154698,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1504,0,"",shellscript,selection_keyboard +502,1154787,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1504,0,"o",shellscript,content +503,1154788,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1505,0,"",shellscript,selection_keyboard +504,1154983,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1505,0,"u",shellscript,content +505,1154984,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1506,0,"",shellscript,selection_keyboard +506,1155126,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1506,0,"n",shellscript,content +507,1155127,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1507,0,"",shellscript,selection_keyboard +508,1155527,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1506,1,"",shellscript,content +509,1155648,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1505,1,"",shellscript,content +510,1155833,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1505,0,"i",shellscript,content +511,1155834,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1506,0,"",shellscript,selection_keyboard +512,1155890,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1506,0,"n",shellscript,content +513,1155891,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1507,0,"",shellscript,selection_keyboard +514,1156132,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1507,0,"r",shellscript,content +515,1156133,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1508,0,"",shellscript,selection_keyboard +516,1156239,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1508,0,"u",shellscript,content +517,1156240,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1509,0,"",shellscript,selection_keyboard +518,1156302,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1509,0,"n",shellscript,content +519,1156303,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1510,0,"",shellscript,selection_keyboard +520,1157065,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1510,0,"/",shellscript,content +521,1157066,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1511,0,"",shellscript,selection_keyboard +522,1160219,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1734,0,"",shellscript,selection_mouse +523,1160367,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1721,27,"train_tokenizer_lr_sweep_1e",shellscript,selection_mouse +524,1168860,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1762,0,"",shellscript,selection_mouse +525,1168986,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1761,1,"/",shellscript,selection_mouse +526,1169003,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1628,134,"\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +527,1169016,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1567,195,"\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +528,1169066,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1620,142," 
ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +529,1169068,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1606,156,"porting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +530,1169084,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1597,165,"cture supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +531,1169100,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1589,173,"new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +532,1169118,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1586,176,"he new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +533,1169133,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1584,178," the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +534,1169187,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1583,179,"h the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +535,1169240,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1582,180,"th the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +536,1169293,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1581,181,"ith the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +537,1169301,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1641,121,"pt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +538,1169357,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1642,120,"t_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +539,1169368,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1643,119,"_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +540,1169384,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1644,118,"dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse 
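The recorded scripts also decide between `--restore-ckpt` and `--no-restore-ckpt` by reading SLURM's restart counter. A sketch of the same check done in Python rather than the scripts' grep/cut pipeline (the helper name is illustrative):

```python
# Hedged sketch of the restart-count check the recorded scripts do in bash:
#   scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2
import os
import re
import subprocess

def restore_ckpt_flag() -> str:
    job_id = os.environ["SLURM_JOB_ID"]
    out = subprocess.run(
        ["scontrol", "show", "job", job_id],
        capture_output=True, text=True, check=True,
    ).stdout
    m = re.search(r"Restarts=(\d+)", out)
    restarts = int(m.group(1)) if m else 0
    # First launch trains from scratch; any requeued run restores its checkpoint.
    return "--restore-ckpt" if restarts > 0 else "--no-restore-ckpt"
```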
+541,1169396,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1645,117,"ir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +542,1169450,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1646,116,"r=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +543,1169451,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1647,115,"=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +544,1169636,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1648,114,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/checkpoints/train_tokenizer_lr_sweep_1e-4_larger_ffn/",shellscript,selection_mouse +545,1170173,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1648,114,"",shellscript,content +546,1170415,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1648,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,content +547,1174430,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1695,0,"",shellscript,selection_mouse +548,1174574,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1628,67,"\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_w",shellscript,selection_mouse +549,1175028,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1628,0,"",shellscript,selection_mouse +550,1175176,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1621,7,"ffn_dim",shellscript,selection_mouse +551,1175354,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1614,14,"larger ffn_dim",shellscript,selection_mouse +552,1175371,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1603,25,"supporting larger ffn_dim",shellscript,selection_mouse +553,1175415,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1593,35,"structure supporting larger ffn_dim",shellscript,selection_mouse +554,1175467,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1592,36," structure supporting larger ffn_dim",shellscript,selection_mouse +555,1175479,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1589,39,"new structure supporting larger ffn_dim",shellscript,selection_mouse +556,1175533,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1588,40," new structure supporting larger ffn_dim",shellscript,selection_mouse +557,1175543,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1585,43,"the new structure supporting larger ffn_dim",shellscript,selection_mouse +558,1175596,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1584,44," the new structure supporting larger ffn_dim",shellscript,selection_mouse +559,1175597,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1580,48,"with the new structure supporting larger ffn_dim",shellscript,selection_mouse +560,1175651,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1579,49," with the new structure supporting larger ffn_dim",shellscript,selection_mouse +561,1175703,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1570,58,"tokenizer with the new structure supporting larger ffn_dim",shellscript,selection_mouse 
+562,1175881,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1569,59," tokenizer with the new structure supporting larger ffn_dim",shellscript,selection_mouse +563,1176277,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1569,59,"",shellscript,content +564,1176431,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1568,1,"",shellscript,content +565,1176796,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1567,1,"",shellscript,content +566,1178202,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1566,0,"",shellscript,selection_mouse +567,1178815,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1567,0,"",shellscript,selection_mouse +568,1180426,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1418,0,"",shellscript,selection_mouse +569,1180571,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1411,7,"3373407",shellscript,selection_mouse +570,1181580,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1419,0,"",shellscript,selection_mouse +571,1183478,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1416,0,"",shellscript,selection_mouse +572,1183681,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1411,7,"3373407",shellscript,selection_mouse +573,1184538,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1411,7,"",shellscript,content +574,1185511,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1411,0,"$",shellscript,content +575,1185512,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1412,0,"",shellscript,selection_keyboard +576,1186762,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1412,0,"S",shellscript,content +577,1186763,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1413,0,"",shellscript,selection_keyboard +578,1188260,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1412,1,"SLURM_JOB_ID",shellscript,content +579,1192326,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1400,0,"",shellscript,selection_mouse +580,1192469,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1398,12,"slurm_job_id",shellscript,selection_mouse +581,1198723,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2053,0,"",shellscript,selection_mouse +582,1199354,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2037,0,"",shellscript,selection_mouse +583,1200184,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2032,0,"",shellscript,selection_mouse +584,1201406,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2031,0,"",shellscript,selection_command +585,1202050,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2031,1,"2",shellscript,selection_command +586,1202642,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2031,1,"2",shellscript,selection_command +587,1203676,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2081,1,"1",shellscript,content +588,1203676,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2031,1,"1",shellscript,content +589,1203679,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2031,0,"",shellscript,selection_command +590,1204409,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2030,0,"",shellscript,selection_command +591,1204905,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2029,0,"",shellscript,selection_command 
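Record 407's `_create_flash_attention_fn` pads sequence lengths to a multiple of 4 before calling cuDNN flash attention and masks the padding out afterwards. A condensed sketch of that trick, assuming `jax.nn.dot_product_attention` semantics and single-batch-dim (B, L, H, D) inputs:

```python
# Hedged sketch of the pad-and-mask trick from the recorded utils/nn.py:
# cuDNN flash attention requires sequence lengths that are multiples of 4,
# so the padded positions are masked and then stripped from the output.
import jax
import jax.numpy as jnp

def padded_attention(q, k, v, use_flash: bool = True):
    """q: (B, T, H, D); k, v: (B, S, H, D)."""
    T, S = q.shape[1], k.shape[1]
    Tp, Sp = -(-T // 4) * 4, -(-S // 4) * 4  # round up to multiples of 4
    pad = lambda x, n: jnp.pad(x, ((0, 0), (0, n), (0, 0), (0, 0)))
    q, k, v = pad(q, Tp - T), pad(k, Sp - S), pad(v, Sp - S)
    mask = jnp.ones((Tp, Sp), dtype=jnp.bool_).at[T:, :].set(False).at[:, S:].set(False)
    out = jax.nn.dot_product_attention(
        q, k, v,
        mask=mask[None, None, :, :],  # broadcast to (B, H, Tp, Sp), as cuDNN expects
        implementation="cudnn" if use_flash else None,
    )
    return out[:, :T]  # drop the query padding
```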
+592,1204960,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2028,0,"",shellscript,selection_command +593,1204961,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2027,0,"",shellscript,selection_command +594,1205020,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2026,0,"",shellscript,selection_command +595,1205020,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2025,0,"",shellscript,selection_command +596,1205081,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2024,0,"",shellscript,selection_command +597,1205084,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2023,0,"",shellscript,selection_command +598,1205151,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2022,0,"",shellscript,selection_command +599,1205222,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2021,0,"",shellscript,selection_command +600,1205384,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2020,0,"",shellscript,selection_command +601,1205530,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2019,0,"",shellscript,selection_command +602,1205672,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2018,0,"",shellscript,selection_command +603,1205841,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2017,0,"",shellscript,selection_command +604,1205958,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2016,0,"",shellscript,selection_command +605,1206759,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2015,0,"",shellscript,selection_command +606,1207353,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2015,1,"d",shellscript,selection_command +607,1207589,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2015,1,"d",shellscript,selection_command +608,1207884,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2015,0,"",shellscript,selection_command +609,1208570,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2065,0,"c",shellscript,content +610,1208570,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2015,0,"c",shellscript,content +611,1208571,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2016,0,"",shellscript,selection_keyboard +612,1208653,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2067,0,"o",shellscript,content +613,1208653,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2016,0,"o",shellscript,content +614,1208654,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2017,0,"",shellscript,selection_keyboard +615,1209104,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2069,0,"i",shellscript,content +616,1209105,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2017,0,"i",shellscript,content +617,1209105,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2018,0,"",shellscript,selection_keyboard +618,1209232,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2071,0,"n",shellscript,content +619,1209232,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2018,0,"n",shellscript,content +620,1209233,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2019,0,"",shellscript,selection_keyboard +621,1209543,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2073,0,"r",shellscript,content +622,1209544,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2019,0,"r",shellscript,content 
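The `VectorQuantizer` in the same snapshot selects codes by cosine similarity and trains the encoder through a straight-through estimator. A minimal sketch of that lookup (the dropout on distances and the nnx module wrapper are omitted):

```python
# Hedged sketch of the cosine-similarity VQ lookup with straight-through
# gradients, as in the recorded VectorQuantizer. Codebook handling is
# condensed; dimension keys follow the recorded docstring (D rows, K codes, L dims).
import jax
import jax.numpy as jnp

def normalize(x):
    return x / (jnp.linalg.norm(x, axis=-1, keepdims=True) + 1e-8)

def vq_lookup(x_DL, codebook_KL):
    x_DL = normalize(x_DL)
    code_KL = normalize(codebook_KL)
    # Negative cosine similarity acts as the distance; pick the nearest code.
    distance_DK = -x_DL @ code_KL.T
    indices_D = jnp.argmin(distance_DK, axis=-1)
    z_DL = codebook_KL[indices_D]
    # Straight-through estimator: forward pass uses the code, the gradient
    # flows through the (normalized) encoder output unchanged.
    z_q_DL = x_DL + jax.lax.stop_gradient(z_DL - x_DL)
    return z_q_DL, indices_D
```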
+623,1209544,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2020,0,"",shellscript,selection_keyboard +624,1209545,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2075,0,"u",shellscript,content +625,1209545,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2020,0,"u",shellscript,content +626,1209545,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2021,0,"",shellscript,selection_keyboard +627,1209603,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2077,0,"n",shellscript,content +628,1209604,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2021,0,"n",shellscript,content +629,1209604,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2022,0,"",shellscript,selection_keyboard +630,1210246,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2079,0,"-",shellscript,content +631,1210246,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2022,0,"-",shellscript,content +632,1210247,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2023,0,"",shellscript,selection_keyboard +633,1211251,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2022,0,"",shellscript,selection_command +634,1211601,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2080,0,"",shellscript,selection_command +635,1212815,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2032,0,"",shellscript,selection_mouse +636,1214379,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2080,0,"",shellscript,selection_mouse +637,1214662,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2080,1," ",shellscript,content +638,1216133,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2175,0,"",shellscript,selection_mouse +639,1216148,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2174,0,"",shellscript,selection_command +640,1218900,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2254,0,"",shellscript,selection_mouse +641,1218911,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2253,0,"",shellscript,selection_command +642,1219035,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2253,1,"\",shellscript,selection_mouse +643,1219036,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2254,0,"",shellscript,selection_command +644,1219070,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2252,2," \",shellscript,selection_mouse +645,1219124,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2220,34,"ads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse +646,1219124,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2215,39,"um_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse +647,1219132,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2213,41,"_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse +648,1219149,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2210,44,"yna_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse +649,1219168,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2208,46,"-dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse +650,1219182,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2179,75," --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse +651,1219234,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2178,76," --dyna_num_blocks=16 \\n 
--dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse +652,1219234,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2177,77," --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse +653,1219235,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2176,78," --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse +654,1219845,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2154,100," --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse +655,1220686,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2154,100,"",shellscript,content +656,1222372,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2154,1,"",shellscript,content +657,1222382,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2158,0,"",shellscript,selection_command +658,1228160,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1954,0,"",shellscript,selection_mouse +659,1228161,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1953,0,"",shellscript,selection_command +660,1229076,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1916,0,"",shellscript,selection_mouse +661,1230247,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1916,1,"1",shellscript,content +662,1230456,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1898,0,"",shellscript,selection_command +663,1230713,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1899,0,"",shellscript,selection_command +664,1230885,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1900,0,"",shellscript,selection_command +665,1231058,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1878,0,"",shellscript,selection_command +666,1231285,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1879,0,"",shellscript,selection_command +667,1231468,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1880,0,"",shellscript,selection_command +668,1231607,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1881,0,"",shellscript,selection_command +669,1231759,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1882,0,"",shellscript,selection_command +670,1232360,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1881,1,"",shellscript,content +671,1232490,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1880,1,"",shellscript,content +672,1233970,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1880,0,"4",shellscript,content +673,1233971,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1881,0,"",shellscript,selection_keyboard +674,1234030,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1881,0,"8",shellscript,content +675,1234031,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1882,0,"",shellscript,selection_keyboard +676,1271236,"TERMINAL",0,0,"bash",,terminal_focus +677,1278018,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",0,0,"",shellscript,tab +678,1285581,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",0,0,"",shellscript,tab +679,1292396,"TERMINAL",0,0,"runner",,terminal_command +680,1294584,"TERMINAL",0,0,"sync-runner",,terminal_command +681,1294661,"TERMINAL",0,0,"]633;E;2025-08-11 16:12:10 sync-runner;361f455a-15ad-4916-88b6-b9e49434d7af]633;Csending incremental file list\r\n",,terminal_output 
+682,1298916,"TERMINAL",0,0,"./\r\n",,terminal_output +683,1300506,"TERMINAL",0,0,"input_pipeline/preprocess/\r\ninput_pipeline/preprocess/npy_to_array_records.py\r\ninput_pipeline/preprocess/pngs_to_array_records.py\r\ninput_pipeline/preprocess/video_to_array_records.py\r\nslurm/jobs/mihir/horeka/\r\nslurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch\r\nslurm/jobs/mihir/horeka/coinrun/\r\nslurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch\r\nslurm/jobs/mihir/horeka/coinrun/train_tokenizer_lr_1e-4.sbatch\r\nslurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch\r\nslurm/utils/mihir/\r\nslurm/utils/mihir/weekend-job-requeuer.sh\r\nslurm/utils/mihir/weekend-job-starter.sh\r\nutils/\r\n",,terminal_output +684,1300805,"TERMINAL",0,0,"\r\nsent 50,276 bytes received 343 bytes 7,787.54 bytes/sec\r\ntotal size is 219,128,081 speedup is 4,328.97\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D;0",,terminal_output +685,1317965,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",,terminal_command +686,1317990,"TERMINAL",0,0,"]633;E;2025-08-11 16:12:33 sbatch slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch;361f455a-15ad-4916-88b6-b9e49434d7af]633;CSubmitted batch job 3415062\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D;0",,terminal_output +687,1320349,"TERMINAL",0,0,"queue",,terminal_command +688,1320401,"TERMINAL",0,0,"]633;E;2025-08-11 16:12:36 queue;361f455a-15ad-4916-88b6-b9e49434d7af]633;C",,terminal_output +689,1320465,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1993.localdomain: Mon Aug 11 16:12:36 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3415062 accelerat train_dy tum_cte0 R\t0:02\t 1 hkn07193414046 accelerat train_to tum_cte0 R 4:21:06\t 1 hkn07283412401 accelerat train_to tum_cte0 R 1-00:30:46\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]3414284 cpuonly wrap tum_cte0 R 2:32:44\t 1 hkn03103414283 cpuonly wrap tum_cte0 R 2:32:56\t 1 hkn03093414282 cpuonly wrap tum_cte0 R 2:33:06\t 1 hkn16653414281 cpuonly wrap tum_cte0 R 2:33:14\t 1 hkn16623414280 cpuonly wrap tum_cte0 R 2:33:23\t 1 hkn02383413833 cpuonly wrap tum_cte0 R 5:50:45\t 1 hkn0364",,terminal_output +690,1321515,"TERMINAL",0,0,"7377577546",,terminal_output +691,1322642,"TERMINAL",0,0,"8488688657",,terminal_output +692,1323679,"TERMINAL",0,0,"9599799768",,terminal_output +693,1324645,"TERMINAL",0,0,"406105083:0010879",,terminal_output +694,1325816,"TERMINAL",0,0,"17119119850",,terminal_output +695,1326743,"TERMINAL",0,0,"282250222091",,terminal_output +696,1327798,"TERMINAL",0,0,"39331331302",,terminal_output +697,1328838,"TERMINAL",0,0,"41044244213",,terminal_output +698,1329911,"TERMINAL",0,0,"5155355324",,terminal_output +699,1331039,"TERMINAL",0,0,"6266466435",,terminal_output +700,1332063,"TERMINAL",0,0,"7377577546",,terminal_output +701,1333086,"TERMINAL",0,0,"8488688657",,terminal_output +702,1334109,"TERMINAL",0,0,"9599799768",,terminal_output +703,1335236,"TERMINAL",0,0,"506201:0081020879",,terminal_output +704,1336261,"TERMINAL",0,0,"1711911981:00",,terminal_output +705,1337284,"TERMINAL",0,0,"29333:013331402",,terminal_output +706,1338308,"TERMINAL",0,0,"42044244213",,terminal_output +707,1339436,"TERMINAL",0,0,"5155355324",,terminal_output +708,1340402,"TERMINAL",0,0,"6266466435",,terminal_output +709,1341483,"TERMINAL",0,0,"7377577546",,terminal_output +710,1342508,"TERMINAL",0,0,"8488688657",,terminal_output +711,1343633,"TERMINAL",0,0,"9599799768",,terminal_output 
+712,1344657,"TERMINAL",0,0,"3:006301082030879",,terminal_output +713,1345682,"TERMINAL",0,0,"17119119810",,terminal_output +714,1346708,"TERMINAL",0,0,"282210224091",,terminal_output +715,1347745,"TERMINAL",0,0,"39331331502",,terminal_output +716,1348792,"TERMINAL",0,0,"43044244213",,terminal_output +717,1349882,"TERMINAL",0,0,"5155355324",,terminal_output +718,1350905,"TERMINAL",0,0,"6266466435",,terminal_output +719,1352029,"TERMINAL",0,0,"7377577546",,terminal_output +720,1353056,"TERMINAL",0,0,"8488688657",,terminal_output +721,1354079,"TERMINAL",0,0,"9599799768",,terminal_output +722,1355106,"TERMINAL",0,0,"106402083040879",,terminal_output +723,1356228,"TERMINAL",0,0,"17119119820",,terminal_output +724,1357191,"TERMINAL",0,0,"282220225091",,terminal_output +725,1358276,"TERMINAL",0,0,"3404424424:013",,terminal_output +726,1359301,"TERMINAL",0,0,"5155355324",,terminal_output +727,1360384,"TERMINAL",0,0,"6266466435",,terminal_output +728,1361494,"TERMINAL",0,0,"7377577546",,terminal_output +729,1362475,"TERMINAL",0,0,"8488688657",,terminal_output +730,1363493,"TERMINAL",0,0,"9599799768",,terminal_output +731,1364507,"TERMINAL",0,0,"206503084050879",,terminal_output +732,1365550,"TERMINAL",0,0,"17119119830",,terminal_output +733,1366601,"TERMINAL",0,0,"282230224:0091",,terminal_output +734,1367650,"TERMINAL",0,0,"39331331102",,terminal_output +735,1368720,"TERMINAL",0,0,"45044244213",,terminal_output +736,1369764,"TERMINAL",0,0,"5155355324",,terminal_output +737,1370810,"TERMINAL",0,0,"6266466435",,terminal_output +738,1371857,"TERMINAL",0,0,"7377577546",,terminal_output +739,1372920,"TERMINAL",0,0,"8488688657",,terminal_output +740,1373955,"TERMINAL",0,0,"9599799768",,terminal_output +741,1375072,"TERMINAL",0,0,"3062:00408504:00879",,terminal_output +742,1376094,"TERMINAL",0,0,"17119119840",,terminal_output +743,1377118,"TERMINAL",0,0,"282240221091",,terminal_output +744,1378246,"TERMINAL",0,0,"39331331202",,terminal_output +745,1379269,"TERMINAL",0,0,"41:0044244213",,terminal_output +746,1380293,"TERMINAL",0,0,"5266466435",,terminal_output +747,1381323,"TERMINAL",0,0,"7377577546",,terminal_output +748,1382392,"TERMINAL",0,0,"8488688657",,terminal_output +749,1383391,"TERMINAL",0,0,"9599799768",,terminal_output +750,1384494,"TERMINAL",0,0,"406105084:0010879",,terminal_output +751,1385500,"TERMINAL",0,0,"17119119850",,terminal_output +752,1386642,"TERMINAL",0,0,"282250222091",,terminal_output +753,1387588,"TERMINAL",0,0,"39331331302",,terminal_output +754,1388691,"TERMINAL",0,0,"41044244213",,terminal_output +755,1389715,"TERMINAL",0,0,"5155355324",,terminal_output +756,1390738,"TERMINAL",0,0,"6266466435",,terminal_output +757,1391801,"TERMINAL",0,0,"7377577546",,terminal_output +758,1392822,"TERMINAL",0,0,"8488688657",,terminal_output +759,1393899,"TERMINAL",0,0,"9599799768",,terminal_output +760,1394922,"TERMINAL",0,0,"506202:0081020879",,terminal_output +761,1396063,"TERMINAL",0,0,"1711911982:00",,terminal_output +762,1397025,"TERMINAL",0,0,"28224:00223091",,terminal_output +763,1398075,"TERMINAL",0,0,"39331331402",,terminal_output +764,1399135,"TERMINAL",0,0,"42044244213",,terminal_output +765,1400261,"TERMINAL",0,0,"5155355324",,terminal_output +766,1401251,"TERMINAL",0,0,"6266466435",,terminal_output +767,1402309,"TERMINAL",0,0,"7488688657",,terminal_output +768,1403331,"TERMINAL",0,0,"9599799768",,terminal_output +769,1404460,"TERMINAL",0,0,"4:006301082030879",,terminal_output +770,1405408,"TERMINAL",0,0,"17119119810",,terminal_output 
+771,1406457,"TERMINAL",0,0,"282210224091",,terminal_output +772,1407532,"TERMINAL",0,0,"39331331502",,terminal_output +773,1408659,"TERMINAL",0,0,"43044244213",,terminal_output +774,1409605,"TERMINAL",0,0,"5155355324",,terminal_output +775,1410705,"TERMINAL",0,0,"6266466435",,terminal_output +776,1411732,"TERMINAL",0,0,"7377577546",,terminal_output +777,1412759,"TERMINAL",0,0,"8488688657",,terminal_output +778,1413802,"TERMINAL",0,0,"9599799768",,terminal_output +779,1414856,"TERMINAL",0,0,"106402083040879",,terminal_output +780,1415929,"TERMINAL",0,0,"17119119820",,terminal_output +781,1417057,"TERMINAL",0,0,"282220225091",,terminal_output +782,1417998,"TERMINAL",0,0,"393313315:002",,terminal_output +783,1419051,"TERMINAL",0,0,"44044244213",,terminal_output +784,1420097,"TERMINAL",0,0,"5155355324",,terminal_output +785,1421150,"TERMINAL",0,0,"6266466435",,terminal_output +786,1422275,"TERMINAL",0,0,"7377577546",,terminal_output +787,1423247,"TERMINAL",0,0,"8599799768",,terminal_output +788,1424328,"TERMINAL",0,0,"206503084050879",,terminal_output +789,1425370,"TERMINAL",0,0,"17119119830",,terminal_output +790,1426476,"TERMINAL",0,0,"282230225:0091",,terminal_output +791,1427500,"TERMINAL",0,0,"39331331102",,terminal_output +792,1428627,"TERMINAL",0,0,"45044244213",,terminal_output +793,1429660,"TERMINAL",0,0,"5155355324",,terminal_output +794,1430673,"TERMINAL",0,0,"6266466435",,terminal_output +795,1431697,"TERMINAL",0,0,"7377577546",,terminal_output +796,1432725,"TERMINAL",0,0,"8488688657",,terminal_output +797,1433778,"TERMINAL",0,0,"9599799768",,terminal_output +798,1434834,"TERMINAL",0,0,"3063:00408505:00879",,terminal_output +799,1435876,"TERMINAL",0,0,"17119119840",,terminal_output +800,1436927,"TERMINAL",0,0,"282240221091",,terminal_output +801,1437973,"TERMINAL",0,0,"39331331202",,terminal_output +802,1439071,"TERMINAL",0,0,"42:0044244213",,terminal_output +803,1440071,"TERMINAL",0,0,"5155355324",,terminal_output +804,1441220,"TERMINAL",0,0,"6266466435",,terminal_output +805,1442246,"TERMINAL",0,0,"7377577546",,terminal_output +806,1443256,"TERMINAL",0,0,"8599799768",,terminal_output +807,1444265,"TERMINAL",0,0,"406105085:0010879",,terminal_output +808,1445320,"TERMINAL",0,0,"17119119850",,terminal_output +809,1446446,"TERMINAL",0,0,"282250222091",,terminal_output +810,1447009,"models/tokenizer.py",0,0,"",python,tab +811,1447415,"TERMINAL",0,0,"39331331302",,terminal_output +812,1448491,"TERMINAL",0,0,"41044244213",,terminal_output +813,1449616,"TERMINAL",0,0,"5155355324",,terminal_output +814,1450687,"TERMINAL",0,0,"6CG66466435",,terminal_output +815,1451666,"TERMINAL",0,0,"777577546",,terminal_output +816,1452705,"train_dynamics.py",0,0,"",python,tab +817,1452797,"TERMINAL",0,0,"888688657",,terminal_output +818,1453715,"TERMINAL",0,0,"999799768",,terminal_output +819,1454773,"TERMINAL",0,0,"50203:0081020879",,terminal_output +820,1455804,"TERMINAL",0,0,"111911983:00",,terminal_output +821,1456856,"TERMINAL",0,0,"2225:00223091",,terminal_output +822,1457490,"train_dynamics.py",1446,0,"",python,selection_mouse +823,1457674,"train_dynamics.py",1433,20,"tokenizer_checkpoint",python,selection_mouse +824,1457913,"TERMINAL",0,0,"3331331402",,terminal_output +825,1458952,"TERMINAL",0,0,"444244213",,terminal_output +826,1460063,"TERMINAL",0,0,"555355324",,terminal_output +827,1461052,"TERMINAL",0,0,"\r64046to R 4:23:262824011-00:33:06\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]4284 cpuonly wrap 2:35:04\t 1 hkn0310316092261665134204023838335:53:05364",,terminal_output 
+828,1462147,"TERMINAL",0,0,"777577546",,terminal_output +829,1463239,"TERMINAL",0,0,"888688657",,terminal_output +830,1464209,"TERMINAL",0,0,"999799768",,terminal_output +831,1465251,"TERMINAL",0,0,"5:003111921319810",,terminal_output +832,1466348,"TERMINAL",0,0,"22210224091",,terminal_output +833,1467499,"TERMINAL",0,0,"3331331502",,terminal_output +834,1468572,"TERMINAL",0,0,"444244213",,terminal_output +835,1469542,"TERMINAL",0,0,"555355324",,terminal_output +836,1470494,"TERMINAL",0,0,"666466435",,terminal_output +837,1471544,"TERMINAL",0,0,"777577546",,terminal_output +838,1472592,"TERMINAL",0,0,"888688657",,terminal_output +839,1472685,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",0,0,"",shellscript,tab +840,1473641,"TERMINAL",0,0,"999799768",,terminal_output +841,1474721,"TERMINAL",0,0,"10402083040879",,terminal_output +842,1475737,"TERMINAL",0,0,"1119119820",,terminal_output +843,1476802,"TERMINAL",0,0,"22220225091",,terminal_output +844,1477880,"TERMINAL",0,0,"33313316:002",,terminal_output +845,1478884,"TERMINAL",0,0,"444244213",,terminal_output +846,1480030,"TERMINAL",0,0,"555355324",,terminal_output +847,1480224,"TERMINAL",0,0,"bash",,terminal_focus +848,1481053,"TERMINAL",0,0,"666466435",,terminal_output +849,1482080,"TERMINAL",0,0,"777577546",,terminal_output +850,1483105,"TERMINAL",0,0,"888688657",,terminal_output +851,1484186,"TERMINAL",0,0,"999799768",,terminal_output +852,1484279,"TERMINAL",0,0,"cd ..",,terminal_command +853,1484513,"TERMINAL",0,0,"ls",,terminal_command +854,1484521,"TERMINAL",0,0,"]633;E;2025-08-11 16:15:20 ls;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;Cdynamics-cotraining\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal]633;D;0",,terminal_output +855,1485215,"TERMINAL",0,0,"20503084050879",,terminal_output +856,1485744,"TERMINAL",0,0,"cd ..",,terminal_command +857,1485756,"TERMINAL",0,0,"]633;E;2025-08-11 16:15:21 cd ..;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir]633;D;0",,terminal_output +858,1486224,"TERMINAL",0,0,"12230226:00931",,terminal_output +859,1486598,"TERMINAL",0,0,"ls",,terminal_command +860,1486648,"TERMINAL",0,0,"]633;E;2025-08-11 16:15:22 ls;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;C",,terminal_output +861,1487091,"TERMINAL",0,0,"big_run train_lam_action_space_scaling_10_3329786.log train_tokenizer_batch_size_scaling_1_node_3318551.log\r\nbig-runs train_lam_action_space_scaling_10_3329801.log train_tokenizer_batch_size_scaling_2_node_3318552.log\r\ncausal train_lam_action_space_scaling_10_3331283.log train_tokenizer_batch_size_scaling_2_node_3330806.log\r\ncoinrun train_lam_action_space_scaling_12_3318546.log train_tokenizer_batch_size_scaling_2_node_3330848.log\r\nmaskgit train_lam_action_space_scaling_12_3320177.log train_tokenizer_batch_size_scaling_2_node_3331282.log\r\nmaskgit-maskprob-fix train_lam_action_space_scaling_12_3321527.log train_tokenizer_batch_size_scaling_4_node_3318553.log\r\ntrain_dyn_causal_180M_3372931.log train_lam_action_space_scaling_12_3329787.log train_tokenizer_batch_size_scaling_4_node_3320175.log\r\ntrain_dyn_causal_180M_3372963.log train_lam_action_space_scaling_12_3329802.log train_tokenizer_batch_size_scaling_4_node_3321524.log\r\ntrain_dyn_causal_180M_3372969.log train_lam_action_space_scaling_12_3331284.log train_tokenizer_batch_size_scaling_8_node_3320176.log\r\ntrain_dyn_causal_180M_3373107.log 
train_lam_action_space_scaling_20_3318547.log train_tokenizer_batch_size_scaling_8_node_3321525.log\r\ntrain_dyn_causal_255M_3372932.log train_lam_action_space_scaling_20_3329788.log train_tokenizer_minecraft_overfit_sample_3309656.log\r\ntrain_dyn_causal_255M_3372970.log train_lam_action_space_scaling_20_3329803.log train_tokenizer_model_size_scaling_127M_3317233.log\r\ntrain_dyn_causal_255M_3373108.log train_lam_action_space_scaling_20_3331285.log train_tokenizer_model_size_scaling_127M_3318554.log\r\ntrain_dyn_causal_356M_3372934.log train_lam_action_space_scaling_50_3320180.log train_tokenizer_model_size_scaling_140M_3313562.log\r\ntrain_dyn_causal_356M_3372971.log train_lam_action_space_scaling_50_3329789.log train_tokenizer_model_size_scaling_140M_3316019.log\r\ntrain_dyn_causal_356M_3373109.log train_lam_action_space_scaling_50_3329804.log train_tokenizer_model_size_scaling_200M_3313563.log\r\ntrain_dyn_causal_500M_3372936.log train_lam_action_space_scaling_50_3331286.log train_tokenizer_model_size_scaling_200M_3316020.log\r\ntrain_dyn_causal_500M_3372972.log train_lam_action_space_scaling_6_3318549.log train_tokenizer_model_size_scaling_227M_3317234.log\r\ntrain_dyn_causal_500M_3373110.log train_lam_action_space_scaling_6_3320178.log train_tokenizer_model_size_scaling_227M_3318555.log\r\ntrain_dyn_new_arch-bugfixed-spatial-shift_3359343.log train_lam_action_space_scaling_6_3321528.log train_tokenizer_model_size_scaling_227M_3320173.log\r\ntrain_dyn_new_arch-bugfixed-temporal-shift_3359349.log train_lam_action_space_scaling_6_3329790.log train_tokenizer_model_size_scaling_227M_3321523.log\r\ntrain_dyn_yolorun_3333026.log train_lam_action_space_scaling_6_3329805.log train_tokenizer_model_size_scaling_37M_3313565.log\r\ntrain_dyn_yolorun_3333448.log train_lam_action_space_scaling_6_3331287.log train_tokenizer_model_size_scaling_37M_3316022.log\r\ntrain_dyn_yolorun_3335345.log train_lam_action_space_scaling_8_3318550.log train_tokenizer_model_size_scaling_37M_3317232.log\r\ntrain_dyn_yolorun_3335362.log train_lam_action_space_scaling_8_3329791.log train_tokenizer_model_size_scaling_37M_3317239.log\r\ntrain_dyn_yolorun_3348592.log train_lam_action_space_scaling_8_3329806.log train_tokenizer_model_size_scaling_37M_3318556.log\r\ntrain_dyn_yolorun_new_arch_3351743.log train_lam_action_space_scaling_8_3331288.log train_tokenizer_model_size_scaling_74M_3318557.log\r\ntrain_dyn_yolorun_new_arch_3352103.log train_lam_minecraft_overfit_sample_3309655.log train_tokenizer_model_size_scaling_74M_3320174.log\r\ntrain_dyn_yolorun_new_arch_3352115.log train_lam_model_size_scaling_38M_3317098.log train_tokenizer_model_size_scaling_74M_3321522.log\r\ntrain_dyn_yolorun_new_arch_3358457.log train_lam_model_size_scaling_38M_3317115.log train_tokenizer_model_size_scaling_80M_3313564.log\r\ntrain_lam_action_space_scaling_10_3320179.log train_lam_model_size_scaling_38M_3317231.log train_tokenizer_model_size_scaling_80M_3316026.log\r\ntrain_lam_action_space_scaling_10_3321529.log train_tokenizer_batch_size_scaling_16_node_3321526.log yoloruns\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir]633;D;0",,terminal_output +862,1487284,"TERMINAL",0,0,"3331331102",,terminal_output +863,1488327,"TERMINAL",0,0,"444244213",,terminal_output +864,1489361,"TERMINAL",0,0,"555355324",,terminal_output +865,1489620,"TERMINAL",0,0,"cd coinrun/",,terminal_command +866,1490162,"TERMINAL",0,0,"ls",,terminal_command +867,1490177,"TERMINAL",0,0,"]633;E;2025-08-11 16:15:25 
ls;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;Cdynamics tokenizer\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun]633;D;0",,terminal_output +868,1490404,"TERMINAL",0,0,"666466435",,terminal_output +869,1491487,"TERMINAL",0,0,"777577546",,terminal_output +870,1492535,"TERMINAL",0,0,"888688657",,terminal_output +871,1493194,"TERMINAL",0,0,"cd dynamics/",,terminal_command +872,1493481,"TERMINAL",0,0,"ls",,terminal_command +873,1493545,"TERMINAL",0,0,"]633;E;2025-08-11 16:15:29 ls;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;Ccausal\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics]633;D;0",,terminal_output +874,1493554,"TERMINAL",0,0,"999799768",,terminal_output +875,1494638,"TERMINAL",0,0,"304:00408506:00879",,terminal_output +876,1495306,"TERMINAL",0,0,"ls",,terminal_command +877,1495700,"TERMINAL",0,0,"1119119840",,terminal_output +878,1496708,"TERMINAL",0,0,"22240221091",,terminal_output +879,1497579,"TERMINAL",0,0,"cd causal/",,terminal_command +880,1497591,"TERMINAL",0,0,"]633;E;2025-08-11 16:15:33 cd causal/;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal]633;D;0",,terminal_output +881,1497752,"TERMINAL",0,0,"3331331202",,terminal_output +882,1498804,"TERMINAL",0,0,"444244213",,terminal_output +883,1499661,"TERMINAL",0,0,"ls",,terminal_command +884,1499853,"TERMINAL",0,0,"555355324",,terminal_output +885,1500878,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_1_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n --restore_ckpt \\n 
--wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=48 \\n --init_lr=0 \\n --max_lr=1e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=coinrun-dynamics-causal-1-node-$slurm_job_id \\n --tags coinrun dynamics causal 1-node \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n/var/spool/slurmd/job3415062/slurm_script: line 42: .venv/bin/activate: No such file or directory\nSLURM_JOB_USER=tum_cte0515\nSLURM_TASKS_PER_NODE=4\nSLURM_JOB_UID=999226\nSLURM_TASK_PID=433511\nSLURM_JOB_GPUS=0,1,2,3\nSLURM_LOCALID=0\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs\nSLURMD_NODENAME=hkn0719\nSLURM_JOB_START_TIME=1754921554\nSLURM_CLUSTER_NAME=hk\nSLURM_JOB_END_TIME=1755094354\nSLURM_CPUS_ON_NODE=24\nSLURM_JOB_CPUS_PER_NODE=24\nSLURM_GPUS_ON_NODE=4\nSLURM_GTIDS=0\nSLURM_JOB_PARTITION=accelerated\nSLURM_TRES_PER_TASK=cpu=5\nSLURM_OOM_KILL_STEP=0\nSLURM_JOB_NUM_NODES=1\nSLURM_JOBID=3415062\nSLURM_JOB_QOS=normal\nSLURM_PROCID=0\nSLURM_CPUS_PER_TASK=5\nSLURM_NTASKS=4\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0719\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\nSLURM_SCRIPT_CONTEXT=prolog_task\nSLURM_NODELIST=hkn0719\nSLURM_JOB_ACCOUNT=hk-project-p0023960\nSLURM_PRIO_PROCESS=0\nSLURM_NPROCS=4\nSLURM_NNODES=1\nSLURM_SUBMIT_HOST=hkn1993.localdomain\nSLURM_JOB_ID=3415062\nSLURM_NODEID=0\nSLURM_CONF=/etc/slurm/slurm.conf\nSLURM_JOB_NAME=train_dynamics_causal_1_node\nSLURM_NTASKS_PER_NODE=4\nSLURM_JOB_GID=502226\nSLURM_JOB_NODELIST=hkn0719\nGpuFreq=control_disabled\nwandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. 
Use `wandb login --relogin` to force relogin\nwandb: Tracking run with wandb version 0.19.11\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/wandb/run-20250811_161335-3415062\nwandb: Run `wandb offline` to turn off syncing.\nwandb: Syncing run coinrun-dynamics-causal-1-node-3415062\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/3415062\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 4).\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 4).\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 4).\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 4).\nRunning on 4 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 26555904, 'lam': 35115232, 'tokenizer': 33750256, 'total': 95421392}\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 333, in \n restored = checkpoint_manager.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1563, in restore\n raise FileNotFoundError(f'No steps found in {self.directory}.')\nFileNotFoundError: No steps found in /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/causal/train_dynamics_causal_1_node/3415062.\nRunning on 4 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 26555904, 'lam': 35115232, 'tokenizer': 33750256, 'total': 95421392}\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 333, in \n restored = checkpoint_manager.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1563, in restore\n raise FileNotFoundError(f'No steps found in {self.directory}.')\nFileNotFoundError: No steps found in /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/causal/train_dynamics_causal_1_node/3415062.\nRunning on 4 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 26555904, 'lam': 35115232, 'tokenizer': 33750256, 'total': 95421392}\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 333, in \n restored = checkpoint_manager.restore(\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1563, in restore\n raise FileNotFoundError(f'No steps found in {self.directory}.')\nFileNotFoundError: No steps found in /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/causal/train_dynamics_causal_1_node/3415062.\nRunning on 4 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 26555904, 'lam': 35115232, 'tokenizer': 33750256, 'total': 95421392}\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/train_dynamics.py"", line 333, in \n restored = checkpoint_manager.restore(\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1563, in restore\n raise FileNotFoundError(f'No steps found in {self.directory}.')\nFileNotFoundError: No steps found in /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/causal/train_dynamics_causal_1_node/3415062.\nwandb: \nwandb: 🚀 View run coinrun-dynamics-causal-1-node-3415062 at: https://wandb.ai/instant-uv/jafar/runs/3415062\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/wandb/run-20250811_161335-3415062/logs\nsrun: error: hkn0719: tasks 1-2: Exited with exit code 1\nsrun: error: hkn0719: task 3: Exited with exit code 1\nsrun: error: hkn0719: task 0: Exited with exit code 1\n\n============================= JOB FEEDBACK =============================\n\nJob ID: 3415062\nCluster: hk\nUser/Group: tum_cte0515/hk-project-p0023960\nAccount: hk-project-p0023960\nState: FAILED (exit code 1)\nPartition: accelerated\nNodes: 1\nCores per node: 24\nNodelist: hkn0719\nCPU Utilized: 00:02:52\nCPU Efficiency: 5.47% of 00:52:24 core-walltime\nJob Wall-clock time: 00:02:11\nStarttime: Mon Aug 11 16:12:34 2025\nEndtime: Mon Aug 11 16:14:45 2025\nMemory Utilized: 5.66 GB (estimated maximum)\nMemory Efficiency: 0.00% of 0.00 MB (0.00 MB/node)\nEnergy Consumed: 71078 Joule / 19.7438888888889 Watthours\nAverage node power draw: 542.580152671756 Watt\n",log,tab +886,1501013,"TERMINAL",0,0,"666466435",,terminal_output +887,1501332,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",653,0,"",log,selection_mouse +888,1501924,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",8519,0,"",log,selection_command +889,1501984,"TERMINAL",0,0,"777577546",,terminal_output +890,1503007,"TERMINAL",0,0,"888688657",,terminal_output +891,1504058,"TERMINAL",0,0,"999799768",,terminal_output +892,1505117,"TERMINAL",0,0,"40105086:0010879",,terminal_output +893,1506080,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7277,0,"",log,selection_mouse +894,1506159,"TERMINAL",0,0,"1119119850",,terminal_output +895,1506228,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7275,9,"workspace",log,selection_mouse +896,1507208,"TERMINAL",0,0,"22250222091",,terminal_output +897,1507338,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7403,0,"workspace",log,content +898,1507338,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7275,9,"",log,content +899,1507527,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7403,0,"",log,selection_mouse +900,1507527,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7402,0,"",log,selection_command +901,1508142,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7403,0,"",log,selection_mouse 
+902,1508142,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7402,0,"",log,selection_command +903,1508323,"TERMINAL",0,0,"3442442313",,terminal_output +904,1509368,"TERMINAL",0,0,"555355324",,terminal_output +905,1509855,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7394,9,"",log,content +906,1509855,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7275,0,"workspace",log,content +907,1510416,"TERMINAL",0,0,"666466435",,terminal_output +908,1511271,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7351,0,"",log,selection_mouse +909,1511424,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7347,28,"train_dynamics_causal_1_node",log,selection_mouse +910,1511477,"TERMINAL",0,0,"777577546",,terminal_output +911,1511561,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7227,158,"FileNotFoundError: No steps found in /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/causal/train_dynamics_causal_1_node/3415062.\n",log,selection_mouse +912,1512121,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7355,0,"",log,selection_mouse +913,1512233,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7347,28,"train_dynamics_causal_1_node",log,selection_mouse +914,1512596,"TERMINAL",0,0,"888688657",,terminal_output +915,1512902,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7365,0,"",log,selection_mouse +916,1513563,"TERMINAL",0,0,"999799768",,terminal_output +917,1513948,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7380,0,"",log,selection_mouse +918,1514139,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7376,7,"3415062",log,selection_mouse +919,1514641,"TERMINAL",0,0,"50204:0081020879",,terminal_output +920,1515519,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7264,0,"",log,selection_mouse +921,1515666,"TERMINAL",0,0,"111911984:00",,terminal_output +922,1515683,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7264,1,"/",log,selection_mouse +923,1515697,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7264,9,"/hkfs/wor",log,selection_mouse +924,1515750,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7264,139,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/causal/train_dynamics_causal_1_node/3415062.\nwandb: 
",log,selection_mouse +925,1515751,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7264,66,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoint",log,selection_mouse +926,1515751,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7226,38,"\nFileNotFoundError: No steps found in ",log,selection_mouse +927,1515778,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7129,135,"er.py"", line 1563, in restore\n raise FileNotFoundError(f'No steps found in {self.directory}.')\nFileNotFoundError: No steps found in ",log,selection_mouse +928,1515779,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7134,130,""", line 1563, in restore\n raise FileNotFoundError(f'No steps found in {self.directory}.')\nFileNotFoundError: No steps found in ",log,selection_mouse +929,1515834,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7139,125,"ne 1563, in restore\n raise FileNotFoundError(f'No steps found in {self.directory}.')\nFileNotFoundError: No steps found in ",log,selection_mouse +930,1515834,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7144,120,"63, in restore\n raise FileNotFoundError(f'No steps found in {self.directory}.')\nFileNotFoundError: No steps found in ",log,selection_mouse +931,1515835,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7146,118,", in restore\n raise FileNotFoundError(f'No steps found in {self.directory}.')\nFileNotFoundError: No steps found in ",log,selection_mouse +932,1515842,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7147,117," in restore\n raise FileNotFoundError(f'No steps found in {self.directory}.')\nFileNotFoundError: No steps found in ",log,selection_mouse +933,1515859,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7148,116,"in restore\n raise FileNotFoundError(f'No steps found in {self.directory}.')\nFileNotFoundError: No steps found in ",log,selection_mouse +934,1515975,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7149,115,"n restore\n raise FileNotFoundError(f'No steps found in {self.directory}.')\nFileNotFoundError: No steps found in ",log,selection_mouse +935,1515993,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7150,114," restore\n raise FileNotFoundError(f'No steps found in {self.directory}.')\nFileNotFoundError: No steps found in ",log,selection_mouse +936,1516010,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7151,113,"restore\n raise FileNotFoundError(f'No steps found in {self.directory}.')\nFileNotFoundError: No steps found in ",log,selection_mouse 
+937,1516068,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7226,38,"\nFileNotFoundError: No steps found in ",log,selection_mouse +938,1516120,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7264,120,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/causal/train_dynamics_causal_1_node/3415062.",log,selection_mouse +939,1516487,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415062.log",7264,119,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/causal/train_dynamics_causal_1_node/3415062",log,selection_mouse +940,1516795,"TERMINAL",0,0,"2226:00223091",,terminal_output +941,1517764,"TERMINAL",0,0,"3331331402",,terminal_output +942,1518815,"TERMINAL",0,0,"444244213",,terminal_output +943,1519398,"TERMINAL",0,0,"bash",,terminal_focus +944,1519867,"TERMINAL",0,0,"555355324",,terminal_output +945,1520921,"TERMINAL",0,0,"666466435",,terminal_output +946,1520947,"TERMINAL",0,0,"ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/causal/train_dynamics_causal_1_node/3415062",,terminal_command +947,1520967,"TERMINAL",0,0,"]633;E;2025-08-11 16:15:56 ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/causal/train_dynamics_causal_1_node/3415062;8cf2329e-a785-4b62-8fbf-a36eaf9297be]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes]633;D;0",,terminal_output +948,1521965,"TERMINAL",0,0,"777577546",,terminal_output +949,1523017,"TERMINAL",0,0,"888688657",,terminal_output +950,1524169,"TERMINAL",0,0,"999799768",,terminal_output +951,1525187,"TERMINAL",0,0,"6:00301082030879",,terminal_output +952,1526164,"TERMINAL",0,0,"1119119810",,terminal_output +953,1526865,"slurm/jobs/mihir/horeka/coinrun/train_tokenizer_lr_1e-4.sbatch",0,0,"",shellscript,tab +954,1527209,"TERMINAL",0,0,"22210224091",,terminal_output +955,1528263,"TERMINAL",0,0,"3442442513",,terminal_output +956,1529389,"TERMINAL",0,0,"555355324",,terminal_output +957,1530390,"TERMINAL",0,0,"666466435",,terminal_output +958,1531447,"TERMINAL",0,0,"777577546",,terminal_output +959,1532462,"TERMINAL",0,0,"888688657",,terminal_output +960,1533495,"TERMINAL",0,0,"999799768",,terminal_output +961,1534609,"TERMINAL",0,0,"10402083040879",,terminal_output +962,1535601,"TERMINAL",0,0,"1119119820",,terminal_output +963,1536759,"TERMINAL",0,0,"22220225091",,terminal_output +964,1537700,"TERMINAL",0,0,"33313317:002",,terminal_output +965,1538810,"TERMINAL",0,0,"444244213",,terminal_output +966,1539809,"TERMINAL",0,0,"555355324",,terminal_output +967,1540854,"TERMINAL",0,0,"666466435",,terminal_output +968,1541905,"TERMINAL",0,0,"777577546",,terminal_output +969,1542955,"TERMINAL",0,0,"888688657",,terminal_output +970,1544033,"TERMINAL",0,0,"999799768",,terminal_output +971,1545159,"TERMINAL",0,0,"20503084050879",,terminal_output +972,1546097,"TERMINAL",0,0,"1119119830",,terminal_output +973,1547206,"TERMINAL",0,0,"22230227:0091",,terminal_output +974,1548230,"TERMINAL",0,0,"3331331102",,terminal_output +975,1549254,"TERMINAL",0,0,"455355324",,terminal_output +976,1550382,"TERMINAL",0,0,"666466435",,terminal_output +977,1551403,"TERMINAL",0,0,"777577546",,terminal_output +978,1552409,"TERMINAL",0,0,"888688657",,terminal_output 
+979,1553556,"TERMINAL",0,0,"999799768",,terminal_output +980,1554580,"TERMINAL",0,0,"305:00408507:00879",,terminal_output +981,1555571,"TERMINAL",0,0,"1119119840",,terminal_output +982,1556627,"TERMINAL",0,0,"22240221091",,terminal_output +983,1557672,"TERMINAL",0,0,"3331331202",,terminal_output +984,1558776,"TERMINAL",0,0,"444244213",,terminal_output +985,1559802,"TERMINAL",0,0,"555355324",,terminal_output +986,1560930,"TERMINAL",0,0,"666466435",,terminal_output +987,1561874,"TERMINAL",0,0,"777577546",,terminal_output +988,1562925,"TERMINAL",0,0,"888688657",,terminal_output +989,1563979,"TERMINAL",0,0,"999799768",,terminal_output +990,1565061,"TERMINAL",0,0,"40105087:0010879",,terminal_output +991,1566072,"TERMINAL",0,0,"1119119850",,terminal_output +992,1566325,"TERMINAL",0,0,"cd ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046""",,terminal_command +993,1566336,"TERMINAL",0,0,"]633;E;2025-08-11 16:16:42 cd ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046"";8cf2329e-a785-4b62-8fbf-a36eaf9297be]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046]633;D;0",,terminal_output +994,1566809,"TERMINAL",0,0,"ls",,terminal_command +995,1566860,"TERMINAL",0,0,"]633;E;2025-08-11 16:16:42 ls;8cf2329e-a785-4b62-8fbf-a36eaf9297be]633;C",,terminal_output +996,1566955,"TERMINAL",0,0,"020000 040000 054000 055000 056000\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046]633;D;0",,terminal_output +997,1567122,"TERMINAL",0,0,"22250222091",,terminal_output +998,1568199,"TERMINAL",0,0,"3331331302",,terminal_output +999,1569234,"TERMINAL",0,0,"455355324",,terminal_output +1000,1570390,"TERMINAL",0,0,"666466435",,terminal_output +1001,1571375,"TERMINAL",0,0,"777577546",,terminal_output +1002,1572396,"TERMINAL",0,0,"888688657",,terminal_output +1003,1573431,"TERMINAL",0,0,"999799768",,terminal_output +1004,1574487,"TERMINAL",0,0,"50205:0081020879",,terminal_output +1005,1575537,"TERMINAL",0,0,"111911985:00",,terminal_output +1006,1576594,"TERMINAL",0,0,"2227:00223091",,terminal_output +1007,1577615,"TERMINAL",0,0,"3331331402",,terminal_output +1008,1578668,"TERMINAL",0,0,"444244213",,terminal_output +1009,1579781,"TERMINAL",0,0,"555355324",,terminal_output +1010,1580767,"TERMINAL",0,0,"666466435",,terminal_output +1011,1581244,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1012,1581816,"TERMINAL",0,0,"777577546",,terminal_output +1013,1582207,"slurm/jobs/mihir/horeka/coinrun/train_tokenizer_lr_1e-4.sbatch",0,0,"",shellscript,tab +1014,1582873,"TERMINAL",0,0,"888688657",,terminal_output +1015,1583919,"TERMINAL",0,0,"999799768",,terminal_output +1016,1584991,"TERMINAL",0,0,"7:00301082030879",,terminal_output +1017,1585553,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",0,0,"",shellscript,tab +1018,1586013,"TERMINAL",0,0,"1119119810",,terminal_output +1019,1587048,"TERMINAL",0,0,"22210224091",,terminal_output +1020,1587304,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1708,0,"",shellscript,selection_mouse +1021,1587453,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1707,1,"6",shellscript,selection_mouse 
+1022,1587469,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1697,11,"e-4/3414046",shellscript,selection_mouse +1023,1587487,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1688,20,"kenizer_1e-4/3414046",shellscript,selection_mouse +1024,1587541,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1708,1,"\n",shellscript,selection_mouse +1025,1587541,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1708,18,"\n\nenv | grep SLURM",shellscript,selection_mouse +1026,1587618,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1708,1,"\n",shellscript,selection_mouse +1027,1587852,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1642,66,"shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1028,1587870,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1643,65,"hared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1029,1587885,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1645,63,"red/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1030,1587937,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1649,59,"checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1031,1587938,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1651,57,"eckpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1032,1587938,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1654,54,"points/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1033,1587952,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1658,50,"ts/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1034,1588004,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1660,48,"/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1035,1588005,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1663,45,"g-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1036,1588005,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1665,43,"runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1037,1588019,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1666,42,"uns/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1038,1588037,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1668,40,"s/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1039,1588062,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1670,38,"tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1040,1588072,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1671,37,"okenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1041,1588124,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1673,35,"enizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1042,1588124,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1674,34,"nizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1043,1588125,"TERMINAL",0,0,"3331331502",,terminal_output +1044,1588136,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1675,33,"izer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse 
+1045,1588204,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1674,34,"nizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1046,1588220,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1673,35,"enizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1047,1588281,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1708,1,"\n",shellscript,selection_mouse +1048,1588285,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1622,86,"tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1049,1588302,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1573,135,"\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1050,1588754,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1602,106,"k/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1051,1588754,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1604,104,"workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1052,1588756,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1606,102,"rkspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1053,1588769,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1608,100,"space/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1054,1588829,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1610,98,"ace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1055,1588829,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1611,97,"ce/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1056,1588830,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1573,135,"\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1057,1589057,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1616,92,"ratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1058,1589071,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1614,94,"scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1059,1589096,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1611,97,"ce/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1060,1589107,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1608,100,"space/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1061,1589172,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1605,103,"orkspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse 
+1062,1589172,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1603,105,"/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1063,1589173,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1601,107,"rk/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1064,1589173,"TERMINAL",0,0,"444244213",,terminal_output +1065,1589174,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1600,108,"ork/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1066,1589193,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1598,110,"/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1067,1589257,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1597,111,"s/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1068,1589258,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1596,112,"fs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1069,1589311,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1595,113,"kfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1070,1589370,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1594,114,"hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1071,1589540,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1593,115,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046",shellscript,selection_mouse +1072,1590217,"TERMINAL",0,0,"555355324",,terminal_output +1073,1590468,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1593,115,"",shellscript,content +1074,1590798,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1593,0,"""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046""",shellscript,content +1075,1591342,"TERMINAL",0,0,"677577546",,terminal_output +1076,1592278,"TERMINAL",0,0,"888688657",,terminal_output +1077,1593322,"TERMINAL",0,0,"999799768",,terminal_output +1078,1594416,"TERMINAL",0,0,"10402083040879",,terminal_output +1079,1595439,"TERMINAL",0,0,"1119119820",,terminal_output +1080,1596572,"TERMINAL",0,0,"22220225091",,terminal_output +1081,1597590,"TERMINAL",0,0,"33313318:002",,terminal_output +1082,1598598,"TERMINAL",0,0,"444244213",,terminal_output +1083,1599636,"TERMINAL",0,0,"555355324",,terminal_output +1084,1600763,"TERMINAL",0,0,"666466435",,terminal_output +1085,1601787,"TERMINAL",0,0,"777577546",,terminal_output +1086,1602813,"TERMINAL",0,0,"888688657",,terminal_output +1087,1603836,"TERMINAL",0,0,"999799768",,terminal_output +1088,1604878,"TERMINAL",0,0,"20503084050879",,terminal_output +1089,1605930,"TERMINAL",0,0,"1119119830",,terminal_output +1090,1607009,"TERMINAL",0,0,"22230228:0091",,terminal_output +1091,1608029,"TERMINAL",0,0,"3331331102",,terminal_output 
+1092,1609161,"TERMINAL",0,0,"444244213",,terminal_output +1093,1610185,"TERMINAL",0,0,"555355324",,terminal_output +1094,1611208,"TERMINAL",0,0,"666466435",,terminal_output +1095,1612340,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1572,0,"",shellscript,selection_mouse +1096,1612350,"TERMINAL",0,0,"788688657",,terminal_output +1097,1613288,"TERMINAL",0,0,"999799768",,terminal_output +1098,1614335,"TERMINAL",0,0,"306:00408508:00879",,terminal_output +1099,1615381,"TERMINAL",0,0,"1119119840",,terminal_output +1100,1616532,"TERMINAL",0,0,"22240221091",,terminal_output +1101,1617181,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1102,1617524,"TERMINAL",0,0,"3331331202",,terminal_output +1103,1618537,"TERMINAL",0,0,"444244213",,terminal_output +1104,1619603,"TERMINAL",0,0,"555355324",,terminal_output +1105,1620629,"TERMINAL",0,0,"666466435",,terminal_output +1106,1621753,"TERMINAL",0,0,"777577546",,terminal_output +1107,1622721,"TERMINAL",0,0,"888688657",,terminal_output +1108,1623856,"TERMINAL",0,0,"999799768",,terminal_output +1109,1624827,"TERMINAL",0,0,"40105088:0010879",,terminal_output +1110,1625855,"TERMINAL",0,0,"1119119850",,terminal_output +1111,1626527,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1112,1626908,"TERMINAL",0,0,"22250222091",,terminal_output +1113,1627950,"TERMINAL",0,0,"3331331302",,terminal_output +1114,1629024,"TERMINAL",0,0,"444244213",,terminal_output +1115,1630036,"TERMINAL",0,0,"555355324",,terminal_output +1116,1630239,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1867,0,"",shellscript,selection_mouse +1117,1631177,"TERMINAL",0,0,"666466435",,terminal_output +1118,1632198,"TERMINAL",0,0,"777577546",,terminal_output +1119,1632417,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",0,0,"",shellscript,tab +1120,1632418,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1773,0,"",shellscript,selection_mouse +1121,1633181,"TERMINAL",0,0,"888688657",,terminal_output +1122,1633843,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1772,0,"",shellscript,selection_command +1123,1634233,"TERMINAL",0,0,"9206:0081020879",,terminal_output +1124,1634533,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1779,0,"\n $restore_ckpt_flag \",shellscript,content +1125,1634543,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1784,0,"",shellscript,selection_command +1126,1634847,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1766,0,"",shellscript,selection_command +1127,1635077,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1784,0,"",shellscript,selection_command +1128,1635247,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1809,0,"",shellscript,selection_command +1129,1635307,"TERMINAL",0,0,"5111911986:00",,terminal_output +1130,1635742,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1805,21,"",shellscript,content +1131,1635761,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1809,0,"",shellscript,selection_command +1132,1636449,"TERMINAL",0,0,"2228:00223091",,terminal_output +1133,1637375,"TERMINAL",0,0,"3331331402",,terminal_output +1134,1638292,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1847,0,"",shellscript,selection_mouse +1135,1638414,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1842,8,"ckpt_dir",shellscript,selection_mouse 
+1136,1638480,"TERMINAL",0,0,"444244213",,terminal_output +1137,1639025,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1880,0,"",shellscript,selection_mouse +1138,1639571,"TERMINAL",0,0,"555355324",,terminal_output +1139,1639780,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1901,0,"",shellscript,selection_mouse +1140,1639927,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1897,7,"init_lr",shellscript,selection_mouse +1141,1640539,"TERMINAL",0,0,"666466435",,terminal_output +1142,1640612,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1919,0,"",shellscript,selection_mouse +1143,1640747,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1915,6,"max_lr",shellscript,selection_mouse +1144,1641708,"TERMINAL",0,0,"777577546",,terminal_output +1145,1641958,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1146,1641959,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1995,0,"",shellscript,selection_mouse +1147,1642639,"TERMINAL",0,0,"888688657",,terminal_output +1148,1643771,"TERMINAL",0,0,"999799768",,terminal_output +1149,1644310,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",0,0,"",shellscript,tab +1150,1644311,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1883,0,"",shellscript,selection_mouse +1151,1644732,"TERMINAL",0,0,"8:00301082030879",,terminal_output +1152,1645035,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1890,0,"\n --dyna_type=causal \",shellscript,content +1153,1645038,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1895,0,"",shellscript,selection_command +1154,1645780,"TERMINAL",0,0,"1119119810",,terminal_output +1155,1645810,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1920,0,"",shellscript,selection_command +1156,1646829,"TERMINAL",0,0,"22210224091",,terminal_output +1157,1647292,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1938,0,"",shellscript,selection_command +1158,1647877,"TERMINAL",0,0,"3331331502",,terminal_output +1159,1648159,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1958,0,"",shellscript,selection_command +1160,1648923,"TERMINAL",0,0,"444244213",,terminal_output +1161,1649649,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",1990,0,"",shellscript,selection_command +1162,1649867,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2002,0,"",shellscript,selection_command +1163,1649970,"TERMINAL",0,0,"555355324",,terminal_output +1164,1651017,"TERMINAL",0,0,"666466435",,terminal_output +1165,1651578,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2039,0,"",shellscript,selection_command +1166,1652067,"TERMINAL",0,0,"777577546",,terminal_output +1167,1653035,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2097,0,"",shellscript,selection_command +1168,1653113,"TERMINAL",0,0,"888688657",,terminal_output +1169,1654160,"TERMINAL",0,0,"999799768",,terminal_output +1170,1655241,"TERMINAL",0,0,"10402083040879",,terminal_output +1171,1655294,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2141,0,"",shellscript,selection_command +1172,1656264,"TERMINAL",0,0,"122202250921",,terminal_output +1173,1656819,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2167,0,"",shellscript,selection_command +1174,1657390,"TERMINAL",0,0,"33313319:002",,terminal_output +1175,1658415,"TERMINAL",0,0,"444244213",,terminal_output 
+1176,1658522,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",2189,0,"",shellscript,selection_command
+1177,1659411,"TERMINAL",0,0,"[watch squeue redraw fragments elided]",,terminal_output
+1294,1782219,"TERMINAL",0,0,"[watch squeue redraw fragments elided]",,terminal_output
+1304,1792604,"TERMINAL",0,0,"watch",,terminal_focus
+1305,1792715,"TERMINAL",0,0,"[watch squeue redraw fragments elided]",,terminal_output
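Editor's note: the elided TERMINAL rows above and below are partial one-second screen refreshes of the user's `queue` helper. Judging by the header captured later in this recording ("Every 1.0s: squeue --me"), the helper is presumably equivalent to:

    # refresh this user's Slurm queue once per second
    watch -n 1 squeue --me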
+1354,1844151,"TERMINAL",0,0,"[watch squeue redraw fragments elided; watch exited]",,terminal_output
+1389,1883525,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_1_node.sbatch",0,0,"",shellscript,tab
+1390,1892650,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_1_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule 
unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046""\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=48 \\n --dyna_type=causal \\n --init_lr=0 \\n --max_lr=1e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=coinrun-dynamics-causal-1-node-$slurm_job_id \\n --tags coinrun dynamics causal 1-node \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +1391,1894156,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_causal copy.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_1_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046""\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=48 \\n --dyna_type=causal \\n --init_lr=0 \\n --max_lr=1e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=coinrun-dynamics-causal-1-node-$slurm_job_id \\n --tags coinrun dynamics causal 1-node \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab 
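Editor's note: train_dynamics_causal.sbatch and its copy above both rely on the same requeue-before-timeout pattern. Isolated as a self-contained sketch (directives and logic as in the recorded script; the training command line is abbreviated here):

    #!/usr/bin/env bash
    #SBATCH --requeue
    #SBATCH --signal=b:usr1@300   # send SIGUSR1 to the batch shell 5 min before the time limit

    # On the timeout warning, put this job back into the queue and exit cleanly.
    requeue_job() {
        echo "[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID..."
        scontrol requeue "$SLURM_JOB_ID"
        exit 0
    }
    trap requeue_job SIGUSR1

    # The workload runs in the background: a foreground srun would block the
    # shell, and the trap could only run once srun had already returned.
    srun python train_dynamics.py ... &
    child_pid=$!
    wait $child_pid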
+1392,1900278,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_1_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046""\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=48 \\n --dyna_type=causal \\n --init_lr=0 \\n --max_lr=1e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=coinrun-dynamics-causal-1-node-$slurm_job_id \\n --tags coinrun dynamics causal 1-node \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +1393,1906021,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1907,0,"",shellscript,selection_mouse +1394,1906339,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1907,7,"",shellscript,content +1395,1907441,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1907,0,"m",shellscript,content +1396,1907442,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1908,0,"",shellscript,selection_keyboard +1397,1907564,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1908,0,"a",shellscript,content +1398,1907565,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1909,0,"",shellscript,selection_keyboard +1399,1907662,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1909,0,"s",shellscript,content +1400,1907663,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1910,0,"",shellscript,selection_keyboard +1401,1907718,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1910,0,"k",shellscript,content +1402,1907719,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1911,0,"",shellscript,selection_keyboard 
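Editor's note: the keystrokes around this point fork train_dynamics_maskgit.sbatch from the causal variant by retyping "maskgit" over each occurrence of "causal". The same fork could be done non-interactively (hypothetical commands, not part of the recording; -i as in GNU sed):

    cp train_dynamics_causal.sbatch train_dynamics_maskgit.sbatch
    sed -i 's/causal/maskgit/g' train_dynamics_maskgit.sbatch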
+1403,1907951,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1911,0,"g",shellscript,content +1404,1907951,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1912,0,"",shellscript,selection_keyboard +1405,1908091,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1912,0,"i",shellscript,content +1406,1908092,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1913,0,"",shellscript,selection_keyboard +1407,1908151,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1913,0,"t",shellscript,content +1408,1908152,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1914,0,"",shellscript,selection_keyboard +1409,1908267,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1914,0," ",shellscript,content +1410,1908268,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1915,0,"",shellscript,selection_keyboard +1411,1908713,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1914,0,"",shellscript,selection_command +1412,1911281,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",2066,0,"",shellscript,selection_mouse +1413,1912792,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",2105,1,"c",shellscript,selection_command +1414,1912940,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",2122,2,"ca",shellscript,selection_command +1415,1913029,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",2122,3,"cau",shellscript,selection_command +1416,1913145,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",2122,4,"caus",shellscript,selection_command +1417,1913270,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",2122,5,"causa",shellscript,selection_command +1418,1913341,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",2122,6,"causal",shellscript,selection_command +1419,1918592,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",2122,6,"maskgit",shellscript,content +1420,1918596,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",276,6,"causal",shellscript,selection_command +1421,1919543,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",276,6,"maskgit",shellscript,content +1422,1919546,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",399,6,"causal",shellscript,selection_command +1423,1919737,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",399,6,"maskgit",shellscript,content +1424,1919740,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",451,6,"causal",shellscript,selection_command +1425,1920684,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",451,6,"maskgit",shellscript,content +1426,1920688,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1520,6,"causal",shellscript,selection_command +1427,1921304,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",1520,6,"maskgit",shellscript,content +1428,1921308,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",2068,6,"causal",shellscript,selection_command +1429,1921960,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",2068,6,"maskgit",shellscript,content +1430,1927231,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",465,0,"",shellscript,selection_mouse +1431,1927233,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",464,0,"",shellscript,selection_command +1432,1945986,"TERMINAL",0,0,"sync-runner",,terminal_command +1433,1946035,"TERMINAL",0,0,"]633;E;2025-08-11 16:23:01 
sync-runner;361f455a-15ad-4916-88b6-b9e49434d7af]633;Csending incremental file list\r\n",,terminal_output +1434,1946614,"TERMINAL",0,0,"r",,terminal_output +1435,1946789,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +1436,1946891,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +1437,1946955,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +1438,1947067,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1439,1947135,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +1440,1948019,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/coinrun/\r\nslurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch\r\nslurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch\r\n",,terminal_output +1441,1948040,"TERMINAL",0,0,"\r\nsent 30,960 bytes received 191 bytes 12,460.40 bytes/sec\r\ntotal size is 219,130,420 speedup is 7,034.46\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D;0",,terminal_output +1442,1948070,"TERMINAL",0,0,"runner",,terminal_command +1443,1948087,"TERMINAL",0,0,"]633;E;2025-08-11 16:23:03 runner;361f455a-15ad-4916-88b6-b9e49434d7af]633;C]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D;0",,terminal_output +1444,1976328,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch",,terminal_command +1445,1976354,"TERMINAL",0,0,"]633;E;2025-08-11 16:23:32 sbatch slurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch;361f455a-15ad-4916-88b6-b9e49434d7af]633;CSubmitted batch job 3415079\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D;0",,terminal_output +1446,2008774,"TERMINAL",0,0,"queue",,terminal_command +1447,2008816,"TERMINAL",0,0,"]633;E;2025-08-11 16:24:04 queue;361f455a-15ad-4916-88b6-b9e49434d7af]633;C",,terminal_output +1448,2008880,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1993.localdomain: Mon Aug 11 16:24:04 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3415079 accelerat train_dy tum_cte0 R\t0:31\t 1 hkn07263414046 accelerat train_to tum_cte0 R 4:32:34\t 1 hkn07283412401 accelerat train_to tum_cte0 R 1-00:42:14\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]3414284 cpuonly wrap tum_cte0 R 2:44:12\t 1 hkn03103414283 cpuonly wrap tum_cte0 R 2:44:24\t 1 hkn03093414282 cpuonly wrap tum_cte0 R 2:44:34\t 1 hkn16653414281 cpuonly wrap tum_cte0 R 2:44:42\t 1 hkn16623414280 cpuonly wrap tum_cte0 R 2:44:51\t 1 hkn0238",,terminal_output +1449,2009958,"TERMINAL",0,0,"525535532",,terminal_output +1450,2010411,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D;0",,terminal_output +1451,2010755,"TERMINAL",0,0,"bash",,terminal_focus +1452,2011695,"TERMINAL",0,0,"ls",,terminal_command +1453,2011744,"TERMINAL",0,0,"]633;E;2025-08-11 16:24:07 ls;8cf2329e-a785-4b62-8fbf-a36eaf9297be]633;C",,terminal_output +1454,2011896,"TERMINAL",0,0,"020000 040000 055000 056000 057000\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046]633;D;0",,terminal_output +1455,2093688,"TERMINAL",0,0,"bash",,terminal_focus +1456,2095308,"TERMINAL",0,0,"queue",,terminal_command +1457,2095355,"TERMINAL",0,0,"]633;E;2025-08-11 16:25:31 queue;361f455a-15ad-4916-88b6-b9e49434d7af]633;C",,terminal_output +1458,2095421,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1993.localdomain: Mon Aug 11 16:25:31 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3415079 accelerat train_dy tum_cte0 R\t1:58\t 1 hkn07263414046 accelerat train_to tum_cte0 R 4:34:01\t 1 hkn07283412401 accelerat train_to tum_cte0 R 1-00:43:41\t 8 
hkn[0802,0804-0806,0808,0810,0813-0814]3414284 cpuonly wrap tum_cte0 R 2:45:39\t 1 hkn03103414283 cpuonly wrap tum_cte0 R 2:45:51\t 1 hkn03093414282 cpuonly wrap tum_cte0 R 2:46:01\t 1 hkn16653414281 cpuonly wrap tum_cte0 R 2:46:09\t 1 hkn16623414280 cpuonly wrap tum_cte0 R 2:46:18\t 1 hkn0238",,terminal_output +1459,2096486,"TERMINAL",0,0,"29224022109",,terminal_output +1460,2096862,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D;0",,terminal_output +1461,2098205,"TERMINAL",0,0,"bash",,terminal_focus +1462,2106033,"TERMINAL",0,0,"cd $ws_dir",,terminal_command +1463,2106050,"TERMINAL",0,0,"]633;E;2025-08-11 16:25:41 cd $ws_dir;8cf2329e-a785-4b62-8fbf-a36eaf9297be]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared]633;D;0",,terminal_output +1464,2106458,"TERMINAL",0,0,"ls",,terminal_command +1465,2106489,"TERMINAL",0,0,"]633;E;2025-08-11 16:25:42 ls;8cf2329e-a785-4b62-8fbf-a36eaf9297be]633;Ccheckpoints count_items.sh data data_atari data_coinrun data_minecraft data_new huggingface logs possibly_corrupt_files_in_this_workspace.txt scripts\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared]633;D;0",,terminal_output +1466,2110530,"TERMINAL",0,0,"cd data_atari/",,terminal_command +1467,2111484,"TERMINAL",0,0,"ls",,terminal_command +1468,2113519,"TERMINAL",0,0,"cd per-game/",,terminal_command +1469,2113860,"TERMINAL",0,0,"ls",,terminal_command +1470,2117448,"TERMINAL",0,0,"cd atari_v1/screens/",,terminal_command +1471,2117466,"TERMINAL",0,0,"]633;E;2025-08-11 16:25:53 cd atari_v1/screens/;8cf2329e-a785-4b62-8fbf-a36eaf9297be]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens]633;D;0",,terminal_output +1472,2117700,"TERMINAL",0,0,"ls",,terminal_command +1473,2117737,"TERMINAL",0,0,"]633;E;2025-08-11 16:25:53 ls;8cf2329e-a785-4b62-8fbf-a36eaf9297be]633;Cmspacman pinball qbert revenge spaceinvaders\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens]633;D;0",,terminal_output +1474,2130521,"TERMINAL",0,0,"ls -l mspacman/ | wc -l",,terminal_command +1475,2130573,"TERMINAL",0,0,"]633;E;2025-08-11 16:26:06 ls -l mspacman/ | wc -l;8cf2329e-a785-4b62-8fbf-a36eaf9297be]633;C",,terminal_output +1476,2130923,"TERMINAL",0,0,"138\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens]633;D;0",,terminal_output +1477,2140405,"TERMINAL",0,0,"ls -l ../trajectories/mspacman/ | wc -l",,terminal_command +1478,2140456,"TERMINAL",0,0,"]633;E;2025-08-11 16:26:16 ls -l ../trajectories/mspacman/ | wc -l;8cf2329e-a785-4b62-8fbf-a36eaf9297be]633;C",,terminal_output +1479,2141051,"TERMINAL",0,0,"383\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens]633;D;0",,terminal_output +1480,2148082,"TERMINAL",0,0,"ls -l ../trajectories/pinball/ | wc -l",,terminal_command +1481,2148133,"TERMINAL",0,0,"]633;E;2025-08-11 16:26:23 ls -l ../trajectories/pinball/ | wc -l;8cf2329e-a785-4b62-8fbf-a36eaf9297be]633;C",,terminal_output +1482,2148314,"TERMINAL",0,0,"208\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens]633;D;0",,terminal_output +1483,2151157,"TERMINAL",0,0,"ls -l pinball/ | wc -l",,terminal_command +1484,2151169,"TERMINAL",0,0,"]633;E;2025-08-11 16:26:26 ls -l pinball/ | wc 
-l;8cf2329e-a785-4b62-8fbf-a36eaf9297be]633;C4\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens]633;D;0",,terminal_output
+1485,2199030,"TERMINAL",0,0,"bash",,terminal_focus
+1486,2200377,"TERMINAL",0,0,"bash",,terminal_focus
+1487,2205089,"TERMINAL",0,0,"bash",,terminal_focus
+1488,2206357,"TERMINAL",0,0,"cd",,terminal_command
+1489,2207681,"TERMINAL",0,0,"dev",,terminal_command
+1490,2207691,"TERMINAL",0,0,"]633;E;2025-08-11 16:27:23 dev;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;C]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output
+1491,2209456,"TERMINAL",0,0,"cd slurm",,terminal_command
+1492,2213378,"TERMINAL",0,0,"git status",,terminal_command
+1493,2213413,"TERMINAL",0,0,"]633;E;2025-08-11 16:27:29 git status;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;C",,terminal_output
+1494,2213926,"TERMINAL",0,0,"On branch main\r\nYour branch is up to date with 'origin/main'.\r\n\r\nChanges not staged for commit:\r\n (use ""git add ..."" to update what will be committed)\r\n (use ""git restore ..."" to discard changes in working directory)\r\n\tmodified: jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4.sbatch\r\n\tmodified: jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tdev/mihir/horeka/causal_fit_modelsizes/\r\n\tdev/mihir/horeka/overfit_batch/\r\n\tdev/mihir/horeka/overfit_batch_tiny/\r\n\tdev/mihir/horeka/overfit_sample/\r\n\tdev/mihir/horeka/overfit_sample_tiny/\r\n\tdev/mihir/horeka/yolo-runs/\r\n\tjobs/mihir/horeka/causal_big_runs/\r\n\tjobs/mihir/horeka/coinrun/\r\n\tjobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes_dev.sbatch\r\n\tjobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_dev.sbatch\r\n\tjobs/mihir/horeka/maskgit_big_runs/\r\n\tutils/alfred/sqrt_lr_scaling.py\r\n\tutils/mihir/weekend-job-requeuer.sh\r\n\tutils/mihir/weekend-job-starter.sh\r\n\r\nno changes added to commit (use ""git add"" and/or ""git commit -a"")\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar/slurm]633;D;0",,terminal_output
+1495,2215599,"TERMINAL",0,0,"git pull",,terminal_command
+1496,2215650,"TERMINAL",0,0,"]633;E;2025-08-11 16:27:31 git pull;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;C",,terminal_output
+1497,2217202,"TERMINAL",0,0,"remote: Enumerating objects: 82, done.\r\nremote: Counting objects: 100% (82/82), done.\r\nremote: Compressing objects: 100% (47/47), done.\r\n[per-percent progress frames elided]\r",,terminal_output
+1498,2217318,"TERMINAL",0,0,"remote: Total 71 (delta 34), reused 55 (delta 20), pack-reused 0 (from 0)\r\nUnpacking objects: 1% (1/71)\r",,terminal_output
+1503,2218066,"TERMINAL",0,0,"[per-percent progress frames elided]\rUnpacking 
objects: 98% (70/71), 9.47 KiB | 18.00 KiB/s\rUnpacking objects: 100% (71/71), 9.47 KiB | 18.00 KiB/s\rUnpacking objects: 100% (71/71), 11.33 KiB | 14.00 KiB/s, done.\r\n",,terminal_output +1504,2218351,"TERMINAL",0,0,"From github.com:p-doom/slurm\r\n d9639d1..9ebc49c main -> origin/main\r\n",,terminal_output +1505,2218418,"TERMINAL",0,0,"Updating d9639d1..9ebc49c\r\n",,terminal_output +1506,2219473,"TERMINAL",0,0,"Updating files: 52% (190/363)\rUpdating files: 53% (193/363)\r",,terminal_output +1507,2219539,"TERMINAL",0,0,"Updating files: 54% (197/363)\rUpdating files: 55% (200/363)\r",,terminal_output +1508,2219802,"TERMINAL",0,0,"Updating files: 56% (204/363)\rUpdating files: 57% (207/363)\rUpdating files: 58% (211/363)\rUpdating files: 59% (215/363)\rUpdating files: 60% (218/363)\rUpdating files: 61% (222/363)\rUpdating files: 62% (226/363)\rUpdating files: 63% (229/363)\rUpdating files: 64% (233/363)\rUpdating files: 65% (236/363)\rUpdating files: 66% (240/363)\rUpdating files: 67% (244/363)\rUpdating files: 68% (247/363)\rUpdating files: 69% (251/363)\rUpdating files: 70% (255/363)\rUpdating files: 71% (258/363)\rUpdating files: 72% (262/363)\rUpdating files: 73% (265/363)\rUpdating files: 74% (269/363)\r",,terminal_output +1509,2219880,"TERMINAL",0,0,"Updating files: 75% (273/363)\rUpdating files: 76% (276/363)\rUpdating files: 77% (280/363)\rUpdating files: 78% (284/363)\rUpdating files: 79% (287/363)\rUpdating files: 80% (291/363)\rUpdating files: 81% (295/363)\rUpdating files: 82% (298/363)\r",,terminal_output +1510,2220005,"TERMINAL",0,0,"Updating files: 83% (302/363)\rUpdating files: 84% (305/363)\rUpdating files: 85% (309/363)\rUpdating files: 86% (313/363)\rUpdating files: 87% (316/363)\rUpdating files: 88% (320/363)\rUpdating files: 89% (324/363)\rUpdating files: 90% (327/363)\rUpdating files: 91% (331/363)\rUpdating files: 92% (334/363)\rUpdating files: 93% (338/363)\r",,terminal_output +1511,2220119,"TERMINAL",0,0,"Updating files: 94% (342/363)\rUpdating files: 95% (345/363)\rUpdating files: 96% (349/363)\rUpdating files: 97% (353/363)\rUpdating files: 98% (356/363)\rUpdating files: 99% (360/363)\rUpdating files: 100% (363/363)\rUpdating files: 100% (363/363), done.\r\nFast-forward\r\n",,terminal_output +1512,2220453,"TERMINAL",0,0," dev/alfred/berlin/job_requeueing/notes.md | 14 ++++++++\r\n dev/alfred/berlin/train_dyn_dev/train_dynacmis.sbatch | 23 ++++++++++---\r\n dev/alfred/berlin/train_lam_dev/train_lam.sbatch | 29 ++++++++++++++++\r\n dev/alfred/berlin/train_tok_dev/train_tok.sbatch | 29 ++++++++++++++++\r\n dev/alfred/horeka/jobs_cur/dev.sbatch | 52 +++++++++++++++++++++++++++++\r\n dev/alfred/horeka/jobs_cur/dev_single.sbatch | 59 ++++++++++++++++++++++++++++++++\r\n dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions.sh | 17 ++++++++++\r\n dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation.sbatch | 82 +++++++++++++++++++++++++++++++++++++++++++++\r\n .../dyn_gt_actions_ablation_prepend}/dyn_gt_actions_ablation_baseline.sbatch | 5 +--\r\n dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation_dev_bak.sbatch | 78 +++++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation_single.sbatch | 86 +++++++++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/horeka/{ => jobs_old}/allocate/cpu.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/allocate/multigpu_gpu.sh | 0\r\n dev/alfred/horeka/{ => 
jobs_old}/allocate/single_gpu.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_16_nodes.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_1_nodes.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_1_nodes.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_2_nodes.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_32_nodes.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_4_nodes.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_4_nodes.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_4_nodes_frequent_chkpt.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_64_nodes.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_8_nodes.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/const_lr/train_tokenizer_16_nodes.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/const_lr/train_tokenizer_1_nodes.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/const_lr/train_tokenizer_2_nodes.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/const_lr/train_tokenizer_32_nodes.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/const_lr/train_tokenizer_4_nodes.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/const_lr/train_tokenizer_8_nodes.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/oai_subset/train_tokenizer_1_nodes.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/oai_subset/train_tokenizer_2_nodes.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/oai_subset/train_tokenizer_2_nodes_samples_500.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/checkpoint_fix/train_tokenizer.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/coinrun/base/train_dynamics_coinrun.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/coinrun/base/train_lam_coinrun.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/coinrun/base/train_tokenizer_coinrun.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_dynamics_coinrun.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_dynamics_coinrun.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_lam_12.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_lam_24.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_lam_48.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_lam_6.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_lam_6.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_tokenizer_coinrun.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_tokenizer_coinrun.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/coinrun/train_tokenizer_coinrun.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/dyn_gt_actions_ablation/dyn_gt_actions_ablation.sbatch | 5 +--\r\n dev/alfred/horeka/jobs_old/dyn_gt_actions_ablation/dyn_gt_actions_ablation_baseline.sbatch | 80 ++++++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/horeka/{ => 
jobs_old}/dyn_gt_actions_ablation/dyn_gt_actions_ablation_dev.sbatch | 1 +\r\n dev/alfred/horeka/{ => jobs_old}/dyn_gt_actions_ablation/masked_lim_yolo.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/generate_single_samples/generate_samples_50k.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/download_10xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/download_6xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/download_7xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/download_8xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/download_9xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/download_index_json.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/mp4_to_array_record_open_ai_6xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/mp4_to_array_record_open_ai_dev.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/mp4_to_npy_open_ai_10xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/mp4_to_npy_open_ai_6xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/mp4_to_npy_open_ai_7xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/mp4_to_npy_open_ai_8xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/mp4_to_npy_open_ai_9xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/actions_download/download_actions.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/actions_download/download_actions_all.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/mp4_to_arrayrecords_w_actions/preproc_w_actions.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/download_10xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/download_6xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/download_7xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/download_8xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/download_9xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/download_index_json.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/mp4_to_array_record_open_ai.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/mp4_to_array_record_open_ai_chunked.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/mp4_to_array_record_open_ai_dev.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/mp4_to_array_record_open_ai_to_fast.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/mp4_to_npy_open_ai.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/job_chaining/chain_example.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/job_requeueing/example_tokenizer_lr_tuning/lr_tuning_tokenizer.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/job_requeueing/example_tokenizer_lr_tuning/train_tokenizer_lr_general.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/job_requeueing/lr_tuning/tokenizer/lr_tuning_tokenizer.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/job_requeueing/lr_tuning/tokenizer/train_tokenizer_lr_general.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/job_requeueing/lr_tuning/tokenizer_lr_tuning.py | 0\r\n dev/alfred/horeka/{ => jobs_old}/job_requeueing/train_lam_chain_dev.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/job_requeueing/train_lam_requeue_dev.sbatch | 0\r\n dev/alfred/horeka/{ => 
jobs_old}/masked_lim/masked_lim_dev.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/masked_lim/masked_lim_yolo.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/masked_lim/masked_lim_yolo.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/masked_lim_noise/masked_lim_dev.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/masked_lim_noise/masked_lim_yolo.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/masked_lim_noise/masked_lim_yolo.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_minecraft_single_sample/train_dynamics_overfit_sample.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_minecraft_single_sample/train_dynamics_overfit_sample.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/from_ckpt/train_lam_dev.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/from_ckpt/train_lam_samples_12.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_dev.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_init.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_12.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_12288.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_1536.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_24576.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_3072.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_384.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_49152.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_6144.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_96.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/from_ckpt/train_tokenizer_samples_12.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/from_ckpt/train_tokenizer_samples_12.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_dev.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_12.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_12288.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_1536.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_24576.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_3072.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_384.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_49152.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_6144.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_96.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_sample/train_dynamics_overfit_sample.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_sample/train_lam_overfit_sample.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_sample/train_tokenizer_overfit_sample.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_sample/train_tokenizer_overfit_sample_size_0.6_mio.sbatch | 0\r\n dev/alfred/horeka/{ => 
jobs_old}/overfit_sample/train_tokenizer_overfit_sample_size_0_5.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_sample/train_tokenizer_overfit_sample_size_21_mio.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_sample/train_tokenizer_overfit_sample_size_2_mio.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_sample/train_tokenizer_overfit_sample_size_9_mio.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/overfit_sample/train_tokenizer_overfit_sample_size_small_mio.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/preprocess/mp4_to_npy_10xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/preprocess/mp4_to_npy_6xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/preprocess/mp4_to_npy_7xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/preprocess/mp4_to_npy_8xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/preprocess/mp4_to_npy_9xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/preprocess/mp4_to_npy_open_ai.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/preprocess/mp4_to_npy_test.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/preprocess/npy_to_tfrecord.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/preprocess/npy_to_tfrecord_10xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/preprocess/npy_to_tfrecord_6xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/preprocess/npy_to_tfrecord_7xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/preprocess/npy_to_tfrecord_8xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/preprocess/npy_to_tfrecord_9xx.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/preprocess/preprocess_video_splitter.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/preprocess/preprocess_video_to_npy.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/preprocess/preprocess_video_to_npy_test.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/procgen/cp_script.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/procgen/data_gen_gym_acrobot.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/procgen/data_gen_gym_carracing.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/procgen/data_gen_gym_coinrun.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/procgen/data_gen_gym_coinrun.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/procgen/data_gen_gym_mountaincar copy.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/procgen/data_gen_gym_mountaincar.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/procgen/data_gen_gym_multi.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/procgen/data_gen_gym_pendulum.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/rsync/rsync.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/rsync/rsync_tf_records.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/sample_jafar/sample_coinrun.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/sampling/sample_coinrun.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/sampling/sample_knoms.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/sampling/sample_knoms.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/sampling/sample_knoms_mihir.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/train_dyn/train_dyn_knoms_full.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/train_dyn_dev/train_dyn.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/train_dyn_dev/train_dyn_checkpt_loading_test_dev.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/train_dyn_dev/train_dyn_dev.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/train_dyn_dev/train_dyn_single_batch.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/train_lam/train_lam_full.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/train_lam_dev/train_lam.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/train_lam_dev/train_lam_dev.sbatch | 0\r\n 
dev/alfred/horeka/{ => jobs_old}/train_lam_dev/train_lam_full_dev.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/train_lam_dev/train_lam_single_batch.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/train_tokenizer/train_lam_oai_dev copy.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/train_tokenizer/train_lam_oai_dev.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/train_tokenizer/train_tokenizer_knoms_overfit_single_batch.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/train_tokenizer/train_tokenizer_knoms_overfit_single_sample.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/train_tokenizer/train_tokenizer_knoms_overfit_tfrecord_10.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/train_tokenizer/train_tokenizer_knoms_overfit_tfrecord_full.sbatch | 0\r\n dev/alfred/horeka/jobs_old/train_tokenizer_dev/train_tokenizer.sbatch | 102 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/horeka/{ => jobs_old}/train_tokenizer_dev/train_tokenizer.sh | 0\r\n .../{train_tokenizer_dev/train_tokenizer.sbatch => jobs_old/train_tokenizer_dev/train_tokenizer_copy.sbatch} | 15 +++++----\r\n dev/alfred/horeka/{ => jobs_old}/train_tokenizer_dev/train_tokenizer_h100.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/train_tokenizer_dev/train_tokenizer_overfit_tfrecord_10.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/train_tokenizer_dev/train_tokenizer_single_batch.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/validation/tokenizer_lr_tuning/lr_tuning_tokenizer.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/validation/tokenizer_lr_tuning/train_tokenizer_lr_general.sbatch | 0\r\n dev/alfred/horeka/{ => jobs_old}/validation/tokenizer_without_optimizer/lr_tuning_tokenizer.sh | 0\r\n dev/alfred/horeka/{ => jobs_old}/validation/tokenizer_without_optimizer/train_tokenizer_lr_general.sbatch | 0\r\n dev/mihir/horeka/train_tokenizer.sh | 2 +-\r\n 188 files changed, 662 insertions(+), 17 deletions(-)\r\n create mode 100644 dev/alfred/berlin/train_lam_dev/train_lam.sbatch\r\n create mode 100644 dev/alfred/berlin/train_tok_dev/train_tok.sbatch\r\n create mode 100644 dev/alfred/horeka/jobs_cur/dev.sbatch\r\n create mode 100644 dev/alfred/horeka/jobs_cur/dev_single.sbatch\r\n create mode 100644 dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions.sh\r\n create mode 100644 dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation.sbatch\r\n rename dev/alfred/horeka/{dyn_gt_actions_ablation => jobs_cur/dyn_gt_actions_ablation_prepend}/dyn_gt_actions_ablation_baseline.sbatch (94%)\r\n create mode 100644 dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation_dev_bak.sbatch\r\n create mode 100644 dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation_single.sbatch\r\n rename dev/alfred/horeka/{ => jobs_old}/allocate/cpu.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/allocate/multigpu_gpu.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/allocate/single_gpu.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_16_nodes.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_1_nodes.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_1_nodes.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_2_nodes.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_32_nodes.sbatch 
(100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_4_nodes.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_4_nodes.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_4_nodes_frequent_chkpt.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_64_nodes.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/adjusted_lr/train_tokenizer_8_nodes.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/const_lr/train_tokenizer_16_nodes.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/const_lr/train_tokenizer_1_nodes.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/const_lr/train_tokenizer_2_nodes.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/const_lr/train_tokenizer_32_nodes.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/const_lr/train_tokenizer_4_nodes.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/const_lr/train_tokenizer_8_nodes.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/oai_subset/train_tokenizer_1_nodes.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/oai_subset/train_tokenizer_2_nodes.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/batchsize_scaling/oai_subset/train_tokenizer_2_nodes_samples_500.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/checkpoint_fix/train_tokenizer.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/coinrun/base/train_dynamics_coinrun.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/coinrun/base/train_lam_coinrun.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/coinrun/base/train_tokenizer_coinrun.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_dynamics_coinrun.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_dynamics_coinrun.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_lam_12.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_lam_24.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_lam_48.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_lam_6.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_lam_6.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_tokenizer_coinrun.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/coinrun/latent_action_ablation/train_tokenizer_coinrun.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/coinrun/train_tokenizer_coinrun.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/dyn_gt_actions_ablation/dyn_gt_actions_ablation.sbatch (94%)\r\n create mode 100644 dev/alfred/horeka/jobs_old/dyn_gt_actions_ablation/dyn_gt_actions_ablation_baseline.sbatch\r\n rename dev/alfred/horeka/{ => jobs_old}/dyn_gt_actions_ablation/dyn_gt_actions_ablation_dev.sbatch (98%)\r\n rename dev/alfred/horeka/{ => jobs_old}/dyn_gt_actions_ablation/masked_lim_yolo.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/generate_single_samples/generate_samples_50k.sh (100%)\r\n rename dev/alfred/horeka/{ => 
jobs_old}/input_pipeline_local/download_10xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/download_6xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/download_7xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/download_8xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/download_9xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/download_index_json.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/mp4_to_array_record_open_ai_6xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/mp4_to_array_record_open_ai_dev.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/mp4_to_npy_open_ai_10xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/mp4_to_npy_open_ai_6xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/mp4_to_npy_open_ai_7xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/mp4_to_npy_open_ai_8xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_local/mp4_to_npy_open_ai_9xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/actions_download/download_actions.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/actions_download/download_actions_all.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/mp4_to_arrayrecords_w_actions/preproc_w_actions.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/download_10xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/download_6xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/download_7xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/download_8xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/download_9xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/download_index_json.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/mp4_to_array_record_open_ai.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/mp4_to_array_record_open_ai_chunked.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/mp4_to_array_record_open_ai_dev.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/mp4_to_array_record_open_ai_to_fast.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/input_pipeline_ws/videos/mp4_to_npy_open_ai.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/job_chaining/chain_example.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/job_requeueing/example_tokenizer_lr_tuning/lr_tuning_tokenizer.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/job_requeueing/example_tokenizer_lr_tuning/train_tokenizer_lr_general.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/job_requeueing/lr_tuning/tokenizer/lr_tuning_tokenizer.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/job_requeueing/lr_tuning/tokenizer/train_tokenizer_lr_general.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/job_requeueing/lr_tuning/tokenizer_lr_tuning.py (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/job_requeueing/train_lam_chain_dev.sbatch (100%)\r\n rename dev/alfred/horeka/{ => 
jobs_old}/job_requeueing/train_lam_requeue_dev.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/masked_lim/masked_lim_dev.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/masked_lim/masked_lim_yolo.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/masked_lim/masked_lim_yolo.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/masked_lim_noise/masked_lim_dev.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/masked_lim_noise/masked_lim_yolo.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/masked_lim_noise/masked_lim_yolo.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_minecraft_single_sample/train_dynamics_overfit_sample.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_minecraft_single_sample/train_dynamics_overfit_sample.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/from_ckpt/train_lam_dev.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/from_ckpt/train_lam_samples_12.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_dev.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_init.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_12.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_12288.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_1536.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_24576.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_3072.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_384.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_49152.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_6144.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/lam/train_lam_samples_96.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/from_ckpt/train_tokenizer_samples_12.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/from_ckpt/train_tokenizer_samples_12.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_dev.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_12.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_12288.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_1536.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_24576.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_3072.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_384.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_49152.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_6144.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_96.sbatch (100%)\r\n rename 
dev/alfred/horeka/{ => jobs_old}/overfit_sample/train_dynamics_overfit_sample.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_sample/train_lam_overfit_sample.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_sample/train_tokenizer_overfit_sample.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_sample/train_tokenizer_overfit_sample_size_0.6_mio.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_sample/train_tokenizer_overfit_sample_size_0_5.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_sample/train_tokenizer_overfit_sample_size_21_mio.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_sample/train_tokenizer_overfit_sample_size_2_mio.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_sample/train_tokenizer_overfit_sample_size_9_mio.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/overfit_sample/train_tokenizer_overfit_sample_size_small_mio.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/preprocess/mp4_to_npy_10xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/preprocess/mp4_to_npy_6xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/preprocess/mp4_to_npy_7xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/preprocess/mp4_to_npy_8xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/preprocess/mp4_to_npy_9xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/preprocess/mp4_to_npy_open_ai.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/preprocess/mp4_to_npy_test.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/preprocess/npy_to_tfrecord.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/preprocess/npy_to_tfrecord_10xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/preprocess/npy_to_tfrecord_6xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/preprocess/npy_to_tfrecord_7xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/preprocess/npy_to_tfrecord_8xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/preprocess/npy_to_tfrecord_9xx.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/preprocess/preprocess_video_splitter.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/preprocess/preprocess_video_to_npy.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/preprocess/preprocess_video_to_npy_test.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/procgen/cp_script.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/procgen/data_gen_gym_acrobot.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/procgen/data_gen_gym_carracing.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/procgen/data_gen_gym_coinrun.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/procgen/data_gen_gym_coinrun.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/procgen/data_gen_gym_mountaincar copy.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/procgen/data_gen_gym_mountaincar.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/procgen/data_gen_gym_multi.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/procgen/data_gen_gym_pendulum.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/rsync/rsync.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/rsync/rsync_tf_records.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/sample_jafar/sample_coinrun.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/sampling/sample_coinrun.sh (100%)\r\n rename dev/alfred/horeka/{ => 
jobs_old}/sampling/sample_knoms.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/sampling/sample_knoms.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/sampling/sample_knoms_mihir.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_dyn/train_dyn_knoms_full.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_dyn_dev/train_dyn.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_dyn_dev/train_dyn_checkpt_loading_test_dev.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_dyn_dev/train_dyn_dev.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_dyn_dev/train_dyn_single_batch.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_lam/train_lam_full.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_lam_dev/train_lam.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_lam_dev/train_lam_dev.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_lam_dev/train_lam_full_dev.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_lam_dev/train_lam_single_batch.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_tokenizer/train_lam_oai_dev copy.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_tokenizer/train_lam_oai_dev.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_tokenizer/train_tokenizer_knoms_overfit_single_batch.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_tokenizer/train_tokenizer_knoms_overfit_single_sample.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_tokenizer/train_tokenizer_knoms_overfit_tfrecord_10.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_tokenizer/train_tokenizer_knoms_overfit_tfrecord_full.sbatch (100%)\r\n create mode 100644 dev/alfred/horeka/jobs_old/train_tokenizer_dev/train_tokenizer.sbatch\r\n rename dev/alfred/horeka/{ => jobs_old}/train_tokenizer_dev/train_tokenizer.sh (100%)\r\n rename dev/alfred/horeka/{train_tokenizer_dev/train_tokenizer.sbatch => jobs_old/train_tokenizer_dev/train_tokenizer_copy.sbatch} (73%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_tokenizer_dev/train_tokenizer_h100.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_tokenizer_dev/train_tokenizer_overfit_tfrecord_10.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/train_tokenizer_dev/train_tokenizer_single_batch.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/validation/tokenizer_lr_tuning/lr_tuning_tokenizer.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/validation/tokenizer_lr_tuning/train_tokenizer_lr_general.sbatch (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/validation/tokenizer_without_optimizer/lr_tuning_tokenizer.sh (100%)\r\n rename dev/alfred/horeka/{ => jobs_old}/validation/tokenizer_without_optimizer/train_tokenizer_lr_general.sbatch (100%)\r\n",,terminal_output +1513,2232103,"TERMINAL",0,0,"git commit -am ""added scripts""",,terminal_command +1514,2232151,"TERMINAL",0,0,"]633;E;2025-08-11 16:27:47 git commit -am ""added scripts"";05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;C",,terminal_output +1515,2232565,"TERMINAL",0,0,"[main 985bc87] added scripts\r\n 2 files changed, 38 insertions(+), 10 deletions(-)\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar/slurm]633;D;0",,terminal_output +1516,2233892,"TERMINAL",0,0,"git push",,terminal_command +1517,2233944,"TERMINAL",0,0,"]633;E;2025-08-11 16:27:49 git push;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;C",,terminal_output +1518,2235241,"TERMINAL",0,0,"Enumerating objects: 17, done.\r\nCounting objects: 5% 
(1/17)\rCounting objects: 100% (17/17), done.\r\nDelta compression using up to 152 threads\r\nCompressing objects: 100% (9/9), done.\r\nWriting objects: 100% (9/9), 1.18 KiB | 1.18 MiB/s, done.\r\nTotal 9 (delta 6), reused 0 (delta 0), pack-reused 0\r\n",,terminal_output +1519,2235369,"TERMINAL",0,0,"remote: Resolving deltas: 100% (6/6), completed with 6 local objects.\r\n",,terminal_output +1520,2235419,"TERMINAL",0,0,"To github.com:p-doom/slurm.git\r\n 9ebc49c..985bc87 main -> main\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar/slurm]633;D;0",,terminal_output +1521,2316351,"TERMINAL",0,0,"git status",,terminal_command +1522,2316398,"TERMINAL",0,0,"]633;E;2025-08-11 16:29:12 git status;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;C",,terminal_output +1523,2317072,"TERMINAL",0,0,"On branch main\r\nYour branch is up to date with 'origin/main'.\r\n\r\nUntracked files:\r\n (use ""git add <file>..."" to include in what will be committed)\r\n\tdev/mihir/horeka/causal_fit_modelsizes/\r\n\tdev/mihir/horeka/overfit_batch/\r\n\tdev/mihir/horeka/overfit_batch_tiny/\r\n\tdev/mihir/horeka/overfit_sample/\r\n\tdev/mihir/horeka/overfit_sample_tiny/\r\n\tdev/mihir/horeka/yolo-runs/\r\n\tjobs/mihir/horeka/causal_big_runs/\r\n\tjobs/mihir/horeka/coinrun/\r\n\tjobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes_dev.sbatch\r\n\tjobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_dev.sbatch\r\n\tjobs/mihir/horeka/maskgit_big_runs/\r\n\tutils/alfred/sqrt_lr_scaling.py\r\n\tutils/mihir/weekend-job-requeuer.sh\r\n\tutils/mihir/weekend-job-starter.sh\r\n\r\nnothing added to commit but untracked files present (use ""git add"" to track)\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar/slurm]633;D;0",,terminal_output +1524,2323770,"TERMINAL",0,0,"git add jobs/mihir/",,terminal_command +1525,2323816,"TERMINAL",0,0,"]633;E;2025-08-11 16:29:19 git add jobs/mihir/;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;C",,terminal_output +1526,2323842,"TERMINAL",0,0,"]0;tum_cte0515@hkn1993:~/Projects/jafar/slurm]633;D;0",,terminal_output +1527,2326750,"TERMINAL",0,0,"git add dev/mihir/",,terminal_command +1528,2326796,"TERMINAL",0,0,"]633;E;2025-08-11 16:29:22 git add dev/mihir/;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;C",,terminal_output
+1529,2327004,"TERMINAL",0,0,"g[?25l]0;tum_cte0515@hkn1993:~/Projects/jafar/slurm]633;D;0[?25h",,terminal_output +1530,2329013,"TERMINAL",0,0,"git add utils/mihir/",,terminal_command +1531,2329018,"TERMINAL",0,0,"]633;E;2025-08-11 16:29:24 git add utils/mihir/;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;C]0;tum_cte0515@hkn1993:~/Projects/jafar/slurm]633;D;0",,terminal_output +1532,2332571,"TERMINAL",0,0,"git commit -am ""added scripts""",,terminal_command +1533,2332658,"TERMINAL",0,0,"]633;E;2025-08-11 16:29:28 git commit -am ""added scripts"";05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;C",,terminal_output +1534,2332936,"TERMINAL",0,0,"[main 4e6382f] added scripts\r\n 45 files changed, 2535 insertions(+)\r\n create mode 100644 dev/mihir/horeka/causal_fit_modelsizes/train_dynamics_new_arch_180M.sbatch\r\n create mode 100644 dev/mihir/horeka/causal_fit_modelsizes/train_dynamics_new_arch_255M.sbatch\r\n create mode 100644 dev/mihir/horeka/causal_fit_modelsizes/train_dynamics_new_arch_356M.sbatch\r\n create mode 100644 dev/mihir/horeka/causal_fit_modelsizes/train_dynamics_new_arch_500M.sbatch\r\n create mode 100644 dev/mihir/horeka/causal_fit_modelsizes/train_dynamics_new_arch_80M.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_batch/train_lam_overfit_batch.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_batch/train_tokenizer_overfit_batch.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_batch_tiny/sample.sh\r\n create mode 100644 dev/mihir/horeka/overfit_batch_tiny/tester.sh\r\n create mode 100644 dev/mihir/horeka/overfit_batch_tiny/train_dynamics_overfit_batch_big_lr.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_batch_tiny/train_dynamics_overfit_batch_mid_lr.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_batch_tiny/train_dynamics_overfit_batch_smol_lr.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_batch_tiny/train_lam_overfit_batch.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_batch_tiny/train_tokenizer_overfit_batch.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample_gaussian_noise.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_sample/maskgit/train_dynamics_overfit_sample.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_sample/maskgit/train_lam_overfit_sample.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_sample/maskgit/train_tokenizer_overfit_sample.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_sample_tiny/sample.sh\r\n create mode 100644 dev/mihir/horeka/overfit_sample_tiny/tester.sh\r\n create mode 100644 dev/mihir/horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr-2.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_sample_tiny/train_dynamics_overfit_sample_mid_lr.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_sample_tiny/train_dynamics_overfit_sample_smol_lr.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_sample_tiny/train_lam_overfit_sample.sbatch\r\n create mode 100644 dev/mihir/horeka/overfit_sample_tiny/train_tokenizer_overfit_sample.sbatch\r\n create mode 100644 dev/mihir/horeka/yolo-runs/sampling.sh\r\n create mode 100644 dev/mihir/horeka/yolo-runs/sampling_dev.sh\r\n create mode 100644 dev/mihir/horeka/yolo-runs/tester.sh\r\n create mode 100644 dev/mihir/horeka/yolo-runs/train_dynamics_maskgit_speedrun.sbatch\r\n create mode 100644 
dev/mihir/horeka/yolo-runs/train_dynamics_new_arch.sbatch\r\n create mode 100644 dev/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch\r\n create mode 100644 dev/mihir/horeka/yolo-runs/train_dynamics_yolorun.sbatch\r\n create mode 100644 dev/mihir/horeka/yolo-runs/train_tokenizer_maskgit_speedrun.sbatch\r\n create mode 100644 jobs/mihir/horeka/causal_big_runs/train_dynamics_2_nodes.sbatch\r\n create mode 100644 jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch\r\n create mode 100644 jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch\r\n create mode 100644 jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch\r\n create mode 100644 jobs/mihir/horeka/coinrun/train_tokenizer_lr_1e-4.sbatch\r\n create mode 100644 jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes_dev.sbatch\r\n create mode 100644 jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_dev.sbatch\r\n create mode 100644 jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch\r\n create mode 100644 utils/mihir/weekend-job-requeuer.sh\r\n create mode 100644 utils/mihir/weekend-job-starter.sh\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar/slurm]633;D;0",,terminal_output +1535,2334230,"TERMINAL",0,0,"git push",,terminal_command +1536,2334280,"TERMINAL",0,0,"]633;E;2025-08-11 16:29:30 git push;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;C",,terminal_output +1537,2335528,"TERMINAL",0,0,"To github.com:p-doom/slurm.git\r\n ! [rejected]  main -> main (fetch first)\r\nerror: failed to push some refs to 'github.com:p-doom/slurm.git'\r\nhint: Updates were rejected because the remote contains work that you do not\r\nhint: have locally. This is usually caused by another repository pushing to\r\nhint: the same ref. If you want to integrate the remote changes, use\r\nhint: 'git pull' before pushing again.\r\nhint: See the 'Note about fast-forwards' in 'git push --help' for details.\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar/slurm]633;D;1",,terminal_output +1538,2342287,"TERMINAL",0,0,"git pull",,terminal_command +1539,2342338,"TERMINAL",0,0,"]633;E;2025-08-11 16:29:38 git pull;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;C",,terminal_output +1540,2343994,"TERMINAL",0,0,"remote: Enumerating objects: 18, done.\r\nremote: Counting objects: 100% (18/18), done.\r\nremote: Compressing objects: 100% (7/7), done.\r\nremote: Total 11 (delta 4), reused 11 (delta 4), pack-reused 0 (from 0)\r\nUnpacking objects: 100% (11/11), 1.69 KiB | 28.00 KiB/s, done.\r\n",,terminal_output
+1541,2344254,"TERMINAL",0,0,"From github.com:p-doom/slurm\r\n 985bc87..931e909 main -> origin/main\r\n",,terminal_output +1542,2344362,"TERMINAL",0,0,"hint: Waiting for your editor to close the file... ",,terminal_output +1543,2344426,"TERMINAL",0,0,"[?1049h[>4;2m[?1h=[?2004h[?1004h[?12h[?12l",,terminal_output +1544,2344552,"TERMINAL",0,0,"[?25l""~/Projects/jafar/slurm/.git/MERGE_MSG"" 6L, 273B▽ Pzz\[0%m [>c]10;?]11;?",,terminal_output +1545,2344630,"TERMINAL",0,0,"Merge branch 'main' of github.com:p-doom/slurm\r\n# Please enter a commit message to explain why this merge is necessary,# especially if it merges an updated upstream into a topic branch.#\r\n# Lines starting with '#' will be ignored, and an empty message aborts\r\n# the commit.\r\n~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ 1,1All[?25hP+q436f\P+q6b75\P+q6b64\P+q6b72\P+q6b6c\P+q2332\P+q2334\P+q2569\P+q2a37\P+q6b31\[?12$p[?25l/3333/3333 [?25h[?25l/f6f6/e3e3 [?25h",,terminal_output +1546,2345742,"TERMINAL",0,0,"[?25l::[?25h",,terminal_output +1547,2345911,"TERMINAL",0,0,"w",,terminal_output +1548,2345972,"TERMINAL",0,0,"q",,terminal_output +1549,2346123,"TERMINAL",0,0,"\r[?25l[?2004l[>4;m"".git/MERGE_MSG"" 6L, 273B written\r\r\r\n[?1004l[?2004l[?1l>[?25h[>4;m[?1049l\rMerge made by the 'ort' strategy.\r\n dev/alfred/horeka/jobs_cur/{dev.sbatch => dyn_gt_actions_ablation_prepend/dev_dyn_gt_actions.sbatch} | 0\r\n .../jobs_cur/{dev_single.sbatch => dyn_gt_actions_ablation_prepend/dev_dyn_gt_actions_dev_single.sbatch} | 22 +++++++++------\r\n dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation.sbatch | 14 ++++------\r\n dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation_dev_bak.sbatch | 78 ---------------------------------------------------\r\n dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation_single.sbatch | 86 ---------------------------------------------------------\r\n dev/alfred/horeka/jobs_cur/tokenizer/train_tokenizer_37M.sbatch | 44 +++++++++++++++++++++++++++++\r\n 6 files changed, 63 insertions(+), 181 deletions(-)\r\n rename dev/alfred/horeka/jobs_cur/{dev.sbatch => dyn_gt_actions_ablation_prepend/dev_dyn_gt_actions.sbatch} (100%)\r\n rename dev/alfred/horeka/jobs_cur/{dev_single.sbatch => dyn_gt_actions_ablation_prepend/dev_dyn_gt_actions_dev_single.sbatch} (64%)\r\n delete mode 100644 dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation_dev_bak.sbatch\r\n delete mode 100644 dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation_single.sbatch\r\n create mode 100644 dev/alfred/horeka/jobs_cur/tokenizer/train_tokenizer_37M.sbatch\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar/slurm]633;D;0",,terminal_output +1550,2348189,"TERMINAL",0,0,"git push",,terminal_command +1551,2348236,"TERMINAL",0,0,"]633;E;2025-08-11 16:29:43 git push;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;C",,terminal_output +1552,2349431,"TERMINAL",0,0,"Enumerating objects: 89, done.\r\nCounting objects: 100% (81/81), done.\r\nDelta compression using up to 152 threads\r\nCompressing objects: 100% (69/69), done.\r\nWriting objects: 100% (69/69), 15.02 KiB | 961.00 KiB/s, done.\r\nTotal 69 (delta 43), reused 0 (delta 0), pack-reused 0\r\n",,terminal_output +1553,2349703,"TERMINAL",0,0,"remote: Resolving deltas: 100% (43/43), completed with 6 local objects.\r\n",,terminal_output
+1554,2349744,"TERMINAL",0,0,"To github.com:p-doom/slurm.git\r\n 931e909..9124c90 main -> main\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar/slurm]633;D;0",,terminal_output +1555,2393014,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch",0,0,"",shellscript,tab +1556,2395387,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1557,2426862,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/maskgit/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_maskgit_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/maskgit/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3404607\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID
\\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=256 \\n --init_lr=0 \\n --dyna_type=maskgit \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-maskgit-8-node-$slurm_job_id \\n --tags dynamics maskgit 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +1558,2465272,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",874,0,"",shellscript,selection_mouse +1559,2466291,"TERMINAL",0,0,"bash",,terminal_focus +1560,2550053,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch",,terminal_command +1561,2550104,"TERMINAL",0,0,"]633;E;2025-08-11 16:33:05 sbatch slurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch;361f455a-15ad-4916-88b6-b9e49434d7af]633;C",,terminal_output +1562,2550151,"TERMINAL",0,0,"Submitted batch job 3415111\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D;0",,terminal_output +1563,2556744,"TERMINAL",0,0,"logs",,terminal_command +1564,2557307,"TERMINAL",0,0,"ls",,terminal_command +1565,2557358,"TERMINAL",0,0,"]633;E;2025-08-11 16:33:13 ls;361f455a-15ad-4916-88b6-b9e49434d7af]633;C",,terminal_output +1566,2557487,"TERMINAL",0,0,"big_run train_lam_action_space_scaling_10_3329786.log train_tokenizer_batch_size_scaling_1_node_3318551.log\r\nbig-runs train_lam_action_space_scaling_10_3329801.log train_tokenizer_batch_size_scaling_2_node_3318552.log\r\ncausal train_lam_action_space_scaling_10_3331283.log train_tokenizer_batch_size_scaling_2_node_3330806.log\r\ncoinrun train_lam_action_space_scaling_12_3318546.log train_tokenizer_batch_size_scaling_2_node_3330848.log\r\nmaskgit train_lam_action_space_scaling_12_3320177.log train_tokenizer_batch_size_scaling_2_node_3331282.log\r\nmaskgit-maskprob-fix train_lam_action_space_scaling_12_3321527.log train_tokenizer_batch_size_scaling_4_node_3318553.log\r\ntrain_dyn_causal_180M_3372931.log train_lam_action_space_scaling_12_3329787.log train_tokenizer_batch_size_scaling_4_node_3320175.log\r\ntrain_dyn_causal_180M_3372963.log train_lam_action_space_scaling_12_3329802.log train_tokenizer_batch_size_scaling_4_node_3321524.log\r\ntrain_dyn_causal_180M_3372969.log train_lam_action_space_scaling_12_3331284.log train_tokenizer_batch_size_scaling_8_node_3320176.log\r\ntrain_dyn_causal_180M_3373107.log train_lam_action_space_scaling_20_3318547.log train_tokenizer_batch_size_scaling_8_node_3321525.log\r\ntrain_dyn_causal_255M_3372932.log train_lam_action_space_scaling_20_3329788.log train_tokenizer_minecraft_overfit_sample_3309656.log\r\ntrain_dyn_causal_255M_3372970.log train_lam_action_space_scaling_20_3329803.log train_tokenizer_model_size_scaling_127M_3317233.log\r\ntrain_dyn_causal_255M_3373108.log train_lam_action_space_scaling_20_3331285.log train_tokenizer_model_size_scaling_127M_3318554.log\r\ntrain_dyn_causal_356M_3372934.log train_lam_action_space_scaling_50_3320180.log train_tokenizer_model_size_scaling_140M_3313562.log\r\ntrain_dyn_causal_356M_3372971.log train_lam_action_space_scaling_50_3329789.log train_tokenizer_model_size_scaling_140M_3316019.log\r\ntrain_dyn_causal_356M_3373109.log train_lam_action_space_scaling_50_3329804.log train_tokenizer_model_size_scaling_200M_3313563.log\r\ntrain_dyn_causal_500M_3372936.log train_lam_action_space_scaling_50_3331286.log 
train_tokenizer_model_size_scaling_200M_3316020.log\r\ntrain_dyn_causal_500M_3372972.log train_lam_action_space_scaling_6_3318549.log train_tokenizer_model_size_scaling_227M_3317234.log\r\ntrain_dyn_causal_500M_3373110.log train_lam_action_space_scaling_6_3320178.log train_tokenizer_model_size_scaling_227M_3318555.log\r\ntrain_dyn_new_arch-bugfixed-spatial-shift_3359343.log train_lam_action_space_scaling_6_3321528.log train_tokenizer_model_size_scaling_227M_3320173.log\r\ntrain_dyn_new_arch-bugfixed-temporal-shift_3359349.log train_lam_action_space_scaling_6_3329790.log train_tokenizer_model_size_scaling_227M_3321523.log\r\ntrain_dyn_yolorun_3333026.log train_lam_action_space_scaling_6_3329805.log train_tokenizer_model_size_scaling_37M_3313565.log\r\ntrain_dyn_yolorun_3333448.log train_lam_action_space_scaling_6_3331287.log train_tokenizer_model_size_scaling_37M_3316022.log\r\ntrain_dyn_yolorun_3335345.log train_lam_action_space_scaling_8_3318550.log train_tokenizer_model_size_scaling_37M_3317232.log\r\ntrain_dyn_yolorun_3335362.log train_lam_action_space_scaling_8_3329791.log train_tokenizer_model_size_scaling_37M_3317239.log\r\ntrain_dyn_yolorun_3348592.log train_lam_action_space_scaling_8_3329806.log train_tokenizer_model_size_scaling_37M_3318556.log\r\ntrain_dyn_yolorun_new_arch_3351743.log train_lam_action_space_scaling_8_3331288.log train_tokenizer_model_size_scaling_74M_3318557.log\r\ntrain_dyn_yolorun_new_arch_3352103.log train_lam_minecraft_overfit_sample_3309655.log train_tokenizer_model_size_scaling_74M_3320174.log\r\ntrain_dyn_yolorun_new_arch_3352115.log train_lam_model_size_scaling_38M_3317098.log train_tokenizer_model_size_scaling_74M_3321522.log\r\ntrain_dyn_yolorun_new_arch_3358457.log train_lam_model_size_scaling_38M_3317115.log train_tokenizer_model_size_scaling_80M_3313564.log\r\ntrain_lam_action_space_scaling_10_3320179.log train_lam_model_size_scaling_38M_3317231.log train_tokenizer_model_size_scaling_80M_3316026.log\r\ntrain_lam_action_space_scaling_10_3321529.log train_tokenizer_batch_size_scaling_16_node_3321526.log yoloruns\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir]633;D;0",,terminal_output +1567,2560343,"TERMINAL",0,0,"cd coinrun/",,terminal_command +1568,2561288,"TERMINAL",0,0,"ls",,terminal_command +1569,2563144,"TERMINAL",0,0,"cd dynamics/",,terminal_command +1570,2563163,"TERMINAL",0,0,"]633;E;2025-08-11 16:33:18 cd dynamics/;361f455a-15ad-4916-88b6-b9e49434d7af]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics]633;D;0",,terminal_output +1571,2563518,"TERMINAL",0,0,"ls",,terminal_command +1572,2563528,"TERMINAL",0,0,"]633;E;2025-08-11 16:33:19 ls;361f455a-15ad-4916-88b6-b9e49434d7af]633;Ccausal\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics]633;D;0",,terminal_output +1573,2564855,"TERMINAL",0,0,"cd causal/",,terminal_command +1574,2564865,"TERMINAL",0,0,"]633;E;2025-08-11 16:33:20 cd causal/;361f455a-15ad-4916-88b6-b9e49434d7af]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal]633;D;0",,terminal_output +1575,2565124,"TERMINAL",0,0,"ls",,terminal_command +1576,2565149,"TERMINAL",0,0,"]633;E;2025-08-11 16:33:20 ls;361f455a-15ad-4916-88b6-b9e49434d7af]633;Ctrain_dynamics_causal_1_node_3415062.log 
train_dynamics_causal_1_node_3415079.log\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal]633;D;0",,terminal_output +1577,2569589,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415079.log",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_1_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046""\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=48 \\n --dyna_type=causal \\n --init_lr=0 \\n --max_lr=1e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=coinrun-dynamics-causal-1-node-$slurm_job_id \\n --tags coinrun dynamics causal 1-node \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n/var/spool/slurmd/job3415079/slurm_script: line 42: .venv/bin/activate: No such file or 
SLURM_JOB_USER=tum_cte0515\nSLURM_TASKS_PER_NODE=4\nSLURM_JOB_UID=999226\nSLURM_TASK_PID=2179981\nSLURM_JOB_GPUS=0,1,2,3\nSLURM_LOCALID=0\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs\nSLURMD_NODENAME=hkn0726\nSLURM_JOB_START_TIME=1754922213\nSLURM_CLUSTER_NAME=hk\nSLURM_JOB_END_TIME=1755095013\nSLURM_CPUS_ON_NODE=24\nSLURM_JOB_CPUS_PER_NODE=24\nSLURM_GPUS_ON_NODE=4\nSLURM_GTIDS=0\nSLURM_JOB_PARTITION=accelerated\nSLURM_TRES_PER_TASK=cpu=5\nSLURM_OOM_KILL_STEP=0\nSLURM_JOB_NUM_NODES=1\nSLURM_JOBID=3415079\nSLURM_JOB_QOS=normal\nSLURM_PROCID=0\nSLURM_CPUS_PER_TASK=5\nSLURM_NTASKS=4\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0726\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\nSLURM_SCRIPT_CONTEXT=prolog_task\nSLURM_NODELIST=hkn0726\nSLURM_JOB_ACCOUNT=hk-project-p0023960\nSLURM_PRIO_PROCESS=0\nSLURM_NPROCS=4\nSLURM_NNODES=1\nSLURM_SUBMIT_HOST=hkn1993.localdomain\nSLURM_JOB_ID=3415079\nSLURM_NODEID=0\nSLURM_CONF=/etc/slurm/slurm.conf\nSLURM_JOB_NAME=train_dynamics_causal_1_node\nSLURM_NTASKS_PER_NODE=4\nSLURM_JOB_GID=502226\nSLURM_JOB_NODELIST=hkn0726\nGpuFreq=control_disabled\nwandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\nwandb: Tracking run with wandb version 0.19.11\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/wandb/run-20250811_162435-3415079\nwandb: Run `wandb offline` to turn off syncing.\nwandb: Syncing run coinrun-dynamics-causal-1-node-3415079\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/3415079\nWARNING:absl:Dropping 2 examples of 89394 examples (shard 4).\nWARNING:absl:Missing metrics for step 40000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046/040000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 56000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046/056000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 57000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046/057000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 20000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046/020000/metrics/metrics not found.\nWARNING:absl:Missing metrics for step 58000\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046/058000/metrics/metrics not found.\n[each absl warning/error above is emitted once per srun task; the three identical repeats of every line are elided]\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\n warnings.warn(\n[three identical UserWarning blocks from the other tasks elided]\nRunning on 4 devices.\nCounting all components: ['dynamics', 'lam', 'tokenizer']\nParameter counts:\n{'dynamics': 26555392, 'lam': 35115232, 'tokenizer': 33750256, 'total': 95420880}\nStarting training from step 0...\n[the same startup block from the three other srun tasks elided]\n2025-08-11 16:26:15.661574: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n[three identical autotuning warnings elided]\n2025-08-11 16:26:18.430191: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\n2025-08-11 16:26:25.898518: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\nStep 0, loss: 12.667804718017578\nStep 1, loss: 12.771347999572754\nStep 2, loss: 12.642227172851562\nStep 3, loss: 12.731294631958008\nStep 4, loss: 12.625615119934082\nStep 5, loss: 12.726526260375977\nStep 6, loss: 12.679959297180176\nStep 7, loss: 12.740076065063477\nStep 8, loss: 12.758808135986328\nStep 9, loss: 12.621700286865234\nStep 10, loss: 12.689375877380371\nStep 11, loss: 12.711395263671875\nStep 12, loss: 12.635071754455566\nStep 13, loss: 12.737500190734863\nStep 14, loss: 12.727999687194824\nStep 15, loss: 12.6896390914917\nStep 16, loss: 12.66757869720459\nStep 17, loss: 12.713518142700195\nStep 18, loss: 12.667643547058105\nStep 19, loss: 12.741531372070312\nStep 20, loss: 12.656143188476562\nStep 21, loss: 12.708353996276855\nStep 22, loss: 12.722424507141113\nStep 23, loss: 12.613386154174805\nStep 24, loss: 12.706329345703125\nStep 25, loss: 12.670353889465332\nStep 26, loss: 12.654339790344238\nStep 27, loss: 12.607614517211914\nStep 28, loss: 12.662657737731934\nStep 29, loss: 12.653752326965332\nStep 30, loss: 12.605743408203125\nStep 31, loss: 12.71632194519043\nStep 32, loss: 12.681879043579102\nStep 33, loss: 12.564464569091797\nStep 34, loss: 12.617006301879883\nStep 35, loss: 12.60033130645752\nStep 36, loss: 12.583608627319336\nStep 37, loss: 12.695188522338867\nStep 38, loss: 12.601780891418457\nStep 39, loss: 12.555039405822754\nStep 40, loss: 12.539531707763672\nStep 41, loss: 12.610857009887695\nStep 42, loss: 12.575074195861816\nStep 43, loss: 12.597280502319336\nStep 44, loss: 12.570100784301758\nStep 45, loss: 12.572305679321289\nStep 46, loss: 12.584064483642578\nStep 47, loss: 12.492785453796387\nStep 48, loss: 12.607348442077637\nStep 49, loss: 12.546814918518066\nStep 50, loss: 12.495633125305176\nStep 51, loss: 12.483382225036621\nStep 52, loss: 12.526220321655273\nStep 53, loss: 12.498763084411621\nStep 54, loss: 12.548930168151855\nStep 55, loss: 12.573156356811523\nStep 56, loss: 12.49267578125\nStep 57, loss: 12.404444694519043\nStep 58, loss: 12.397965431213379\nStep 59, loss: 12.419803619384766\nStep 60, loss: 12.445556640625\nStep 61, loss: 12.421695709228516\nStep 62, loss: 12.470447540283203\nStep 63, loss: 12.449533462524414\nStep 64, loss: 12.47229290008545\nStep 65, loss: 12.380741119384766\nStep 66, loss: 12.41545295715332\nStep 67, loss: 12.410737991333008\nStep 68, loss: 12.396747589111328\nStep 69, loss: 12.404146194458008\nStep 70, loss: 12.31839656829834\nStep 71, loss: 12.361237525939941\nStep 72, loss: 12.293877601623535\nStep 73, loss: 12.361063957214355\nStep 74, loss: 12.32302474975586\nStep 75, loss: 12.389338493347168\nStep 76, loss: 12.24377727508545\nStep 77, loss: 12.283151626586914\nStep 78, loss: 12.348030090332031\nStep 79, loss: 12.286529541015625\nStep 80, loss: 12.259624481201172\nStep 81, loss: 12.337592124938965\nStep 82, loss: 12.294958114624023\nStep 83, loss: 12.2918062210083\nStep 84, loss: 12.283763885498047\nStep 85, loss: 12.357927322387695\nStep 86, loss: 12.222745895385742\nStep 87, loss: 12.186835289001465\nStep 88, loss: 12.234064102172852\nStep 89, loss: 
12.225297927856445\nStep 90, loss: 12.133995056152344\nStep 91, loss: 12.153714179992676\nStep 92, loss: 12.19133186340332\nStep 93, loss: 12.185487747192383\nStep 94, loss: 12.103611946105957\nStep 95, loss: 12.179014205932617\nStep 96, loss: 12.194933891296387\nStep 97, loss: 12.12974739074707\nStep 98, loss: 12.182798385620117\nStep 99, loss: 12.08813190460205\nStep 100, loss: 12.116350173950195\nStep 101, loss: 12.13232421875\nStep 102, loss: 12.077340126037598\nStep 103, loss: 12.075632095336914\nStep 104, loss: 12.0490140914917\nStep 105, loss: 12.019495010375977\nStep 106, loss: 12.09927749633789\nStep 107, loss: 12.032045364379883\nStep 108, loss: 12.031266212463379\nStep 109, loss: 12.077627182006836\nStep 110, loss: 11.903912544250488\nStep 111, loss: 11.966301918029785\nStep 112, loss: 11.957892417907715\nStep 113, loss: 11.926132202148438\nStep 114, loss: 11.861052513122559\nStep 115, loss: 11.882122039794922\nStep 116, loss: 11.95129108428955\nStep 117, loss: 11.872302055358887\nStep 118, loss: 11.946744918823242\nStep 119, loss: 11.910722732543945\nStep 120, loss: 11.818961143493652\nStep 121, loss: 11.851935386657715\nStep 122, loss: 11.751891136169434\nStep 123, loss: 11.846611022949219\nStep 124, loss: 11.800748825073242\nStep 125, loss: 11.796424865722656\nStep 126, loss: 11.81689167022705\nStep 127, loss: 11.72142505645752\nStep 128, loss: 11.75197982788086\nStep 129, loss: 11.763428688049316\nStep 130, loss: 11.691763877868652\nStep 131, loss: 11.680811882019043\nStep 132, loss: 11.72774887084961\nStep 133, loss: 11.640332221984863\nStep 134, loss: 11.707895278930664\nStep 135, loss: 11.7560396194458\nStep 136, loss: 11.70436954498291\nStep 137, loss: 11.6412935256958\nStep 138, loss: 11.607065200805664\nStep 139, loss: 11.677067756652832\nStep 140, loss: 11.646750450134277\nStep 141, loss: 11.694757461547852\nStep 142, loss: 11.600968360900879\nStep 143, loss: 11.685184478759766\nStep 144, loss: 11.597763061523438\nStep 145, loss: 11.548248291015625\nStep 146, loss: 11.546873092651367\nStep 147, loss: 11.577543258666992\nStep 148, loss: 11.547377586364746\nStep 149, loss: 11.524612426757812\nStep 150, loss: 11.507003784179688\nStep 151, loss: 11.515893936157227\nStep 152, loss: 11.48664379119873\nStep 153, loss: 11.502408027648926\nStep 154, loss: 11.445438385009766\nStep 155, loss: 11.46118450164795\nStep 156, loss: 11.405437469482422\nStep 157, loss: 11.443511962890625\nStep 158, loss: 11.451423645019531\nStep 159, loss: 11.461939811706543\nStep 160, loss: 11.349418640136719\nStep 161, loss: 11.422706604003906\nStep 162, loss: 11.401671409606934\nStep 163, loss: 11.287398338317871\nStep 164, loss: 11.302481651306152\nStep 165, loss: 11.289762496948242\nStep 166, loss: 11.334724426269531\nStep 167, loss: 11.282970428466797\nStep 168, loss: 11.257315635681152\nStep 169, loss: 11.28621768951416\nStep 170, loss: 11.20481014251709\nStep 171, loss: 11.281057357788086\nStep 172, loss: 11.262025833129883\nStep 173, loss: 11.243955612182617\nStep 174, loss: 11.169376373291016\nStep 175, loss: 11.254855155944824\nStep 176, loss: 11.154994010925293\nStep 177, loss: 11.21339225769043\nStep 178, loss: 11.251949310302734\nStep 179, loss: 11.18138599395752\nStep 180, loss: 11.146556854248047\nStep 181, loss: 11.13279914855957\nStep 182, loss: 11.167080879211426\nStep 183, loss: 11.185643196105957\nStep 184, loss: 11.071303367614746\nStep 185, loss: 11.087559700012207\nStep 186, loss: 11.119420051574707\nStep 187, loss: 11.075028419494629\nStep 188, loss: 11.067861557006836\nStep 
189, loss: 10.989562034606934\nStep 190, loss: 11.052154541015625\nStep 191, loss: 11.065995216369629\nStep 192, loss: 11.021690368652344\nStep 193, loss: 10.993510246276855\nStep 194, loss: 11.0082368850708\nStep 195, loss: 10.894791603088379\nStep 196, loss: 11.0162935256958\nStep 197, loss: 10.924046516418457\nStep 198, loss: 10.87924575805664\nStep 199, loss: 10.848145484924316\nStep 200, loss: 10.899628639221191\nStep 201, loss: 10.91595458984375\nStep 202, loss: 10.843033790588379\nStep 203, loss: 10.911846160888672\nStep 204, loss: 10.81973648071289\nStep 205, loss: 10.85107135772705\nStep 206, loss: 10.819169044494629\nStep 207, loss: 10.854994773864746\nStep 208, loss: 10.806999206542969\nStep 209, loss: 10.800849914550781\nStep 210, loss: 10.799738883972168\nStep 211, loss: 10.830042839050293\nStep 212, loss: 10.736322402954102\nStep 213, loss: 10.752872467041016\nStep 214, loss: 10.725028991699219\nStep 215, loss: 10.731749534606934\nStep 216, loss: 10.689827919006348\nStep 217, loss: 10.62132453918457\nStep 218, loss: 10.703144073486328\nStep 219, loss: 10.668985366821289\nStep 220, loss: 10.677373886108398\nStep 221, loss: 10.62880802154541\nStep 222, loss: 10.65042781829834\nStep 223, loss: 10.613385200500488\nStep 224, loss: 10.603299140930176\nStep 225, loss: 10.608231544494629\nStep 226, loss: 10.598186492919922\nStep 227, loss: 10.607372283935547\nStep 228, loss: 10.588812828063965\nStep 229, loss: 10.578028678894043\nStep 230, loss: 10.530580520629883\nStep 231, loss: 10.605957984924316\nStep 232, loss: 10.505242347717285\nStep 233, loss: 10.477911949157715\nStep 234, loss: 10.557123184204102\nStep 235, loss: 10.461018562316895\nStep 236, loss: 10.512309074401855\nStep 237, loss: 10.466352462768555\n[the same Step 0-237 loss trace is printed three more times, once by each remaining srun task; the identical duplicates are elided]\n",log,tab +1578,2569941,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415079.log",865,0,"",log,selection_mouse +1579,2570506,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/causal/train_dynamics_causal_1_node_3415079.log",45779,0,"",log,selection_command +1580,2617706,"TERMINAL",0,0,"cd ..",,terminal_command +1581,2619181,"TERMINAL",0,0,"ls",,terminal_command +1582,2621972,"TERMINAL",0,0,"cd maskgit/",,terminal_command +1583,2623060,"TERMINAL",0,0,"ls",,terminal_command +1584,2623085,"TERMINAL",0,0,"]633;E;2025-08-11 16:34:18 ls;361f455a-15ad-4916-88b6-b9e49434d7af]633;Ctrain_dynamics_maskgit_1_node_3415111.log\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/maskgit]633;D;0",,terminal_output +1585,2625390,"TERMINAL",0,0,"tail -f train_dynamics_maskgit_1_node_3415111.log",,terminal_command +1586,2625442,"TERMINAL",0,0,"]633;E;2025-08-11 16:34:21 tail -f train_dynamics_maskgit_1_node_3415111.log ;361f455a-15ad-4916-88b6-b9e49434d7af]633;CSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0610\r\nGpuFreq=control_disabled\r\nwandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n
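[annotation] The causal-dynamics log above, and the maskgit log tailed next, interleave four identical output streams, one per srun task, which is why every warning and loss line repeats. A small sketch (a hypothetical helper, assuming the `Step N, loss: X` format seen in these logs) that collapses such a log to one reading per step when post-processing:

```python
import re

def parse_losses(log_text: str) -> dict[int, float]:
    """Collapse per-rank duplicate `Step N, loss: X` lines to one value per step."""
    losses: dict[int, float] = {}
    for m in re.finditer(r"Step (\d+), loss: ([0-9.]+)", log_text):
        # All four srun tasks print identical values, so keep the first.
        losses.setdefault(int(m.group(1)), float(m.group(2)))
    return losses
```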
wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/wandb/run-20250811_163354-3415111\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run coinrun-dynamics-maskgit-1-node-3415111\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/3415111\r\n",,terminal_output +1587,2667462,"TERMINAL",0,0,"WARNING:absl:Dropping 2 examples of 89394 examples (shard 4).\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 58000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046/058000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 59000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046/059000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3414046/060000/metrics/metrics not found.\r\n[each warning/error pair is emitted once per srun task; the identical repeats are elided]\r\n",,terminal_output +1588,2668500,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n[two identical UserWarning blocks elided]\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +1589,2669524,"TERMINAL",0,0,"Running on 4 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 26555904, 'lam': 35115232, 'tokenizer': 33750256, 'total': 95421392}\r\nStarting training from step 0...\r\n",,terminal_output +1590,2670485,"TERMINAL",0,0,"Running on 4 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 26555904, 'lam': 35115232, 'tokenizer': 33750256, 'total': 95421392}\r\nStarting training from step 0...\r\n[the same startup block from the two remaining srun tasks elided]\r\n",,terminal_output +1591,2679046,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch",0,0,"",shellscript,tab +1592,2680587,"TERMINAL",0,0,"bash",,terminal_focus +1593,2681949,"TERMINAL",0,0,"ls -l ../trajectories/pinball/ | wc -l",,terminal_command +1594,2681986,"TERMINAL",0,0,"]633;E;2025-08-11 16:35:17 ls -l ../trajectories/pinball/ | wc -l;8cf2329e-a785-4b62-8fbf-a36eaf9297be]633;C",,terminal_output +1595,2682016,"TERMINAL",0,0,"208\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens]633;D;0",,terminal_output +1596,2691834,"TERMINAL",0,0,"ls -l ../trajectories/spaceinvaders/ | wc -l",,terminal_command +1597,2691883,"TERMINAL",0,0,"]633;E;2025-08-11 16:35:27 ls -l ../trajectories/spaceinvaders/ | wc -l;8cf2329e-a785-4b62-8fbf-a36eaf9297be]633;C",,terminal_output +1598,2692465,"TERMINAL",0,0,"424\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens]633;D;0",,terminal_output +1599,2695662,"TERMINAL",0,0,"ls -l spaceinvaders/ | wc -l",,terminal_command +1600,2695712,"TERMINAL",0,0,"]633;E;2025-08-11 16:35:31 ls -l spaceinvaders/ | wc -l;8cf2329e-a785-4b62-8fbf-a36eaf9297be]633;C",,terminal_output +1601,2695753,"TERMINAL",0,0,"11\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens]633;D;0",,terminal_output +1602,2706499,"TERMINAL",0,0,"2025-08-11 16:35:41.327597: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n[one identical autotuning warning elided]\r\n2025-08-11 16:35:41.702013: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-08-11 16:35:41.702054: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1603,2711509,"TERMINAL",0,0,"2025-08-11 16:35:46.378091: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1604,2718574,"TERMINAL",0,0,"2025-08-11 16:35:53.388180: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1605,2769519,"TERMINAL",0,0,"tail",,terminal_focus +1606,2775863,"TERMINAL",0,0,"^C\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/maskgit]633;D;130",,terminal_output +1607,2858513,"TERMINAL",0,0,"bash",,terminal_focus +1608,2860981,"TERMINAL",0,0,"python",,terminal_command +1609,2861027,"TERMINAL",0,0,"]633;E;2025-08-11 16:38:16 python;8cf2329e-a785-4b62-8fbf-a36eaf9297be]633;C",,terminal_output +1610,2861299,"TERMINAL",0,0,"Python 3.10.18 (main, Jun 4 2025, 17:36:27) [Clang 20.1.4 ] on linux\r\nType ""help"", ""copyright"", ""credits"" or ""license"" for more information.\r\n",,terminal_output +1611,2861804,"TERMINAL",0,0,">>> ",,terminal_output +1612,2861856,"TERMINAL",0,0,"1",,terminal_output +1613,2862348,"TERMINAL",0,0,"[?25l6[?25h",,terminal_output +1614,2862407,"TERMINAL",0,0,"[?25l0[?25h",,terminal_output +1615,2863156,"TERMINAL",0,0,"[?25l*[?25h",,terminal_output +1616,2864513,"TERMINAL",0,0,"[?25l9[?25h",,terminal_output +1617,2864616,"TERMINAL",0,0,"[?25l0[?25h",,terminal_output +1618,2864677,"TERMINAL",0,0,"\r\n14400\r\n>>> ",,terminal_output +1619,2867269,"TERMINAL",0,0,"6",,terminal_output +1620,2868339,"TERMINAL",0,0,"[?25l4[?25h",,terminal_output +1621,2868555,"TERMINAL",0,0,"[?25l*[?25h",,terminal_output +1622,2869139,"TERMINAL",0,0,"[?25l4[?25h",,terminal_output +1623,2869576,"TERMINAL",0,0,"[?25l4\r[?25h",,terminal_output +1624,2870005,"TERMINAL",0,0,"[?25l6[?25h",,terminal_output +1625,2870110,"TERMINAL",0,0,"[?25l4[?25h",,terminal_output +1626,2870475,"TERMINAL",0,0,"\r\n4096\r\n>>> ",,terminal_output +1627,2876930,"TERMINAL",0,0,"14400",,terminal_output +1628,2877775,"TERMINAL",0,0,"[?25l/[?25h",,terminal_output +1629,2879550,"TERMINAL",0,0,"[?25l4096[?25h",,terminal_output +1630,2879739,"TERMINAL",0,0,"\r\n3.515625\r\n>>> ",,terminal_output +1631,2894361,"TERMINAL",0,0,"2",,terminal_output +1632,2894496,"TERMINAL",0,0,"[?25l5[?25h",,terminal_output +1633,2894723,"TERMINAL",0,0,"[?25l6[?25h",,terminal_output +1634,2895733,"TERMINAL",0,0,"[?25l/[?25h",,terminal_output +1635,2895973,"TERMINAL",0,0,"[?25l2[?25h",,terminal_output +1636,2896036,"TERMINAL",0,0,"\r\n128.0\r\n>>> ",,terminal_output +1637,3003666,"TERMINAL",0,0,"bash",,terminal_focus +1638,3007766,"TERMINAL",0,0,"dev",,terminal_command 
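[annotation] The interpreter session recorded above works through some quick sizing arithmetic (160*90, 64*64, their ratio, 256/2). Its likely intent, an assumption on my part, is comparing a 160x90 cell grid against a 64x64 token grid. Spelled out, with hypothetical variable names:

```python
# The same arithmetic as the REPL session above.
frame_cells = 160 * 90             # 14400
token_cells = 64 * 64              # 4096
ratio = frame_cells / token_cells  # 3.515625
half = 256 / 2                     # 128.0
print(frame_cells, token_cells, ratio, half)
```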
+1639,3008481,"TERMINAL",0,0,"ls",,terminal_command +1640,3008531,"TERMINAL",0,0,"]633;E;2025-08-11 16:40:44 ls;361f455a-15ad-4916-88b6-b9e49434d7af]633;C",,terminal_output +1641,3008626,"TERMINAL",0,0,"data frame.png genie.py LICENSE overfit_dir README.md requirements.txt scripts_horeka train_dynamics.py utils\r\ndebug frames gifs logs overfit_dir.zip read_tf_record.py sample.py slurm train_lam.py wandb\r\nframe-knoms.png generate_dataset.py input_pipeline models __pycache__ requirements-franz.txt scripts_cremers tests train_tokenizer.py\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +1642,3014503,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1643,3014995,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1403,0,"",shellscript,selection_mouse +1644,3014997,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1402,0,"",shellscript,selection_command +1645,3017515,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1378,0,"",shellscript,selection_command +1646,3020470,"utils/nn.py",0,0,"",python,tab +1647,3154487,"utils/nn.py",12867,0,"",python,selection_mouse +1648,3154488,"utils/nn.py",12866,0,"",python,selection_command +1649,3155141,"utils/nn.py",12899,0,"",python,selection_mouse +1650,3155143,"utils/nn.py",12898,0,"",python,selection_command +1651,3185174,"utils/nn.py",4073,0,"",python,selection_mouse +1652,3185992,"utils/nn.py",4090,0,"",python,selection_mouse +1653,3186750,"utils/nn.py",4041,0,"",python,selection_mouse +1654,3187190,"utils/nn.py",4127,0,"",python,selection_mouse +1655,3187708,"utils/nn.py",4131,0,"",python,selection_mouse +1656,3188669,"utils/nn.py",4211,0,"",python,selection_mouse +1657,3192746,"utils/nn.py",4211,37,"ttention ---\n x_BNTM = x_BTNM.",python,selection_mouse +1658,3192840,"utils/nn.py",4211,38,"ttention ---\n x_BNTM = x_BTNM.s",python,selection_mouse +1659,3193028,"utils/nn.py",4249,0,"",python,selection_mouse +1660,3193410,"utils/nn.py",4288,0,"",python,selection_mouse +1661,3193857,"utils/nn.py",4332,0,"",python,selection_mouse +1662,3194057,"utils/nn.py",4329,18,"temporal_attention",python,selection_mouse +1663,3208030,"utils/nn.py",10747,0,"",python,selection_mouse +1664,3208042,"utils/nn.py",10746,0,"",python,selection_command +1665,3208569,"utils/nn.py",10510,0,"",python,selection_mouse +1666,3208713,"utils/nn.py",10507,6,"einops",python,selection_mouse +1667,3237954,"utils/nn.py",10453,0,"",python,selection_mouse +1668,3237965,"utils/nn.py",10452,0,"",python,selection_command +1669,3238542,"utils/nn.py",10397,0,"",python,selection_mouse +1670,3239909,"utils/nn.py",10393,0,"",python,selection_mouse +1671,3240460,"utils/nn.py",10398,0,"",python,selection_mouse +1672,3244047,"utils/nn.py",10252,0,"",python,selection_mouse +1673,3266015,"utils/nn.py",10241,0,"",python,selection_mouse +1674,3266296,"utils/nn.py",10241,1,"b",python,selection_mouse +1675,3266360,"utils/nn.py",10241,2,"b ",python,selection_mouse +1676,3266673,"utils/nn.py",10241,3,"b t",python,selection_mouse +1677,3337127,"utils/nn.py",10252,0,"",python,selection_mouse +1678,3337969,"utils/nn.py",10256,0,"",python,selection_mouse +1679,3339177,"utils/nn.py",10397,0,"",python,selection_mouse +1680,3339710,"utils/nn.py",10392,0,"",python,selection_mouse +1681,3340265,"utils/nn.py",10393,0,"",python,selection_mouse +1682,3343730,"TERMINAL",0,0,"bash",,terminal_focus +1683,3368709,"utils/nn.py",0,0,"",python,tab 
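[annotation] The browsing above lands on the factored attention in utils/nn.py, where a line beginning `x_BNTM = x_BTNM.s` precedes a `temporal_attention` call. The method name is only partially visible, so the following is an assumption about what such an axis swap typically looks like, with the axis names taken from the visible selection:

```python
import jax.numpy as jnp

def to_temporal_layout(x_BTNM: jnp.ndarray) -> jnp.ndarray:
    """(B, T, N, M) -> (B, N, T, M).

    Spatial attention mixes the N (patch) axis within each timestep; temporal
    attention then mixes the T axis within each patch, so the two middle axes
    are swapped between the two blocks.
    """
    return x_BTNM.swapaxes(1, 2)
```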
+1684,3385588,"utils/nn.py",10393,1,"",python,content +1685,3386583,"utils/nn.py",10394,0,"",python,selection_command +1686,3386758,"utils/nn.py",10395,0,"",python,selection_command +1687,3386882,"utils/nn.py",10396,0,"",python,selection_command +1688,3387103,"utils/nn.py",10396,1,"",python,content +1689,3387220,"utils/nn.py",10397,0,"",python,selection_command +1690,3387371,"utils/nn.py",10398,0,"",python,selection_command +1691,3387513,"utils/nn.py",10399,0,"",python,selection_command +1692,3387667,"utils/nn.py",10400,0,"",python,selection_command +1693,3387800,"utils/nn.py",10401,0,"",python,selection_command +1694,3388091,"utils/nn.py",10349,0,"",python,selection_command +1695,3388334,"utils/nn.py",10303,0,"",python,selection_command +1696,3388877,"utils/nn.py",10239,0,"",python,selection_command +1697,3389055,"utils/nn.py",10240,0,"",python,selection_command +1698,3389544,"utils/nn.py",10241,0,"",python,selection_command +1699,3389557,"utils/nn.py",10242,0,"",python,selection_command +1700,3389620,"utils/nn.py",10243,0,"",python,selection_command +1701,3389627,"utils/nn.py",10244,0,"",python,selection_command +1702,3389650,"utils/nn.py",10245,0,"",python,selection_command +1703,3389680,"utils/nn.py",10246,0,"",python,selection_command +1704,3389711,"utils/nn.py",10247,0,"",python,selection_command +1705,3389742,"utils/nn.py",10248,0,"",python,selection_command +1706,3389766,"utils/nn.py",10249,0,"",python,selection_command +1707,3389824,"utils/nn.py",10250,0,"",python,selection_command +1708,3389922,"utils/nn.py",10251,0,"",python,selection_command +1709,3390090,"utils/nn.py",10252,0,"",python,selection_command +1710,3390243,"utils/nn.py",10252,1,"",python,content +1711,3390374,"utils/nn.py",10253,0,"",python,selection_command +1712,3390569,"utils/nn.py",10254,0,"",python,selection_command +1713,3390715,"utils/nn.py",10255,0,"",python,selection_command +1714,3390884,"utils/nn.py",10255,1,"",python,content +1715,3391067,"utils/nn.py",10301,0,"",python,selection_command +1716,3391191,"utils/nn.py",10347,0,"",python,selection_command +1717,3391337,"utils/nn.py",10405,0,"",python,selection_command +1718,3391459,"utils/nn.py",10448,0,"",python,selection_command +1719,3391612,"utils/nn.py",10485,0,"",python,selection_command +1720,3391777,"utils/nn.py",10543,0,"",python,selection_command +1721,3392141,"utils/nn.py",10542,0,"",python,selection_command +1722,3392303,"utils/nn.py",10541,0,"",python,selection_command +1723,3392470,"utils/nn.py",10540,0,"",python,selection_command +1724,3392587,"utils/nn.py",10540,1,"",python,content +1725,3392982,"utils/nn.py",10541,0,"",python,selection_command +1726,3393145,"utils/nn.py",10542,0,"",python,selection_command +1727,3393277,"utils/nn.py",10543,0,"",python,selection_command +1728,3393426,"utils/nn.py",10543,1,"",python,content +1729,3393563,"utils/nn.py",10590,0,"",python,selection_command +1730,3393764,"utils/nn.py",10637,0,"",python,selection_command +1731,3393926,"utils/nn.py",10695,0,"",python,selection_command +1732,3394150,"utils/nn.py",10694,0,"",python,selection_command +1733,3394288,"utils/nn.py",10693,0,"",python,selection_command +1734,3394790,"utils/nn.py",10692,0,"",python,selection_command +1735,3394844,"utils/nn.py",10691,0,"",python,selection_command +1736,3394846,"utils/nn.py",10690,0,"",python,selection_command +1737,3394908,"utils/nn.py",10689,0,"",python,selection_command +1738,3394909,"utils/nn.py",10688,0,"",python,selection_command +1739,3394935,"utils/nn.py",10687,0,"",python,selection_command 
+1740,3394978,"utils/nn.py",10686,0,"",python,selection_command +1741,3395006,"utils/nn.py",10685,0,"",python,selection_command +1742,3395030,"utils/nn.py",10684,0,"",python,selection_command +1743,3395057,"utils/nn.py",10683,0,"",python,selection_command +1744,3395089,"utils/nn.py",10682,0,"",python,selection_command +1745,3395120,"utils/nn.py",10681,0,"",python,selection_command +1746,3395259,"utils/nn.py",10680,0,"",python,selection_command +1747,3395494,"utils/nn.py",10681,0,"",python,selection_command +1748,3395644,"utils/nn.py",10681,1,"",python,content +1749,3395805,"utils/nn.py",10681,1,"",python,content +1750,3396853,"utils/nn.py",10681,0,"b",python,content +1751,3396856,"utils/nn.py",10681,0,"",python,selection_command +1752,3397276,"utils/nn.py",10682,0,"",python,selection_command +1753,3397427,"utils/nn.py",10683,0,"",python,selection_command +1754,3397583,"utils/nn.py",10684,0,"",python,selection_command +1755,3397893,"utils/nn.py",10684,1,"",python,content +1756,3426009,"TERMINAL",0,0,"bash",,terminal_focus +1757,3427628,"TERMINAL",0,0,"dev",,terminal_command +1758,3428798,"TERMINAL",0,0,"ls",,terminal_command +1759,3428844,"TERMINAL",0,0,"]633;E;2025-08-11 16:47:44 ls;361f455a-15ad-4916-88b6-b9e49434d7af]633;C",,terminal_output +1760,3428926,"TERMINAL",0,0,"data generate_dataset.py logs README.md scripts_cremers train_lam.py\r\ndebug genie.py models read_tf_record.py scripts_horeka train_tokenizer.py\r\nframe-knoms.png gifs overfit_dir requirements-franz.txt slurm utils\r\nframe.png input_pipeline overfit_dir.zip requirements.txt tests wandb\r\nframes LICENSE __pycache__ sample.py train_dynamics.py\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +1761,3438763,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch",0,0,"",shellscript,tab +1762,3441333,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +1763,3449567,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",,terminal_command +1764,3449586,"TERMINAL",0,0,"]633;E;2025-08-11 16:48:05 sbatch slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch;361f455a-15ad-4916-88b6-b9e49434d7af]633;CSubmitted batch job 3415128\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +1765,3492648,"TERMINAL",0,0,"queue",,terminal_command +1766,3492699,"TERMINAL",0,0,"]633;E;2025-08-11 16:48:48 queue;361f455a-15ad-4916-88b6-b9e49434d7af]633;C",,terminal_output +1767,3492805,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1993.localdomain: Mon Aug 11 16:48:48 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3415128 accelerat train_dy tum_cte0 R\t0:42\t 8 hkn[0404,0521-0527]3415111 accelerat train_dy tum_cte0 R15:42\t 1 hkn06103415079 accelerat train_dy tum_cte0 R25:15\t 1 hkn07263414046 accelerat train_to tum_cte0 R 4:57:18\t 1 hkn07283412401 accelerat train_to tum_cte0 R 1-01:06:58\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]3414284 cpuonly wrap tum_cte0 R 3:08:56\t 1 hkn03103414283 cpuonly wrap tum_cte0 R 3:09:08\t 1 hkn03093414282 cpuonly wrap tum_cte0 R 3:09:18\t 1 hkn16653414281 cpuonly wrap tum_cte0 R 3:09:26\t 1 hkn16623414280 cpuonly wrap tum_cte0 R 3:09:35\t 1 hkn0238",,terminal_output +1768,3493816,"TERMINAL",0,0,"93369979976",,terminal_output +1769,3494554,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +1770,3496258,"TERMINAL",0,0,"logs",,terminal_command +1771,3497883,"TERMINAL",0,0,"ls",,terminal_command 
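The burst of single-character edits above (records 1684-1755) rewrites an axis spec in utils/nn.py near the earlier `einops` and `b t` selections, after which the 8-node dynamics job is resubmitted via sbatch. A hypothetical illustration of the kind of einops pattern being adjusted; the actual pattern at those offsets is not visible in the recording:

import einops
import numpy as np

# Hypothetical einops pattern edit: merge and re-split batch and time axes.
x = np.zeros((2, 4, 16, 8))                         # (batch, time, tokens, dim)
flat = einops.rearrange(x, "b t n d -> (b t) n d")  # merge batch and time
back = einops.rearrange(flat, "(b t) n d -> b t n d", b=2)
assert back.shape == x.shape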
+1772,3497937,"TERMINAL",0,0,"]633;E;2025-08-11 16:48:53 ls;361f455a-15ad-4916-88b6-b9e49434d7af]633;C",,terminal_output +1773,3498134,"TERMINAL",0,0,"big_run train_lam_action_space_scaling_50_3331286.log\r\nbig-runs train_lam_action_space_scaling_6_3318549.log\r\ncausal train_lam_action_space_scaling_6_3320178.log\r\ncoinrun train_lam_action_space_scaling_6_3321528.log\r\nmaskgit train_lam_action_space_scaling_6_3329790.log\r\nmaskgit-maskprob-fix train_lam_action_space_scaling_6_3329805.log\r\ntrain_dyn_causal_180M_3372931.log train_lam_action_space_scaling_6_3331287.log\r\ntrain_dyn_causal_180M_3372963.log train_lam_action_space_scaling_8_3318550.log\r\ntrain_dyn_causal_180M_3372969.log train_lam_action_space_scaling_8_3329791.log\r\ntrain_dyn_causal_180M_3373107.log train_lam_action_space_scaling_8_3329806.log\r\ntrain_dyn_causal_255M_3372932.log train_lam_action_space_scaling_8_3331288.log\r\ntrain_dyn_causal_255M_3372970.log train_lam_minecraft_overfit_sample_3309655.log\r\ntrain_dyn_causal_255M_3373108.log train_lam_model_size_scaling_38M_3317098.log\r\ntrain_dyn_causal_356M_3372934.log train_lam_model_size_scaling_38M_3317115.log\r\ntrain_dyn_causal_356M_3372971.log train_lam_model_size_scaling_38M_3317231.log\r\ntrain_dyn_causal_356M_3373109.log train_tokenizer_batch_size_scaling_16_node_3321526.log\r\ntrain_dyn_causal_500M_3372936.log train_tokenizer_batch_size_scaling_1_node_3318551.log\r\ntrain_dyn_causal_500M_3372972.log train_tokenizer_batch_size_scaling_2_node_3318552.log\r\ntrain_dyn_causal_500M_3373110.log train_tokenizer_batch_size_scaling_2_node_3330806.log\r\ntrain_dyn_new_arch-bugfixed-spatial-shift_3359343.log train_tokenizer_batch_size_scaling_2_node_3330848.log\r\ntrain_dyn_new_arch-bugfixed-temporal-shift_3359349.log train_tokenizer_batch_size_scaling_2_node_3331282.log\r\ntrain_dyn_yolorun_3333026.log train_tokenizer_batch_size_scaling_4_node_3318553.log\r\ntrain_dyn_yolorun_3333448.log train_tokenizer_batch_size_scaling_4_node_3320175.log\r\ntrain_dyn_yolorun_3335345.log train_tokenizer_batch_size_scaling_4_node_3321524.log\r\ntrain_dyn_yolorun_3335362.log train_tokenizer_batch_size_scaling_8_node_3320176.log\r\ntrain_dyn_yolorun_3348592.log train_tokenizer_batch_size_scaling_8_node_3321525.log\r\ntrain_dyn_yolorun_new_arch_3351743.log train_tokenizer_minecraft_overfit_sample_3309656.log\r\ntrain_dyn_yolorun_new_arch_3352103.log train_tokenizer_model_size_scaling_127M_3317233.log\r\ntrain_dyn_yolorun_new_arch_3352115.log train_tokenizer_model_size_scaling_127M_3318554.log\r\ntrain_dyn_yolorun_new_arch_3358457.log train_tokenizer_model_size_scaling_140M_3313562.log\r\ntrain_lam_action_space_scaling_10_3320179.log train_tokenizer_model_size_scaling_140M_3316019.log\r\ntrain_lam_action_space_scaling_10_3321529.log train_tokenizer_model_size_scaling_200M_3313563.log\r\ntrain_lam_action_space_scaling_10_3329786.log train_tokenizer_model_size_scaling_200M_3316020.log\r\ntrain_lam_action_space_scaling_10_3329801.log train_tokenizer_model_size_scaling_227M_3317234.log\r\ntrain_lam_action_space_scaling_10_3331283.log train_tokenizer_model_size_scaling_227M_3318555.log\r\ntrain_lam_action_space_scaling_12_3318546.log train_tokenizer_model_size_scaling_227M_3320173.log\r\ntrain_lam_action_space_scaling_12_3320177.log train_tokenizer_model_size_scaling_227M_3321523.log\r\ntrain_lam_action_space_scaling_12_3321527.log train_tokenizer_model_size_scaling_37M_3313565.log\r\ntrain_lam_action_space_scaling_12_3329787.log 
train_tokenizer_model_size_scaling_37M_3316022.log\r\ntrain_lam_action_space_scaling_12_3329802.log train_tokenizer_model_size_scaling_37M_3317232.log\r\ntrain_lam_action_space_scaling_12_3331284.log train_tokenizer_model_size_scaling_37M_3317239.log\r\ntrain_lam_action_space_scaling_20_3318547.log train_tokenizer_model_size_scaling_37M_3318556.log\r\ntrain_lam_action_space_scaling_20_3329788.log train_tokenizer_model_size_scaling_74M_3318557.log\r\ntrain_lam_action_space_scaling_20_3329803.log train_tokenizer_model_size_scaling_74M_3320174.log\r\ntrain_lam_action_space_scaling_20_3331285.log train_tokenizer_model_size_scaling_74M_3321522.log\r\ntrain_lam_action_space_scaling_50_3320180.log train_tokenizer_model_size_scaling_80M_3313564.log\r\ntrain_lam_action_space_scaling_50_3329789.log train_tokenizer_model_size_scaling_80M_3316026.log\r\ntrain_lam_action_space_scaling_50_3329804.log yoloruns\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir]633;D;0",,terminal_output +1774,3499583,"TERMINAL",0,0,"cd causal/",,terminal_command +1775,3499985,"TERMINAL",0,0,"ls",,terminal_command +1776,3499995,"TERMINAL",0,0,"]633;E;2025-08-11 16:48:55 ls;361f455a-15ad-4916-88b6-b9e49434d7af]633;Cdynamics-cotraining\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal]633;D;0",,terminal_output +1777,3501594,"TERMINAL",0,0,"cd dynamics-cotraining/",,terminal_command +1778,3501723,"TERMINAL",0,0,"ls",,terminal_command +1779,3501737,"TERMINAL",0,0,"]633;E;2025-08-11 16:48:57 ls;361f455a-15ad-4916-88b6-b9e49434d7af]633;Ctrain_dynamics_causal_2_node_3373407.log train_dynamics_causal_8_node_3388140.log train_dynamics_causal_8_node_3412349.log\r\ntrain_dynamics_causal_2_node_3373407.log_bak train_dynamics_causal_8_node_3389928.log train_dynamics_causal_8_node_3412356.log\r\ntrain_dynamics_causal_2_node_3388135.log train_dynamics_causal_8_node_3390458.log train_dynamics_causal_8_node_3412397.log\r\ntrain_dynamics_causal_2_node_3388147.log train_dynamics_causal_8_node_3393060.log train_dynamics_causal_8_node_3412399.log\r\ntrain_dynamics_causal_2_node_3389801.log train_dynamics_causal_8_node_3393061.log train_dynamics_causal_8_node_3414710.log\r\ntrain_dynamics_causal_2_node_3393065.log train_dynamics_causal_8_node_3393066.log train_dynamics_causal_8_node_3415128.log\r\ntrain_dynamics_causal_8_node_3373408.log train_dynamics_causal_8_node_3412343.log\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining]633;D;0",,terminal_output +1780,3511584,"TERMINAL",0,0,"tail -f train_dynamics_causal_8_node_3415128.log",,terminal_command +1781,3511642,"TERMINAL",0,0,"]633;E;2025-08-11 16:49:07 tail -f train_dynamics_causal_8_node_3415128.log;361f455a-15ad-4916-88b6-b9e49434d7af]633;CSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0404,0521-0527]\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +1782,3515600,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. 
Use `wandb login --relogin` to force relogin\r\n",,terminal_output +1783,3515636,"TERMINAL",0,0,"bash",,terminal_focus +1784,3516630,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250811_164910-3415128\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-causal-8-node-3415128\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/3415128\r\n",,terminal_output +1785,3516992,"TERMINAL",0,0,"queue",,terminal_command +1786,3517042,"TERMINAL",0,0,"]633;E;2025-08-11 16:49:12 queue;05e3850a-ed85-4ec1-9b0b-474e72c4e7fe]633;C",,terminal_output +1787,3517104,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1993.localdomain: Mon Aug 11 16:49:12 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3415128 accelerat train_dy tum_cte0 R\t1:06\t 8 hkn[0404,0521-0527]3415111 accelerat train_dy tum_cte0 R16:06\t 1 hkn06103415079 accelerat train_dy tum_cte0 R25:39\t 1 hkn07263414046 accelerat train_to tum_cte0 R 4:57:42\t 1 hkn07283412401 accelerat train_to tum_cte0 R 1-01:07:22\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]3414284 cpuonly wrap tum_cte0 R 3:09:20\t 1 hkn03103414283 cpuonly wrap tum_cte0 R 3:09:32\t 1 hkn03093414282 cpuonly wrap tum_cte0 R 3:09:42\t 1 hkn16653414281 cpuonly wrap tum_cte0 R 3:09:50\t 1 hkn16623414280 cpuonly wrap tum_cte0 R 3:09:59\t 1 hkn0238",,terminal_output
+1872,3605704,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 75000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/075000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\n",,terminal_output +1874,3606722,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +1875,3607212,"TERMINAL",0,0,"2669225022209",,terminal_output +1876,3608314,"TERMINAL",0,0,"3881144244231",,terminal_output +1877,3609361,"TERMINAL",0,0,"59925535532",,terminal_output +1878,3609776,"TERMINAL",0,0,"Running on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 
340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\n",,terminal_output +1879,3610372,"TERMINAL",0,0,"6404036646643",,terminal_output +1880,3610695,"TERMINAL",0,0,"Running on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 
'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\n",,terminal_output +1881,3611382,"TERMINAL",0,0,"71147757754",,terminal_output +1882,3612533,"TERMINAL",0,0,"82258868865",,terminal_output +1883,3613563,"TERMINAL",0,0,"93369979976",,terminal_output +1884,3614551,"TERMINAL",0,0,"50447209:008102087",,terminal_output +1885,3615599,"TERMINAL",0,0,"15581191198",,terminal_output +1886,3616652,"TERMINAL",0,0,"2669221:0022309",,terminal_output +1887,3617703,"TERMINAL",0,0,"3772033133140",,terminal_output +1888,3618698,"TERMINAL",0,0,"tail",,terminal_focus +1889,3618759,"TERMINAL",0,0,"48814424421",,terminal_output +1890,3619804,"TERMINAL",0,0,"59925535532",,terminal_output +1891,3620935,"TERMINAL",0,0,"6505036646643",,terminal_output +1892,3621957,"TERMINAL",0,0,"71147757754",,terminal_output +1893,3622991,"TERMINAL",0,0,"82258868865",,terminal_output +1894,3624008,"TERMINAL",0,0,"93369979976",,terminal_output +1895,3625132,"TERMINAL",0,0,"1:0044730108203087",,terminal_output +1896,3626154,"TERMINAL",0,0,"15581191198",,terminal_output +1897,3627179,"TERMINAL",0,0,"2669221022409",,terminal_output +1898,3628208,"TERMINAL",0,0,"3773033133150",,terminal_output +1899,3629330,"TERMINAL",0,0,"49925535532",,terminal_output +1900,3630382,"TERMINAL",0,0,"63:008:0036646643",,terminal_output +1901,3631322,"TERMINAL",0,0,"71147757754",,terminal_output +1902,3632404,"TERMINAL",0,0,"82258868865",,terminal_output +1903,3633424,"TERMINAL",0,0,"93369979976",,terminal_output +1904,3634558,"TERMINAL",0,0,"1044740208304087",,terminal_output +1905,3635554,"TERMINAL",0,0,"15581191198",,terminal_output +1906,3636601,"TERMINAL",0,0,"2669222022509",,terminal_output +1907,3637726,"TERMINAL",0,0,"377403313312:00",,terminal_output +1908,3638753,"TERMINAL",0,0,"48814424421",,terminal_output +1909,3639782,"TERMINAL",0,0,"59925535532",,terminal_output +1910,3640902,"TERMINAL",0,0,"6101036646643",,terminal_output +1911,3641809,"TERMINAL",0,0,"71147757754",,terminal_output +1912,3642949,"TERMINAL",0,0,"82258868865",,terminal_output +1913,3643895,"TERMINAL",0,0,"93369979976",,terminal_output +1914,3644944,"TERMINAL",0,0,"2044750308405087",,terminal_output +1915,3645994,"TERMINAL",0,0,"15581191198",,terminal_output +1916,3647038,"TERMINAL",0,0,"26692230222:009",,terminal_output +1917,3648171,"TERMINAL",0,0,"3775033133110",,terminal_output +1918,3649125,"TERMINAL",0,0,"48814424421",,terminal_output +1919,3650219,"TERMINAL",0,0,"59925535532",,terminal_output +1920,3651223,"TERMINAL",0,0,"6212147757754",,terminal_output +1921,3652370,"TERMINAL",0,0,"82258868865",,terminal_output +1922,3652713,"TERMINAL",0,0,"2025-08-11 16:51:27.892017: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1926,3655751,"TERMINAL",0,0,"2025-08-11 16:51:30.819812: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output
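The "Parameter counts" lines recorded above come from summing array sizes over each component's parameter pytree ('dynamics', 'lam', 'tokenizer'). A minimal sketch of how such a report can be produced with jax.tree_util; the helper names and the dummy shapes are hypothetical, not the project's actual code:

import jax
import jax.numpy as jnp

def count_params(pytree) -> int:
    # Sum the element counts over every array leaf in the pytree.
    return sum(leaf.size for leaf in jax.tree_util.tree_leaves(pytree)
               if hasattr(leaf, "size"))

def report(params: dict) -> dict:
    # One count per named component, plus a grand total.
    counts = {name: count_params(tree) for name, tree in params.items()}
    counts["total"] = sum(counts.values())
    return counts

# Example with dummy shapes:
params = {"tokenizer": {"w": jnp.zeros((4, 4))}, "lam": {"w": jnp.zeros((2,))}}
print(report(params))  # {'tokenizer': 16, 'lam': 2, 'total': 18}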
+1983,3714837,"TERMINAL",0,0,"2025-08-11 16:52:30.044778: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_0_bfc) ran out of memory trying to allocate 17.79GiB (rounded to 19101158912)requested by op \r\nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \r\nCurrent allocation summary follows.\r\n2025-08-11 16:52:30.049495: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] ****************************************____________________________________________________________\r\nE0811 16:52:30.050211 3234747 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 19101158744 bytes. [tf-allocator-allocation-error='']\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 365, in <module>\r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\r\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 19101158744 bytes.\r\n",,terminal_output +1985,3715759,"TERMINAL",0,0,"2025-08-11 16:52:30.692136: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_3_bfc) ran out of memory trying to allocate 17.79GiB (rounded to 19101158912)requested by op \r\nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \r\nCurrent allocation summary follows.\r\n2025-08-11 16:52:30.696798: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] ****************************************____________________________________________________________\r\nE0811 16:52:30.697413 3234750 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 19101158744 bytes. [tf-allocator-allocation-error='']\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 365, in <module>\r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\r\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 19101158744 bytes.\r\nFiltering out episode with length 2, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +2020,3751154,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch",0,0,"",shellscript,tab +2022,3752065,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch",2083,0,"",shellscript,selection_mouse +2024,3752895,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch",2136,0,"",shellscript,selection_mouse
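The RESOURCE_EXHAUSTED traceback above shows the jitted train_step asking for a single ~17.8 GiB buffer. The log itself suggests TF_GPU_ALLOCATOR=cuda_malloc_async for fragmentation; a sketch of how such settings would be applied, with the caveat that they must be set before JAX initializes its backend, and that whether they fit this run's memory budget is an assumption (the usual structural fix is a smaller per-device batch or model):

import os

# The log's own suggestion for fragmentation-related OOMs.
os.environ["TF_GPU_ALLOCATOR"] = "cuda_malloc_async"
# Cap how much GPU memory JAX preallocates (default is 0.75 of the device).
os.environ.setdefault("XLA_PYTHON_CLIENT_MEM_FRACTION", "0.9")

import jax  # import only after the environment is configured
print(jax.devices())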
+2025,3752906,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch",2135,0,"",shellscript,selection_command +2026,3753420,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch",2136,0,"",shellscript,selection_mouse +2027,3753421,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch",2135,0,"",shellscript,selection_command +2029,3754116,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch",2184,0,"",shellscript,selection_mouse +2030,3754127,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch",2183,0,"",shellscript,selection_command +2031,3754699,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch",2162,0,"",shellscript,selection_mouse +2032,3754700,"slurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch",2161,0,"",shellscript,selection_command +2050,3763662,"TERMINAL",0,0,"^C\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining]633;D;130",,terminal_output +2059,3772802,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +2061,3773817,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1646,0,"",shellscript,selection_mouse +2062,3773819,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1645,0,"",shellscript,selection_command
+2074,3785736,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2354,0,"",shellscript,selection_mouse +2076,3785880,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2352,4,"4096",shellscript,selection_mouse +2077,3786072,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2333,26," --dyna_ffn_dim=4096 \\n",shellscript,selection_mouse +2088,3787939,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2215,141,"-entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096",shellscript,selection_mouse +2107,3790195,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2258,101," --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n",shellscript,selection_mouse
+2128,3812352,"utils/nn.py",0,0,"",python,tab +2134,3817102,"utils/nn.py",4672,0,"",python,selection_mouse +2135,3817114,"utils/nn.py",4671,0,"",python,selection_command +2136,3817255,"utils/nn.py",4666,6,"x_BTNM",python,selection_mouse +2137,3817265,"utils/nn.py",4667,5,"_BTNM",python,selection_command +2138,3817420,"utils/nn.py",4650,17,"\n return x",python,selection_mouse +2153,3817921,"utils/nn.py",4026,641," # --- Spatial attention ---\n z_BTNM = self.spatial_norm(x_BTNM)\n z_BTNM = self.spatial_attention(z_BTNM)\n x_BTNM = x_BTNM + z_BTNM\n\n # --- Temporal attention ---\n x_BNTM = x_BTNM.swapaxes(1, 2)\n z_BNTM = self.temporal_norm(x_BNTM)\n z_BNTM = self.temporal_attention(z_BNTM)\n x_BNTM = x_BNTM + z_BNTM\n x_BTNM = x_BNTM.swapaxes(1, 2)\n\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x",python,selection_mouse +2160,3823404,"utils/nn.py",10983,0,"",python,selection_mouse +2161,3823414,"utils/nn.py",10982,0,"",python,selection_command +2162,3823575,"utils/nn.py",10982,1,"M",python,selection_mouse +2163,3823577,"utils/nn.py",10983,0,"",python,selection_command +2164,3823596,"utils/nn.py",10961,22,"\n return x_BTNM",python,selection_mouse +2171,3823769,"utils/nn.py",10419,564," x_BTNM = x_BTNM + z_BTNM\n # --- Temporal attention ---\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> b n t m"")\n z_PTM = self.temporal_norm(z_PTM)\n z_PTM = self.temporal_attention(z_PTM)\n z_BTNM = einops.rearrange(z_PTM, ""b n t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM",python,selection_mouse +2172,3823769,"utils/nn.py",10350,633," z_BTNM = einops.rearrange(z_FNM, ""b t n m -> b t n m"", t=T)\n x_BTNM = x_BTNM + z_BTNM\n # --- Temporal attention ---\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> b n t m"")\n z_PTM = 
self.temporal_norm(z_PTM)\n z_PTM = self.temporal_attention(z_PTM)\n z_BTNM = einops.rearrange(z_PTM, ""b n t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM",python,selection_mouse +2173,3823775,"utils/nn.py",10304,679," z_FNM = self.spatial_attention(z_FNM)\n z_BTNM = einops.rearrange(z_FNM, ""b t n m -> b t n m"", t=T)\n x_BTNM = x_BTNM + z_BTNM\n # --- Temporal attention ---\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> b n t m"")\n z_PTM = self.temporal_norm(z_PTM)\n z_PTM = self.temporal_attention(z_PTM)\n z_BTNM = einops.rearrange(z_PTM, ""b n t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM",python,selection_mouse +2174,3823836,"utils/nn.py",10263,720," z_FNM = self.spatial_norm(z_FNM)\n z_FNM = self.spatial_attention(z_FNM)\n z_BTNM = einops.rearrange(z_FNM, ""b t n m -> b t n m"", t=T)\n x_BTNM = x_BTNM + z_BTNM\n # --- Temporal attention ---\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> b n t m"")\n z_PTM = self.temporal_norm(z_PTM)\n z_PTM = self.temporal_attention(z_PTM)\n z_BTNM = einops.rearrange(z_PTM, ""b n t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM",python,selection_mouse +2175,3823891,"utils/nn.py",10200,783," z_FNM = einops.rearrange(x_BTNM, ""b t n m -> b t n m"")\n z_FNM = self.spatial_norm(z_FNM)\n z_FNM = self.spatial_attention(z_FNM)\n z_BTNM = einops.rearrange(z_FNM, ""b t n m -> b t n m"", t=T)\n x_BTNM = x_BTNM + z_BTNM\n # --- Temporal attention ---\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> b n t m"")\n z_PTM = self.temporal_norm(z_PTM)\n z_PTM = self.temporal_attention(z_PTM)\n z_BTNM = einops.rearrange(z_PTM, ""b n t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM",python,selection_mouse +2176,3823952,"utils/nn.py",10166,817," B, T, N, M = x_BTNM.shape\n z_FNM = einops.rearrange(x_BTNM, ""b t n m -> b t n m"")\n z_FNM = self.spatial_norm(z_FNM)\n z_FNM = self.spatial_attention(z_FNM)\n z_BTNM = einops.rearrange(z_FNM, ""b t n m -> b t n m"", t=T)\n x_BTNM = x_BTNM + z_BTNM\n # --- Temporal attention ---\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> b n t m"")\n z_PTM = self.temporal_norm(z_PTM)\n z_PTM = self.temporal_attention(z_PTM)\n z_BTNM = einops.rearrange(z_PTM, ""b n t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM",python,selection_mouse +2177,3823970,"utils/nn.py",10130,853," # --- Spatial attention ---\n B, T, N, M = x_BTNM.shape\n z_FNM = einops.rearrange(x_BTNM, ""b t n m -> b t n m"")\n z_FNM = self.spatial_norm(z_FNM)\n z_FNM = self.spatial_attention(z_FNM)\n z_BTNM = einops.rearrange(z_FNM, ""b t n m -> b t n m"", t=T)\n x_BTNM 
= x_BTNM + z_BTNM\n # --- Temporal attention ---\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> b n t m"")\n z_PTM = self.temporal_norm(z_PTM)\n z_PTM = self.temporal_attention(z_PTM)\n z_BTNM = einops.rearrange(z_PTM, ""b n t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM",python,selection_mouse +2178,3824060,"utils/nn.py",10020,963," def __call__(self, x_BTNM: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None) -> jax.Array:\n # --- Spatial attention ---\n B, T, N, M = x_BTNM.shape\n z_FNM = einops.rearrange(x_BTNM, ""b t n m -> b t n m"")\n z_FNM = self.spatial_norm(z_FNM)\n z_FNM = self.spatial_attention(z_FNM)\n z_BTNM = einops.rearrange(z_FNM, ""b t n m -> b t n m"", t=T)\n x_BTNM = x_BTNM + z_BTNM\n # --- Temporal attention ---\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> b n t m"")\n z_PTM = self.temporal_norm(z_PTM)\n z_PTM = self.temporal_attention(z_PTM)\n z_BTNM = einops.rearrange(z_PTM, ""b n t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM",python,selection_mouse +2179,3824342,"utils/nn.py",10130,853," # --- Spatial attention ---\n B, T, N, M = x_BTNM.shape\n z_FNM = einops.rearrange(x_BTNM, ""b t n m -> b t n m"")\n z_FNM = self.spatial_norm(z_FNM)\n z_FNM = self.spatial_attention(z_FNM)\n z_BTNM = einops.rearrange(z_FNM, ""b t n m -> b t n m"", t=T)\n x_BTNM = x_BTNM + z_BTNM\n # --- Temporal attention ---\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> b n t m"")\n z_PTM = self.temporal_norm(z_PTM)\n z_PTM = self.temporal_attention(z_PTM)\n z_BTNM = einops.rearrange(z_PTM, ""b n t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM",python,selection_mouse +2180,3825020,"TERMINAL",0,0,"2044750308405087",,terminal_output +2181,3825207,"utils/nn.py",10130,853,"",python,content +2182,3825231,"utils/nn.py",10129,0,"",python,selection_command +2183,3825882,"utils/nn.py",10129,0," # --- Spatial attention ---\n z_BTNM = self.spatial_norm(x_BTNM)\n z_BTNM = self.spatial_attention(z_BTNM)\n x_BTNM = x_BTNM + z_BTNM\n\n # --- Temporal attention ---\n x_BNTM = x_BTNM.swapaxes(1, 2)\n z_BNTM = self.temporal_norm(x_BNTM)\n z_BNTM = self.temporal_attention(z_BNTM)\n x_BNTM = x_BNTM + z_BNTM\n x_BTNM = x_BNTM.swapaxes(1, 2)\n\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x",python,content +2184,3825987,"TERMINAL",0,0,"15581191198",,terminal_output +2185,3827057,"TERMINAL",0,0,"26692230225:009",,terminal_output +2186,3827842,"utils/nn.py",10600,0,"",python,selection_mouse +2187,3828138,"TERMINAL",0,0,"3775033133110",,terminal_output +2188,3829125,"TERMINAL",0,0,"48814424421",,terminal_output +2189,3830021,"utils/nn.py",10772,0,"",python,selection_mouse +2190,3830168,"TERMINAL",0,0,"59925535532",,terminal_output +2191,3830648,"utils/nn.py",10771,0,"",python,selection_mouse 
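Record 2183 above is the actual edit: the einops-based body is deleted (record 2181) and replaced with the swapaxes-based one. A sketch of that inserted body as a standalone function, assuming the sub-module names taken from the recorded code (`spatial_norm`, `spatial_attention`, `temporal_norm`, `temporal_attention`, `ffn_norm`, `ffn_dense1`, `ffn_dense2`) are plain callables such as Flax LayerNorm/attention/Dense instances:

```python
import jax

def st_block(x_BTNM, spatial_norm, spatial_attention,
             temporal_norm, temporal_attention,
             ffn_norm, ffn_dense1, ffn_dense2):
    """Pre-norm space-time block mirroring the body inserted in record 2183.

    Shape suffixes: B=batch, T=time, N=spatial tokens, M=model dim,
    D=FFN hidden dim.
    """
    # --- Spatial attention: mixes the N axis within each frame ---
    z_BTNM = spatial_norm(x_BTNM)
    z_BTNM = spatial_attention(z_BTNM)
    x_BTNM = x_BTNM + z_BTNM

    # --- Temporal attention: swap to (B, N, T, M) so the attention
    # module mixes the T axis independently for each spatial token ---
    x_BNTM = x_BTNM.swapaxes(1, 2)
    z_BNTM = temporal_norm(x_BNTM)
    z_BNTM = temporal_attention(z_BNTM)
    x_BNTM = x_BNTM + z_BNTM
    x_BTNM = x_BNTM.swapaxes(1, 2)

    # --- Feedforward with GELU ---
    z_BTNM = ffn_norm(x_BTNM)
    z_BTND = ffn_dense1(z_BTNM)
    z_BTND = jax.nn.gelu(z_BTND)
    z_BTNM = ffn_dense2(z_BTND)
    x_BTNM = x_BTNM + z_BTNM

    return x_BTNM
```

Keeping the pre-norm residual pattern identical across all three sub-blocks is what lets the refactor swap `einops.rearrange` for `swapaxes` without touching anything else.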
+2192,3831224,"TERMINAL",0,0,"6202036646643",,terminal_output +2193,3832273,"TERMINAL",0,0,"82258868865",,terminal_output +2194,3833303,"TERMINAL",0,0,"93369979976",,terminal_output +2195,3834375,"TERMINAL",0,0,"304473:00408505:0087",,terminal_output +2196,3835394,"TERMINAL",0,0,"15581191198",,terminal_output +2197,3836487,"TERMINAL",0,0,"2669224022109",,terminal_output +2198,3837506,"TERMINAL",0,0,"3771:0033133120",,terminal_output +2199,3838552,"TERMINAL",0,0,"48814424421",,terminal_output +2200,3839620,"TERMINAL",0,0,"59925535532",,terminal_output +2201,3840386,"utils/nn.py",10770,1,"",python,content +2202,3840639,"TERMINAL",0,0,"6303036646643",,terminal_output +2203,3840846,"utils/nn.py",10770,0,"_",python,content +2204,3840849,"utils/nn.py",10771,0,"",python,selection_keyboard +2205,3841416,"utils/nn.py",10771,0,"B",python,content +2206,3841417,"utils/nn.py",10772,0,"",python,selection_keyboard +2207,3841694,"TERMINAL",0,0,"71147757754",,terminal_output +2208,3842122,"utils/nn.py",10772,0,"T",python,content +2209,3842123,"utils/nn.py",10773,0,"",python,selection_keyboard +2210,3842762,"TERMINAL",0,0,"82258868865",,terminal_output +2211,3843786,"TERMINAL",0,0,"93369979976",,terminal_output +2212,3844613,"utils/nn.py",10769,4,"x_BTNM",python,content +2213,3844839,"TERMINAL",0,0,"40447105085:001087",,terminal_output +2214,3845915,"TERMINAL",0,0,"15581191198",,terminal_output +2215,3847026,"TERMINAL",0,0,"2669225022209",,terminal_output +2216,3847981,"TERMINAL",0,0,"3771033133130",,terminal_output +2217,3849088,"TERMINAL",0,0,"48814424421",,terminal_output +2218,3850089,"TERMINAL",0,0,"59925535532",,terminal_output +2219,3851130,"TERMINAL",0,0,"6404036646643",,terminal_output +2220,3852221,"TERMINAL",0,0,"71147757754",,terminal_output +2221,3853278,"TERMINAL",0,0,"83369979976",,terminal_output +2222,3854306,"TERMINAL",0,0,"50447203:008102087",,terminal_output +2223,3855324,"TERMINAL",0,0,"15581191198",,terminal_output +2224,3856410,"TERMINAL",0,0,"2669225:0022309",,terminal_output +2225,3857419,"TERMINAL",0,0,"3772033133140",,terminal_output +2226,3858466,"TERMINAL",0,0,"48814424421",,terminal_output +2227,3859499,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch^C",,terminal_command +2228,3859511,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;361f455a-15ad-4916-88b6-b9e49434d7af]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining]633;D",,terminal_output +2229,3859562,"TERMINAL",0,0,"59925535532",,terminal_output +2230,3860347,"TERMINAL",0,0,"dev",,terminal_command +2231,3860557,"TERMINAL",0,0,"6505036646643",,terminal_output +2232,3861679,"TERMINAL",0,0,"71147757754",,terminal_output +2233,3862655,"TERMINAL",0,0,"82258868865",,terminal_output +2234,3863731,"TERMINAL",0,0,"93369979976",,terminal_output +2235,3864709,"TERMINAL",0,0,"sync-runner",,terminal_command +2236,3864750,"TERMINAL",0,0,"]633;E;2025-08-11 16:55:00 sync-runner;361f455a-15ad-4916-88b6-b9e49434d7af]633;Csending incremental file list\r\n",,terminal_output +2237,3864759,"TERMINAL",0,0,"5:0044730108203087",,terminal_output +2238,3865789,"TERMINAL",0,0,"15581191198",,terminal_output +2239,3866835,"TERMINAL",0,0,"2669221022409",,terminal_output +2240,3867925,"TERMINAL",0,0,"3773033133150",,terminal_output +2241,3868923,"TERMINAL",0,0,"48814424421",,terminal_output 
+2242,3869726,"TERMINAL",0,0,"slurm/dev/alfred/\r\nslurm/dev/alfred/berlin/\r\nslurm/dev/alfred/berlin/job_requeueing/\r\nslurm/dev/alfred/berlin/job_requeueing/notes.md\r\nslurm/dev/alfred/berlin/train_dyn_dev/\r\nslurm/dev/alfred/berlin/train_dyn_dev/train_dynacmis.sbatch\r\nslurm/dev/alfred/berlin/train_lam_dev/\r\nslurm/dev/alfred/berlin/train_lam_dev/train_lam.sbatch\r\nslurm/dev/alfred/berlin/train_tok_dev/\r\nslurm/dev/alfred/berlin/train_tok_dev/train_tok.sbatch\r\nslurm/dev/alfred/horeka/\r\nslurm/dev/alfred/horeka/jobs_cur/\r\nslurm/dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/\r\nslurm/dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dev_dyn_gt_actions.sbatch\r\nslurm/dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dev_dyn_gt_actions_dev_single.sbatch\r\nslurm/dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions.sh\r\nslurm/dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation.sbatch\r\nslurm/dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation_baseline.sbatch\r\nslurm/dev/alfred/horeka/jobs_cur/tokenizer/\r\nslurm/dev/alfred/horeka/jobs_cur/tokenizer/train_tokenizer_37M.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/\r\nslurm/dev/alfred/horeka/jobs_old/allocate/\r\nslurm/dev/alfred/horeka/jobs_old/allocate/cpu.sh\r\nslurm/dev/alfred/horeka/jobs_old/allocate/multigpu_gpu.sh\r\nslurm/dev/alfred/horeka/jobs_old/allocate/single_gpu.sh\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_16_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_1_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_1_nodes.sh\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_2_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_32_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_4_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_4_nodes.sh\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_4_nodes_frequent_chkpt.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_64_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_8_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/const_lr/\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/const_lr/train_tokenizer_16_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/const_lr/train_tokenizer_1_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/const_lr/train_tokenizer_2_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/const_lr/train_tokenizer_32_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/const_lr/train_tokenizer_4_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/const_lr/train_tokenizer_8_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/oai_subset/\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/oai_subset/train_tokenizer_1_nodes.sh\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/oai_subset/train_tokenizer_2_nodes.sh\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/oai_subset/train_tokenizer_2_nodes_samples_
500.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/checkpoint_fix/\r\nslurm/dev/alfred/horeka/jobs_old/checkpoint_fix/train_tokenizer.sh\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/train_tokenizer_coinrun.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/base/\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/base/train_dynamics_coinrun.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/base/train_lam_coinrun.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/base/train_tokenizer_coinrun.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_dynamics_coinrun.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_dynamics_coinrun.sh\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_lam_12.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_lam_24.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_lam_48.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_lam_6.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_lam_6.sh\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_tokenizer_coinrun.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_tokenizer_coinrun.sh\r\nslurm/dev/alfred/horeka/jobs_old/dyn_gt_actions_ablation/\r\nslurm/dev/alfred/horeka/jobs_old/dyn_gt_actions_ablation/dyn_gt_actions_ablation.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/dyn_gt_actions_ablation/dyn_gt_actions_ablation_baseline.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/dyn_gt_actions_ablation/dyn_gt_actions_ablation_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/dyn_gt_actions_ablation/masked_lim_yolo.sh\r\nslurm/dev/alfred/horeka/jobs_old/generate_single_samples/\r\nslurm/dev/alfred/horeka/jobs_old/generate_single_samples/generate_samples_50k.sh\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/download_10xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/download_6xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/download_7xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/download_8xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/download_9xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/download_index_json.sh\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/mp4_to_array_record_open_ai_6xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/mp4_to_array_record_open_ai_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/mp4_to_npy_open_ai_10xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/mp4_to_npy_open_ai_6xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/mp4_to_npy_open_ai_7xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/mp4_to_npy_open_ai_8xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/mp4_to_npy_open_ai_9xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/actions_download/\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/actions_download/download_actions.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/actions_download/download_actions_all.sh\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/mp4_to_arrayrecords_w_actions/\r\nslurm/dev/al
fred/horeka/jobs_old/input_pipeline_ws/mp4_to_arrayrecords_w_actions/preproc_w_actions.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/download_10xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/download_6xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/download_7xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/download_8xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/download_9xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/download_index_json.sh\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/mp4_to_array_record_open_ai.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/mp4_to_array_record_open_ai_chunked.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/mp4_to_array_record_open_ai_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/mp4_to_array_record_open_ai_to_fast.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/mp4_to_npy_open_ai.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/job_chaining/\r\nslurm/dev/alfred/horeka/jobs_old/job_chaining/chain_example.sh\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/train_lam_chain_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/train_lam_requeue_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/example_tokenizer_lr_tuning/\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/example_tokenizer_lr_tuning/lr_tuning_tokenizer.sh\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/example_tokenizer_lr_tuning/train_tokenizer_lr_general.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/lr_tuning/\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/lr_tuning/tokenizer_lr_tuning.py\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/lr_tuning/tokenizer/\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/lr_tuning/tokenizer/lr_tuning_tokenizer.sh\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/lr_tuning/tokenizer/train_tokenizer_lr_general.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/masked_lim/\r\nslurm/dev/alfred/horeka/jobs_old/masked_lim/masked_lim_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/masked_lim/masked_lim_yolo.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/masked_lim/masked_lim_yolo.sh\r\nslurm/dev/alfred/horeka/jobs_old/masked_lim_noise/\r\nslurm/dev/alfred/horeka/jobs_old/masked_lim_noise/masked_lim_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/masked_lim_noise/masked_lim_yolo.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/masked_lim_noise/masked_lim_yolo.sh\r\nslurm/dev/alfred/horeka/jobs_old/overfit_minecraft_single_sample/\r\nslurm/dev/alfred/horeka/jobs_old/overfit_minecraft_single_sample/train_dynamics_overfit_sample.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_minecraft_single_sample/train_dynamics_overfit_sample.sh\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_dev.sh\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_init.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_samples_12.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_samples_12288.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_samples_1536.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/
overfit_run_ds_oai/lam/train_lam_samples_24576.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_samples_3072.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_samples_384.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_samples_49152.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_samples_6144.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_samples_96.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/from_ckpt/\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/from_ckpt/train_lam_dev.sh\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/from_ckpt/train_lam_samples_12.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_dev.sh\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_12.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_12288.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_1536.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_24576.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_3072.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_384.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_49152.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_6144.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_96.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/from_ckpt/\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/from_ckpt/train_tokenizer_samples_12.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/from_ckpt/train_tokenizer_samples_12.sh\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_dynamics_overfit_sample.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_lam_overfit_sample.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_tokenizer_overfit_sample.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_tokenizer_overfit_sample_size_0.6_mio.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_tokenizer_overfit_sample_size_0_5.sh\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_tokenizer_overfit_sample_size_21_mio.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_tokenizer_overfit_sample_size_2_mio.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_tokenizer_overfit_sample_size_9_mio.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_tokenizer_overfit_sample_size_small_mio.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/mp4_to_npy_10xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/mp4_to_npy_6xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/mp4_to_npy_7xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/mp4_to_npy_8xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/mp4_to_npy_9xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/mp4_to_npy_open_ai.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/mp4_to_npy_test.sbatch\r\ns
lurm/dev/alfred/horeka/jobs_old/preprocess/npy_to_tfrecord.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/npy_to_tfrecord_10xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/npy_to_tfrecord_6xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/npy_to_tfrecord_7xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/npy_to_tfrecord_8xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/npy_to_tfrecord_9xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/preprocess_video_splitter.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/preprocess_video_to_npy.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/preprocess_video_to_npy_test.sh\r\nslurm/dev/alfred/horeka/jobs_old/procgen/\r\nslurm/dev/alfred/horeka/jobs_old/procgen/cp_script.sh\r\nslurm/dev/alfred/horeka/jobs_old/procgen/data_gen_gym_acrobot.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/procgen/data_gen_gym_carracing.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/procgen/data_gen_gym_coinrun.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/procgen/data_gen_gym_coinrun.sh\r\nslurm/dev/alfred/horeka/jobs_old/procgen/data_gen_gym_mountaincar copy.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/procgen/data_gen_gym_mountaincar.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/procgen/data_gen_gym_multi.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/procgen/data_gen_gym_pendulum.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/rsync/\r\nslurm/dev/alfred/horeka/jobs_old/rsync/rsync.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/rsync/rsync_tf_records.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/sample_jafar/\r\nslurm/dev/alfred/horeka/jobs_old/sample_jafar/sample_coinrun.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/sampling/\r\nslurm/dev/alfred/horeka/jobs_old/sampling/sample_coinrun.sh\r\nslurm/dev/alfred/horeka/jobs_old/sampling/sample_knoms.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/sampling/sample_knoms.sh\r\nslurm/dev/alfred/horeka/jobs_old/sampling/sample_knoms_mihir.sh\r\nslurm/dev/alfred/horeka/jobs_old/train_dyn/\r\nslurm/dev/alfred/horeka/jobs_old/train_dyn/train_dyn_knoms_full.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_dyn_dev/\r\nslurm/dev/alfred/horeka/jobs_old/train_dyn_dev/train_dyn.sh\r\nslurm/dev/alfred/horeka/jobs_old/train_dyn_dev/train_dyn_checkpt_loading_test_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_dyn_dev/train_dyn_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_dyn_dev/train_dyn_single_batch.sh\r\nslurm/dev/alfred/horeka/jobs_old/train_lam/\r\nslurm/dev/alfred/horeka/jobs_old/train_lam/train_lam_full.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_lam_dev/\r\nslurm/dev/alfred/horeka/jobs_old/train_lam_dev/train_lam.sh\r\nslurm/dev/alfred/horeka/jobs_old/train_lam_dev/train_lam_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_lam_dev/train_lam_full_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_lam_dev/train_lam_single_batch.sh\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer/\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer/train_lam_oai_dev 
copy.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer/train_lam_oai_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer/train_tokenizer_knoms_overfit_single_batch.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer/train_tokenizer_knoms_overfit_single_sample.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer/train_tokenizer_knoms_overfit_tfrecord_10.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer/train_tokenizer_knoms_overfit_tfrecord_full.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer_dev/\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer_dev/train_tokenizer.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer_dev/train_tokenizer.sh\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer_dev/train_tokenizer_copy.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer_dev/train_tokenizer_h100.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer_dev/train_tokenizer_overfit_tfrecord_10.sh\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer_dev/train_tokenizer_single_batch.sh\r\nslurm/dev/alfred/horeka/jobs_old/validation/\r\nslurm/dev/alfred/horeka/jobs_old/validation/tokenizer_lr_tuning/\r\nslurm/dev/alfred/horeka/jobs_old/validation/tokenizer_lr_tuning/lr_tuning_tokenizer.sh\r\nslurm/dev/alfred/horeka/jobs_old/validation/tokenizer_lr_tuning/train_tokenizer_lr_general.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/validation/tokenizer_without_optimizer/\r\nslurm/dev/alfred/horeka/jobs_old/validation/tokenizer_without_optimizer/lr_tuning_tokenizer.sh\r\nslurm/dev/alfred/horeka/jobs_old/validation/tokenizer_without_optimizer/train_tokenizer_lr_general.sbatch\r\nslurm/dev/mihir/horeka/\r\nslurm/dev/mihir/horeka/train_tokenizer.sh\r\nutils/nn.py\r\n",,terminal_output +2243,3869967,"TERMINAL",0,0,"59925535532",,terminal_output +2244,3870425,"TERMINAL",0,0,"\r\nsent 290,554 bytes received 3,930 bytes 45,305.23 bytes/sec\r\ntotal size is 219,145,575 speedup is 744.17\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +2245,3871100,"TERMINAL",0,0,"67:002:0036646643",,terminal_output +2246,3872057,"TERMINAL",0,0,"71147757754",,terminal_output +2247,3873115,"TERMINAL",0,0,"82258868865",,terminal_output +2248,3874152,"TERMINAL",0,0,"93369979976",,terminal_output +2249,3875301,"TERMINAL",0,0,"1044740208304087",,terminal_output +2250,3876308,"TERMINAL",0,0,"1669222022509",,terminal_output +2251,3877344,"TERMINAL",0,0,"377403313316:00",,terminal_output +2252,3878368,"TERMINAL",0,0,"48814424421",,terminal_output +2253,3879386,"TERMINAL",0,0,"59925535532",,terminal_output +2254,3880432,"TERMINAL",0,0,"6101036646643",,terminal_output +2255,3881485,"TERMINAL",0,0,"71147757754",,terminal_output +2256,3882572,"TERMINAL",0,0,"82258868865",,terminal_output +2257,3883570,"TERMINAL",0,0,"93369979976",,terminal_output +2258,3884621,"TERMINAL",0,0,"2044750308405087",,terminal_output +2259,3885670,"TERMINAL",0,0,"15581191198",,terminal_output +2260,3886760,"TERMINAL",0,0,"26692230226:009",,terminal_output +2261,3887765,"TERMINAL",0,0,"3775033133110",,terminal_output +2262,3888550,"TERMINAL",0,0,"sbatch sbatch slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch^C",,terminal_command +2263,3888570,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;361f455a-15ad-4916-88b6-b9e49434d7af]633;C]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D",,terminal_output +2264,3888808,"TERMINAL",0,0,"48814424421",,terminal_output +2265,3889876,"TERMINAL",0,0,"59925535532",,terminal_output +2266,3890457,"TERMINAL",0,0,"sbatch 
slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",,terminal_command +2267,3890499,"TERMINAL",0,0,"]633;E;2025-08-11 16:55:26 sbatch slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch;361f455a-15ad-4916-88b6-b9e49434d7af]633;CSubmitted batch job 3415137\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +2268,3891011,"TERMINAL",0,0,"637PD\t0:00(None)28 7:20\t 8 hkn[0404,0521-0527]11122:206105079dy 31:5364046 5:03:56\t 1 hkn07282401 accelerat train_to1-01:13:36\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]43410346030925:56510416623414280 cpuonly wrap tum_cte0 R 3:16:13\t 1 hkn0238",,terminal_output +2269,3892066,"TERMINAL",0,0,"7 Rhkn[0627-0634]1147757754",,terminal_output +2270,3893114,"TERMINAL",0,0,"812258868865",,terminal_output +2271,3894239,"TERMINAL",0,0,"923369979976",,terminal_output +2272,3895276,"TERMINAL",0,0,"3034474:00408506:0087",,terminal_output +2273,3896255,"TERMINAL",0,0,"15669224022109",,terminal_output +2274,3897344,"TERMINAL",0,0,"36772:0033133120",,terminal_output +2275,3898391,"TERMINAL",0,0,"478814424421",,terminal_output +2276,3899437,"TERMINAL",0,0,"589925535532",,terminal_output +2277,3900506,"TERMINAL",0,0,"69303036646643",,terminal_output +2278,3901544,"TERMINAL",0,0,"7101147757754",,terminal_output +2279,3902587,"TERMINAL",0,0,"812258868865",,terminal_output +2280,3903663,"TERMINAL",0,0,"923369979976",,terminal_output +2281,3904688,"TERMINAL",0,0,"403447105086:001087",,terminal_output +2282,3905711,"TERMINAL",0,0,"145581191198",,terminal_output +2283,3906755,"TERMINAL",0,0,"25669225022209",,terminal_output +2284,3907804,"TERMINAL",0,0,"36771033133130",,terminal_output +2285,3908839,"TERMINAL",0,0,"478814424421",,terminal_output +2286,3909888,"TERMINAL",0,0,"589925535532",,terminal_output +2287,3909901,"TERMINAL",0,0,"logs",,terminal_command +2288,3909912,"TERMINAL",0,0,"]633;E;2025-08-11 16:55:45 logs;361f455a-15ad-4916-88b6-b9e49434d7af]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir]633;D;0",,terminal_output +2289,3910687,"TERMINAL",0,0,"ls",,terminal_command +2290,3910766,"TERMINAL",0,0,"]633;E;2025-08-11 16:55:46 ls;361f455a-15ad-4916-88b6-b9e49434d7af]633;C",,terminal_output +2291,3910864,"TERMINAL",0,0,"big_run train_lam_action_space_scaling_50_3331286.log\r\nbig-runs train_lam_action_space_scaling_6_3318549.log\r\ncausal train_lam_action_space_scaling_6_3320178.log\r\ncoinrun train_lam_action_space_scaling_6_3321528.log\r\nmaskgit train_lam_action_space_scaling_6_3329790.log\r\nmaskgit-maskprob-fix train_lam_action_space_scaling_6_3329805.log\r\ntrain_dyn_causal_180M_3372931.log train_lam_action_space_scaling_6_3331287.log\r\ntrain_dyn_causal_180M_3372963.log train_lam_action_space_scaling_8_3318550.log\r\ntrain_dyn_causal_180M_3372969.log train_lam_action_space_scaling_8_3329791.log\r\ntrain_dyn_causal_180M_3373107.log train_lam_action_space_scaling_8_3329806.log\r\ntrain_dyn_causal_255M_3372932.log train_lam_action_space_scaling_8_3331288.log\r\ntrain_dyn_causal_255M_3372970.log train_lam_minecraft_overfit_sample_3309655.log\r\ntrain_dyn_causal_255M_3373108.log train_lam_model_size_scaling_38M_3317098.log\r\ntrain_dyn_causal_356M_3372934.log train_lam_model_size_scaling_38M_3317115.log\r\ntrain_dyn_causal_356M_3372971.log train_lam_model_size_scaling_38M_3317231.log\r\ntrain_dyn_causal_356M_3373109.log train_tokenizer_batch_size_scaling_16_node_3321526.log\r\ntrain_dyn_causal_500M_3372936.log 
train_tokenizer_batch_size_scaling_1_node_3318551.log\r\ntrain_dyn_causal_500M_3372972.log train_tokenizer_batch_size_scaling_2_node_3318552.log\r\ntrain_dyn_causal_500M_3373110.log train_tokenizer_batch_size_scaling_2_node_3330806.log\r\ntrain_dyn_new_arch-bugfixed-spatial-shift_3359343.log train_tokenizer_batch_size_scaling_2_node_3330848.log\r\ntrain_dyn_new_arch-bugfixed-temporal-shift_3359349.log train_tokenizer_batch_size_scaling_2_node_3331282.log\r\ntrain_dyn_yolorun_3333026.log train_tokenizer_batch_size_scaling_4_node_3318553.log\r\ntrain_dyn_yolorun_3333448.log train_tokenizer_batch_size_scaling_4_node_3320175.log\r\ntrain_dyn_yolorun_3335345.log train_tokenizer_batch_size_scaling_4_node_3321524.log\r\ntrain_dyn_yolorun_3335362.log train_tokenizer_batch_size_scaling_8_node_3320176.log\r\ntrain_dyn_yolorun_3348592.log train_tokenizer_batch_size_scaling_8_node_3321525.log\r\ntrain_dyn_yolorun_new_arch_3351743.log train_tokenizer_minecraft_overfit_sample_3309656.log\r\ntrain_dyn_yolorun_new_arch_3352103.log train_tokenizer_model_size_scaling_127M_3317233.log\r\ntrain_dyn_yolorun_new_arch_3352115.log train_tokenizer_model_size_scaling_127M_3318554.log\r\ntrain_dyn_yolorun_new_arch_3358457.log train_tokenizer_model_size_scaling_140M_3313562.log\r\ntrain_lam_action_space_scaling_10_3320179.log train_tokenizer_model_size_scaling_140M_3316019.log\r\ntrain_lam_action_space_scaling_10_3321529.log train_tokenizer_model_size_scaling_200M_3313563.log\r\ntrain_lam_action_space_scaling_10_3329786.log train_tokenizer_model_size_scaling_200M_3316020.log\r\ntrain_lam_action_space_scaling_10_3329801.log train_tokenizer_model_size_scaling_227M_3317234.log\r\ntrain_lam_action_space_scaling_10_3331283.log train_tokenizer_model_size_scaling_227M_3318555.log\r\ntrain_lam_action_space_scaling_12_3318546.log train_tokenizer_model_size_scaling_227M_3320173.log\r\ntrain_lam_action_space_scaling_12_3320177.log train_tokenizer_model_size_scaling_227M_3321523.log\r\ntrain_lam_action_space_scaling_12_3321527.log train_tokenizer_model_size_scaling_37M_3313565.log\r\ntrain_lam_action_space_scaling_12_3329787.log train_tokenizer_model_size_scaling_37M_3316022.log\r\ntrain_lam_action_space_scaling_12_3329802.log train_tokenizer_model_size_scaling_37M_3317232.log\r\ntrain_lam_action_space_scaling_12_3331284.log train_tokenizer_model_size_scaling_37M_3317239.log\r\ntrain_lam_action_space_scaling_20_3318547.log train_tokenizer_model_size_scaling_37M_3318556.log\r\ntrain_lam_action_space_scaling_20_3329788.log train_tokenizer_model_size_scaling_74M_3318557.log\r\ntrain_lam_action_space_scaling_20_3329803.log train_tokenizer_model_size_scaling_74M_3320174.log\r\ntrain_lam_action_space_scaling_20_3331285.log train_tokenizer_model_size_scaling_74M_3321522.log\r\ntrain_lam_action_space_scaling_50_3320180.log train_tokenizer_model_size_scaling_80M_3313564.log\r\ntrain_lam_action_space_scaling_50_3329789.log train_tokenizer_model_size_scaling_80M_3316026.log\r\ntrain_lam_action_space_scaling_50_3329804.log yoloruns\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir]633;D;0",,terminal_output +2292,3910968,"TERMINAL",0,0,"69404036646643",,terminal_output +2293,3911989,"TERMINAL",0,0,"7201147757754",,terminal_output +2294,3913077,"TERMINAL",0,0,"812258868865",,terminal_output +2295,3913108,"TERMINAL",0,0,"cd causal/dynamics-cotraining/",,terminal_command +2296,3913117,"TERMINAL",0,0,"]633;E;2025-08-11 16:55:48 cd 
causal/dynamics-cotraining/;361f455a-15ad-4916-88b6-b9e49434d7af]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining]633;D;0",,terminal_output +2297,3913450,"TERMINAL",0,0,"ls",,terminal_command +2298,3913500,"TERMINAL",0,0,"]633;E;2025-08-11 16:55:49 ls;361f455a-15ad-4916-88b6-b9e49434d7af]633;C",,terminal_output +2299,3913643,"TERMINAL",0,0,"train_dynamics_causal_2_node_3373407.log train_dynamics_causal_8_node_3388140.log train_dynamics_causal_8_node_3412349.log\r\ntrain_dynamics_causal_2_node_3373407.log_bak train_dynamics_causal_8_node_3389928.log train_dynamics_causal_8_node_3412356.log\r\ntrain_dynamics_causal_2_node_3388135.log train_dynamics_causal_8_node_3390458.log train_dynamics_causal_8_node_3412397.log\r\ntrain_dynamics_causal_2_node_3388147.log train_dynamics_causal_8_node_3393060.log train_dynamics_causal_8_node_3412399.log\r\ntrain_dynamics_causal_2_node_3389801.log train_dynamics_causal_8_node_3393061.log train_dynamics_causal_8_node_3414710.log\r\ntrain_dynamics_causal_2_node_3393065.log train_dynamics_causal_8_node_3393066.log train_dynamics_causal_8_node_3415128.log\r\ntrain_dynamics_causal_8_node_3373408.log train_dynamics_causal_8_node_3412343.log\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining]633;D;0",,terminal_output +2300,3914108,"TERMINAL",0,0,"923369979976",,terminal_output +2301,3915131,"TERMINAL",0,0,"503447204:008102087",,terminal_output +2302,3916066,"TERMINAL",0,0,"queue",,terminal_command +2303,3916113,"TERMINAL",0,0,"]633;E;2025-08-11 16:55:51 queue;361f455a-15ad-4916-88b6-b9e49434d7af]633;C",,terminal_output +2304,3916173,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1993.localdomain: Mon Aug 11 16:55:51 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3415137 accelerat train_dy tum_cte0 R\t0:24\t 8 hkn[0627-0634]3415128 accelerat train_dy tum_cte0 R\t7:45\t 8 hkn[0404,0521-0527]3415111 accelerat train_dy tum_cte0 R22:45\t 1 hkn06103415079 accelerat train_dy tum_cte0 R32:18\t 1 hkn07263414046 accelerat train_to tum_cte0 R 5:04:21\t 1 hkn07283412401 accelerat train_to tum_cte0 R 1-01:14:01\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]3414284 cpuonly wrap tum_cte0 R 3:15:59\t 1 hkn03103414283 cpuonly wrap tum_cte0 R 3:16:11\t 1 hkn03093414282 cpuonly wrap tum_cte0 R 3:16:21\t 1 hkn16653414281 cpuonly wrap tum_cte0 R 3:16:29\t 1 hkn16623414280 cpuonly wrap tum_cte0 R 3:16:38\t 1 hkn0238",,terminal_output +2305,3916185,"TERMINAL",0,0,"145581191198",,terminal_output +2306,3917281,"TERMINAL",0,0,"25669226:0022309",,terminal_output +2307,3917291,"TERMINAL",0,0,"25669226:0022309",,terminal_output +2308,3918304,"TERMINAL",0,0,"37882144244241",,terminal_output +2309,3918314,"TERMINAL",0,0,"37882144244241",,terminal_output +2310,3919329,"TERMINAL",0,0,"589925535532",,terminal_output +2311,3919330,"TERMINAL",0,0,"589925535532",,terminal_output +2312,3919466,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining]633;D;0",,terminal_output +2313,3920372,"TERMINAL",0,0,"69505036646643",,terminal_output +2314,3920646,"TERMINAL",0,0,"ls",,terminal_command +2315,3920655,"TERMINAL",0,0,"]633;E;2025-08-11 16:55:56 ls;361f455a-15ad-4916-88b6-b9e49434d7af]633;Ctrain_dynamics_causal_2_node_3373407.log train_dynamics_causal_8_node_3388140.log 
train_dynamics_causal_8_node_3412349.log\r\ntrain_dynamics_causal_2_node_3373407.log_bak train_dynamics_causal_8_node_3389928.log train_dynamics_causal_8_node_3412356.log\r\ntrain_dynamics_causal_2_node_3388135.log train_dynamics_causal_8_node_3390458.log train_dynamics_causal_8_node_3412397.log\r\ntrain_dynamics_causal_2_node_3388147.log train_dynamics_causal_8_node_3393060.log train_dynamics_causal_8_node_3412399.log\r\ntrain_dynamics_causal_2_node_3389801.log train_dynamics_causal_8_node_3393061.log train_dynamics_causal_8_node_3414710.log\r\ntrain_dynamics_causal_2_node_3393065.log train_dynamics_causal_8_node_3393066.log train_dynamics_causal_8_node_3415128.log\r\ntrain_dynamics_causal_8_node_3373408.log train_dynamics_causal_8_node_3412343.log train_dynamics_causal_8_node_3415137.log\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining]633;D;0",,terminal_output +2316,3921376,"TERMINAL",0,0,"7301147757754",,terminal_output +2317,3922409,"TERMINAL",0,0,"812258868865",,terminal_output +2318,3923456,"TERMINAL",0,0,"923369979976",,terminal_output +2319,3924506,"TERMINAL",0,0,"6:00344730108203087",,terminal_output +2320,3925560,"TERMINAL",0,0,"145581191198",,terminal_output +2321,3926593,"TERMINAL",0,0,"25669221022409",,terminal_output +2322,3927175,"TERMINAL",0,0,"tail -f train_dynamics_causal_8_node_3415137.log",,terminal_command +2323,3927214,"TERMINAL",0,0,"]633;E;2025-08-11 16:56:02 tail -f train_dynamics_causal_8_node_3415137.log;361f455a-15ad-4916-88b6-b9e49434d7af]633;CSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0627-0634]\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +2324,3927726,"TERMINAL",0,0,"36773033133150",,terminal_output +2325,3928678,"TERMINAL",0,0,"478814424421",,terminal_output +2326,3929774,"TERMINAL",0,0,"589925535532",,terminal_output +2327,3930799,"TERMINAL",0,0,"698:003:0036646643",,terminal_output +2328,3931826,"TERMINAL",0,0,"7401147757754",,terminal_output +2329,3932876,"TERMINAL",0,0,"812258868865",,terminal_output +2330,3933923,"TERMINAL",0,0,"923369979976",,terminal_output +2331,3934973,"TERMINAL",0,0,"10344740208304087",,terminal_output +2332,3936124,"TERMINAL",0,0,"145581191198",,terminal_output +2333,3937147,"TERMINAL",0,0,"25669222022509",,terminal_output +2334,3938171,"TERMINAL",0,0,"3677403313317:00",,terminal_output +2335,3939196,"TERMINAL",0,0,"478814424421",,terminal_output +2336,3940219,"TERMINAL",0,0,"589925535532",,terminal_output +2337,3941345,"TERMINAL",0,0,"650111147757754",,terminal_output +2338,3942371,"TERMINAL",0,0,"812258868865",,terminal_output +2339,3943394,"TERMINAL",0,0,"923369979976",,terminal_output +2340,3944392,"TERMINAL",0,0,"20344750308405087",,terminal_output +2341,3945444,"TERMINAL",0,0,"145581191198",,terminal_output +2342,3946569,"TERMINAL",0,0,"256692230227:009",,terminal_output +2343,3947570,"TERMINAL",0,0,"36775033133110",,terminal_output +2344,3948616,"TERMINAL",0,0,"478814424421",,terminal_output +2345,3949640,"TERMINAL",0,0,"589925535532",,terminal_output +2346,3950688,"TERMINAL",0,0,"69202036646643",,terminal_output +2347,3951790,"TERMINAL",0,0,"71:001147757754",,terminal_output +2348,3952783,"TERMINAL",0,0,"812258868865",,terminal_output +2349,3953856,"TERMINAL",0,0,"923369979976",,terminal_output 
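With the job resubmitted, the fresh SLURM log is followed with `tail -f` (record 2322). An equivalent sketch in Python, assuming only the standard library and the log filename derived from the `sbatch` output above:

```python
import subprocess

# Job id 3415137 comes from the "Submitted batch job" line above.
LOG = "train_dynamics_causal_8_node_3415137.log"

# Follow the SLURM log as the job starts; equivalent to the `tail -f`
# issued in the terminal. Runs until interrupted.
with subprocess.Popen(["tail", "-f", LOG], text=True,
                      stdout=subprocess.PIPE) as proc:
    for line in proc.stdout:
        print(line, end="")
```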
+2350,3954877,"TERMINAL",0,0,"3034475:00408507:0087",,terminal_output +2351,3955924,"TERMINAL",0,0,"145581191198",,terminal_output +2352,3957009,"TERMINAL",0,0,"25669224022109",,terminal_output +2353,3957209,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +2354,3958026,"TERMINAL",0,0,"36773:0033133120",,terminal_output +2355,3958229,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250811_165632-3415137\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-causal-8-node-3415137\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/3415137\r\n",,terminal_output +2356,3959163,"TERMINAL",0,0,"478814424421",,terminal_output +2357,3960189,"TERMINAL",0,0,"589925535532",,terminal_output +2358,3961212,"TERMINAL",0,0,"69303036646643",,terminal_output +2359,3962237,"TERMINAL",0,0,"7101147757754",,terminal_output +2360,3963368,"TERMINAL",0,0,"823369979976",,terminal_output +2361,3964387,"TERMINAL",0,0,"403447105087:001087",,terminal_output +2362,3965389,"TERMINAL",0,0,"145581191198",,terminal_output +2363,3966416,"TERMINAL",0,0,"25669225022209",,terminal_output +2364,3967550,"TERMINAL",0,0,"36771033133130",,terminal_output +2365,3968557,"TERMINAL",0,0,"478814424421",,terminal_output +2366,3969607,"TERMINAL",0,0,"589925535532",,terminal_output +2367,3970608,"TERMINAL",0,0,"69404036646643",,terminal_output +2368,3971772,"TERMINAL",0,0,"7201147757754",,terminal_output +2369,3972780,"TERMINAL",0,0,"812258868865",,terminal_output +2370,3973807,"TERMINAL",0,0,"923369979976",,terminal_output +2371,3974799,"TERMINAL",0,0,"503447205:008102087",,terminal_output +2372,3975841,"TERMINAL",0,0,"145581191198",,terminal_output +2373,3976894,"TERMINAL",0,0,"25669227:0022309",,terminal_output +2374,3977937,"TERMINAL",0,0,"36772033133140",,terminal_output +2375,3979032,"TERMINAL",0,0,"478814424421",,terminal_output +2376,3980030,"TERMINAL",0,0,"589925535532",,terminal_output +2377,3981181,"TERMINAL",0,0,"69505036646643",,terminal_output +2378,3982133,"TERMINAL",0,0,"7301147757754",,terminal_output +2379,3983229,"TERMINAL",0,0,"812258868865",,terminal_output +2380,3984252,"TERMINAL",0,0,"9344730108203087",,terminal_output +2381,3985380,"TERMINAL",0,0,"7:0145581191198",,terminal_output +2382,3986403,"TERMINAL",0,0,"25669221022409",,terminal_output +2383,3987426,"TERMINAL",0,0,"36773033133150",,terminal_output +2384,3988413,"TERMINAL",0,0,"478814424421",,terminal_output +2385,3989478,"TERMINAL",0,0,"589925535532",,terminal_output +2386,3990601,"TERMINAL",0,0,"699:004:0036646643",,terminal_output +2387,3991624,"TERMINAL",0,0,"7401147757754",,terminal_output +2388,3992649,"TERMINAL",0,0,"812258868865",,terminal_output +2389,3993674,"TERMINAL",0,0,"923369979976",,terminal_output +2390,3994699,"TERMINAL",0,0,"10344740208304087",,terminal_output +2391,3995738,"TERMINAL",0,0,"145581191198",,terminal_output +2392,3996847,"TERMINAL",0,0,"25669222022509",,terminal_output +2393,3997831,"TERMINAL",0,0,"3677403313318:00",,terminal_output +2394,3998877,"TERMINAL",0,0,"478814424421",,terminal_output +2395,3999919,"TERMINAL",0,0,"589925535532",,terminal_output +2396,4001052,"TERMINAL",0,0,"69101036646643",,terminal_output +2397,4002011,"TERMINAL",0,0,"7501147757754",,terminal_output 
+2398,4003094,"TERMINAL",0,0,"812258868865",,terminal_output +2399,4004117,"TERMINAL",0,0,"923369979976",,terminal_output +2400,4005160,"TERMINAL",0,0,"20344750308405087",,terminal_output +2401,4006268,"TERMINAL",0,0,"145581191198",,terminal_output +2402,4007293,"TERMINAL",0,0,"2677503331338:0110",,terminal_output +2403,4008317,"TERMINAL",0,0,"478814424421",,terminal_output +2404,4009342,"TERMINAL",0,0,"589925535532",,terminal_output +2405,4010679,"TERMINAL",0,0,"69202036646643",,terminal_output +2406,4011798,"TERMINAL",0,0,"72:001147757754",,terminal_output +2407,4012792,"TERMINAL",0,0,"812258868865",,terminal_output +2408,4013836,"TERMINAL",0,0,"923369979976",,terminal_output +2409,4014974,"TERMINAL",0,0,"3034476:00408508:0087",,terminal_output +2410,4015931,"TERMINAL",0,0,"145581191198",,terminal_output +2411,4017009,"TERMINAL",0,0,"25669224022109",,terminal_output +2412,4018019,"TERMINAL",0,0,"36774:0033133120",,terminal_output +2413,4019171,"TERMINAL",0,0,"478814424421",,terminal_output +2414,4020116,"TERMINAL",0,0,"589925535532",,terminal_output +2415,4021221,"TERMINAL",0,0,"628CG\t9:30404,0521-0527]372:0627-0634]3036646643",,terminal_output +2416,4022214,"TERMINAL",0,0,"710147757754",,terminal_output +2417,4023259,"TERMINAL",0,0,"82369979976",,terminal_output +2418,4024392,"TERMINAL",0,0,"40347105088:001087",,terminal_output +2419,4025386,"TERMINAL",0,0,"14581191198",,terminal_output +2420,4026456,"TERMINAL",0,0,"2569225022209",,terminal_output +2421,4027454,"TERMINAL",0,0,"3671033133130",,terminal_output +2422,4028507,"TERMINAL",0,0,"47814424421",,terminal_output +2423,4029561,"TERMINAL",0,0,"58925535532",,terminal_output +2424,4030608,"TERMINAL",0,0,"694036646643",,terminal_output +2425,4031774,"TERMINAL",0,0,"\r737 R\t2:2627-0634]1124:41\t 1 hkn061007934:147264046to5:06:17824011-01:15:57\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]4284 cpuonly wrap 3:17:55\t 1 hkn031038:070921716651252030238",,terminal_output +2426,4032741,"TERMINAL",0,0,"81258868865",,terminal_output +2427,4033823,"TERMINAL",0,0,"92369979976",,terminal_output +2428,4034844,"TERMINAL",0,0,"50347206:008102087",,terminal_output +2429,4035378,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 
examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 76000\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nWARNING:absl:Missing metrics for step 76000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not 
found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nWARNING:absl:Missing metrics for step 76000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 
40000\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 77000\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 76000\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nWARNING:absl:Missing metrics for step 76000\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing 
metrics for step 77000\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 77000\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nWARNING:absl:Missing metrics for step 76000\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 76000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/076000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 
78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nWARNING:absl:Missing metrics for step 78000\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 78000\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 78000\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\n",,terminal_output 
+2430,4035862,"TERMINAL",0,0,"14581191198",,terminal_output +2431,4036387,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +2432,4036915,"TERMINAL",0,0,"2569228:0022309",,terminal_output +2433,4037957,"TERMINAL",0,0,"3672033133140",,terminal_output +2434,4039005,"TERMINAL",0,0,"47814424421",,terminal_output +2435,4039448,"TERMINAL",0,0,"Running on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 
340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\n",,terminal_output +2436,4040063,"TERMINAL",0,0,"58925535532",,terminal_output +2437,4040404,"TERMINAL",0,0,"Running on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 
33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\n",,terminal_output +2438,4041189,"TERMINAL",0,0,"695036646643",,terminal_output +2439,4042213,"TERMINAL",0,0,"730147757754",,terminal_output +2440,4043185,"TERMINAL",0,0,"81258868865",,terminal_output +2441,4044233,"TERMINAL",0,0,"934730108203087",,terminal_output +2442,4045388,"TERMINAL",0,0,"8:014581191198",,terminal_output +2443,4046354,"TERMINAL",0,0,"2569221022409",,terminal_output +2444,4047432,"TERMINAL",0,0,"3673033133150",,terminal_output +2445,4048455,"TERMINAL",0,0,"47814424421",,terminal_output +2446,4049582,"TERMINAL",0,0,"58925535532",,terminal_output +2447,4050610,"TERMINAL",0,0,"695:0036646643",,terminal_output +2448,4051631,"TERMINAL",0,0,"740147757754",,terminal_output +2449,4052658,"TERMINAL",0,0,"81258868865",,terminal_output +2450,4053662,"TERMINAL",0,0,"92369979976",,terminal_output +2451,4054807,"TERMINAL",0,0,"1034740208304087",,terminal_output +2452,4055746,"TERMINAL",0,0,"14581191198",,terminal_output +2453,4056803,"TERMINAL",0,0,"2569222022509",,terminal_output +2454,4057842,"TERMINAL",0,0,"367403313319:00",,terminal_output +2455,4058903,"TERMINAL",0,0,"47814424421",,terminal_output +2456,4059927,"TERMINAL",0,0,"58925535532",,terminal_output +2457,4061053,"TERMINAL",0,0,"691036646643",,terminal_output +2458,4062022,"TERMINAL",0,0,"750147757754",,terminal_output +2459,4063102,"TERMINAL",0,0,"81258868865",,terminal_output +2460,4064125,"TERMINAL",0,0,"92369979976",,terminal_output +2461,4065251,"TERMINAL",0,0,"2034750308405087",,terminal_output +2462,4066275,"TERMINAL",0,0,"14581191198",,terminal_output +2463,4067252,"TERMINAL",0,0,"267503331339:0110",,terminal_output +2464,4068323,"TERMINAL",0,0,"47814424421",,terminal_output +2465,4069350,"TERMINAL",0,0,"58925535532",,terminal_output +2466,4070402,"TERMINAL",0,0,"692036646643",,terminal_output +2467,4071502,"TERMINAL",0,0,"73:00147757754",,terminal_output +2468,4072498,"TERMINAL",0,0,"81258868865",,terminal_output +2469,4073426,"TERMINAL",0,0,"2025-08-11 16:58:28.958932: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2470,4073555,"TERMINAL",0,0,"92369979976",,terminal_output +2471,4074470,"TERMINAL",0,0,"2025-08-11 16:58:29.297694: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-08-11 16:58:29.297744: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-08-11 16:58:29.665801: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-08-11 16:58:29.922796: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2472,4074605,"TERMINAL",0,0,"303477:00408509:0087",,terminal_output +2473,4075642,"TERMINAL",0,0,"14581191198",,terminal_output +2474,4076720,"TERMINAL",0,0,"2569224022109",,terminal_output +2475,4077751,"TERMINAL",0,0,"3675:0033133120",,terminal_output +2476,4078819,"TERMINAL",0,0,"47814424421",,terminal_output +2477,4079486,"TERMINAL",0,0,"2025-08-11 16:58:34.964741: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2478,4079895,"TERMINAL",0,0,"58925535532",,terminal_output +2479,4080890,"TERMINAL",0,0,"693036646643",,terminal_output +2480,4081938,"TERMINAL",0,0,"710147757754",,terminal_output +2481,4082992,"TERMINAL",0,0,"81258868865",,terminal_output +2482,4084082,"TERMINAL",0,0,"92369979976",,terminal_output +2483,4085118,"TERMINAL",0,0,"40347105089:001087",,terminal_output +2484,4086142,"TERMINAL",0,0,"14581191198",,terminal_output +2485,4087181,"TERMINAL",0,0,"2569225022209",,terminal_output +2486,4088291,"TERMINAL",0,0,"3781144244231",,terminal_output +2487,4089317,"TERMINAL",0,0,"58925535532",,terminal_output +2488,4090376,"TERMINAL",0,0,"694036646643",,terminal_output +2489,4091467,"TERMINAL",0,0,"720147757754",,terminal_output +2490,4092491,"TERMINAL",0,0,"81258868865",,terminal_output +2491,4093552,"TERMINAL",0,0,"92369979976",,terminal_output +2492,4094540,"TERMINAL",0,0,"50347207:008102087",,terminal_output +2493,4095564,"TERMINAL",0,0,"14581191198",,terminal_output +2494,4096689,"TERMINAL",0,0,"2569229:0022309",,terminal_output +2495,4097712,"TERMINAL",0,0,"3672033133140",,terminal_output +2496,4098737,"TERMINAL",0,0,"47814424421",,terminal_output +2497,4099767,"TERMINAL",0,0,"58925535532",,terminal_output +2498,4100887,"TERMINAL",0,0,"695036646643",,terminal_output +2499,4101912,"TERMINAL",0,0,"730147757754",,terminal_output +2500,4102894,"TERMINAL",0,0,"81258868865",,terminal_output +2501,4103950,"TERMINAL",0,0,"92369979976",,terminal_output +2502,4105014,"TERMINAL",0,0,"9:0034730108203087",,terminal_output +2503,4106110,"TERMINAL",0,0,"14581191198",,terminal_output +2504,4107150,"TERMINAL",0,0,"2569221022409",,terminal_output +2505,4108157,"TERMINAL",0,0,"3673033133150",,terminal_output 
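[editorial sketch] The repeated orbax warning above fires because restore() was called without sharding information, so orbax falls back to reading the sharding file. A minimal, hedged sketch of supplying the sharding explicitly — the mesh layout, checkpoint path, and target tree here are hypothetical placeholders, not jafar's actual layout, and newer orbax releases prefer the ocp.args.StandardRestore API instead:

    import jax
    import numpy as np
    import orbax.checkpoint as ocp
    from jax.sharding import Mesh, NamedSharding, PartitionSpec as P

    ckpt_path = "/path/to/checkpoint"        # hypothetical
    mesh = Mesh(np.array(jax.devices()), ("data",))  # 32 GPUs in the run above
    sharding = NamedSharding(mesh, P("data"))

    target = {"w": np.zeros((4, 4))}         # placeholder tree skeleton
    # One ArrayRestoreArgs per leaf avoids the "Sharding info not
    # provided when restoring" warning and the extra file read.
    restore_args = jax.tree_util.tree_map(
        lambda _: ocp.ArrayRestoreArgs(sharding=sharding), target
    )
    restored = ocp.PyTreeCheckpointer().restore(
        ckpt_path, item=target, restore_args=restore_args
    )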
+2515,4118537,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab
+2536,4137459,"TERMINAL",0,0,"2025-08-11 16:59:33.110313: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_0_bfc) ran out of memory trying to allocate 17.79GiB (rounded to 19101157120) requested by op \r\nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \r\nCurrent allocation summary follows.\r\n2025-08-11 16:59:33.115071: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] ****************************************____________________________________________________________\r\nE0811 16:59:33.115839 3967043 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 19101156952 bytes. [tf-allocator-allocation-error='']\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n  File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 365, in <module>\r\n    loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n  File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\r\n    pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 19101156952 bytes.\r\n",,terminal_output
+2538,4138485,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 12, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 4, which is shorter than the requested sequence length 16.\r\n[GPU_2_bfc then fails with the identical 17.79GiB RESOURCE_EXHAUSTED error and traceback at 16:59:33.834118; duplicate elided]",,terminal_output
+2542,4141492,"TERMINAL",0,0,"[GPU_3_bfc fails the same way at 16:59:36.876421; duplicate elided]",,terminal_output
[surrounding digit-fragment TERMINAL rows and zero-length editor-selection events elided]
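[editorial sketch] For scale, a back-of-envelope check of the failed allocation against the parameter counts printed at startup (fp32 assumed; the activation reasoning is illustrative only):

    params_total = 340_797_904        # total from the startup log above
    param_bytes = params_total * 4     # fp32: ~1.27 GiB of raw parameters
    failed_alloc = 19_101_156_952      # bytes requested when GPU_0 OOM'd
    print(param_bytes / 2**30)         # ~1.27
    print(failed_alloc / 2**30)        # ~17.79, matching the allocator message
    # The ~17.79 GiB request dwarfs the parameters, so the pressure comes
    # from activations inside one jitted train_step; shrinking the
    # per-device batch or the sequence length (16, per the filtering
    # messages) reduces it roughly linearly. The allocator's own
    # fragmentation workaround, quoted from the log:
    #   export TF_GPU_ALLOCATOR=cuda_malloc_async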
+2639,4240977,"TERMINAL",0,0,"^C\r\n",,terminal_output
+2655,4256911,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab
+2671,4260505,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2258,22," --dyna_dim=1024 \\n",shellscript,selection_mouse
[digit-fragment TERMINAL rows, shell-title escape sequences, and repeated zero-length selection events elided]
+2677,4260901,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",2259,100," --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n",shellscript,selection_mouse
+2699,4281111,"TERMINAL",0,0,"dev",,terminal_command
+2703,4282983,"TERMINAL",0,0,"git diff",,terminal_command
+2705,4283152,"TERMINAL",0,0,"diff --git a/utils/nn.py b/utils/nn.py\r\nindex 53cb68a..ff7c0d0 100644\r\n--- a/utils/nn.py\r\n+++ b/utils/nn.py\r\n@@ -330,18 +330,17 @@ class TransformerBlock(nnx.Module):\r\n     @nnx.remat\r\n     def __call__(self, x_BTNM: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None) -> jax.Array:\r\n         # --- Spatial attention ---\r\n-        B, T, N, M = x_BTNM.shape\r\n-        z_FNM = einops.rearrange(x_BTNM, ""b t n m -> (b t) n m"")\r\n-        z_FNM = self.spatial_norm(z_FNM)\r\n-        z_FNM = self.spatial_attention(z_FNM)\r\n-        z_BTNM = einops.rearrange(z_FNM, ""(b t) n m -> b t n m"", t=T)\r\n+        z_BTNM = self.spatial_norm(x_BTNM)\r\n+        z_BTNM = self.spatial_attention(z_BTNM)\r\n         x_BTNM = x_BTNM + z_BTNM\r\n+\r\n         # --- Temporal attention ---\r\n-        z_PTM = einops.rearrange(x_BTNM, ""b t n m -> (b n) t m"")\r\n-        z_PTM = self.temporal_norm(z_PTM)\r\n-        z_PTM = self.temporal_attention(z_PTM)\r\n-        z_BTNM = einops.rearrange(z_PTM, ""(b n) t m -> b t n m"", n=N)\r\n-        x_BTNM = x_BTNM + z_BTNM\r\n+        x_BNTM = x_BTNM.swapaxes(1, 2)\r\n+        z_BNTM = self.temporal_norm(x_BNTM)\r\n+        z_BNTM = self.temporal_attention(z_BNTM)\r\n+        x_BNTM = x_BNTM + z_BNTM\r\n+        x_BTNM = x_BNTM.swapaxes(1, 2)\r\n+\r\n         # --- Feedforward ---\r\n         z_BTNM = self.ffn_norm(x_BTNM)\r\n         z_BTND = self.ffn_dense1(z_BTNM)\r\n",,terminal_output
+2709,4286677,"TERMINAL",0,0,"git stash",,terminal_command
+2712,4287000,"TERMINAL",0,0,"Saved working directory and index state WIP on input_pipeline/add-npy2array_record: 4e0846b adding pngs to array records as well\r\n",,terminal_output
+2715,4288480,"TERMINAL",0,0,"git diff",,terminal_command
[git diff after the stash produced no output]
+2719,4290471,"TERMINAL",0,0,"git branch",,terminal_command
+2720,4290482,"TERMINAL",0,0,"  add-wandb-name-and-tags\r\n  before-nnx\r\n  causal-mem-reduce\r\n  causal-spatiotemporal-kv-cache\r\n  causal-st-transformer\r\n  causal-transformer-dynamics-model\r\n  causal-transformer-nnx-no-kv-cache\r\n  convert-to-jax-array-in-iter\r\n  correct-batched-sampling\r\n  dev\r\n  dont-let-tf-see-gpu\r\n  feat/explicit-image-dims\r\n  fix-action-padding-lam-future-information-access\r\n  fix-sampling\r\n  fix-transformer-forwardpass\r\n  fix/spatiotemporal-pe-once-in-STTransformer\r\n  grad-norm-log-and-clip\r\n  grain-dataloader\r\n* input_pipeline/add-npy2array_record\r\n  logging-variants\r\n  lr-schedules\r\n  main\r\n  maskgit-different-maskprob-per-sample\r\n  maskgit-sampling-iterative-unmasking-fix\r\n  metrics-logging-for-dynamics-model\r\n  monkey-patch\r\n  new-arch-sampling\r\n  preprocess_video\r\n  refactor-tmp\r\n  revised-dataloader\r\n  runner\r\n  runner-grain\r\n  sample-ali-branch\r\n  sample-from-different-topologies\r\n  sampling-startframe-indexing-fix\r\n  speedup-tfrecord-preprocessing\r\n  tmp\r\n",,terminal_output
+2724,4293921,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab
[shell-integration escape sequences, zero-length selection events, and digit-fragment TERMINAL rows elided]
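[editorial sketch] The stashed diff above replaces einops-rearrange bookkeeping with swapaxes in the temporal branch. A standalone sketch of just the axis bookkeeping (the norm/attention calls are omitted), showing the two routes agree on shape and values:

    import einops
    import jax.numpy as jnp

    B, T, N, M = 2, 4, 3, 8
    x_BTNM = jnp.arange(B * T * N * M, dtype=jnp.float32).reshape(B, T, N, M)

    # Old route: fold (b, n) into one leading axis so attention sees (P, T, M).
    z_PTM = einops.rearrange(x_BTNM, "b t n m -> (b n) t m")
    old = einops.rearrange(z_PTM, "(b n) t m -> b t n m", n=N)

    # New route: keep four axes and swap T and N; modules that treat all
    # leading axes as batch then attend over the T axis directly.
    new = x_BTNM.swapaxes(1, 2).swapaxes(1, 2)

    assert old.shape == new.shape == (B, T, N, M)
    assert bool(jnp.allclose(old, new))  # pure reshaping, values untouched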
+2741,4300852,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1957,3,"256",shellscript,selection_mouse
+2747,4305964,"TERMINAL",0,0,"python",,terminal_focus
+2750,4308468,"TERMINAL",0,0,"6*32",,terminal_output
+2755,4310055,"TERMINAL",0,0,"\r\n192\r\n>>> ",,terminal_output
+2771,4325799,"TERMINAL",0,0,"8*32",,terminal_output
+2776,4327429,"TERMINAL",0,0,"\r\n256\r\n>>> ",,terminal_output
+2802,4345315,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1957,3,"256",shellscript,selection_mouse
[REPL keystroke events consolidated into whole expressions above; cursor-visibility escapes, history-recall echoes, digit-fragment TERMINAL rows, and zero-length selection events elided]
+2849,4393575,"TERMINAL",0,0,"5*32",,terminal_output
+2855,4395396,"TERMINAL",0,0,"\r\n160\r\n>>> ",,terminal_output
[keystroke events consolidated as above; digit-fragment TERMINAL rows elided]
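[editorial sketch] The REPL arithmetic above (6*32, 8*32, 5*32) is the usual global-batch bookkeeping for this 32-GPU run: global batch = per-device batch x device count. In JAX terms, with the device count hardcoded to match the log:

    devices = 32  # jax.device_count() on this 8-node, 32-GPU run
    for per_device_batch in (5, 6, 8):
        print(per_device_batch * devices)  # 160, 192, 256 — the REPL results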
+2911,4453558,"TERMINAL",0,0,"[corrupted in-place refresh of a job-queue listing: jobs in state R on nodes including hkn[0802,0804-0806,0808,0810,0813-0814], partition cpuonly, job name wrap; the remaining fields are unrecoverable]",,terminal_output
[all remaining TERMINAL rows through the end of this capture contain only digit fragments from the in-place status display and are elided]
+3229,4789639,"TERMINAL",0,0,"5925535532",,terminal_output +3230,4790641,"TERMINAL",0,0,"62036646643",,terminal_output +3231,4791787,"TERMINAL",0,0,"7147757754",,terminal_output +3232,4792737,"TERMINAL",0,0,"8258868865",,terminal_output +3233,4793883,"TERMINAL",0,0,"9369979976",,terminal_output +3234,4794945,"TERMINAL",0,0,"30479:00408501:0087",,terminal_output +3235,4795875,"TERMINAL",0,0,"1581191198",,terminal_output +3236,4796920,"TERMINAL",0,0,"269224022109",,terminal_output +3237,4797971,"TERMINAL",0,0,"377:0033133120",,terminal_output +3238,4799026,"TERMINAL",0,0,"4814424421",,terminal_output +3239,4800088,"TERMINAL",0,0,"5925535532",,terminal_output +3240,4801109,"TERMINAL",0,0,"63036646643",,terminal_output +3241,4802158,"TERMINAL",0,0,"7147757754",,terminal_output +3242,4803259,"TERMINAL",0,0,"8258868865",,terminal_output +3243,4804253,"TERMINAL",0,0,"947105081:001087",,terminal_output +3244,4805300,"TERMINAL",0,0,"41581191198",,terminal_output +3245,4806431,"TERMINAL",0,0,"269225022209",,terminal_output +3246,4807454,"TERMINAL",0,0,"371033133130",,terminal_output +3247,4808476,"TERMINAL",0,0,"4814424421",,terminal_output +3248,4809491,"TERMINAL",0,0,"5925535532",,terminal_output +3249,4810548,"TERMINAL",0,0,"64036646643",,terminal_output +3250,4811653,"TERMINAL",0,0,"7147757754",,terminal_output +3251,4812678,"TERMINAL",0,0,"8258868865",,terminal_output +3252,4813702,"TERMINAL",0,0,"9369979976",,terminal_output +3253,4814788,"TERMINAL",0,0,"5047209:008102087",,terminal_output +3254,4815783,"TERMINAL",0,0,"1581191198",,terminal_output +3255,4816836,"TERMINAL",0,0,"269221:0022309",,terminal_output +3256,4817886,"TERMINAL",0,0,"372033133140",,terminal_output +3257,4818932,"TERMINAL",0,0,"4814424421",,terminal_output +3258,4820059,"TERMINAL",0,0,"5925535532",,terminal_output +3259,4821073,"TERMINAL",0,0,"65036646643",,terminal_output +3260,4822097,"TERMINAL",0,0,"7147757754",,terminal_output +3261,4823144,"TERMINAL",0,0,"8258868865",,terminal_output +3262,4824184,"TERMINAL",0,0,"9369979976",,terminal_output +3263,4825228,"TERMINAL",0,0,"1:005831119213198",,terminal_output +3264,4826262,"TERMINAL",0,0,"269221022409",,terminal_output +3265,4827317,"TERMINAL",0,0,"373033133150",,terminal_output +3266,4828447,"TERMINAL",0,0,"4814424421",,terminal_output +3267,4829403,"TERMINAL",0,0,"5925535532",,terminal_output +3268,4830457,"TERMINAL",0,0,"68:0036646643",,terminal_output +3269,4831497,"TERMINAL",0,0,"7147757754",,terminal_output +3270,4832687,"TERMINAL",0,0,"8258868865",,terminal_output +3271,4833668,"TERMINAL",0,0,"9369979976",,terminal_output +3272,4834694,"TERMINAL",0,0,"104740208304087",,terminal_output +3273,4835691,"TERMINAL",0,0,"1581191198",,terminal_output +3274,4836741,"TERMINAL",0,0,"269222022509",,terminal_output +3275,4837805,"TERMINAL",0,0,"37403313312:00",,terminal_output +3276,4838832,"TERMINAL",0,0,"4814424421",,terminal_output +3277,4839916,"TERMINAL",0,0,"5925535532",,terminal_output +3278,4840941,"TERMINAL",0,0,"61036646643",,terminal_output +3279,4842064,"TERMINAL",0,0,"7147757754",,terminal_output +3280,4843090,"TERMINAL",0,0,"8258868865",,terminal_output +3281,4844113,"TERMINAL",0,0,"9369979976",,terminal_output +3282,4845140,"TERMINAL",0,0,"204750308405087",,terminal_output +3283,4846157,"TERMINAL",0,0,"1581191198",,terminal_output +3284,4847207,"TERMINAL",0,0,"2692230222:009",,terminal_output +3285,4848314,"TERMINAL",0,0,"385144244211",,terminal_output +3286,4849307,"TERMINAL",0,0,"5925535532",,terminal_output 
+3287,4850379,"TERMINAL",0,0,"62036646643",,terminal_output +3288,4851485,"TERMINAL",0,0,"7147757754",,terminal_output +3289,4852456,"TERMINAL",0,0,"8258868865",,terminal_output +3290,4853537,"TERMINAL",0,0,"9369979976",,terminal_output +3291,4854543,"TERMINAL",0,0,"304720:00408502:0087",,terminal_output +3292,4855597,"TERMINAL",0,0,"1581191198",,terminal_output +3293,4856709,"TERMINAL",0,0,"269224022109",,terminal_output +3294,4857682,"TERMINAL",0,0,"378:0033133120",,terminal_output +3295,4858730,"TERMINAL",0,0,"4814424421",,terminal_output +3296,4859782,"TERMINAL",0,0,"5925535532",,terminal_output +3297,4860909,"TERMINAL",0,0,"63036646643",,terminal_output +3298,4861935,"TERMINAL",0,0,"7147757754",,terminal_output +3299,4862921,"TERMINAL",0,0,"8258868865",,terminal_output +3300,4863969,"TERMINAL",0,0,"9369979976",,terminal_output +3301,4865108,"TERMINAL",0,0,"4047105082:001087",,terminal_output +3302,4866132,"TERMINAL",0,0,"1581191198",,terminal_output +3303,4867155,"TERMINAL",0,0,"269225022209",,terminal_output +3304,4868160,"TERMINAL",0,0,"371033133130",,terminal_output +3305,4869206,"TERMINAL",0,0,"4814424421",,terminal_output +3306,4870369,"TERMINAL",0,0,"54036646643",,terminal_output +3307,4871355,"TERMINAL",0,0,"7147757754",,terminal_output +3308,4872378,"TERMINAL",0,0,"8258868865",,terminal_output +3309,4873405,"TERMINAL",0,0,"9369979976",,terminal_output +3310,4874527,"TERMINAL",0,0,"50472030:008102087",,terminal_output +3311,4875483,"TERMINAL",0,0,"1581191198",,terminal_output +3312,4876575,"TERMINAL",0,0,"269222:0022309",,terminal_output +3313,4877584,"TERMINAL",0,0,"372033133140",,terminal_output +3314,4878628,"TERMINAL",0,0,"4814424421",,terminal_output +3315,4879750,"TERMINAL",0,0,"5925535532",,terminal_output +3316,4880723,"TERMINAL",0,0,"65036646643",,terminal_output +3317,4881798,"TERMINAL",0,0,"7147757754",,terminal_output +3318,4882822,"TERMINAL",0,0,"8258868865",,terminal_output +3319,4883852,"TERMINAL",0,0,"9369979976",,terminal_output +3320,4884975,"TERMINAL",0,0,"2:004730108203087",,terminal_output +3321,4885947,"TERMINAL",0,0,"1581191198",,terminal_output +3322,4886997,"TERMINAL",0,0,"269221022409",,terminal_output +3323,4888148,"TERMINAL",0,0,"373033133150",,terminal_output +3324,4889083,"TERMINAL",0,0,"4814424421",,terminal_output +3325,4890127,"TERMINAL",0,0,"5925535532",,terminal_output +3326,4891165,"TERMINAL",0,0,"69:0036646643",,terminal_output +3327,4892218,"TERMINAL",0,0,"7258868865",,terminal_output +3328,4893268,"TERMINAL",0,0,"9369979976",,terminal_output +3329,4894396,"TERMINAL",0,0,"104740208304087",,terminal_output +3330,4895381,"TERMINAL",0,0,"1581191198",,terminal_output +3331,4896405,"TERMINAL",0,0,"269222022509",,terminal_output +3332,4897568,"TERMINAL",0,0,"37403313313:00",,terminal_output +3333,4898590,"TERMINAL",0,0,"4814424421",,terminal_output +3334,4899615,"TERMINAL",0,0,"5925535532",,terminal_output +3335,4900600,"TERMINAL",0,0,"61036646643",,terminal_output +3336,4901665,"TERMINAL",0,0,"7147757754",,terminal_output +3337,4902792,"TERMINAL",0,0,"8258868865",,terminal_output +3338,4903815,"TERMINAL",0,0,"9369979976",,terminal_output +3339,4904839,"TERMINAL",0,0,"204750308405087",,terminal_output +3340,4905856,"TERMINAL",0,0,"1581191198",,terminal_output +3341,4906907,"TERMINAL",0,0,"2692230223:009",,terminal_output +3342,4907951,"TERMINAL",0,0,"375033133110",,terminal_output +3343,4909037,"TERMINAL",0,0,"4814424421",,terminal_output +3344,4910076,"TERMINAL",0,0,"5925535532",,terminal_output 
+3345,4911094,"TERMINAL",0,0,"62036646643",,terminal_output +3346,4912139,"TERMINAL",0,0,"7147757754",,terminal_output +3347,4913187,"TERMINAL",0,0,"8258868865",,terminal_output +3348,4914230,"TERMINAL",0,0,"9471:00408503:0087",,terminal_output +3349,4915285,"TERMINAL",0,0,"31581191198",,terminal_output +3350,4916336,"TERMINAL",0,0,"269224022109",,terminal_output +3351,4917435,"TERMINAL",0,0,"379:0033133120",,terminal_output +3352,4918403,"TERMINAL",0,0,"4814424421",,terminal_output +3353,4919450,"TERMINAL",0,0,"5925535532",,terminal_output +3354,4920495,"TERMINAL",0,0,"63036646643",,terminal_output +3355,4921629,"TERMINAL",0,0,"7147757754",,terminal_output +3356,4922625,"TERMINAL",0,0,"8258868865",,terminal_output +3357,4923678,"TERMINAL",0,0,"9369979976",,terminal_output +3358,4924674,"TERMINAL",0,0,"4047105083:001087",,terminal_output +3359,4925831,"TERMINAL",0,0,"1581191198",,terminal_output +3360,4926851,"TERMINAL",0,0,"269225022209",,terminal_output +3361,4927818,"TERMINAL",0,0,"371033133130",,terminal_output +3362,4928903,"TERMINAL",0,0,"4814424421",,terminal_output +3363,4929920,"TERMINAL",0,0,"5925535532",,terminal_output +3364,4931040,"TERMINAL",0,0,"64036646643",,terminal_output +3365,4932078,"TERMINAL",0,0,"7147757754",,terminal_output +3366,4933102,"TERMINAL",0,0,"8258868865",,terminal_output +3367,4934111,"TERMINAL",0,0,"9369979976",,terminal_output +3368,4935157,"TERMINAL",0,0,"5047201:008102087",,terminal_output +3369,4936260,"TERMINAL",0,0,"1581191198",,terminal_output +3370,4937252,"TERMINAL",0,0,"2720333:01333140",,terminal_output +3371,4938321,"TERMINAL",0,0,"4814424421",,terminal_output +3372,4939348,"TERMINAL",0,0,"5925535532",,terminal_output +3373,4940394,"TERMINAL",0,0,"65036646643",,terminal_output +3374,4941442,"TERMINAL",0,0,"7147757754",,terminal_output +3375,4942488,"TERMINAL",0,0,"8258868865",,terminal_output +3376,4943548,"TERMINAL",0,0,"9369979976",,terminal_output +3377,4944673,"TERMINAL",0,0,"3:004730108203087",,terminal_output +3378,4945647,"TERMINAL",0,0,"1581191198",,terminal_output +3379,4946722,"TERMINAL",0,0,"269221022409",,terminal_output +3380,4947745,"TERMINAL",0,0,"373033133150",,terminal_output +3381,4948872,"TERMINAL",0,0,"4814424421",,terminal_output +3382,4949895,"TERMINAL",0,0,"5925535532",,terminal_output +3383,4950919,"TERMINAL",0,0,"640:0036646643",,terminal_output +3384,4951943,"TERMINAL",0,0,"7147757754",,terminal_output +3385,4953072,"TERMINAL",0,0,"8258868865",,terminal_output +3386,4954016,"TERMINAL",0,0,"9369979976",,terminal_output +3387,4955086,"TERMINAL",0,0,"104740208304087",,terminal_output +3388,4956142,"TERMINAL",0,0,"1581191198",,terminal_output +3389,4957164,"TERMINAL",0,0,"269222022509",,terminal_output +3390,4958214,"TERMINAL",0,0,"37403313314:00",,terminal_output +3391,4959264,"TERMINAL",0,0,"4925535532",,terminal_output +3392,4960373,"TERMINAL",0,0,"61036646643",,terminal_output +3393,4961466,"TERMINAL",0,0,"7147757754",,terminal_output +3394,4962488,"TERMINAL",0,0,"8258868865",,terminal_output +3395,4963514,"TERMINAL",0,0,"9369979976",,terminal_output +3396,4964538,"TERMINAL",0,0,"204750308405087",,terminal_output +3397,4965620,"TERMINAL",0,0,"1581191198",,terminal_output +3398,4966692,"TERMINAL",0,0,"2692230224:009",,terminal_output +3399,4967713,"TERMINAL",0,0,"375033133110",,terminal_output +3400,4968737,"TERMINAL",0,0,"4814424421",,terminal_output +3401,4969863,"TERMINAL",0,0,"5925535532",,terminal_output +3402,4970887,"TERMINAL",0,0,"62036646643",,terminal_output 
+3403,4971910,"TERMINAL",0,0,"7147757754",,terminal_output +3404,4972916,"TERMINAL",0,0,"8258868865",,terminal_output +3405,4974061,"TERMINAL",0,0,"9369979976",,terminal_output +3406,4975088,"TERMINAL",0,0,"30472:00408504:0087",,terminal_output +3407,4976214,"TERMINAL",0,0,"1581191198",,terminal_output +3408,4977134,"TERMINAL",0,0,"269224022109",,terminal_output +3409,4978163,"TERMINAL",0,0,"3750:0033133120",,terminal_output +3410,4979211,"TERMINAL",0,0,"4814424421",,terminal_output +3411,4980309,"TERMINAL",0,0,"53036646643",,terminal_output +3412,4981333,"TERMINAL",0,0,"7147757754",,terminal_output +3413,4982348,"TERMINAL",0,0,"8258868865",,terminal_output +3414,4983487,"TERMINAL",0,0,"9369979976",,terminal_output +3415,4984452,"TERMINAL",0,0,"4047105084:001087",,terminal_output +3416,4985503,"TERMINAL",0,0,"1581191198",,terminal_output +3417,4986554,"TERMINAL",0,0,"269225022209",,terminal_output +3418,4987683,"TERMINAL",0,0,"371033133130",,terminal_output +3419,4988706,"TERMINAL",0,0,"4814424421",,terminal_output +3420,4989735,"TERMINAL",0,0,"5925535532",,terminal_output +3421,4990743,"TERMINAL",0,0,"64036646643",,terminal_output +3422,4991882,"TERMINAL",0,0,"7147757754",,terminal_output +3423,4992864,"TERMINAL",0,0,"8258868865",,terminal_output +3424,4993928,"TERMINAL",0,0,"9369979976",,terminal_output +3425,4994952,"TERMINAL",0,0,"5047202:008102087",,terminal_output +3426,4996001,"TERMINAL",0,0,"1581191198",,terminal_output +3427,4997102,"TERMINAL",0,0,"269224:0022309",,terminal_output +3428,4998127,"TERMINAL",0,0,"372033133140",,terminal_output +3429,4999152,"TERMINAL",0,0,"4814424421",,terminal_output +3430,5000188,"TERMINAL",0,0,"5925535532",,terminal_output +3431,5001230,"TERMINAL",0,0,"65147757754",,terminal_output +3432,5002323,"TERMINAL",0,0,"8258868865",,terminal_output +3433,5003350,"TERMINAL",0,0,"9369979976",,terminal_output +3434,5004478,"TERMINAL",0,0,"4:004730108203087",,terminal_output +3435,5005435,"TERMINAL",0,0,"1581191198",,terminal_output +3436,5006524,"TERMINAL",0,0,"269221022409",,terminal_output +3437,5007527,"TERMINAL",0,0,"373033133150",,terminal_output +3438,5008673,"TERMINAL",0,0,"4814424421",,terminal_output +3439,5009627,"TERMINAL",0,0,"5925535532",,terminal_output +3440,5010676,"TERMINAL",0,0,"61:0036646643",,terminal_output +3441,5011747,"TERMINAL",0,0,"7147757754",,terminal_output +3442,5012872,"TERMINAL",0,0,"8258868865",,terminal_output +3443,5013824,"TERMINAL",0,0,"9369979976",,terminal_output +3444,5014921,"TERMINAL",0,0,"104740208304087",,terminal_output +3445,5015944,"TERMINAL",0,0,"1581191198",,terminal_output +3446,5016968,"TERMINAL",0,0,"269222022509",,terminal_output +3447,5018003,"TERMINAL",0,0,"37403313315:00",,terminal_output +3448,5019117,"TERMINAL",0,0,"4814424421",,terminal_output +3449,5020142,"TERMINAL",0,0,"5925535532",,terminal_output +3450,5021169,"TERMINAL",0,0,"61036646643",,terminal_output +3451,5022193,"TERMINAL",0,0,"7147757754",,terminal_output +3452,5023239,"TERMINAL",0,0,"8369979976",,terminal_output +3453,5024339,"TERMINAL",0,0,"204750308405087",,terminal_output +3454,5025366,"TERMINAL",0,0,"1581191198",,terminal_output +3455,5026491,"TERMINAL",0,0,"2692230225:009",,terminal_output +3456,5027489,"TERMINAL",0,0,"375033133110",,terminal_output +3457,5028540,"TERMINAL",0,0,"4814424421",,terminal_output +3458,5029564,"TERMINAL",0,0,"5925535532",,terminal_output +3459,5030629,"TERMINAL",0,0,"62036646643",,terminal_output +3460,5031713,"TERMINAL",0,0,"7147757754",,terminal_output 
+3461,5033057,"TERMINAL",0,0,"8258868865",,terminal_output +3462,5034171,"TERMINAL",0,0,"9369979976",,terminal_output +3463,5035153,"TERMINAL",0,0,"30473:00408505:0087",,terminal_output +3464,5036223,"TERMINAL",0,0,"1581191198",,terminal_output +3465,5037256,"TERMINAL",0,0,"271:003341331120",,terminal_output +3466,5038291,"TERMINAL",0,0,"4814424421",,terminal_output +3467,5039332,"TERMINAL",0,0,"5925535532",,terminal_output +3468,5040384,"TERMINAL",0,0,"63036646643",,terminal_output +3469,5041434,"TERMINAL",0,0,"7147757754",,terminal_output +3470,5042480,"TERMINAL",0,0,"8258868865",,terminal_output +3471,5043594,"TERMINAL",0,0,"9369979976",,terminal_output +3472,5044615,"TERMINAL",0,0,"4047105085:001087",,terminal_output +3473,5045645,"TERMINAL",0,0,"1581191198",,terminal_output +3474,5046762,"TERMINAL",0,0,"269225022209",,terminal_output +3475,5047791,"TERMINAL",0,0,"371033133130",,terminal_output +3476,5048815,"TERMINAL",0,0,"4814424421",,terminal_output +3477,5049838,"TERMINAL",0,0,"5925535532",,terminal_output +3478,5050859,"TERMINAL",0,0,"64036646643",,terminal_output +3479,5051906,"TERMINAL",0,0,"7147757754",,terminal_output +3480,5052936,"TERMINAL",0,0,"8258868865",,terminal_output +3481,5054049,"TERMINAL",0,0,"9369979976",,terminal_output +3482,5055021,"TERMINAL",0,0,"5047203:008102087",,terminal_output +3483,5056090,"TERMINAL",0,0,"1581191198",,terminal_output +3484,5057242,"TERMINAL",0,0,"269225:0022309",,terminal_output +3485,5058158,"TERMINAL",0,0,"372033133140",,terminal_output +3486,5059200,"TERMINAL",0,0,"4814424421",,terminal_output +3487,5060249,"TERMINAL",0,0,"55036646643",,terminal_output +3488,5061295,"TERMINAL",0,0,"7147757754",,terminal_output +3489,5062345,"TERMINAL",0,0,"8258868865",,terminal_output +3490,5063391,"TERMINAL",0,0,"9369979976",,terminal_output +3491,5064434,"TERMINAL",0,0,"5:004730108203087",,terminal_output +3492,5065482,"TERMINAL",0,0,"1581191198",,terminal_output +3493,5066532,"TERMINAL",0,0,"269221022409",,terminal_output +3494,5067658,"TERMINAL",0,0,"373033133150",,terminal_output +3495,5068681,"TERMINAL",0,0,"4814424421",,terminal_output +3496,5069675,"TERMINAL",0,0,"5925535532",,terminal_output +3497,5070728,"TERMINAL",0,0,"62:0036646643",,terminal_output +3498,5071857,"TERMINAL",0,0,"7147757754",,terminal_output +3499,5072823,"TERMINAL",0,0,"8258868865",,terminal_output +3500,5073906,"TERMINAL",0,0,"9369979976",,terminal_output +3501,5074926,"TERMINAL",0,0,"104740208304087",,terminal_output +3502,5075963,"TERMINAL",0,0,"1581191198",,terminal_output +3503,5077077,"TERMINAL",0,0,"269222022509",,terminal_output +3504,5078101,"TERMINAL",0,0,"37403313316:00",,terminal_output +3505,5079144,"TERMINAL",0,0,"4814424421",,terminal_output +3506,5080154,"TERMINAL",0,0,"5925535532",,terminal_output +3507,5081201,"TERMINAL",0,0,"61036646643",,terminal_output +3508,5082250,"TERMINAL",0,0,"7258868865",,terminal_output +3509,5083294,"TERMINAL",0,0,"9369979976",,terminal_output +3510,5084450,"TERMINAL",0,0,"204750308405087",,terminal_output +3511,5085390,"TERMINAL",0,0,"1581191198",,terminal_output +3512,5086498,"TERMINAL",0,0,"2692230226:009",,terminal_output +3513,5087586,"TERMINAL",0,0,"375033133110",,terminal_output +3514,5089311,"TERMINAL",0,0,"4925535532",,terminal_output +3515,5090376,"TERMINAL",0,0,"62036646643",,terminal_output +3516,5091516,"TERMINAL",0,0,"7147757754",,terminal_output +3517,5092457,"TERMINAL",0,0,"8258868865",,terminal_output +3518,5093503,"TERMINAL",0,0,"9369979976",,terminal_output 
+3519,5094588,"TERMINAL",0,0,"30474:00408506:0087",,terminal_output +3520,5095848,"TERMINAL",0,0,"1581191198",,terminal_output +3521,5096663,"TERMINAL",0,0,"269224022109",,terminal_output +3522,5097716,"TERMINAL",0,0,"372:0033133120",,terminal_output +3523,5098788,"TERMINAL",0,0,"4814424421",,terminal_output +3524,5099807,"TERMINAL",0,0,"5925535532",,terminal_output +3525,5100939,"TERMINAL",0,0,"63036646643",,terminal_output +3526,5101960,"TERMINAL",0,0,"7147757754",,terminal_output +3527,5102936,"TERMINAL",0,0,"8258868865",,terminal_output +3528,5103978,"TERMINAL",0,0,"9369979976",,terminal_output +3529,5105034,"TERMINAL",0,0,"4047105086:001087",,terminal_output +3530,5106160,"TERMINAL",0,0,"1581191198",,terminal_output +3531,5107182,"TERMINAL",0,0,"269225022209",,terminal_output +3532,5108166,"TERMINAL",0,0,"371033133130",,terminal_output +3533,5109212,"TERMINAL",0,0,"4814424421",,terminal_output +3534,5110259,"TERMINAL",0,0,"54036646643",,terminal_output +3535,5111383,"TERMINAL",0,0,"7147757754",,terminal_output +3536,5112348,"TERMINAL",0,0,"8258868865",,terminal_output +3537,5113441,"TERMINAL",0,0,"9369979976",,terminal_output +3538,5114467,"TERMINAL",0,0,"5047204:008102087",,terminal_output +3539,5115475,"TERMINAL",0,0,"1581191198",,terminal_output +3540,5116524,"TERMINAL",0,0,"269226:0022309",,terminal_output +3541,5117634,"TERMINAL",0,0,"372033133140",,terminal_output +3542,5118654,"TERMINAL",0,0,"4814424421",,terminal_output +3543,5119677,"TERMINAL",0,0,"5925535532",,terminal_output +3544,5120676,"TERMINAL",0,0,"65036646643",,terminal_output +3545,5121827,"TERMINAL",0,0,"7147757754",,terminal_output +3546,5122851,"TERMINAL",0,0,"8258868865",,terminal_output +3547,5123874,"TERMINAL",0,0,"9369979976",,terminal_output +3548,5124898,"TERMINAL",0,0,"6:004730108203087",,terminal_output +3549,5125897,"TERMINAL",0,0,"1581191198",,terminal_output +3550,5126948,"TERMINAL",0,0,"269221022409",,terminal_output +3551,5127989,"TERMINAL",0,0,"373033133150",,terminal_output +3552,5129098,"TERMINAL",0,0,"4814424421",,terminal_output +3553,5130122,"TERMINAL",0,0,"5925535532",,terminal_output +3554,5131144,"TERMINAL",0,0,"63:0036646643",,terminal_output +3555,5132182,"TERMINAL",0,0,"7147757754",,terminal_output +3556,5133218,"TERMINAL",0,0,"8258868866",,terminal_output +3557,5134267,"TERMINAL",0,0,"104740208304087",,terminal_output +3558,5135317,"TERMINAL",0,0,"1581191198",,terminal_output +3559,5136362,"TERMINAL",0,0,"269222022509",,terminal_output +3560,5137494,"TERMINAL",0,0,"37403313317:00",,terminal_output +3561,5138517,"TERMINAL",0,0,"4814424421",,terminal_output +3562,5139646,"TERMINAL",0,0,"5925535532",,terminal_output +3563,5140599,"TERMINAL",0,0,"61036646643",,terminal_output +3564,5141639,"TERMINAL",0,0,"7147757754",,terminal_output +3565,5142716,"TERMINAL",0,0,"8258868865",,terminal_output +3566,5143847,"TERMINAL",0,0,"9369979976",,terminal_output +3567,5144867,"TERMINAL",0,0,"204750308405087",,terminal_output +3568,5145891,"TERMINAL",0,0,"1581191198",,terminal_output +3569,5146915,"TERMINAL",0,0,"2692230227:009",,terminal_output +3570,5147931,"TERMINAL",0,0,"375033133110",,terminal_output +3571,5149070,"TERMINAL",0,0,"4814424421",,terminal_output +3572,5150094,"TERMINAL",0,0,"5925535532",,terminal_output +3573,5151117,"TERMINAL",0,0,"62036646643",,terminal_output +3574,5152137,"TERMINAL",0,0,"7147757754",,terminal_output +3575,5153182,"TERMINAL",0,0,"8258868865",,terminal_output +3576,5154204,"TERMINAL",0,0,"9369979976",,terminal_output 
+3577,5155251,"TERMINAL",0,0,"30585:01419517:0198",,terminal_output +3578,5156294,"TERMINAL",0,0,"269224022109",,terminal_output +3579,5157340,"TERMINAL",0,0,"373:0033133120",,terminal_output +3580,5158385,"TERMINAL",0,0,"4814424421",,terminal_output +3581,5159511,"TERMINAL",0,0,"5925535532",,terminal_output +3582,5160473,"TERMINAL",0,0,"63036646643",,terminal_output +3583,5161556,"TERMINAL",0,0,"7147757754",,terminal_output +3584,5162565,"TERMINAL",0,0,"8258868865",,terminal_output +3585,5163691,"TERMINAL",0,0,"9369979976",,terminal_output +3586,5164737,"TERMINAL",0,0,"4047105087:001087",,terminal_output +3587,5165705,"TERMINAL",0,0,"1581191198",,terminal_output +3588,5166781,"TERMINAL",0,0,"269225022209",,terminal_output +3589,5167806,"TERMINAL",0,0,"371033133130",,terminal_output +3590,5168852,"TERMINAL",0,0,"4814424421",,terminal_output +3591,5169920,"TERMINAL",0,0,"5925535532",,terminal_output +3592,5170980,"TERMINAL",0,0,"64036646643",,terminal_output +3593,5172003,"TERMINAL",0,0,"7147757754",,terminal_output +3594,5173130,"TERMINAL",0,0,"8258868865",,terminal_output +3595,5174159,"TERMINAL",0,0,"9369979976",,terminal_output +3596,5175133,"TERMINAL",0,0,"5047205:008102087",,terminal_output +3597,5176185,"TERMINAL",0,0,"1581191198",,terminal_output +3598,5177227,"TERMINAL",0,0,"2720337:01333140",,terminal_output +3599,5178278,"TERMINAL",0,0,"4814424421",,terminal_output +3600,5179331,"TERMINAL",0,0,"5925535532",,terminal_output +3601,5180383,"TERMINAL",0,0,"65036646643",,terminal_output +3602,5181526,"TERMINAL",0,0,"7147757754",,terminal_output +3603,5182551,"TERMINAL",0,0,"8258868865",,terminal_output +3604,5183514,"TERMINAL",0,0,"9369979976",,terminal_output +3605,5184602,"TERMINAL",0,0,"7:004730108203087",,terminal_output +3606,5185610,"TERMINAL",0,0,"1581191198",,terminal_output +3607,5186750,"TERMINAL",0,0,"269221022409",,terminal_output +3608,5187774,"TERMINAL",0,0,"373033133150",,terminal_output +3609,5188797,"TERMINAL",0,0,"4814424421",,terminal_output +3610,5189821,"TERMINAL",0,0,"5925535532",,terminal_output +3611,5190845,"TERMINAL",0,0,"64:0036646643",,terminal_output +3612,5191971,"TERMINAL",0,0,"7147757754",,terminal_output +3613,5192936,"TERMINAL",0,0,"8258868865",,terminal_output +3614,5193965,"TERMINAL",0,0,"9369979976",,terminal_output +3615,5195045,"TERMINAL",0,0,"104740208304087",,terminal_output +3616,5196048,"TERMINAL",0,0,"1581191198",,terminal_output +3617,5197194,"TERMINAL",0,0,"269222022509",,terminal_output +3618,5198217,"TERMINAL",0,0,"37403313318:00",,terminal_output +3619,5199241,"TERMINAL",0,0,"4814424421",,terminal_output +3620,5200217,"TERMINAL",0,0,"51036646643",,terminal_output +3621,5201274,"TERMINAL",0,0,"7147757754",,terminal_output +3622,5202320,"TERMINAL",0,0,"8258868865",,terminal_output +3623,5203369,"TERMINAL",0,0,"9369979976",,terminal_output +3624,5204408,"TERMINAL",0,0,"204750308405087",,terminal_output +3625,5205456,"TERMINAL",0,0,"1581191198",,terminal_output +3626,5206614,"TERMINAL",0,0,"2692230228:009",,terminal_output +3627,5207639,"TERMINAL",0,0,"375033133110",,terminal_output +3628,5208664,"TERMINAL",0,0,"4814424421",,terminal_output +3629,5209687,"TERMINAL",0,0,"5925535532",,terminal_output +3630,5210674,"TERMINAL",0,0,"62036646643",,terminal_output +3631,5211735,"TERMINAL",0,0,"7147757754",,terminal_output +3632,5212770,"TERMINAL",0,0,"8258868865",,terminal_output +3633,5213887,"TERMINAL",0,0,"9369979976",,terminal_output +3634,5214855,"TERMINAL",0,0,"30476:00408508:0087",,terminal_output 
+3635,5215933,"TERMINAL",0,0,"1581191198",,terminal_output +3636,5216952,"TERMINAL",0,0,"269224022109",,terminal_output +3637,5218028,"TERMINAL",0,0,"374:0033133120",,terminal_output +3638,5219115,"TERMINAL",0,0,"4814424421",,terminal_output +3639,5220132,"TERMINAL",0,0,"5925535532",,terminal_output +3640,5221177,"TERMINAL",0,0,"63036646643",,terminal_output +3641,5222290,"TERMINAL",0,0,"7147757754",,terminal_output +3642,5223324,"TERMINAL",0,0,"9369979976",,terminal_output +3643,5224316,"TERMINAL",0,0,"4047105088:001087",,terminal_output +3644,5225382,"TERMINAL",0,0,"1581191198",,terminal_output +3645,5226409,"TERMINAL",0,0,"269225022209",,terminal_output +3646,5227451,"TERMINAL",0,0,"371033133130",,terminal_output +3647,5228492,"TERMINAL",0,0,"4814424421",,terminal_output +3648,5229554,"TERMINAL",0,0,"5925535532",,terminal_output +3649,5230631,"TERMINAL",0,0,"64036646643",,terminal_output +3650,5231703,"TERMINAL",0,0,"7147757754",,terminal_output +3651,5232728,"TERMINAL",0,0,"8258868865",,terminal_output +3652,5233853,"TERMINAL",0,0,"9369979976",,terminal_output +3653,5234819,"TERMINAL",0,0,"5047206:008102087",,terminal_output +3654,5235903,"TERMINAL",0,0,"1581191198",,terminal_output +3655,5236926,"TERMINAL",0,0,"269228:0022309",,terminal_output +3656,5237960,"TERMINAL",0,0,"372033133140",,terminal_output +3657,5239077,"TERMINAL",0,0,"4814424421",,terminal_output +3658,5240060,"TERMINAL",0,0,"5925535532",,terminal_output +3659,5241125,"TERMINAL",0,0,"65036646643",,terminal_output +3660,5242155,"TERMINAL",0,0,"7147757754",,terminal_output +3661,5243276,"TERMINAL",0,0,"8258868865",,terminal_output +3662,5244250,"TERMINAL",0,0,"94730108203087",,terminal_output +3663,5245304,"TERMINAL",0,0,"8:01581191198",,terminal_output +3664,5246353,"TERMINAL",0,0,"269221022409",,terminal_output +3665,5247398,"TERMINAL",0,0,"373033133150",,terminal_output +3666,5248442,"TERMINAL",0,0,"4814424421",,terminal_output +3667,5249624,"TERMINAL",0,0,"5925535532",,terminal_output +3668,5250558,"TERMINAL",0,0,"65:0036646643",,terminal_output +3669,5251677,"TERMINAL",0,0,"7147757754",,terminal_output +3670,5252697,"TERMINAL",0,0,"8258868865",,terminal_output +3671,5253727,"TERMINAL",0,0,"9369979976",,terminal_output +3672,5254846,"TERMINAL",0,0,"104740208304087",,terminal_output +3673,5255868,"TERMINAL",0,0,"1581191198",,terminal_output +3674,5256893,"TERMINAL",0,0,"269222022509",,terminal_output +3675,5257879,"TERMINAL",0,0,"37403313319:00",,terminal_output +3676,5258942,"TERMINAL",0,0,"4814424421",,terminal_output +3677,5260068,"TERMINAL",0,0,"5925535532",,terminal_output +3678,5261102,"TERMINAL",0,0,"61036646643",,terminal_output +3679,5262064,"TERMINAL",0,0,"7147757754",,terminal_output +3680,5263110,"TERMINAL",0,0,"8258868865",,terminal_output +3681,5264160,"TERMINAL",0,0,"9369979976",,terminal_output +3682,5265291,"TERMINAL",0,0,"204750308405087",,terminal_output +3683,5266315,"TERMINAL",0,0,"1692230229:009",,terminal_output +3684,5267303,"TERMINAL",0,0,"375033133110",,terminal_output +3685,5268353,"TERMINAL",0,0,"4814424421",,terminal_output +3686,5269391,"TERMINAL",0,0,"5925535532",,terminal_output +3687,5270444,"TERMINAL",0,0,"62036646643",,terminal_output +3688,5271475,"TERMINAL",0,0,"7147757754",,terminal_output +3689,5272564,"TERMINAL",0,0,"8258868865",,terminal_output +3690,5273586,"TERMINAL",0,0,"9369979976",,terminal_output +3691,5274713,"TERMINAL",0,0,"30477:00408509:0087",,terminal_output +3692,5275659,"TERMINAL",0,0,"1581191198",,terminal_output 
+3693,5276703,"TERMINAL",0,0,"269224022109",,terminal_output +3694,5277749,"TERMINAL",0,0,"375:0033133120",,terminal_output +3695,5278792,"TERMINAL",0,0,"4814424421",,terminal_output +3696,5279933,"TERMINAL",0,0,"5925535532",,terminal_output +3697,5280957,"TERMINAL",0,0,"63036646643",,terminal_output +3698,5281918,"TERMINAL",0,0,"7147757754",,terminal_output +3699,5282968,"TERMINAL",0,0,"8258868865",,terminal_output +3700,5284036,"TERMINAL",0,0,"9369979976",,terminal_output +3701,5285156,"TERMINAL",0,0,"4047105089:001087",,terminal_output +3702,5286180,"TERMINAL",0,0,"1581191198",,terminal_output +3703,5287220,"TERMINAL",0,0,"269225022209",,terminal_output +3704,5288169,"TERMINAL",0,0,"371033133130",,terminal_output +3705,5289239,"TERMINAL",0,0,"4925535532",,terminal_output +3706,5290276,"TERMINAL",0,0,"64036646643",,terminal_output +3707,5291306,"TERMINAL",0,0,"7147757754",,terminal_output +3708,5292347,"TERMINAL",0,0,"8258868865",,terminal_output +3709,5293390,"TERMINAL",0,0,"9369979976",,terminal_output +3710,5294436,"TERMINAL",0,0,"5047207:008102087",,terminal_output +3711,5295483,"TERMINAL",0,0,"1581191198",,terminal_output +3712,5296611,"TERMINAL",0,0,"269229:0022309",,terminal_output +3713,5297650,"TERMINAL",0,0,"372033133140",,terminal_output +3714,5298674,"TERMINAL",0,0,"4814424421",,terminal_output +3715,5299676,"TERMINAL",0,0,"5925535532",,terminal_output +3716,5300722,"TERMINAL",0,0,"65036646643",,terminal_output +3717,5301951,"TERMINAL",0,0,"7147757754",,terminal_output +3718,5302849,"TERMINAL",0,0,"8258868865",,terminal_output +3719,5303896,"TERMINAL",0,0,"9369979976",,terminal_output +3720,5304904,"TERMINAL",0,0,"9:004730108203087",,terminal_output +3721,5306046,"TERMINAL",0,0,"1581191198",,terminal_output +3722,5306991,"TERMINAL",0,0,"269221022409",,terminal_output +3723,5308094,"TERMINAL",0,0,"373033133150",,terminal_output +3724,5309082,"TERMINAL",0,0,"4814424421",,terminal_output +3725,5310141,"TERMINAL",0,0,"5925535532",,terminal_output +3726,5311270,"TERMINAL",0,0,"66:0036646643",,terminal_output +3727,5312293,"TERMINAL",0,0,"7258868865",,terminal_output +3728,5313320,"TERMINAL",0,0,"9369979976",,terminal_output +3729,5314314,"TERMINAL",0,0,"104740208304087",,terminal_output +3730,5315376,"TERMINAL",0,0,"1581191198",,terminal_output +3731,5316409,"TERMINAL",0,0,"269222022509",,terminal_output +3732,5317456,"TERMINAL",0,0,"374033133140:00",,terminal_output +3733,5318538,"TERMINAL",0,0,"4814424421",,terminal_output +3734,5319563,"TERMINAL",0,0,"5925535532",,terminal_output +3735,5320639,"TERMINAL",0,0,"61036646643",,terminal_output +3736,5321713,"TERMINAL",0,0,"7147757754",,terminal_output +3737,5322701,"TERMINAL",0,0,"8258868865",,terminal_output +3738,5323747,"TERMINAL",0,0,"9369979976",,terminal_output +3739,5324891,"TERMINAL",0,0,"204750308405087",,terminal_output +3740,5325911,"TERMINAL",0,0,"1581191198",,terminal_output +3741,5326938,"TERMINAL",0,0,"26922302240:009",,terminal_output +3742,5327927,"TERMINAL",0,0,"375033133110",,terminal_output +3743,5328985,"TERMINAL",0,0,"4814424421",,terminal_output +3744,5330111,"TERMINAL",0,0,"5925535532",,terminal_output +3745,5331133,"TERMINAL",0,0,"62036646643",,terminal_output +3746,5332158,"TERMINAL",0,0,"7147757754",,terminal_output +3747,5333186,"TERMINAL",0,0,"8258868865",,terminal_output +3748,5334205,"TERMINAL",0,0,"9369979976",,terminal_output +3749,5335369,"TERMINAL",0,0,"30588:014195140:0198",,terminal_output +3750,5336284,"TERMINAL",0,0,"269224022109",,terminal_output 
+[seq 3751–3800 elided: garbled digit diffs from the background watch-style job monitor]
+3801,5388796,"TERMINAL",0,0,"bash",,terminal_focus
+[seq 3802–3808 elided: garbled digit diffs from the background watch-style job monitor]
+[seq 3809–3833, 3836, 3840–3841, 3844–3846, 3849–3852 elided: garbled digit diffs from the background watch-style job monitor]
+3834,5436962,"TERMINAL",0,0,"time=05:00:00 --partition=accelerated --nodes=2 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5",,terminal_command
+3835,5436976,"TERMINAL",0,0,"^C\r\n\r[?2004l\r[?2004h[?2004l\r\r\n]633;E;;361f455a-15ad-4916-88b6-b9e49434d7af]633;C]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D",,terminal_output
+3837,5437762,"TERMINAL",0,0,"idle",,terminal_command
+3838,5437808,"TERMINAL",0,0,"]633;E;2025-08-11 17:21:13 idle;361f455a-15ad-4916-88b6-b9e49434d7af]633;C",,terminal_output
+3839,5438736,"TERMINAL",0,0,"Partition dev_cpuonly : 12 nodes idle\r\nPartition cpuonly : 52 nodes idle\r\nPartition dev_accelerated : 2 nodes idle\r\nPartition accelerated : 46 nodes idle\r\nPartition dev_accelerated-h100 : 1 nodes idle\r\nPartition accelerated-h100 : 2 nodes idle\r\nPartition large : 7 nodes idle\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output
+3842,5442686,"TERMINAL",0,0,"git branch^C",,terminal_command
+3843,5442706,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;361f455a-15ad-4916-88b6-b9e49434d7af]633;C]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D",,terminal_output
+3847,5445782,"TERMINAL",0,0,"l -f train_dynamics_cau^Cl_8_node_3415137.log",,terminal_command
+3848,5445805,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;361f455a-15ad-4916-88b6-b9e49434d7af]633;C]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D",,terminal_output
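The `idle` command here looks like a site-local alias that summarizes idle nodes per SLURM partition before the allocation request that follows. A minimal Python sketch of the same check, assuming only that a standard `sinfo` binary is on PATH (the alias itself never appears in the recording):

```python
import subprocess
from collections import defaultdict

def idle_nodes_per_partition() -> dict:
    # -h: no header; -o "%P %t %D": partition, node state, node count per line
    out = subprocess.run(
        ["sinfo", "-h", "-o", "%P %t %D"],
        capture_output=True, text=True, check=True,
    ).stdout
    idle = defaultdict(int)
    for line in out.splitlines():
        partition, state, count = line.split()
        if state == "idle":
            idle[partition.rstrip("*")] += int(count)  # "*" marks the default partition
    return dict(idle)

if __name__ == "__main__":
    for partition, n in sorted(idle_nodes_per_partition().items()):
        print(f"Partition {partition} : {n} nodes idle")
```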
+[seq 3853–3863, 3866 (a garbled squeue-style redraw), 3868–3875, 3879–3883, 3885, 3887, 3891, 3906, 3914, 3925, 3931, 3940, 3946–3949, 3951 elided: garbled digit diffs from the background watch-style job monitor]
+3864,5462156,"TERMINAL",0,0,"salloc --time=00:30:00 --partition=accelerated --nodes=8 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5",,terminal_command
+3865,5462208,"TERMINAL",0,0,"]633;E;2025-08-11 17:21:37 salloc --time=00:30:00 --partition=accelerated --nodes=8 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5;361f455a-15ad-4916-88b6-b9e49434d7af]633;Csalloc: Granted job allocation 3415167\r\n",,terminal_output
+3867,5462313,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output
+3876,5471292,"train_dynamics.py",0,0,"",python,tab
+3877,5471669,"train_dynamics.py",2003,0,"",python,selection_mouse
+3878,5471673,"train_dynamics.py",2002,0,"",python,selection_command
+3884,5476810,"train_dynamics.py",4346,0,"",python,selection_command
+3886,5477174,"train_dynamics.py",12500,0,"",python,selection_command
+3888,5478134,"train_dynamics.py",12464,0,"\n        ",python,content
+[seq 3889–3945 summarized: "jax.debug.breakpoint()" typed character by character on the new line in train_dynamics.py, with two typos corrected in flight (a stray "g" after "deb" and a doubled "p" in "breakp", each backspaced); the editor auto-paired the closing parenthesis]
+3950,5489377,"TERMINAL",0,0,"salloc: Nodes hkn[0413-0419,0421] are ready for job\r\n",,terminal_output
+3952,5490392,"TERMINAL",0,0,"]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h[tum_cte0515@hkn0413 jafar]$ ",,terminal_output
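The edit above inserts `jax.debug.breakpoint()` into train_dynamics.py. That is a real JAX API: unlike Python's built-in `breakpoint()`, it survives `jax.jit` tracing by registering a host callback, so it can pause a compiled training step. A minimal, self-contained sketch of the pattern (the surrounding loss function is illustrative, not the project's actual code):

```python
import jax
import jax.numpy as jnp

@jax.jit
def loss_fn(params, batch):
    preds = params["w"] * batch["x"] + params["b"]
    loss = jnp.mean((preds - batch["y"]) ** 2)
    # Drops into a pdb-like prompt on the host when this point is reached,
    # even though the function is jit-compiled.
    jax.debug.breakpoint()
    return loss

params = {"w": jnp.array(2.0), "b": jnp.array(0.5)}
batch = {"x": jnp.arange(4.0), "y": 2.0 * jnp.arange(4.0)}
print(loss_fn(params, batch))
```

In a 32-process srun launch like the one being set up here, every process would hit the breakpoint, so it is typically reserved for single-process interactive sessions like this salloc shell.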
+[seq 3953–3962, 3970, 3978, 3980–3981, 3985, 3991–3993, 3995–3998, 4002, 4004–4005, 4007–4008 elided: garbled digit diffs from the background watch-style job monitor]
+[seq 3963–3977 summarized: interactive typing of "source .venv/bin/activate" at the prompt, completed with shell tab-completion]
+3979,5502227,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output
+[seq 3982–3990 summarized: shell history scrollback echoing earlier commands ("source .venv/bin/activate", "ls", "cd ..")]
+3994,5509360,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab
+[seq 3999–4003 summarized: typing "sh " at the prompt]
+4006,5516564,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",,terminal_output
+4009,5517856,"TERMINAL",0,0,"\rslurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch\r\n[?2004l\r#!/usr/bin/env bash\r\n\r\n#SBATCH --nodes=8\r\n#SBATCH --ntasks-per-node=4\r\n#SBATCH --time=48:00:00\r\n#SBATCH --partition=accelerated\r\n#SBATCH --cpus-per-task=5\r\n#SBATCH --gres=gpu:4\r\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\r\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\r\n#SBATCH --job-name=train_dynamics_causal_8_node\r\n#SBATCH --requeue\r\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\r\n\r\n# --- signal trap to requeue job before timeout ---\r\nrequeue_job() {\r\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\r\n # optional: trigger checkpoint saving here\r\n # e.g., touch $checkpoint_dir/requeue_trigger\r\n scontrol requeue $SLURM_JOB_ID\r\n exit 0\r\n}\r\n\r\ntrap requeue_job sigusr1\r\n\r\n# set checkpoint flag based on restart count\r\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\r\n\r\nif [ $restart_count -eq 0 ]; then\r\n restore_ckpt_flag=""--no-restore-ckpt""\r\nelse\r\n restore_ckpt_flag=""--restore-ckpt""\r\nfi\r\n\r\n\r\n\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\n# tokenizer with the new structure supporting larger ffn_dim\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\r\n\r\nenv | grep SLURM\r\n\r\nsrun python train_dynamics.py \\r\n --save_ckpt \\r\n $restore_ckpt_flag \\r\n --wandb_id $SLURM_JOB_ID \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=256 \\r\n --init_lr=0 \\r\n --dyna_type=causal \\r\n --max_lr=8e-5 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-causal-8-node-$slurm_job_id \\r\n --tags dynamics causal 8-node post-launch-main \\r\n --entity instant-uv \\r\n --project jafar \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $array_records_dir &\r\n\r\nchild_pid=$!\r\n\r\nwait $child_pid\r\n",,terminal_output
+[seq 4010–4012 elided: garbled digit diffs from the background watch-style job monitor]
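The script's restart-count check passes either `--restore-ckpt` or `--no-restore-ckpt` to train_dynamics.py, which implies a paired boolean flag on the Python side. A plausible sketch of that parser, assuming `argparse.BooleanOptionalAction` (Python 3.9+); the real parser in the repo is not shown in this recording:

```python
import argparse

parser = argparse.ArgumentParser()
# BooleanOptionalAction generates both --restore-ckpt and --no-restore-ckpt,
# matching how the sbatch script passes $restore_ckpt_flag.
parser.add_argument("--restore-ckpt", action=argparse.BooleanOptionalAction, default=False)
parser.add_argument("--save_ckpt", action="store_true")
parser.add_argument("--ckpt_dir", type=str)
parser.add_argument("--wandb_id", type=str)

# Fresh submission (Restarts=0): the script passes --no-restore-ckpt.
args = parser.parse_args(["--no-restore-ckpt", "--save_ckpt"])
assert args.restore_ckpt is False and args.save_ckpt is True
```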
+4013,5521294,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x8)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=796793\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0413\r\nSLURM_JOB_START_TIME=1754925697\r\nSLURM_STEP_NODELIST=hkn0413\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1754927497\r\nSLURM_PMI2_SRUN_PORT=43975\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x8)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=8\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3415167\r\nSLURM_PTY_PORT=34295\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=41\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=32\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e12.hkn0413\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=129\r\nSLURM_NODELIST=hkn[0413-0419,0421]\r\nSLURM_SRUN_COMM_PORT=38301\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=32\r\nSLURM_NNODES=8\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3415167\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0413\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38301\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0413-0419,0421]\r\n",,terminal_output
+4014,5521431,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n[line repeated 8 times]\r\n",,terminal_output
+4048,5555618,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output
+4050,5556622,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250811_172311-3415167\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-causal-8-node-3415167\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/3415167\r\n",,terminal_output
+4123,5631609,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\n[message repeated 32 times; duplicates trimmed]\r\n",,terminal_output
+4131,5633101,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\n[similar WARNING/ERROR pairs for steps 20000, 40000, 60000, 77000, 78000, and 79000 repeated many times; duplicates trimmed]\r\n",,terminal_output
+4133,5633749,"TERMINAL",0,0,"[the following orbax UserWarning is emitted once per restoring process; duplicates trimmed]\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +4134,5633816,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +4135,5634698,"TERMINAL",0,0,"303473:00408505:0087",,terminal_output +4136,5635748,"TERMINAL",0,0,"14581191198",,terminal_output +4137,5636796,"TERMINAL",0,0,"2569224022109",,terminal_output +4138,5637018,"TERMINAL",0,0,"Running on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\n",,terminal_output +4139,5637155,"TERMINAL",0,0,"Running on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 
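The UserWarning above comes from orbax restoring a checkpoint without target sharding information. A minimal sketch of silencing it, assuming orbax-checkpoint's StandardCheckpointer quickstart API; the path and the state pytree are illustrative, not this project's actual restore code:

# Sketch only: restore with an abstract target carrying explicit shardings,
# which avoids "Sharding info not provided when restoring" and the extra
# file read. Assumes /tmp/ckpt_demo does not already exist.
import jax
import numpy as np
import orbax.checkpoint as ocp
from etils import epath

path = epath.Path('/tmp/ckpt_demo')  # hypothetical location
state = {'w': np.zeros((8, 8), np.float32)}

ckptr = ocp.StandardCheckpointer()
ckptr.save(path / 'state', state)

# Abstract pytree: each leaf records shape, dtype, AND the desired sharding.
mesh = jax.sharding.Mesh(np.array(jax.devices()), ('d',))
repl = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec())
abstract = jax.tree_util.tree_map(
    lambda x: jax.ShapeDtypeStruct(x.shape, x.dtype, sharding=repl), state)

restored = ckptr.restore(path / 'state', abstract)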
[... records 4139-4154 elided: the "Running on 32 devices." startup banner repeated verbatim by the remaining host processes (records 4139-4142), followed by stray terminal progress fragments (records 4143-4154) ...]
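The "Parameter counts" banner is printed once per host process, hence the repetition. For reference, a count in this format can be computed from a pytree of arrays; this sketch uses made-up component shapes, not the actual models:

# Sketch only: per-component and total parameter counts for pytrees of arrays.
import jax
import jax.numpy as jnp

def param_count(tree) -> int:
    # Sum of element counts over all array leaves in the pytree.
    return sum(leaf.size for leaf in jax.tree_util.tree_leaves(tree))

components = {
    'dynamics': {'w': jnp.zeros((1024, 1024))},
    'lam': {'w': jnp.zeros((512, 512))},
    'tokenizer': {'w': jnp.zeros((256, 256))},
}
counts = {name: param_count(tree) for name, tree in components.items()}
counts['total'] = sum(counts.values())
print(f"Counting all components: {sorted(components)}")
print('Parameter counts:')
print(counts)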
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:2025-08-11 17:24:45,138:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nEntering jdb:\r\nERROR:2025-08-11 17:24:45,204:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n 
File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nEntering jdb:\r\nERROR:2025-08-11 17:24:45,251:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, 
thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n",,terminal_output +4156,5649588,"TERMINAL",0,0,"Entering jdb:\r\nERROR:2025-08-11 17:24:45,368:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n",,terminal_output +4157,5649710,"TERMINAL",0,0,"Entering jdb:\r\n",,terminal_output +4158,5649770,"TERMINAL",0,0,"ERROR:2025-08-11 17:24:45,493:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n",,terminal_output +4159,5649832,"TERMINAL",0,0,"Entering jdb:\r\nEntering jdb:\r\n",,terminal_output +4160,5649884,"TERMINAL",0,0,"ERROR:2025-08-11 17:24:45,610:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:2025-08-11 17:24:45,610:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, 
**kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n",,terminal_output +4161,5650013,"TERMINAL",0,0,"Entering jdb:\r\nERROR:2025-08-11 17:24:45,703:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nEntering jdb:\r\nERROR:2025-08-11 17:24:45,731:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File 
""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nEntering jdb:\r\nERROR:2025-08-11 17:24:45,777:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File 
""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nEntering jdb:\r\n",,terminal_output +4162,5650070,"TERMINAL",0,0,"ERROR:2025-08-11 17:24:45,799:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n 
return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n",,terminal_output +4163,5650182,"TERMINAL",0,0,"Entering jdb:\r\nEntering jdb:\r\nERROR:2025-08-11 17:24:45,935:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:2025-08-11 17:24:45,935:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 
0\r\n",,terminal_output +4164,5650319,"TERMINAL",0,0,"Entering jdb:\r\nERROR:2025-08-11 17:24:46,082:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nEntering jdb:\r\nERROR:2025-08-11 17:24:46,093:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n 
File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n",,terminal_output +4165,5650415,"TERMINAL",0,0,"Entering jdb:\r\nERROR:2025-08-11 17:24:46,166:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in 
debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n",,terminal_output +4166,5650441,"TERMINAL",0,0,"694036646643",,terminal_output +4167,5650575,"TERMINAL",0,0,"Entering jdb:\r\nEntering jdb:\r\nERROR:2025-08-11 17:24:46,314:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:2025-08-11 17:24:46,306:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py"", line 36, in exit\r\n self._orig_exit(orig_code) # type: ignore\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py"", line 36, in exit\r\n self._orig_exit(orig_code) # type: ignore\r\nSystemExit: 0\r\n",,terminal_output +4168,5650959,"TERMINAL",0,0,"Entering jdb:\r\nERROR:2025-08-11 17:24:46,639:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nEntering jdb:\r\n",,terminal_output +4169,5651017,"TERMINAL",0,0,"ERROR:2025-08-11 17:24:46,745:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in 
debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n",,terminal_output +4170,5651184,"TERMINAL",0,0,"(jdb) Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4171,5651455,"TERMINAL",0,0,"Entering jdb:\r\nERROR:2025-08-11 17:24:47,202:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 
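Every `jax.debug.callback failed` block above is the same event: a `jax.debug.breakpoint()` host callback opens the `(jdb)` prompt, and quitting it with `q` reaches `CliDebugger.do_quit`, which calls `sys.exit(0)`; the resulting `SystemExit` escapes the callback, so JAX logs the failure once per pending breakpoint instead of terminating the run. A minimal sketch that reproduces this pattern (the function below is an illustration assumed from the tracebacks, not code from the recording):

```python
# Hypothetical repro of the logged failure mode: jax.debug.breakpoint()
# registers a host callback, and quitting the (jdb) prompt raises
# SystemExit *inside* that callback, which JAX catches and logs.
import jax
import jax.numpy as jnp

@jax.jit
def step(x):
    jax.debug.breakpoint()  # drops into the (jdb) CLI debugger at runtime
    return x + 1

step(jnp.zeros(()))  # typing `q` at (jdb) runs do_quit -> sys.exit(0),
                     # logged as "jax.debug.callback failed" per breakpoint
```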
+4171,5651455,"TERMINAL",0,0,"Entering jdb:\r\nERROR:2025-08-11 17:24:47,202:jax._src.debugging:98: jax.debug.callback failed\r\n[identical traceback elided]\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\n[identical traceback elided]\r\nSystemExit: 0\r\n",,terminal_output
+4172,5651486,"TERMINAL",0,0,"710147757754",,terminal_output
+4173,5651564,"TERMINAL",0,0,"(jdb) Filtering out episode with length 15, which is shorter than the requested sequence length 16.\r\n(jdb) Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4174,5651753,"TERMINAL",0,0,"(jdb) Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4175,5651859,"TERMINAL",0,0,"(jdb) Filtering out episode with length 12, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4176,5651911,"TERMINAL",0,0,"Entering jdb:\r\n",,terminal_output
+4177,5651964,"TERMINAL",0,0,"ERROR:2025-08-11 17:24:47,680:jax._src.debugging:98: jax.debug.callback failed\r\n[identical traceback elided]\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\n[identical traceback elided]\r\nSystemExit: 0\r\n",,terminal_output
+4178,5652058,"TERMINAL",0,0,"(jdb) Filtering out episode with length 3, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4179,5652118,"TERMINAL",0,0,"Filtering out episode with length 15, which is shorter than the requested sequence length 16.\r\nEntering jdb:\r\n",,terminal_output
+4180,5652169,"TERMINAL",0,0,"ERROR:2025-08-11 17:24:47,899:jax._src.debugging:98: jax.debug.callback failed\r\n[identical traceback elided]\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\n[identical traceback elided]\r\nSystemExit: 0\r\n",,terminal_output
+4181,5652266,"TERMINAL",0,0,"Filtering out episode with length 10, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4182,5652380,"TERMINAL",0,0,"(jdb) Filtering out episode with length 14, which is shorter than the requested sequence length 16.\r\nEntering jdb:\r\nEntering jdb:\r\n(jdb) Filtering out episode with length 14, which is shorter than the requested sequence length 16.\r\n",,terminal_output
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:2025-08-11 17:24:48,160:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File 
""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n",,terminal_output +4184,5652555,"TERMINAL",0,0,"81258868865",,terminal_output +4185,5652620,"TERMINAL",0,0,"Entering jdb:\r\nERROR:2025-08-11 17:24:48,372:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File 
""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n",,terminal_output +4186,5652724,"TERMINAL",0,0,"Entering jdb:\r\nERROR:2025-08-11 17:24:48,437:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in 
cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n",,terminal_output +4187,5652908,"TERMINAL",0,0,"Entering jdb:\r\nERROR:2025-08-11 17:24:48,671:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File 
""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n",,terminal_output +4188,5653218,"TERMINAL",0,0,"(jdb) Filtering out episode with length 12, which is shorter than the requested sequence length 16.\r\nEntering jdb:\r\nERROR:2025-08-11 17:24:48,958:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File 
""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n(jdb) Filtering out episode with length 4, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4189,5653327,"TERMINAL",0,0,"(jdb) Filtering out episode with length 12, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4190,5653389,"TERMINAL",0,0,"Entering jdb:\r\nERROR:2025-08-11 17:24:49,162:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File 
""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n",,terminal_output +4191,5653461,"TERMINAL",0,0,"Entering jdb:\r\n",,terminal_output +4192,5653524,"TERMINAL",0,0,"ERROR:2025-08-11 17:24:49,247:jax._src.debugging:98: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 96, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 336, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File 
""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n",,terminal_output +4193,5653627,"TERMINAL",0,0,"92369979976",,terminal_output +4194,5653628,"TERMINAL",0,0,"Filtering out episode with length 12, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4195,5653745,"TERMINAL",0,0,"Filtering out episode with length 9, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4196,5653880,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4197,5654066,"TERMINAL",0,0,"(jdb) Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4198,5654526,"TERMINAL",0,0,"Filtering out episode with length 14, which is shorter than the requested sequence length 16.\r\n(jdb) Filtering out episode with length 6, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4199,5654625,"TERMINAL",0,0,"(jdb) Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4200,5654636,"TERMINAL",0,0,"50347203:008102087",,terminal_output +4201,5654741,"TERMINAL",0,0,"(jdb) Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4202,5654970,"TERMINAL",0,0,"(jdb) Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4203,5655079,"TERMINAL",0,0,"(jdb) Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4204,5655535,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run dynamics-causal-8-node-3415167 at: https://wandb.ai/instant-uv/jafar/runs/3415167\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250811_172311-3415167/logs\r\n",,terminal_output +4205,5655677,"TERMINAL",0,0,"14581191198",,terminal_output +4206,5655688,"TERMINAL",0,0,"(jdb) Filtering out episode with length 11, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4207,5656156,"TERMINAL",0,0,"(jdb) Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4208,5656770,"TERMINAL",0,0,"2569225:0022309",,terminal_output +4209,5657386,"TERMINAL",0,0,"(jdb) Filtering out episode with length 2, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4210,5657773,"TERMINAL",0,0,"3672033133140",,terminal_output +4211,5658008,"TERMINAL",0,0,"(jdb) Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4212,5658925,"TERMINAL",0,0,"47814424421",,terminal_output +4213,5659944,"TERMINAL",0,0,"58925535532",,terminal_output 
+4214,5660772,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 4 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 9 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 3 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 4 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 4 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 9 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 3 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 8 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n(jdb) /home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 2 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 3 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 3 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d 
'\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 3 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n(jdb) ",,terminal_output +4215,5660904,"TERMINAL",0,0,"695036646643",,terminal_output +4216,5661076,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 2 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 4 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 2 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 5 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n(jdb) ",,terminal_output +4217,5661137,"TERMINAL",0,0,"/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 4 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 6 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 5 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 3 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 3 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There 
appear to be 8 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 9 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 8 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 9 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 9 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 9 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 3 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n(jdb) /home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 9 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 9 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 4 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 6 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n(jdb) (jdb) (jdb) (jdb) (jdb) (jdb) (jdb) ",,terminal_output +4218,5661993,"TERMINAL",0,0,"720147757754",,terminal_output +4219,5662114,"TERMINAL",0,0,"]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 
jafar]$ ",,terminal_output +4220,5662993,"TERMINAL",0,0,"81258868865",,terminal_output +4221,5664143,"TERMINAL",0,0,"92369979976",,terminal_output +4222,5665088,"TERMINAL",0,0,"5:0034730108203087",,terminal_output +4223,5666190,"TERMINAL",0,0,"14581191198",,terminal_output +4224,5667214,"TERMINAL",0,0,"2569221022409",,terminal_output +4225,5668237,"TERMINAL",0,0,"3783144244251",,terminal_output +4226,5669366,"TERMINAL",0,0,"58925535532",,terminal_output +4227,5670392,"TERMINAL",0,0,"692:0036646643",,terminal_output +4228,5671411,"TERMINAL",0,0,"730147757754",,terminal_output +4229,5672415,"TERMINAL",0,0,"81258868865",,terminal_output +4230,5673468,"TERMINAL",0,0,"92369979976",,terminal_output +4231,5674520,"TERMINAL",0,0,"1034740208304087",,terminal_output +4232,5675559,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +4233,5675651,"TERMINAL",0,0,"14581191198",,terminal_output +4234,5676614,"TERMINAL",0,0,"2569222022509",,terminal_output +4235,5676798,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +4236,5677759,"TERMINAL",0,0,"367403313316:00",,terminal_output +4237,5678784,"TERMINAL",0,0,"47814424421",,terminal_output +4238,5679081,"train_dynamics.py",0,0,"",python,tab +4239,5679751,"TERMINAL",0,0,"58925535532",,terminal_output +4240,5680785,"train_dynamics.py",12477,0,"",python,selection_command +4241,5680839,"TERMINAL",0,0,"691036646643",,terminal_output +4242,5681423,"train_dynamics.py",12477,3,"",python,content +4243,5681551,"train_dynamics.py",12477,1,"",python,content +4244,5681872,"TERMINAL",0,0,"740147757754",,terminal_output +4245,5681938,"train_dynamics.py",12477,5,"",python,content +4246,5682334,"train_dynamics.py",12477,1,"",python,content +4247,5682903,"TERMINAL",0,0,"81258868865",,terminal_output +4248,5683100,"train_dynamics.py",12476,0,"",python,selection_command +4249,5684745,"TERMINAL",0,0,"934750308405087",,terminal_output +4250,5685002,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",,terminal_output +4251,5685386,"TERMINAL",0,0,"\r\n[?2004l\r#!/usr/bin/env bash\r\n\r\n#SBATCH --nodes=8\r\n#SBATCH --ntasks-per-node=4\r\n#SBATCH --time=48:00:00\r\n#SBATCH --partition=accelerated\r\n#SBATCH --cpus-per-task=5\r\n#SBATCH --gres=gpu:4\r\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\r\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\r\n#SBATCH --job-name=train_dynamics_causal_8_node\r\n#SBATCH --requeue\r\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\r\n\r\n# --- signal trap to requeue job before timeout ---\r\nrequeue_job() {\r\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\r\n # optional: trigger checkpoint saving here\r\n # e.g., touch $checkpoint_dir/requeue_trigger\r\n scontrol requeue $SLURM_JOB_ID\r\n exit 0\r\n}\r\n\r\ntrap requeue_job sigusr1\r\n\r\n# set checkpoint flag based on restart count\r\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\r\n\r\nif [ $restart_count -eq 0 ]; then\r\n restore_ckpt_flag=""--no-restore-ckpt""\r\nelse\r\n restore_ckpt_flag=""--restore-ckpt""\r\nfi\r\n\r\n\r\n\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource 
.venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\n# tokenizer with the new structure supporting larger ffn_dim\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\r\n\r\nenv | grep SLURM\r\n\r\nsrun python train_dynamics.py \\r\n --save_ckpt \\r\n $restore_ckpt_flag \\r\n --wandb_id $SLURM_JOB_ID \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=256 \\r\n --init_lr=0 \\r\n --dyna_type=causal \\r\n --max_lr=8e-5 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-causal-8-node-$slurm_job_id \\r\n --tags dynamics causal 8-node post-launch-main \\r\n --entity instant-uv \\r\n --project jafar \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $array_records_dir &\r\n\r\nchild_pid=$!\r\n\r\nwait $child_pid\r\n",,terminal_output +4252,5685690,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x8)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=796793\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0413\r\nSLURM_JOB_START_TIME=1754925697\r\nSLURM_STEP_NODELIST=hkn0413\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1754927497\r\nSLURM_PMI2_SRUN_PORT=43975\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x8)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=8\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3415167\r\nSLURM_PTY_PORT=34295\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=41\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=32\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e12.hkn0413\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=129\r\nSLURM_NODELIST=hkn[0413-0419,0421]\r\nSLURM_SRUN_COMM_PORT=38301\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=32\r\nSLURM_NNODES=8\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3415167\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0413\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38301\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0413-0419,0421]\r\n",,terminal_output +4253,5685824,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +4254,5685884,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +4255,5687179,"TERMINAL",0,0,"215692230226:009",,terminal_output 
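The sbatch script echoed above requeues itself before hitting the wall clock: `#SBATCH --signal=b:usr1@300` asks SLURM to deliver SIGUSR1 to the batch shell five minutes before timeout, and the `requeue_job` trap then runs `scontrol requeue $SLURM_JOB_ID`. The same warning signal can also be caught inside the Python trainer to flush a checkpoint first; a sketch under that assumption, where `save_checkpoint` is a hypothetical stand-in for the trainer's real checkpoint call:

```python
# Hypothetical in-process companion to the sbatch trap: checkpoint on SIGUSR1.
import signal

def save_checkpoint():
    # Placeholder for the trainer's actual checkpoint write.
    print("flushing checkpoint before SLURM requeues the job")

def handle_timeout(signum, frame):
    save_checkpoint()
    # The sbatch trap (scontrol requeue $SLURM_JOB_ID) performs the requeue;
    # the process only needs to exit cleanly once state is on disk.
    raise SystemExit(0)

signal.signal(signal.SIGUSR1, handle_timeout)
```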
+4256,5689433,"TERMINAL",0,0,"3895255355312",,terminal_output +4257,5689916,"train_dynamics.py",0,0,"",python,tab +4258,5690652,"TERMINAL",0,0,"692036646643",,terminal_output +4259,5691697,"TERMINAL",0,0,"750147757754",,terminal_output +4260,5692738,"TERMINAL",0,0,"81258868865",,terminal_output +4261,5693514,"train_dynamics.py",12489,0,"",python,selection_mouse +4262,5693515,"train_dynamics.py",12488,0,"",python,selection_command +4263,5694964,"TERMINAL",0,0,"93474:00408506:0087",,terminal_output +4264,5696807,"TERMINAL",0,0,"31569224022109",,terminal_output +4265,5698448,"TERMINAL",0,0,"^Csrun: interrupt (one more within 1 sec to abort)\r\nsrun: StepId=3415167.1 tasks 0-31: running\r\n\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4266,5698658,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4267,5698822,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4268,5698883,"TERMINAL",0,0,"3782:0144244221",,terminal_output +4269,5699567,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",,terminal_output +4270,5700626,"TERMINAL",0,0,"593036646643",,terminal_output +4271,5700962,"TERMINAL",0,0,"[?25l[?25h[?25l[?25h",,terminal_output +4272,5701017,"TERMINAL",0,0,"[?25l[?25h",,terminal_output +4273,5701610,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +4274,5701853,"TERMINAL",0,0,"",,terminal_output +4275,5702550,"TERMINAL",0,0,"74:01258868865",,terminal_output +4276,5702559,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250811_172537-3415167\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Resuming run dynamics-causal-8-node-3415167\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/3415167\r\n",,terminal_output +4277,5703382,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4278,5703497,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4279,5703712,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4280,5703892,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4281,5704373,"TERMINAL",0,0,"9347105086:001087",,terminal_output +4282,5705681,"TERMINAL",0,0,"414581191198",,terminal_output +4283,5708184,"TERMINAL",0,0,"267103351332130",,terminal_output +4284,5708246,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",,terminal_output +4285,5710563,"TERMINAL",0,0,"494036646643",,terminal_output +4286,5712476,"TERMINAL",0,0,"711258868865",,terminal_output +4287,5714734,"TERMINAL",0,0,"[?25ls[1@P[?25h",,terminal_output +4288,5714842,"TERMINAL",0,0,"9347204:008102087",,terminal_output +4289,5714980,"TERMINAL",0,0,"[?25ls[1@Y[?25h",,terminal_output 
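The `wandb: Resuming run dynamics-causal-8-node-3415167` line a few rows earlier works because the script pins the run id to the SLURM job id (`--wandb_id $SLURM_JOB_ID`), so every relaunch or requeue attaches to the same run instead of creating a new one. A sketch of that wiring; the exact `wandb.init` call inside `train_dynamics.py` is not visible in the recording:

```python
# Sketch of resumable wandb logging keyed on the SLURM job id.
import os
import wandb

job_id = os.environ["SLURM_JOB_ID"]  # 3415167 in the session above
run = wandb.init(
    entity="instant-uv",
    project="jafar",
    id=job_id,             # --wandb_id $SLURM_JOB_ID in the sbatch script
    resume="allow",        # reattach after a requeue instead of starting fresh
    name=f"dynamics-causal-8-node-{job_id}",
)
```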
+4290,5715371,"TERMINAL",0,0,"[?25ls[1@T[?25h",,terminal_output +4291,5715483,"TERMINAL",0,0,"[?25ls[1@H[?25h",,terminal_output +4292,5715697,"TERMINAL",0,0,"[?25ls[1@O[?25h",,terminal_output +4293,5715839,"TERMINAL",0,0,"[?25ls[1@N[?25h",,terminal_output +4294,5717445,"TERMINAL",0,0,"[?25ls[1@_[?25h",,terminal_output +4295,5717627,"TERMINAL",0,0,"516720336:01333140",,terminal_output +4296,5718192,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +4297,5718615,"TERMINAL",0,0,"[?25ls[1@U[?25h",,terminal_output +4298,5718871,"TERMINAL",0,0,"[?25ls[1@N[?25h",,terminal_output +4299,5719196,"TERMINAL",0,0,"[?25ls[1@B[?25h",,terminal_output +4300,5719196,"TERMINAL",0,0,"47814424421",,terminal_output +4301,5719410,"TERMINAL",0,0,"[?25ls[1@U[?25h",,terminal_output +4302,5719667,"TERMINAL",0,0,"[?25ls[1@F[?25h",,terminal_output +4303,5719832,"TERMINAL",0,0,"[?25ls[1@F[?25h",,terminal_output +4304,5720010,"TERMINAL",0,0,"[?25ls[1@E[?25h",,terminal_output +4305,5720199,"TERMINAL",0,0,"[?25ls[1@R[?25h",,terminal_output +4306,5720256,"TERMINAL",0,0,"595036646643",,terminal_output +4307,5720270,"TERMINAL",0,0,"[?25ls[1@E[?25h",,terminal_output +4308,5720455,"TERMINAL",0,0,"[?25ls[1@D[?25h",,terminal_output +4309,5721280,"TERMINAL",0,0,"720147757754",,terminal_output +4310,5721300,"TERMINAL",0,0,"[?25ls[1@=[?25h",,terminal_output +4311,5721478,"TERMINAL",0,0,"[?25ls[1@1[?25h",,terminal_output +4312,5722410,"TERMINAL",0,0,"81258868865",,terminal_output +4313,5722677,"TERMINAL",0,0,"[?25ls[1@ [?25h",,terminal_output +4314,5723161,"TERMINAL",0,0,"\r\n[?2004l\r#!/usr/bin/env bash\r\n\r\n#SBATCH --nodes=8\r\n#SBATCH --ntasks-per-node=4\r\n#SBATCH --time=48:00:00\r\n#SBATCH --partition=accelerated\r\n#SBATCH --cpus-per-task=5\r\n#SBATCH --gres=gpu:4\r\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\r\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\r\n#SBATCH --job-name=train_dynamics_causal_8_node\r\n#SBATCH --requeue\r\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\r\n\r\n# --- signal trap to requeue job before timeout ---\r\nrequeue_job() {\r\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\r\n # optional: trigger checkpoint saving here\r\n # e.g., touch $checkpoint_dir/requeue_trigger\r\n scontrol requeue $SLURM_JOB_ID\r\n exit 0\r\n}\r\n\r\ntrap requeue_job sigusr1\r\n\r\n# set checkpoint flag based on restart count\r\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\r\n\r\nif [ $restart_count -eq 0 ]; then\r\n restore_ckpt_flag=""--no-restore-ckpt""\r\nelse\r\n restore_ckpt_flag=""--restore-ckpt""\r\nfi\r\n\r\n\r\n\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\n# tokenizer with the new structure supporting larger ffn_dim\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\r\n\r\nenv | grep SLURM\r\n\r\nsrun python train_dynamics.py \\r\n 
--save_ckpt \\r\n $restore_ckpt_flag \\r\n --wandb_id $SLURM_JOB_ID \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=256 \\r\n --init_lr=0 \\r\n --dyna_type=causal \\r\n --max_lr=8e-5 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-causal-8-node-$slurm_job_id \\r\n --tags dynamics causal 8-node post-launch-main \\r\n --entity instant-uv \\r\n --project jafar \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $array_records_dir &\r\n\r\nchild_pid=$!\r\n\r\nwait $child_pid\r\n",,terminal_output +4315,5723350,"TERMINAL",0,0,"92369979976",,terminal_output +4316,5723528,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x8)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=796793\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0413\r\nSLURM_JOB_START_TIME=1754925697\r\nSLURM_STEP_NODELIST=hkn0413\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1754927497\r\nSLURM_PMI2_SRUN_PORT=43975\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x8)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=8\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3415167\r\nSLURM_PTY_PORT=34295\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=41\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=32\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e12.hkn0413\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=129\r\nSLURM_NODELIST=hkn[0413-0419,0421]\r\nSLURM_SRUN_COMM_PORT=38301\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=32\r\nSLURM_NNODES=8\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3415167\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0413\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38301\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0413-0419,0421]\r\n",,terminal_output +4317,5723644,"TERMINAL",0,0,"srun: error: Unable to create step for job 3415167: Invalid generic resource (gres) specification\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4318,5724454,"TERMINAL",0,0,"6:0034730108203087",,terminal_output +4319,5725432,"TERMINAL",0,0,"14581191198",,terminal_output +4320,5726503,"TERMINAL",0,0,"2569221022409",,terminal_output +4321,5727510,"TERMINAL",0,0,"3673033133150",,terminal_output +4322,5728556,"TERMINAL",0,0,"47814424421",,terminal_output +4323,5729239,"TERMINAL",0,0,"PYTHONUNBUFFERED=1 sh slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",,terminal_output +4324,5729593,"TERMINAL",0,0,"58925535532",,terminal_output +4325,5730652,"TERMINAL",0,0,"693:0036646643",,terminal_output +4326,5731728,"TERMINAL",0,0,"730147757754",,terminal_output +4327,5732725,"TERMINAL",0,0,"81258868865",,terminal_output +4328,5733776,"TERMINAL",0,0,"92369979976",,terminal_output 
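Two things happen in the rows above. First, the step launch fails with `srun: error: Unable to create step for job 3415167: Invalid generic resource (gres) specification`; plausibly this is because the file is being executed with `sh` inside an existing interactive allocation, where the `#SBATCH` directives (including `--gres=gpu:4`) are plain comments, so the step request can disagree with what the interactive job actually holds. Second, the next attempt prefixes `PYTHONUNBUFFERED=1` so Python's stdout is not block-buffered when piped through srun and log lines appear promptly. A purely illustrative check of the latter, not from the session:

```python
# Illustrative check that output is unbuffered under srun.
import os
import sys

print("PYTHONUNBUFFERED =", os.environ.get("PYTHONUNBUFFERED"))
# With block buffering, prints can sit in a pipe buffer until exit;
# flushing explicitly is the per-call alternative to PYTHONUNBUFFERED=1.
print("training step 0 ...", flush=True)
```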
+4329,5734803,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +4330,5734819,"TERMINAL",0,0,"1034740208304087",,terminal_output +4331,5734856,"TERMINAL",0,0,"#!/usr/bin/env bash\r\n\r\n#SBATCH --nodes=8\r\n#SBATCH --ntasks-per-node=4\r\n#SBATCH --time=48:00:00\r\n#SBATCH --partition=accelerated\r\n#SBATCH --cpus-per-task=5\r\n#SBATCH --gres=gpu:4\r\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\r\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\r\n#SBATCH --job-name=train_dynamics_causal_8_node\r\n#SBATCH --requeue\r\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\r\n\r\n# --- signal trap to requeue job before timeout ---\r\nrequeue_job() {\r\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\r\n # optional: trigger checkpoint saving here\r\n # e.g., touch $checkpoint_dir/requeue_trigger\r\n scontrol requeue $SLURM_JOB_ID\r\n exit 0\r\n}\r\n\r\ntrap requeue_job sigusr1\r\n\r\n# set checkpoint flag based on restart count\r\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\r\n\r\nif [ $restart_count -eq 0 ]; then\r\n restore_ckpt_flag=""--no-restore-ckpt""\r\nelse\r\n restore_ckpt_flag=""--restore-ckpt""\r\nfi\r\n\r\n\r\n\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\n# tokenizer with the new structure supporting larger ffn_dim\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\r\n\r\nenv | grep SLURM\r\n\r\nsrun python train_dynamics.py \\r\n --save_ckpt \\r\n $restore_ckpt_flag \\r\n --wandb_id $SLURM_JOB_ID \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=256 \\r\n --init_lr=0 \\r\n --dyna_type=causal \\r\n --max_lr=8e-5 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-causal-8-node-$slurm_job_id \\r\n --tags dynamics causal 8-node post-launch-main \\r\n --entity instant-uv \\r\n --project jafar \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $array_records_dir &\r\n\r\nchild_pid=$!\r\n\r\nwait $child_pid\r\n",,terminal_output 
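The script's restart logic greps `Restarts=` out of `scontrol show job` to decide between `--no-restore-ckpt` on the first launch and `--restore-ckpt` after a requeue. The same decision expressed in Python, should the trainer ever need it directly; a sketch that assumes the `scontrol` output format matched by the grep above:

```python
# Python equivalent of the script's restart_count / restore_ckpt_flag logic.
import os
import re
import subprocess

job_id = os.environ["SLURM_JOB_ID"]
info = subprocess.run(
    ["scontrol", "show", "job", job_id],
    capture_output=True, text=True, check=True,
).stdout
match = re.search(r"Restarts=(\d+)", info)
restart_count = int(match.group(1)) if match else 0
restore_ckpt = restart_count > 0  # requeued jobs resume from the checkpoint dir
```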
+4332,5735466,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x8)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=796793\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0413\r\nSLURM_JOB_START_TIME=1754925697\r\nSLURM_STEP_NODELIST=hkn0413\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1754927497\r\nSLURM_PMI2_SRUN_PORT=43975\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x8)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=8\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3415167\r\nSLURM_PTY_PORT=34295\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=41\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=32\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e12.hkn0413\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=129\r\nSLURM_NODELIST=hkn[0413-0419,0421]\r\nSLURM_SRUN_COMM_PORT=38301\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=32\r\nSLURM_NNODES=8\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3415167\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0413\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38301\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0413-0419,0421]\r\nsrun: error: Unable to create step for job 3415167: Invalid generic resource (gres) specification\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4333,5735927,"TERMINAL",0,0,"14581191198",,terminal_output +4334,5736909,"TERMINAL",0,0,"2569222022509",,terminal_output +4335,5737954,"TERMINAL",0,0,"367403313317:00",,terminal_output +4336,5739001,"TERMINAL",0,0,"47814424421",,terminal_output +4337,5740032,"TERMINAL",0,0,"58925535532",,terminal_output +4338,5741249,"TERMINAL",0,0,"691036646643",,terminal_output +4339,5742217,"TERMINAL",0,0,"741258868865",,terminal_output +4340,5743155,"TERMINAL",0,0,"PYTHONUNBUFFERED=1 sh slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",,terminal_output +4341,5743267,"TERMINAL",0,0,"92369979976",,terminal_output +4342,5744323,"TERMINAL",0,0,"2034750308405087",,terminal_output +4343,5745382,"TERMINAL",0,0,"14581191198",,terminal_output +4344,5746475,"TERMINAL",0,0,"25692230227:009",,terminal_output +4345,5747497,"TERMINAL",0,0,"3675033133110",,terminal_output +4346,5748490,"TERMINAL",0,0,"47814424421",,terminal_output +4347,5749534,"TERMINAL",0,0,"58925535532",,terminal_output +4348,5750619,"TERMINAL",0,0,"692036646643",,terminal_output +4349,5751615,"TERMINAL",0,0,"750147757754",,terminal_output +4350,5752657,"TERMINAL",0,0,"81258868865",,terminal_output +4351,5753750,"TERMINAL",0,0,"92369979976",,terminal_output +4352,5754746,"TERMINAL",0,0,"303475:00408507:0087",,terminal_output +4353,5755803,"TERMINAL",0,0,"14581191198",,terminal_output +4354,5756512,"TERMINAL",0,0,"",,terminal_output +4355,5756669,"TERMINAL",0,0,"",,terminal_output +4356,5756844,"TERMINAL",0,0,"2569224022109",,terminal_output 
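The environment dump spells out the intended layout: `SLURM_NNODES=8`, `SLURM_NTASKS_PER_NODE=4`, and `SLURM_GPUS_ON_NODE=4`, i.e. one process per GPU and 32 processes total. Under such a layout JAX's multi-process runtime can bootstrap itself from exactly these `SLURM_*` variables; a sketch of that setup (how `train_dynamics.py` actually initializes is not visible in the recording):

```python
# Sketch of multi-process JAX bootstrap from the SLURM_* variables above.
import jax

# With no arguments, jax.distributed.initialize() can auto-detect a SLURM
# cluster (job id, node list, process rank) to pick a coordinator address
# and per-process id.
jax.distributed.initialize()

print(f"process {jax.process_index()}/{jax.process_count()}: "
      f"{jax.local_device_count()} local / {jax.device_count()} global devices")
```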
+4357,5757401,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +4358,5757896,"TERMINAL",0,0,"3673:0033133120",,terminal_output +4359,5758504,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1800,0,"",shellscript,selection_mouse +4360,5758932,"TERMINAL",0,0,"47814424421",,terminal_output +4361,5759215,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1800,0,"\n",shellscript,content +4362,5759495,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1800,0,"",shellscript,selection_mouse +4363,5759496,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1800,0,"e",shellscript,content +4364,5759497,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1801,0,"",shellscript,selection_keyboard +4365,5759666,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1801,0,"x",shellscript,content +4366,5759667,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1802,0,"",shellscript,selection_keyboard +4367,5759742,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1802,0,"p",shellscript,content +4368,5759743,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1803,0,"",shellscript,selection_keyboard +4369,5759815,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1803,0,"o",shellscript,content +4370,5759816,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1804,0,"",shellscript,selection_keyboard +4371,5759980,"TERMINAL",0,0,"58925535532",,terminal_output +4372,5760028,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1804,0,"r",shellscript,content +4373,5760029,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1805,0,"",shellscript,selection_keyboard +4374,5760262,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1805,0,"t",shellscript,content +4375,5760263,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1806,0,"",shellscript,selection_keyboard +4376,5760329,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1806,0," ",shellscript,content +4377,5760330,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1807,0,"",shellscript,selection_keyboard +4378,5761022,"TERMINAL",0,0,"693036646643",,terminal_output +4379,5761383,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1807,0,"PYTHONUNBUFFERED=1",shellscript,content +4380,5762139,"TERMINAL",0,0,"75:00147757754",,terminal_output +4381,5762418,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1799,0,"",shellscript,selection_mouse +4382,5763111,"TERMINAL",0,0,"81258868865",,terminal_output +4383,5763272,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1799,0,"\n",shellscript,content +4384,5764033,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1803,0,"",shellscript,selection_mouse +4385,5764151,"TERMINAL",0,0,"92369979976",,terminal_output +4386,5764166,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1801,6,"export",shellscript,selection_mouse +4387,5765195,"TERMINAL",0,0,"40347105087:001087",,terminal_output +4388,5765939,"TERMINAL",0,0,"[19@PYTHONUNBUFFERED=1 ",,terminal_output +4389,5766262,"TERMINAL",0,0,"1569225022209",,terminal_output +4390,5766281,"TERMINAL",0,0,"\r",,terminal_output +4391,5766774,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output 
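The `shellscript,content` rows above are crowd-code's edit events: each carries a character offset (`RangeOffset`), a deleted-range length (`RangeLength`), and inserted text (`Text`), and together they record typing `export PYTHONUNBUFFERED=1` into the sbatch file around offset 1800. Replaying such events against a buffer is straightforward; a sketch that condenses the per-keystroke events into two edits, with field names following the CSV header and the file path assumed:

```python
# Hypothetical replay of crowd-code "content" events onto a text buffer.
def apply_edit(buffer: str, offset: int, length: int, text: str) -> str:
    # Delete `length` characters at `offset`, then insert `text` there.
    return buffer[:offset] + text + buffer[offset + length:]

script = open("train_dynamics_8_nodes.sbatch").read()
# Condensed version of events 4361-4379: a newline, then the export line.
for offset, length, text in [(1800, 0, "\n"),
                             (1800, 0, "export PYTHONUNBUFFERED=1")]:
    script = apply_edit(script, offset, length, text)
```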
+4392,5766880,"TERMINAL",0,0,"#!/usr/bin/env bash\r\n\r\n#SBATCH --nodes=8\r\n#SBATCH --ntasks-per-node=4\r\n#SBATCH --time=48:00:00\r\n#SBATCH --partition=accelerated\r\n#SBATCH --cpus-per-task=5\r\n#SBATCH --gres=gpu:4\r\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\r\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\r\n#SBATCH --job-name=train_dynamics_causal_8_node\r\n#SBATCH --requeue\r\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\r\n\r\n# --- signal trap to requeue job before timeout ---\r\nrequeue_job() {\r\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\r\n # optional: trigger checkpoint saving here\r\n # e.g., touch $checkpoint_dir/requeue_trigger\r\n scontrol requeue $SLURM_JOB_ID\r\n exit 0\r\n}\r\n\r\ntrap requeue_job sigusr1\r\n\r\n# set checkpoint flag based on restart count\r\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\r\n\r\nif [ $restart_count -eq 0 ]; then\r\n restore_ckpt_flag=""--no-restore-ckpt""\r\nelse\r\n restore_ckpt_flag=""--restore-ckpt""\r\nfi\r\n\r\n\r\n\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\n# tokenizer with the new structure supporting larger ffn_dim\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\r\n\r\nenv | grep SLURM\r\n\r\nexport PYTHONUNBUFFERED=1\r\n\r\nsrun python train_dynamics.py \\r\n --save_ckpt \\r\n $restore_ckpt_flag \\r\n --wandb_id $SLURM_JOB_ID \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=256 \\r\n --init_lr=0 \\r\n --dyna_type=causal \\r\n --max_lr=8e-5 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-causal-8-node-$slurm_job_id \\r\n --tags dynamics causal 8-node post-launch-main \\r\n --entity instant-uv \\r\n --project jafar \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $array_records_dir &\r\n\r\nchild_pid=$!\r\n\r\nwait $child_pid\r\n",,terminal_output +4393,5767361,"TERMINAL",0,0,"3671033133130",,terminal_output 
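With the edit applied, the echoed script now contains `export PYTHONUNBUFFERED=1` before the `srun` line. Separately, the dataloader messages interleaved with the debugger output earlier in the session, `Filtering out episode with length N, which is shorter than the requested sequence length 16.`, reflect a simple length check; a sketch of its likely shape (the actual jafar dataloader is not shown in the recording):

```python
# Sketch of the episode-length filter behind the messages in this session.
def filter_short_episodes(episodes, seq_len: int = 16):
    for episode in episodes:
        if len(episode) < seq_len:
            print(f"Filtering out episode with length {len(episode)}, "
                  f"which is shorter than the requested sequence length {seq_len}.")
            continue
        yield episode
```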
+4394,5767396,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x8)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=796793\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0413\r\nSLURM_JOB_START_TIME=1754925697\r\nSLURM_STEP_NODELIST=hkn0413\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1754927497\r\nSLURM_PMI2_SRUN_PORT=43975\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x8)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=8\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3415167\r\nSLURM_PTY_PORT=34295\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=41\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=32\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e12.hkn0413\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=129\r\nSLURM_NODELIST=hkn[0413-0419,0421]\r\nSLURM_SRUN_COMM_PORT=38301\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=32\r\nSLURM_NNODES=8\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3415167\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0413\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=38301\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0413-0419,0421]\r\nsrun: error: Unable to create step for job 3415167: Invalid generic resource (gres) specification\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4395,5768332,"TERMINAL",0,0,"47814424421",,terminal_output +4396,5769411,"TERMINAL",0,0,"58925535532",,terminal_output +4397,5770416,"TERMINAL",0,0,"694036646643",,terminal_output +4398,5771162,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +4399,5771464,"TERMINAL",0,0,"710147757754",,terminal_output +4400,5772588,"TERMINAL",0,0,"81258868865",,terminal_output +4401,5773552,"TERMINAL",0,0,"92369979976",,terminal_output +4402,5774254,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1801,7,"export ",shellscript,selection_command +4403,5774428,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1801,8,"export P",shellscript,selection_command +4404,5774593,"TERMINAL",0,0,"50347205:008102087",,terminal_output +4405,5775634,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1808,0,"",shellscript,selection_command +4406,5775651,"TERMINAL",0,0,"14581191198",,terminal_output +4407,5776674,"TERMINAL",0,0,"2569227:0022309",,terminal_output +4408,5777717,"TERMINAL",0,0,"3672033133140",,terminal_output +4409,5778842,"TERMINAL",0,0,"47814424421",,terminal_output +4410,5779145,"TERMINAL",0,0,"[?25ll[?25h[?25ls[?25h",,terminal_output +4411,5779857,"TERMINAL",0,0,"58925535532",,terminal_output +4412,5780150,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +4413,5780670,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +4414,5780842,"TERMINAL",0,0,"695036646643",,terminal_output +4415,5781378,"TERMINAL",0,0,"sh 
slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",,terminal_output +4416,5781943,"TERMINAL",0,0,"720147757754",,terminal_output +4417,5782929,"TERMINAL",0,0,"81258868865",,terminal_output +4418,5783954,"TERMINAL",0,0,"[?25lh slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch\r[?25h",,terminal_output +4419,5783985,"TERMINAL",0,0,"92369979976",,terminal_output +4420,5784046,"TERMINAL",0,0,"[?25ls slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch\r[?25h",,terminal_output +4421,5784273,"TERMINAL",0,0,"[?25lcs slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch\r[?25hp slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch\r",,terminal_output +4422,5784331,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\n[... the same warning repeated by every worker process, condensed ...]\r\n",,terminal_output +4423,5784428,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\n[... the same warning repeated by further worker processes, condensed ...]\r\n",,terminal_output +4424,5784548,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\n[... several hundred near-identical WARNING:absl:Missing metrics / ERROR:absl:File ... metrics not found lines for steps 20000, 40000, 60000, 77000, 78000, and 79000, emitted once per process, condensed ...]\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\n",,terminal_output +4425,5785087,"TERMINAL",0,0,"7:0034730108203087",,terminal_output +4426,5785192,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n  warnings.warn(\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n  warnings.warn(\r\n[... the same orbax UserWarning repeated once per restoring process, condensed ...]\r\n/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output
+4427,5785239,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output
+4430,5788074,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output
+4432,5788331,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ Running on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\n",,terminal_output
+4435,5788589,"TERMINAL",0,0,"Running on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\nRunning on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\n",,terminal_output +4436,5788646,"TERMINAL",0,0,"Running on 32 devices.\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nStarting training from step 0...\r\n",,terminal_output +4437,5789103,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4438,5789208,"TERMINAL",0,0,"47814424421",,terminal_output +4439,5789283,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4440,5789485,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4441,5790302,"TERMINAL",0,0,"594:0036646643",,terminal_output +4442,5791327,"TERMINAL",0,0,"730147757754",,terminal_output +4443,5792351,"TERMINAL",0,0,"81258868865",,terminal_output +4444,5792667,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4445,5792855,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4446,5793016,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4447,5793378,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ 
",,terminal_output +4448,5793388,"TERMINAL",0,0,"92369979976",,terminal_output +4449,5793514,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4450,5793756,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4451,5793946,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4452,5794088,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0413:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0413 jafar]$ ",,terminal_output +4453,5794344,"TERMINAL",0,0,"[?2004l\r\r\nexit\r\n",,terminal_output +4454,5794449,"TERMINAL",0,0,"1034740208304087",,terminal_output +4455,5795368,"TERMINAL",0,0,"srun: error: hkn0413: task 0: Exited with exit code 130\r\nsalloc: Relinquishing job allocation 3415167\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;130",,terminal_output +4456,5795488,"TERMINAL",0,0,"1CG4581191198",,terminal_output +4457,5796502,"TERMINAL",0,0,"269222022509",,terminal_output +4458,5796586,"TERMINAL",0,0,"salloc --time=00:30:00 --partition=accelerated --nodes=8 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5",,terminal_command +4459,5796634,"TERMINAL",0,0,"]633;E;2025-08-11 17:27:12 salloc --time=00:30:00 --partition=accelerated --nodes=8 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5;361f455a-15ad-4916-88b6-b9e49434d7af]633;Csalloc: Granted job allocation 3415180\r\n",,terminal_output +4460,5796743,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output +4461,5797549,"TERMINAL",0,0,"380interact 0:01\t 8 hkn[0405-0412]111 54:076105079dy1:03:4064046 5:35:43\t 1 hkn07282401 accelerat train_to1-01:45:23\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]42110333030924351116623414280 cpuonly wrap tum_cte0 R 3:48:00\t 1 hkn0238",,terminal_output +4462,5798601,"TERMINAL",0,0,"42814424421",,terminal_output +4463,5799644,"TERMINAL",0,0,"53925535532",,terminal_output +4464,5800712,"TERMINAL",0,0,"641036646643",,terminal_output +4465,5801681,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +4466,5801758,"TERMINAL",0,0,"75147757754",,terminal_output +4467,5802795,"TERMINAL",0,0,"86258868865",,terminal_output +4468,5802949,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1832,0,"",shellscript,selection_mouse +4469,5803095,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1828,4,"srun",shellscript,selection_mouse +4470,5803830,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1805,0,"",shellscript,selection_mouse +4471,5803882,"TERMINAL",0,0,"97369979976",,terminal_output +4472,5804482,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1827,0,"",shellscript,selection_mouse +4473,5804881,"TERMINAL",0,0,"2084750308405087",,terminal_output +4474,5805010,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1826,0,"",shellscript,selection_mouse +4475,5805020,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1825,0,"",shellscript,selection_command +4476,5805157,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1825,1,"1",shellscript,selection_mouse +4477,5805159,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1826,0,"",shellscript,selection_command 
+4478,5805185,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1823,3,"D=1",shellscript,selection_mouse +4479,5805239,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1821,5,"RED=1",shellscript,selection_mouse +4480,5805240,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1817,9,"UFFERED=1",shellscript,selection_mouse +4481,5805245,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1814,12,"UNBUFFERED=1",shellscript,selection_mouse +4482,5805261,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1812,14,"ONUNBUFFERED=1",shellscript,selection_mouse +4483,5805313,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1810,16,"THONUNBUFFERED=1",shellscript,selection_mouse +4484,5805314,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1809,17,"YTHONUNBUFFERED=1",shellscript,selection_mouse +4485,5805315,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1808,18,"PYTHONUNBUFFERED=1",shellscript,selection_mouse +4486,5805328,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1807,19," PYTHONUNBUFFERED=1",shellscript,selection_mouse +4487,5805385,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1806,20,"t PYTHONUNBUFFERED=1",shellscript,selection_mouse +4488,5805386,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1805,21,"rt PYTHONUNBUFFERED=1",shellscript,selection_mouse +4489,5805439,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1804,22,"ort PYTHONUNBUFFERED=1",shellscript,selection_mouse +4490,5805840,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1804,0,"",shellscript,selection_mouse +4491,5805841,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1801,6,"export",shellscript,selection_mouse +4492,5805956,"TERMINAL",0,0,"19581191198",,terminal_output +4493,5806050,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1801,23,"export PYTHONUNBUFFERED",shellscript,selection_mouse +4494,5806508,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",1823,0,"",shellscript,selection_mouse +4495,5806970,"TERMINAL",0,0,"210692230228:009",,terminal_output +4496,5808733,"TERMINAL",0,0,"3285144244211",,terminal_output +4497,5809319,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab +4498,5811600,"TERMINAL",0,0,"552147757754",,terminal_output +4499,5813546,"TERMINAL",0,0,"87369979976",,terminal_output +4500,5815132,"TERMINAL",0,0,"307-0414,0416-0419,0421]8476:00408508:0087",,terminal_output +4501,5816517,"TERMINAL",0,0,"\r180 R\t0:20\t 8\t0405-0412]11train_dy54:26\t 1 hkn06100791:03:597264046to5:36:02824011-01:45:42\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]4284 cpuonly wrap 3:471 hkn03103520928:0216651120190238",,terminal_output +4502,5817492,"TERMINAL",0,0,"3174:0033133120",,terminal_output +4503,5818566,"TERMINAL",0,0,"42814424421",,terminal_output +4504,5820302,"TERMINAL",0,0,"543036646643",,terminal_output +4505,5821819,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes copy.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH 
--error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n\nexport PYTHONUNBUFFERED=1\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=256 \\n --init_lr=0 \\n --dyna_type=causal \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +4506,5822342,"TERMINAL",0,0,"76258868865",,terminal_output +4507,5823992,"TERMINAL",0,0,"97369979976",,terminal_output +4508,5824923,"TERMINAL",0,0,"salloc: Nodes hkn[0405-0412] are ready for job\r\n",,terminal_output +4509,5826256,"TERMINAL",0,0,"40958115198:011198",,terminal_output +4510,5827495,"TERMINAL",0,0,"]0;tum_cte0515@hkn0405:~/Projects/jafar[?2004h[tum_cte0515@hkn0405 jafar]$ ",,terminal_output +4511,5828602,"TERMINAL",0,0,"2328114452442231",,terminal_output +4512,5829739,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch 
$checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n\nexport PYTHONUNBUFFERED=1\n\nsrun python train_dynamics.py \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=256 \\n --init_lr=0 \\n --dyna_type=causal \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics causal 8-node post-launch-main \\n --entity instant-uv \\n --project jafar \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +4513,5831351,"TERMINAL",0,0,"554147757754",,terminal_output +4514,5833587,"TERMINAL",0,0,"87369979976",,terminal_output +4515,5835621,"TERMINAL",0,0,"50958216:019112198",,terminal_output +4516,5836243,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +4517,5836315,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +4518,5836437,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +4519,5836498,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +4520,5836672,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +4521,5836805,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +4522,5836865,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +4523,5836927,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +4524,5837044,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +4525,5837254,"TERMINAL",0,0,"env/",,terminal_output +4526,5837495,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +4527,5837806,"TERMINAL",0,0,"in/",,terminal_output +4528,5838038,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +4529,5838188,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +4530,5838368,"TERMINAL",0,0,"tivate",,terminal_output +4531,5838510,"TERMINAL",0,0,"242821448:02443241",,terminal_output +4532,5838736,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0405:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0405 jafar]$ ",,terminal_output +4533,5839664,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +4534,5839730,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +4535,5839848,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +4536,5840379,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",,terminal_output +4537,5841098,"TERMINAL",0,0,"545036646643",,terminal_output +4538,5842088,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,0,"",shellscript,tab 
+4539,5843302,"TERMINAL",0,0,"77369979976",,terminal_output +4540,5844008,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1804,0,"",shellscript,selection_mouse +4541,5844177,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1804,2,"or",shellscript,selection_mouse +4542,5844197,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1804,6,"ort PY",shellscript,selection_mouse +4543,5844212,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1804,10,"ort PYTHON",shellscript,selection_mouse +4544,5844265,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1800,4,"\nexp",shellscript,selection_mouse +4545,5844574,"TERMINAL",0,0,"8:0084730108203087",,terminal_output +4546,5845617,"TERMINAL",0,0,"19581191198",,terminal_output +4547,5845920,"TERMINAL",0,0,"\rslurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh\r\n[?2004l\r#!/usr/bin/env bash\r\n\r\n#SBATCH --nodes=8\r\n#SBATCH --ntasks-per-node=4\r\n#SBATCH --time=48:00:00\r\n#SBATCH --partition=accelerated\r\n#SBATCH --cpus-per-task=5\r\n#SBATCH --gres=gpu:4\r\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\r\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\r\n#SBATCH --job-name=train_dynamics_causal_8_node\r\n#SBATCH --requeue\r\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\r\n\r\n# --- signal trap to requeue job before timeout ---\r\nrequeue_job() {\r\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\r\n # optional: trigger checkpoint saving here\r\n # e.g., touch $checkpoint_dir/requeue_trigger\r\n scontrol requeue $SLURM_JOB_ID\r\n exit 0\r\n}\r\n\r\ntrap requeue_job sigusr1\r\n\r\n# set checkpoint flag based on restart count\r\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\r\n\r\nif [ $restart_count -eq 0 ]; then\r\n restore_ckpt_flag=""--no-restore-ckpt""\r\nelse\r\n restore_ckpt_flag=""--restore-ckpt""\r\nfi\r\n\r\n\r\n\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\n# tokenizer with the new structure supporting larger ffn_dim\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\r\n\r\nenv | grep SLURM\r\n\r\nexport PYTHONUNBUFFERED=1\r\n\r\nsrun python train_dynamics.py \\r\n --save_ckpt \\r\n $restore_ckpt_flag \\r\n --wandb_id $SLURM_JOB_ID \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=256 \\r\n --init_lr=0 \\r\n --dyna_type=causal \\r\n --max_lr=8e-5 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-causal-8-node-$slurm_job_id \\r\n --tags dynamics causal 8-node post-launch-main \\r\n --entity instant-uv \\r\n --project jafar \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir 
$array_records_dir &\r\n\r\nchild_pid=$!\r\n\r\nwait $child_pid\r\n",,terminal_output +4548,5846723,"TERMINAL",0,0,"25069221022409",,terminal_output +4549,5847745,"TERMINAL",0,0,"3173033133150",,terminal_output +4550,5848875,"TERMINAL",0,0,"42814424421",,terminal_output +4551,5849683,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x8)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=1816136\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0405\r\nSLURM_JOB_START_TIME=1754926032\r\nSLURM_STEP_NODELIST=hkn0405\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1754927832\r\nSLURM_PMI2_SRUN_PORT=33881\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x8)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=8\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3415180\r\nSLURM_PTY_PORT=39129\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=41\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=32\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e12.hkn0405\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=129\r\nSLURM_NODELIST=hkn[0405-0412]\r\nSLURM_SRUN_COMM_PORT=35763\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=32\r\nSLURM_NNODES=8\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3415180\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0405\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=35763\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0405-0412]\r\n",,terminal_output +4552,5849819,"TERMINAL",0,0,"53925535532",,terminal_output +4553,5849830,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +4554,5850927,"TERMINAL",0,0,"645:0036646643",,terminal_output +4555,5851915,"TERMINAL",0,0,"75147757754",,terminal_output +4556,5852970,"TERMINAL",0,0,"86258868865",,terminal_output +4557,5854002,"TERMINAL",0,0,"97369979976",,terminal_output +4558,5855053,"TERMINAL",0,0,"1084740208304087",,terminal_output +4559,5856145,"TERMINAL",0,0,"19581191198",,terminal_output +4560,5857153,"TERMINAL",0,0,"21:0069222022509",,terminal_output +4561,5858299,"TERMINAL",0,0,"317403313319:00",,terminal_output +4562,5859242,"TERMINAL",0,0,"43925535532",,terminal_output +4563,5860369,"TERMINAL",0,0,"641036646643",,terminal_output +4564,5861369,"TERMINAL",0,0,"75147757754",,terminal_output +4565,5862389,"TERMINAL",0,0,"86258868865",,terminal_output +4566,5863515,"TERMINAL",0,0,"97369979976",,terminal_output +4567,5864529,"TERMINAL",0,0,"2084750308405087",,terminal_output +4568,5865518,"TERMINAL",0,0,"19581191198",,terminal_output +4569,5866593,"TERMINAL",0,0,"210692230229:009",,terminal_output +4570,5867614,"TERMINAL",0,0,"3175033133110",,terminal_output +4571,5868651,"TERMINAL",0,0,"42814424421",,terminal_output 
+4572,5869696,"TERMINAL",0,0,"53925535532",,terminal_output +4573,5870788,"TERMINAL",0,0,"642036646643",,terminal_output +4574,5871509,"TERMINAL",0,0,"Running on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\n",,terminal_output +4575,5871812,"TERMINAL",0,0,"75147757754",,terminal_output +4576,5872838,"TERMINAL",0,0,"86258868865",,terminal_output +4577,5873962,"TERMINAL",0,0,"97369979976",,terminal_output +4578,5874988,"TERMINAL",0,0,"308477:00408509:0087",,terminal_output +4579,5876016,"TERMINAL",0,0,"19581191198",,terminal_output +4580,5877002,"TERMINAL",0,0,"22069224022109",,terminal_output +4581,5878040,"TERMINAL",0,0,"3175:0033133120",,terminal_output +4582,5879075,"TERMINAL",0,0,"42814424421",,terminal_output +4583,5879666,"TERMINAL",0,0,"Counting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\n",,terminal_output +4584,5879730,"TERMINAL",0,0,"Counting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\n",,terminal_output +4585,5879790,"TERMINAL",0,0,"Counting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\n",,terminal_output +4586,5879855,"TERMINAL",0,0,"Counting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\n",,terminal_output +4587,5879930,"TERMINAL",0,0,"Counting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter 
counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\n",,terminal_output +4588,5879994,"TERMINAL",0,0,"Counting all components: ['dynamics', 'lam', 'tokenizer']\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\n",,terminal_output +4589,5880066,"TERMINAL",0,0,"Counting all components: ['dynamics', 'lam', 'tokenizer']\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 
'total': 340797904}\r\n",,terminal_output +4590,5880162,"TERMINAL",0,0,"53925535532",,terminal_output +4591,5880625,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +4592,5881181,"TERMINAL",0,0,"643036646643",,terminal_output +4593,5881569,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250811_172836-3415180\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-causal-8-node-3415180\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/3415180\r\n",,terminal_output +4594,5882225,"TERMINAL",0,0,"76258868865",,terminal_output +4595,5882771,"TERMINAL",0,0,"Parameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\n",,terminal_output +4596,5883284,"TERMINAL",0,0,"97369979976",,terminal_output +4597,5884411,"TERMINAL",0,0,"40847105089:001087",,terminal_output +4598,5885384,"TERMINAL",0,0,"19581191198",,terminal_output +4599,5886457,"TERMINAL",0,0,"23069225022209",,terminal_output +4600,5887481,"TERMINAL",0,0,"3171033133130",,terminal_output +4601,5888525,"TERMINAL",0,0,"42814424421",,terminal_output +4602,5889736,"TERMINAL",0,0,"53925535532",,terminal_output +4603,5890580,"TERMINAL",0,0,"644036646643",,terminal_output +4604,5891621,"TERMINAL",0,0,"75147757754",,terminal_output +4605,5892656,"TERMINAL",0,0,"86258868865",,terminal_output +4606,5893706,"TERMINAL",0,0,"97369979976",,terminal_output +4607,5894757,"TERMINAL",0,0,"50847207:008102087",,terminal_output +4608,5895877,"TERMINAL",0,0,"19581191198",,terminal_output +4609,5896901,"TERMINAL",0,0,"24069229:0022309",,terminal_output +4610,5897958,"TERMINAL",0,0,"3172033133140",,terminal_output +4611,5898927,"TERMINAL",0,0,"42814424421",,terminal_output +4612,5900076,"TERMINAL",0,0,"53925535532",,terminal_output +4613,5901100,"TERMINAL",0,0,"645036646643",,terminal_output +4614,5902126,"TERMINAL",0,0,"75147757754",,terminal_output +4615,5903102,"TERMINAL",0,0,"86258868865",,terminal_output +4616,5904171,"TERMINAL",0,0,"97369979976",,terminal_output +4617,5905298,"TERMINAL",0,0,"9:0084730108203087",,terminal_output +4618,5906281,"TERMINAL",0,0,"15069221022409",,terminal_output +4619,5907346,"TERMINAL",0,0,"3173033133150",,terminal_output +4620,5908377,"TERMINAL",0,0,"42814424421",,terminal_output +4621,5909499,"TERMINAL",0,0,"53925535532",,terminal_output +4622,5910471,"TERMINAL",0,0,"646:0036646643",,terminal_output +4623,5911523,"TERMINAL",0,0,"75147757754",,terminal_output +4624,5912568,"TERMINAL",0,0,"86258868865",,terminal_output +4625,5913614,"TERMINAL",0,0,"97369979976",,terminal_output +4626,5914664,"TERMINAL",0,0,"1084740208304087",,terminal_output +4627,5915714,"TERMINAL",0,0,"19581191198",,terminal_output +4628,5916760,"TERMINAL",0,0,"22:0069222022509",,terminal_output +4629,5917895,"TERMINAL",0,0,"3174033133150:00",,terminal_output +4630,5918919,"TERMINAL",0,0,"42814424421",,terminal_output +4631,5919947,"TERMINAL",0,0,"53925535532",,terminal_output +4632,5921049,"TERMINAL",0,0,"641036646643",,terminal_output +4633,5922092,"TERMINAL",0,0,"75147757754",,terminal_output +4634,5923049,"TERMINAL",0,0,"86258868865",,terminal_output +4635,5924526,"TERMINAL",0,0,"984750308405087",,terminal_output 
+4636,5926676,"TERMINAL",0,0,"21106922302250:009",,terminal_output +4637,5929158,"TERMINAL",0,0,"3285144244211",,terminal_output +4638,5930378,"TERMINAL",0,0,"542036646643",,terminal_output +4639,5931922,"TERMINAL",0,0,"75147757754",,terminal_output +4640,5932905,"TERMINAL",0,0,"86258868865",,terminal_output +4641,5933971,"TERMINAL",0,0,"97369979976",,terminal_output +4642,5935100,"TERMINAL",0,0,"308478:004085050:0087",,terminal_output +4643,5936950,"TERMINAL",0,0,"12069224022109",,terminal_output +4644,5939221,"TERMINAL",0,0,"3396:0255355322",,terminal_output +4645,5941240,"TERMINAL",0,0,"643036646643",,terminal_output +4646,5943368,"TERMINAL",0,0,"77369979976",,terminal_output +4647,5945646,"TERMINAL",0,0,"409581151950:011198",,terminal_output +4648,5947229,"TERMINAL",0,0,"2317103351332130",,terminal_output +4649,5949205,"TERMINAL",0,0,"42814424421",,terminal_output +4650,5951436,"TERMINAL",0,0,"554147757754",,terminal_output +4651,5953772,"TERMINAL",0,0,"87369979976",,terminal_output +4652,5956294,"TERMINAL",0,0,"504069228:0250:001222309",,terminal_output +4653,5958548,"TERMINAL",0,0,"3282144244241",,terminal_output +4654,5960595,"TERMINAL",0,0,"545036646643",,terminal_output +4655,5961621,"TERMINAL",0,0,"75147757754",,terminal_output +4656,5962643,"TERMINAL",0,0,"86258868865",,terminal_output +4657,5963653,"TERMINAL",0,0,"97369979976",,terminal_output +4658,5963872,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\n",,terminal_output +4659,5963977,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\n",,terminal_output +4660,5964037,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\n",,terminal_output +4661,5964100,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples 
(shard 32).\r\n",,terminal_output +4662,5964704,"TERMINAL",0,0,"30:0084730108203087",,terminal_output +4663,5965821,"TERMINAL",0,0,"19581191198",,terminal_output +4664,5966793,"TERMINAL",0,0,"25069221022409",,terminal_output +4665,5967660,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\n",,terminal_output +4666,5967775,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\n",,terminal_output +4667,5967856,"TERMINAL",0,0,"3173033133150",,terminal_output +4668,5967866,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not 
found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 78000\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not 
found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 
20000\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 79000\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nWARNING:absl:Missing metrics for step 79000\r\nWARNING:absl:Missing metrics for step 79000\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 
77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 20000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/020000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 77000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/077000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\n",,terminal_output 
+4669,5968504,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output
+4670,5968576,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output
+4671,5968991,"TERMINAL",0,0,"42814424421",,terminal_output
+4672,5970017,"TERMINAL",0,0,"53925535532",,terminal_output
+4673,5971040,"TERMINAL",0,0,"647:0036646643",,terminal_output
+4674,5971524,"TERMINAL",0,0,"Starting training from step 0...\r\n",,terminal_output
+4675,5971618,"TERMINAL",0,0,"Starting training from step 0...\r\n",,terminal_output
+4676,5971811,"TERMINAL",0,0,"Starting training from step 0...\r\n",,terminal_output
+4677,5972049,"TERMINAL",0,0,"75147757754",,terminal_output
+4678,5973086,"TERMINAL",0,0,"86258868865",,terminal_output
+4679,5974118,"TERMINAL",0,0,"97369979976",,terminal_output
+4680,5975215,"TERMINAL",0,0,"1084740208304087",,terminal_output
+4681,5976203,"TERMINAL",0,0,"19581191198",,terminal_output
+4682,5977249,"TERMINAL",0,0,"23:01740332133511:00",,terminal_output
+4683,5978311,"TERMINAL",0,0,"42814424421",,terminal_output
+4684,5979344,"TERMINAL",0,0,"53925535532",,terminal_output
+4685,5980407,"TERMINAL",0,0,"641036646643",,terminal_output
+4686,5981485,"TERMINAL",0,0,"75147757754",,terminal_output
+4687,5982509,"TERMINAL",0,0,"86258868865",,terminal_output
+4688,5983529,"TERMINAL",0,0,"97369979976",,terminal_output
+4689,5984592,"TERMINAL",0,0,"2084750308405087",,terminal_output
+4690,5985629,"TERMINAL",0,0,"19581191198",,terminal_output
+4691,5986660,"TERMINAL",0,0,"210692230221:009",,terminal_output
+4692,5987708,"TERMINAL",0,0,"3175033133110",,terminal_output
+4693,5988748,"TERMINAL",0,0,"42814424421",,terminal_output
+4694,5989472,"TERMINAL",0,0,"Filtering out episode with length 14, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4695,5989649,"TERMINAL",0,0,"Filtering out episode with length 12, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4696,5989793,"TERMINAL",0,0,"53925535532",,terminal_output
+4697,5989902,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4698,5989961,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4699,5990135,"TERMINAL",0,0,"Filtering out episode with length 4, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 12, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4700,5990242,"TERMINAL",0,0,"Filtering out episode with length 9, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4701,5990375,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4702,5990428,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4703,5990837,"TERMINAL",0,0,"642036646643",,terminal_output
+4704,5990945,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 11, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4705,5991281,"TERMINAL",0,0,"Filtering out episode with length 15, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4706,5991468,"TERMINAL",0,0,"Filtering out episode with length 2, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4707,5991640,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)<module>()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in <module>\r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in <module>\r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output
+4708,5991779,"TERMINAL",0,0,"Filtering out episode with length 3, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4709,5991844,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)<module>()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in <module>\r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in <module>\r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output
+4710,5991900,"TERMINAL",0,0,"75147757754",,terminal_output
+4711,5992060,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)<module>()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in <module>\r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in <module>\r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output
+4712,5992116,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)<module>()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in <module>\r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in <module>\r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output
+4713,5992215,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)<module>()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in <module>\r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in <module>\r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output
+4714,5992358,"TERMINAL",0,0,"Filtering out episode with length 6, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4715,5992471,"TERMINAL",0,0,"Filtering out episode with length 14, which is shorter than the requested sequence length 16.\r\n> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)<module>()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in <module>\r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in <module>\r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output
+4716,5992555,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)<module>()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in <module>\r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in <module>\r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\nFiltering out episode with length 14, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+4717,5992674,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)<module>()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in <module>\r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File 
""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output +4718,5992736,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\nFiltering out episode with length 15, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4719,5992790,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output +4720,5992909,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in 
dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output +4721,5992960,"TERMINAL",0,0,"86258868865",,terminal_output +4722,5993021,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output +4723,5993158,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output +4724,5993675,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most 
recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4725,5993780,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output +4726,5993843,"TERMINAL",0,0,"Filtering out episode with length 12, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4727,5993971,"TERMINAL",0,0,"97369979976",,terminal_output +4728,5994387,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\nFiltering out episode with length 12, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4729,5994491,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n 
File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output +4730,5994674,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output +4731,5994829,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output +4732,5995024,"TERMINAL",0,0,"308479:00408501:0087",,terminal_output +4733,5995086,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, 
optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output +4734,5995144,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output +4735,5995282,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output +4736,5995372,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4737,5995863,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py(366)()\r\n-> loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n(Pdb) \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 366, in \r\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File 
""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output +4738,5996065,"TERMINAL",0,0,"19581191198",,terminal_output +4739,5996434,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4740,5997107,"TERMINAL",0,0,"22069224022109",,terminal_output +4741,5998180,"TERMINAL",0,0,"3177:0033133120",,terminal_output +4742,5998828,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run dynamics-causal-8-node-3415180 at: https://wandb.ai/instant-uv/jafar/runs/3415180\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250811_172836-3415180/logs\r\n",,terminal_output +4743,5999304,"TERMINAL",0,0,"42814424421",,terminal_output +4744,6000369,"TERMINAL",0,0,"543036646643",,terminal_output +4745,6000600,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +4746,6001351,"TERMINAL",0,0,"75147757754",,terminal_output +4747,6002350,"TERMINAL",0,0,"86258868865",,terminal_output +4748,6003120,"TERMINAL",0,0,"Exception ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 2 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 2 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 7 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored 
in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 3 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 2 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 3 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 9 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 3 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in 
sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 6 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 6 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 2 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +4749,6003179,"TERMINAL",0,0,"Exception ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 3 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +4750,6003434,"TERMINAL",0,0,"97369979976",,terminal_output +4751,6003498,"TERMINAL",0,0,"Exception ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 2 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in 
sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 3 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 3 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 2 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 3 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 3 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: 
resource_tracker: There appear to be 9 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 2 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 4 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 2 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 3 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 8 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception 
ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 9 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 2 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 9 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 9 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception 
ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 9 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 8 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 2 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\nException ignored in: Exception ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehookException ignored in sys.unraisablehookException ignored in: Exception ignored in sys.unraisablehook/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 7 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output +4752,6003865,"TERMINAL",0,0,"srun: error: hkn0410: tasks 21-22: Exited with exit code 1\r\nsrun: error: hkn0406: task 5: Exited with exit code 1\r\nsrun: error: hkn0405: task 1: Exited with exit code 1\r\n",,terminal_output 
+4753,6004070,"TERMINAL",0,0,"srun: error: hkn0407: tasks 8,10: Exited with exit code 1\r\n",,terminal_output
+4754,6004134,"TERMINAL",0,0,"srun: error: hkn0411: task 26: Exited with exit code 1\r\nsrun: error: hkn0410: task 23: Exited with exit code 1\r\nsrun: error: hkn0410: task 20: Exited with exit code 1\r\nsrun: error: hkn0412: task 30: Exited with exit code 1\r\nsrun: error: hkn0406: tasks 4,6-7: Exited with exit code 1\r\nsrun: error: hkn0405: tasks 0,2-3: Exited with exit code 1\r\n",,terminal_output
+4755,6004275,"TERMINAL",0,0,"srun: error: hkn0408: tasks 12-13: Exited with exit code 1\r\nsrun: error: hkn0411: tasks 24-25: Exited with exit code 1\r\n",,terminal_output
+4756,6004329,"TERMINAL",0,0,"srun: error: hkn0412: tasks 28-29: Exited with exit code 1\r\n",,terminal_output
+4757,6004480,"TERMINAL",0,0,"srun: error: hkn0411: task 27: Exited with exit code 1\r\nsrun: error: hkn0407: tasks 9,11: Exited with exit code 1\r\nsrun: error: hkn0412: task 31: Exited with exit code 1\r\nsrun: error: hkn0408: task 15: Exited with exit code 1\r\n",,terminal_output
+4759,6004555,"TERMINAL",0,0,"srun: error: hkn0408: task 14: Exited with exit code 1\r\nsrun: error: hkn0409: tasks 16,18-19: Exited with exit code 1\r\nsrun: error: hkn0409: task 17: Exited with exit code 1\r\n]0;tum_cte0515@hkn0405:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0405 jafar]$ ",,terminal_output
[sequence +4758 and the TERMINAL digit-run records among +4760-+4847: cursor-addressed status redraws with no recoverable content; omitted. The editor events below are kept in chronological order.]
+4773,6018946,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,0,"",shellscript,tab
+4839,6086892,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,0,"",shellscript,tab
+4841,6088284,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1827,0,"",shellscript,selection_mouse
+4845,6091264,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab
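[editor's note] The leaked shared_memory warnings earlier in the teardown mean worker processes exited while multiprocessing.shared_memory segments were still registered, so Python's resource_tracker reclaimed them at shutdown and complained. A minimal sketch of the explicit cleanup that avoids the warning; the segment name and size are illustrative only:

```python
# Sketch: close and unlink SharedMemory explicitly so resource_tracker has
# nothing left to reclaim at interpreter shutdown.
from multiprocessing import shared_memory

shm = shared_memory.SharedMemory(create=True, size=1024)
try:
    shm.buf[:4] = b"data"  # producer writes into the shared segment
finally:
    shm.close()    # detach this process's mapping
    shm.unlink()   # free the segment itself (the creator's responsibility)
```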
[sequences +4848-+4878: TERMINAL status-redraw digit runs; omitted.]
+4879,6125243,"TERMINAL",0,0,"[?2004l\r\r\nexit\r\nsrun: error: hkn0405: task 0: Exited with exit code 1\r\nsalloc: Relinquishing job allocation 3415180\r\nsalloc: Job allocation 3415180 has been revoked.\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;1",,terminal_output
+4881,6126868,"TERMINAL",0,0,"idle",,terminal_command
+4882,6126888,"TERMINAL",0,0,"]633;E;2025-08-11 17:32:42 idle;361f455a-15ad-4916-88b6-b9e49434d7af]633;CPartition dev_cpuonly : 11 nodes idle\r\nPartition cpuonly : 52 nodes idle\r\nPartition dev_accelerated : 2 nodes idle\r\nPartition accelerated : 43 nodes idle\r\nPartition dev_accelerated-h100 : 0 nodes idle\r\nPartition accelerated-h100 : 2 nodes idle\r\nPartition large : 7 nodes idle\r\n]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output
+4885,6128771,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,0,"",shellscript,tab
+4887,6130186,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",2003,0,"",shellscript,selection_mouse
+4890,6131765,"slurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch",0,0,"",shellscript,tab
[sequences +4880, +4883-+4884, +4886, +4888-+4889, +4891-+4892: status-redraw digit runs; omitted.]
+4893,6134675,"TERMINAL",0,0,"5047201:008102087",,terminal_output +4894,6134753,"train_dynamics.py",0,0,"",python,tab +4895,6135676,"TERMINAL",0,0,"11 hkn0405581191198",,terminal_output +4896,6135945,"train_dynamics.py",12465,25,"",python,content +4897,6135962,"train_dynamics.py",12477,0,"",python,selection_command +4898,6136679,"TERMINAL",0,0,"\r211train_dy R59:466100791:09:197264046to5:41:22824011-01:51:02\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]4284 cpuonly wrap 3:53:00\t 1 hkn031033:120922216651302030238",,terminal_output +4899,6137733,"TERMINAL",0,0,"372033133140",,terminal_output +4900,6138784,"TERMINAL",0,0,"4814424421",,terminal_output +4901,6139055,"train_dynamics.py",12663,0,"",python,selection_mouse +4902,6139823,"TERMINAL",0,0,"5925535532",,terminal_output +4903,6140932,"TERMINAL",0,0,"65036646643",,terminal_output +4904,6141947,"TERMINAL",0,0,"7147757754",,terminal_output +4905,6142958,"TERMINAL",0,0,"8258868865",,terminal_output +4906,6144104,"TERMINAL",0,0,"9369979976",,terminal_output +4907,6145121,"TERMINAL",0,0,"3:004730108203087",,terminal_output +4908,6146146,"TERMINAL",0,0,"1581191198",,terminal_output +4909,6147170,"TERMINAL",0,0,"269221022409",,terminal_output +4910,6148158,"TERMINAL",0,0,"373033133150",,terminal_output +4911,6149219,"TERMINAL",0,0,"4814424421",,terminal_output +4912,6150245,"TERMINAL",0,0,"51:00:0036646643",,terminal_output +4913,6151377,"TERMINAL",0,0,"7147757754",,terminal_output +4914,6152394,"TERMINAL",0,0,"8258868865",,terminal_output +4915,6153362,"TERMINAL",0,0,"9369979976",,terminal_output +4916,6154443,"TERMINAL",0,0,"104740208304087",,terminal_output +4917,6155450,"TERMINAL",0,0,"1581191198",,terminal_output +4918,6156593,"TERMINAL",0,0,"269222022509",,terminal_output +4919,6157537,"TERMINAL",0,0,"37403313314:00",,terminal_output +4920,6158642,"TERMINAL",0,0,"4814424421",,terminal_output +4921,6159613,"TERMINAL",0,0,"5925535532",,terminal_output +4922,6160653,"TERMINAL",0,0,"61036646643",,terminal_output +4923,6161694,"TERMINAL",0,0,"7147757754",,terminal_output +4924,6162746,"TERMINAL",0,0,"8258868865",,terminal_output +4925,6163786,"TERMINAL",0,0,"9369979976",,terminal_output +4926,6164821,"TERMINAL",0,0,"204750308405087",,terminal_output +4927,6165909,"TERMINAL",0,0,"1581191198",,terminal_output +4928,6166933,"TERMINAL",0,0,"2692230224:009",,terminal_output +4929,6167944,"TERMINAL",0,0,"375033133110",,terminal_output +4930,6169086,"TERMINAL",0,0,"4814424421",,terminal_output +4931,6170106,"TERMINAL",0,0,"5925535532",,terminal_output +4932,6171069,"TERMINAL",0,0,"62036646643",,terminal_output +4933,6172157,"TERMINAL",0,0,"7147757754",,terminal_output +4934,6173147,"TERMINAL",0,0,"8258868865",,terminal_output +4935,6174186,"TERMINAL",0,0,"9369979976",,terminal_output +4936,6175256,"TERMINAL",0,0,"30582:01419514:0198",,terminal_output +4937,6176357,"TERMINAL",0,0,"269224022109",,terminal_output +4938,6177311,"TERMINAL",0,0,"3710:0033133120",,terminal_output +4939,6178349,"TERMINAL",0,0,"4814424421",,terminal_output +4940,6179427,"TERMINAL",0,0,"5925535532",,terminal_output +4941,6180554,"TERMINAL",0,0,"63036646643",,terminal_output +4942,6181586,"TERMINAL",0,0,"7147757754",,terminal_output +4943,6182649,"TERMINAL",0,0,"8258868865",,terminal_output +4944,6183726,"TERMINAL",0,0,"9369979976",,terminal_output +4945,6184751,"TERMINAL",0,0,"4047105084:001087",,terminal_output +4946,6185761,"TERMINAL",0,0,"1581191198",,terminal_output +4947,6186812,"TERMINAL",0,0,"269225022209",,terminal_output 
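The `idle` command captured above is a site-local helper on HoreKa that prints idle node counts per partition. A minimal sketch of the same query with standard Slurm tooling, assuming only that `sinfo` is on PATH (the `-t idle` state filter and the `%P`/`%D` format codes are stock sinfo options):

# Sketch: count idle nodes per partition, mirroring the site `idle` helper.
import subprocess
from collections import Counter

out = subprocess.run(
    ["sinfo", "-h", "-t", "idle", "-o", "%P %D"],
    capture_output=True, text=True, check=True,
).stdout

idle = Counter()
for line in out.splitlines():
    partition, count = line.split()
    idle[partition.rstrip("*")] += int(count)  # "*" marks the default partition

for partition, count in sorted(idle.items()):
    print(f"Partition {partition}: {count} nodes idle")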
+4948,6187889,"TERMINAL",0,0,"371033133130",,terminal_output +4949,6188946,"TERMINAL",0,0,"4814424421",,terminal_output +4950,6189972,"TERMINAL",0,0,"5925535532",,terminal_output +4951,6190989,"TERMINAL",0,0,"64036646643",,terminal_output +4952,6192038,"TERMINAL",0,0,"7147757754",,terminal_output +4953,6193148,"TERMINAL",0,0,"8258868865",,terminal_output +4954,6194170,"TERMINAL",0,0,"9369979976",,terminal_output +4955,6195169,"TERMINAL",0,0,"5047202:008102087",,terminal_output +4956,6196322,"TERMINAL",0,0,"1581191198",,terminal_output +4957,6196961,"genie.py",0,0,"",python,tab +4958,6197299,"TERMINAL",0,0,"2720334:01333140",,terminal_output +4959,6198304,"TERMINAL",0,0,"4814424421",,terminal_output +4960,6199364,"TERMINAL",0,0,"5925535532",,terminal_output +4961,6200397,"TERMINAL",0,0,"65036646643",,terminal_output +4962,6201127,"genie.py",3580,0,"",python,selection_mouse +4963,6201543,"TERMINAL",0,0,"7147757754",,terminal_output +4964,6202513,"TERMINAL",0,0,"8258868865",,terminal_output +4965,6203523,"genie.py",3594,0,"\n ",python,content +4966,6203586,"TERMINAL",0,0,"9369979976",,terminal_output +4967,6204588,"TERMINAL",0,0,"4:004730108203087",,terminal_output +4968,6205019,"genie.py",3607,0,"b",python,content +4969,6205020,"genie.py",3608,0,"",python,selection_keyboard +4970,6205140,"genie.py",3608,0,"r",python,content +4971,6205142,"genie.py",3609,0,"",python,selection_keyboard +4972,6205273,"genie.py",3609,0,"e",python,content +4973,6205275,"genie.py",3610,0,"",python,selection_keyboard +4974,6205423,"genie.py",3610,0,"a",python,content +4975,6205424,"genie.py",3611,0,"",python,selection_keyboard +4976,6205636,"TERMINAL",0,0,"1581191198",,terminal_output +4977,6206108,"genie.py",3607,4,"breakpoint",python,content +4978,6206680,"TERMINAL",0,0,"269221022409",,terminal_output +4979,6206867,"genie.py",3617,0,"()",python,content +4980,6206868,"genie.py",3618,0,"",python,selection_keyboard +4981,6206885,"genie.py",3618,1,")",python,content +4982,6206886,"genie.py",3619,0,"",python,selection_keyboard +4983,6207725,"TERMINAL",0,0,"373033133150",,terminal_output +4984,6208761,"TERMINAL",0,0,"4814424421",,terminal_output +4985,6209818,"TERMINAL",0,0,"5925535532",,terminal_output +4986,6210865,"TERMINAL",0,0,"61:0036646643",,terminal_output +4987,6211989,"TERMINAL",0,0,"7147757754",,terminal_output +4988,6212036,"TERMINAL",0,0,"jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch",,terminal_command +4989,6212061,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;361f455a-15ad-4916-88b6-b9e49434d7af]633;C]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D",,terminal_output +4990,6212953,"TERMINAL",0,0,"8258868865",,terminal_output +4991,6214000,"TERMINAL",0,0,"9369979976",,terminal_output +4992,6215062,"TERMINAL",0,0,"104740208304087",,terminal_output +4993,6215177,"TERMINAL",0,0,"salloc --time=00:30:00 --partition=accelerated --nodes=8 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5",,terminal_command +4994,6215228,"TERMINAL",0,0,"]633;E;2025-08-11 17:34:10 salloc --time=00:30:00 --partition=accelerated --nodes=8 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5;361f455a-15ad-4916-88b6-b9e49434d7af]633;Csalloc: Granted job allocation 3415275\r\n",,terminal_output +4995,6215369,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output +4996,6216191,"TERMINAL",0,0,"1275interact 0:00\t 8 hkn[0522-0527,0530-0531]11101:056105079dy1:10:3864046 5:42:41\t 1 hkn07282401 accelerat train_to1-01:52:21\t 8 
hkn[0802,0804-0806,0808,0810,0813-0814]41910331030921514916623414280 cpuonly wrap tum_cte0 R 3:54:58\t 1 hkn0238",,terminal_output +4997,6217212,"TERMINAL",0,0,"2169222022509",,terminal_output +4998,6218187,"TERMINAL",0,0,"327403313315:00",,terminal_output +4999,6219260,"TERMINAL",0,0,"44925535532",,terminal_output +5000,6220284,"TERMINAL",0,0,"651036646643",,terminal_output +5001,6221410,"TERMINAL",0,0,"76147757754",,terminal_output +5002,6222436,"TERMINAL",0,0,"87258868865",,terminal_output +5003,6223419,"TERMINAL",0,0,"98369979976",,terminal_output +5004,6224485,"TERMINAL",0,0,"2094750308405087",,terminal_output +5005,6225608,"TERMINAL",0,0,"110581191198",,terminal_output +5006,6226566,"TERMINAL",0,0,"21692230225:009",,terminal_output +5007,6227605,"TERMINAL",0,0,"3275033133110",,terminal_output +5008,6228682,"TERMINAL",0,0,"43814424421",,terminal_output +5009,6229694,"TERMINAL",0,0,"54925535532",,terminal_output +5010,6230737,"TERMINAL",0,0,"652036646643",,terminal_output +5011,6231612,"TERMINAL",0,0,"s",,terminal_output +5012,6231728,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +5013,6231790,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +5014,6231799,"TERMINAL",0,0,"76147757754",,terminal_output +5015,6231851,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +5016,6232081,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +5017,6232185,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +5018,6232665,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +5019,6232862,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +5020,6232862,"TERMINAL",0,0,"87258868865",,terminal_output +5021,6233450,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +5022,6233597,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +5023,6233895,"TERMINAL",0,0,"98369979976",,terminal_output +5024,6233984,"TERMINAL",0,0,"[?25ln[?25h[?25lv[?25h",,terminal_output +5025,6234570,"TERMINAL",0,0,"[?25l/[?25h",,terminal_output +5026,6234955,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +5027,6234956,"TERMINAL",0,0,"309473:00408505:0087",,terminal_output +5028,6235159,"TERMINAL",0,0,"[?25li[?25h[?25ln[?25h",,terminal_output +5029,6235780,"TERMINAL",0,0,"[?25l/[?25h",,terminal_output +5030,6235950,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +5031,6235978,"TERMINAL",0,0,"120581191198",,terminal_output +5032,6236041,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +5033,6236299,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +5034,6236900,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +5035,6237020,"TERMINAL",0,0,"2169224022109",,terminal_output +5036,6237678,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +5037,6237740,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +5038,6237985,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +5039,6238055,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +5040,6238108,"TERMINAL",0,0,"3271:0033133120",,terminal_output +5041,6238125,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +5042,6238778,"TERMINAL",0,0,"\r\n",,terminal_output +5043,6239126,"TERMINAL",0,0,"43814424421",,terminal_output +5044,6240261,"TERMINAL",0,0,"54925535532",,terminal_output +5045,6241277,"TERMINAL",0,0,"653036646643",,terminal_output +5046,6242303,"TERMINAL",0,0,"77258868865",,terminal_output +5047,6242411,"TERMINAL",0,0,"salloc: Nodes hkn[0522-0527,0530-0531] are ready for job\r\n",,terminal_output +5048,6242552,"TERMINAL",0,0,"source .venv/bin/activate\r\n",,terminal_output +5049,6243324,"TERMINAL",0,0,"98369979976",,terminal_output +5050,6244453,"TERMINAL",0,0,"40947105085:001087",,terminal_output 
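The keystrokes recorded above insert a bare `breakpoint()` into genie.py. On a 32-task srun launch every rank would stop at its own pdb prompt; a minimal sketch of guarding the break to a single rank, using the `SLURM_PROCID` variable Slurm sets per task (`rank0_breakpoint` is a hypothetical helper name):

# Sketch: only drop into pdb on rank 0 of a multi-process srun launch.
import os

def rank0_breakpoint():
    # SLURM_PROCID is set per task by Slurm; defaults to "0" for local runs.
    if os.environ.get("SLURM_PROCID", "0") == "0":
        breakpoint()
    # Caution: other ranks keep running past this call, so any collective
    # operation they reach will block until rank 0 leaves the debugger.

rank0_breakpoint()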
+5051,6244993,"TERMINAL",0,0,"]0;tum_cte0515@hkn0522:~/Projects/jafar[?2004h[tum_cte0515@hkn0522 jafar]$ source .venv/bin/activate\r\n[?2004l\r]0;tum_cte0515@hkn0522:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0522 jafar]$ ",,terminal_output +5052,6245404,"TERMINAL",0,0,"130581191198",,terminal_output +5053,6246500,"TERMINAL",0,0,"2169225022209",,terminal_output +5054,6247523,"TERMINAL",0,0,"3271033133130",,terminal_output +5055,6248547,"TERMINAL",0,0,"43814424421",,terminal_output +5056,6249672,"TERMINAL",0,0,"54925535532",,terminal_output +5057,6250356,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,0,"",shellscript,tab +5058,6250685,"TERMINAL",0,0,"654036646643",,terminal_output +5059,6251719,"TERMINAL",0,0,"76147757754",,terminal_output +5060,6252083,"genie.py",0,0,"",python,tab +5061,6252753,"TERMINAL",0,0,"87258868865",,terminal_output +5062,6253230,"genie.py",3612,0,"",python,selection_mouse +5063,6253806,"TERMINAL",0,0,"98369979976",,terminal_output +5064,6254496,"genie.py",3611,0,"",python,selection_command +5065,6254900,"TERMINAL",0,0,"50947203:008102087",,terminal_output +5066,6256028,"TERMINAL",0,0,"140581191198",,terminal_output +5067,6257051,"TERMINAL",0,0,"2169225:0022309",,terminal_output +5068,6258002,"TERMINAL",0,0,"3272033133140",,terminal_output +5069,6259094,"TERMINAL",0,0,"43814424421",,terminal_output +5070,6260118,"TERMINAL",0,0,"54925535532",,terminal_output +5071,6261144,"TERMINAL",0,0,"655036646643",,terminal_output +5072,6262269,"TERMINAL",0,0,"76147757754",,terminal_output +5073,6263294,"TERMINAL",0,0,"88369979976",,terminal_output +5074,6263536,"genie.py",3619,0,"\n ",python,content +5075,6264321,"TERMINAL",0,0,"5:0094730108203087",,terminal_output +5076,6264794,"genie.py",3620,12,"",python,content +5077,6264893,"genie.py",3595,0,"",python,selection_command +5078,6265307,"genie.py",3620,0,"",python,selection_command +5079,6265362,"TERMINAL",0,0,"150581191198",,terminal_output +5080,6265715,"genie.py",3620,1,"",python,content +5081,6265719,"genie.py",3632,0,"",python,selection_command +5082,6265759,"genie.py",3607,0,"",python,selection_command +5083,6266052,"genie.py",3567,0,"",python,selection_command +5084,6266384,"TERMINAL",0,0,"2169221022409",,terminal_output +5085,6266858,"genie.py",3553,0,"",python,selection_command +5086,6267237,"genie.py",3567,0,"",python,selection_command +5087,6267434,"TERMINAL",0,0,"3273033133150",,terminal_output +5088,6268095,"genie.py",3607,0,"",python,selection_command +5089,6268514,"TERMINAL",0,0,"43814424421",,terminal_output +5090,6269643,"TERMINAL",0,0,"54925535532",,terminal_output +5091,6270670,"TERMINAL",0,0,"652:0036646643",,terminal_output +5092,6271627,"TERMINAL",0,0,"76147757754",,terminal_output +5093,6272689,"TERMINAL",0,0,"87258868865",,terminal_output +5094,6273518,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_output +5095,6273722,"TERMINAL",0,0,"98369979976",,terminal_output +5096,6273928,"TERMINAL",0,0,"h slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",,terminal_output +5097,6274770,"TERMINAL",0,0,"1094740208304087",,terminal_output +5098,6275830,"TERMINAL",0,0,"11:00581191198",,terminal_output +5099,6275892,"TERMINAL",0,0,"\r\n[?2004l\r#!/usr/bin/env bash\r\n\r\n#SBATCH --nodes=8\r\n#SBATCH --ntasks-per-node=4\r\n#SBATCH --time=48:00:00\r\n#SBATCH --partition=accelerated\r\n#SBATCH --cpus-per-task=5\r\n#SBATCH --gres=gpu:4\r\n#SBATCH 
--output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\r\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\r\n#SBATCH --job-name=train_dynamics_causal_8_node\r\n#SBATCH --requeue\r\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\r\n\r\n# --- signal trap to requeue job before timeout ---\r\nrequeue_job() {\r\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\r\n # optional: trigger checkpoint saving here\r\n # e.g., touch $checkpoint_dir/requeue_trigger\r\n scontrol requeue $SLURM_JOB_ID\r\n exit 0\r\n}\r\n\r\ntrap requeue_job sigusr1\r\n\r\n# set checkpoint flag based on restart count\r\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\r\n\r\nif [ $restart_count -eq 0 ]; then\r\n restore_ckpt_flag=""--no-restore-ckpt""\r\nelse\r\n restore_ckpt_flag=""--restore-ckpt""\r\nfi\r\n\r\n\r\n\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\n# tokenizer with the new structure supporting larger ffn_dim\r\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\r\n\r\nenv | grep SLURM\r\n\r\nexport PYTHONUNBUFFERED=1\r\n\r\nsrun python train_dynamics.py \\r\n --save_ckpt \\r\n $restore_ckpt_flag \\r\n --wandb_id $SLURM_JOB_ID \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=256 \\r\n --init_lr=0 \\r\n --dyna_type=causal \\r\n --max_lr=8e-5 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-causal-8-node-$slurm_job_id \\r\n --tags dynamics causal 8-node post-launch-main \\r\n --entity instant-uv \\r\n --project jafar \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $array_records_dir &\r\n\r\nchild_pid=$!\r\n\r\nwait $child_pid\r\n",,terminal_output +5100,6276913,"TERMINAL",0,0,"2169222022509",,terminal_output +5101,6277937,"TERMINAL",0,0,"327403313316:00",,terminal_output +5102,6279008,"TERMINAL",0,0,"43814424421",,terminal_output 
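The sbatch script above requests `--signal=b:usr1@300` and traps SIGUSR1 in the shell so the job requeues itself about five minutes before timeout. The same warning can also be handled inside the training process; a minimal sketch, assuming the signal is forwarded to the Python child and that a checkpoint hook exists (`save_checkpoint` is hypothetical):

# Sketch: catch SIGUSR1 (sent ~300 s before the time limit per
# `--signal=b:usr1@300`) and finish a checkpoint before the requeue.
import signal

stop_requested = False

def _on_sigusr1(signum, frame):
    global stop_requested
    stop_requested = True  # the training loop checks this flag

signal.signal(signal.SIGUSR1, _on_sigusr1)

for step in range(1_000_000):
    # ... one training step ...
    if stop_requested:
        # save_checkpoint(step)  # hypothetical checkpoint hook
        break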
+5103,6279264,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x8)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=986737\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0522\r\nSLURM_JOB_START_TIME=1754926451\r\nSLURM_STEP_NODELIST=hkn0522\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1754928251\r\nSLURM_PMI2_SRUN_PORT=42681\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x8)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=8\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3415275\r\nSLURM_PTY_PORT=35785\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=41\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=32\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e11.hkn0522\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=129\r\nSLURM_NODELIST=hkn[0522-0527,0530-0531]\r\nSLURM_SRUN_COMM_PORT=42393\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=32\r\nSLURM_NNODES=8\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3415275\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0522\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=42393\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0522-0527,0530-0531]\r\n",,terminal_output +5104,6279365,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +5105,6279416,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output +5106,6280087,"TERMINAL",0,0,"54925535532",,terminal_output +5107,6281109,"TERMINAL",0,0,"651036646643",,terminal_output +5108,6282134,"TERMINAL",0,0,"76147757754",,terminal_output +5109,6283171,"TERMINAL",0,0,"87258868865",,terminal_output +5110,6283999,"genie.py",0,0,"",python,tab +5111,6284233,"TERMINAL",0,0,"98369979976",,terminal_output +5112,6285264,"TERMINAL",0,0,"20105851319415198",,terminal_output +5113,6286333,"TERMINAL",0,0,"21692230226:009",,terminal_output +5114,6287457,"TERMINAL",0,0,"3275033133110",,terminal_output +5115,6288400,"TERMINAL",0,0,"43814424421",,terminal_output +5116,6289464,"TERMINAL",0,0,"54925535532",,terminal_output +5117,6290508,"TERMINAL",0,0,"652036646643",,terminal_output +5118,6291561,"TERMINAL",0,0,"76147757754",,terminal_output +5119,6292341,"train_dynamics.py",0,0,"",python,tab +5120,6292602,"TERMINAL",0,0,"87258868865",,terminal_output +5121,6293655,"TERMINAL",0,0,"98369979976",,terminal_output +5122,6294728,"TERMINAL",0,0,"309474:00408506:0087",,terminal_output +5123,6295323,"train_dynamics.py",7753,0,"",python,selection_mouse +5124,6295335,"train_dynamics.py",7752,0,"",python,selection_command +5125,6295730,"TERMINAL",0,0,"120581191198",,terminal_output +5126,6296789,"TERMINAL",0,0,"2169224022109",,terminal_output +5127,6297195,"train_dynamics.py",7935,0,"",python,selection_command +5128,6297828,"TERMINAL",0,0,"3272:0033133120",,terminal_output 
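The `env | grep SLURM` dump above is what each task sees at startup. A minimal sketch of deriving the distributed layout from those variables (values default to a single local process when unset):

# Sketch: derive the distributed layout from the Slurm environment shown above.
import os

def slurm_layout():
    env = os.environ
    return {
        "rank": int(env.get("SLURM_PROCID", 0)),        # 0..NTASKS-1
        "world_size": int(env.get("SLURM_NTASKS", 1)),  # 32 in this run
        "local_rank": int(env.get("SLURM_LOCALID", 0)), # task index on the node
        "node": int(env.get("SLURM_NODEID", 0)),        # 0..NNODES-1
    }

print(slurm_layout())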
+5129,6298928,"TERMINAL",0,0,"43814424421",,terminal_output +5130,6299956,"TERMINAL",0,0,"54925535532",,terminal_output +5131,6301029,"train_dynamics.py",6787,0,"",python,selection_command +5132,6301098,"TERMINAL",0,0,"653036646643",,terminal_output +5133,6302100,"TERMINAL",0,0,"76147757754",,terminal_output +5134,6302774,"TERMINAL",0,0,"Running on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\nRunning on 32 devices.\r\n",,terminal_output +5135,6303073,"TERMINAL",0,0,"87258868865",,terminal_output +5136,6304151,"TERMINAL",0,0,"98369979976",,terminal_output +5137,6304229,"train_dynamics.py",5552,0,"",python,selection_command +5138,6305353,"TERMINAL",0,0,"40947105086:001087",,terminal_output +5139,6306014,"train_dynamics.py",5571,0,"",python,selection_command +5140,6306220,"TERMINAL",0,0,"13169225022209",,terminal_output +5141,6306563,"train_dynamics.py",5591,0,"",python,selection_command +5142,6306564,"train_dynamics.py",5627,0,"",python,selection_command +5143,6306567,"train_dynamics.py",5669,0,"",python,selection_command +5144,6306618,"train_dynamics.py",5719,0,"",python,selection_command +5145,6306619,"train_dynamics.py",5767,0,"",python,selection_command +5146,6306688,"train_dynamics.py",5817,0,"",python,selection_command +5147,6306689,"train_dynamics.py",5853,0,"",python,selection_command +5148,6306810,"train_dynamics.py",5909,0,"",python,selection_command +5149,6306810,"train_dynamics.py",5963,0,"",python,selection_command +5150,6306811,"train_dynamics.py",5977,0,"",python,selection_command +5151,6306811,"train_dynamics.py",6007,0,"",python,selection_command +5152,6306827,"train_dynamics.py",6045,0,"",python,selection_command +5153,6306951,"train_dynamics.py",6095,0,"",python,selection_command +5154,6306952,"train_dynamics.py",6147,0,"",python,selection_command +5155,6306953,"train_dynamics.py",6191,0,"",python,selection_command +5156,6306954,"train_dynamics.py",6235,0,"",python,selection_command +5157,6307022,"train_dynamics.py",6277,0,"",python,selection_command +5158,6307022,"train_dynamics.py",6323,0,"",python,selection_command +5159,6307044,"train_dynamics.py",6342,0,"",python,selection_command +5160,6307178,"train_dynamics.py",6376,0,"",python,selection_command +5161,6307179,"train_dynamics.py",6408,0,"",python,selection_command +5162,6307179,"train_dynamics.py",6448,0,"",python,selection_command +5163,6307179,"train_dynamics.py",6494,0,"",python,selection_command +5164,6307192,"train_dynamics.py",6538,0,"",python,selection_command +5165,6307227,"train_dynamics.py",6568,0,"",python,selection_command +5166,6307253,"train_dynamics.py",6604,0,"",python,selection_command +5167,6307381,"train_dynamics.py",6642,0,"",python,selection_command +5168,6307382,"train_dynamics.py",6668,0,"",python,selection_command 
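The line "Running on 32 devices." appears 32 times because each srun task prints it once. A minimal sketch of multi-host initialization that logs from a single process, assuming JAX's Slurm auto-detection (`jax.distributed.initialize` reads the coordinator address and process index from the Slurm environment):

# Sketch: initialize multi-host JAX under Slurm and log from one process only.
import jax

jax.distributed.initialize()  # auto-detects coordinator/rank from Slurm env

if jax.process_index() == 0:
    print(f"Running on {jax.device_count()} devices.")  # printed once, not 32x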
+5169,6307382,"train_dynamics.py",6722,0,"",python,selection_command +5170,6307383,"TERMINAL",0,0,"3271033133130",,terminal_output +5171,6307485,"train_dynamics.py",6744,0,"",python,selection_command +5172,6307634,"train_dynamics.py",6755,0,"",python,selection_command +5173,6308319,"TERMINAL",0,0,"43814424421",,terminal_output +5174,6308775,"train_dynamics.py",6756,0,"\n ",python,content +5175,6309363,"TERMINAL",0,0,"54925535532",,terminal_output +5176,6310321,"train_dynamics.py",6761,0,"p",python,content +5177,6310323,"train_dynamics.py",6762,0,"",python,selection_keyboard +5178,6310411,"TERMINAL",0,0,"654036646643",,terminal_output +5179,6310550,"train_dynamics.py",6762,0,"i",python,content +5180,6310551,"train_dynamics.py",6763,0,"",python,selection_keyboard +5181,6310896,"train_dynamics.py",6762,1,"",python,content +5182,6310920,"TERMINAL",0,0,"Counting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\n",,terminal_output +5183,6311054,"train_dynamics.py",6762,0,"r",python,content +5184,6311055,"train_dynamics.py",6763,0,"",python,selection_keyboard +5185,6311196,"train_dynamics.py",6763,0,"i",python,content +5186,6311197,"train_dynamics.py",6764,0,"",python,selection_keyboard +5187,6311197,"TERMINAL",0,0,"Counting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\n",,terminal_output +5188,6311271,"train_dynamics.py",6764,0,"n",python,content 
+5189,6311272,"train_dynamics.py",6765,0,"",python,selection_keyboard +5190,6311365,"TERMINAL",0,0,"Counting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\n",,terminal_output +5191,6311377,"train_dynamics.py",6765,0,"t",python,content +5192,6311378,"train_dynamics.py",6766,0,"",python,selection_keyboard +5193,6311456,"TERMINAL",0,0,"76147757754",,terminal_output +5194,6311492,"TERMINAL",0,0,"Counting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\nCounting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\n",,terminal_output +5195,6311609,"TERMINAL",0,0,"Counting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\n",,terminal_output +5196,6311660,"TERMINAL",0,0,"Counting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\n",,terminal_output +5197,6311728,"TERMINAL",0,0,"Counting all components: ['dynamics', 'lam', 'tokenizer']\r\nParameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\n",,terminal_output 
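The run logs per-component parameter counts such as {'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}. A minimal sketch of producing such a dict by summing leaf sizes over nested parameter trees; the component names match the log, but the shapes below are toy values:

# Sketch: per-component parameter counting like the log above.
import numpy as np

def count_params(tree):
    if isinstance(tree, dict):
        return sum(count_params(v) for v in tree.values())
    return int(np.asarray(tree).size)

components = {
    "dynamics": {"w": np.zeros((1024, 4096)), "b": np.zeros(4096)},  # toy shapes
    "lam": {"w": np.zeros((256, 256))},
    "tokenizer": {"w": np.zeros((512, 512))},
}
counts = {name: count_params(p) for name, p in components.items()}
counts["total"] = sum(counts.values())
print("Parameter counts:")
print(counts)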
+5198,6311833,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +5199,6311946,"train_dynamics.py",6761,5,"print",python,content +5200,6312556,"TERMINAL",0,0,"87258868865",,terminal_output +5201,6312773,"train_dynamics.py",6766,0,"()",python,content +5202,6312774,"train_dynamics.py",6767,0,"",python,selection_keyboard +5203,6313136,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250811_173547-3415275\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-causal-8-node-3415275\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/3415275\r\n",,terminal_output +5204,6313547,"TERMINAL",0,0,"98369979976",,terminal_output +5205,6314702,"TERMINAL",0,0,"50947204:008102087",,terminal_output +5206,6315002,"train_dynamics.py",6767,0,"n",python,content +5207,6315004,"train_dynamics.py",6768,0,"",python,selection_keyboard +5208,6315014,"TERMINAL",0,0,"Parameter counts:\r\n{'dynamics': 271932416, 'lam': 35115232, 'tokenizer': 33750256, 'total': 340797904}\r\n",,terminal_output +5209,6315138,"train_dynamics.py",6768,0,"n",python,content +5210,6315139,"train_dynamics.py",6769,0,"",python,selection_keyboard +5211,6315538,"train_dynamics.py",6769,0,".",python,content +5212,6315539,"train_dynamics.py",6770,0,"",python,selection_keyboard +5213,6315552,"train_dynamics.py",6770,0,"x",python,content +5214,6315553,"train_dynamics.py",6771,0,"",python,selection_keyboard +5215,6315674,"TERMINAL",0,0,"140581191198",,terminal_output +5216,6316215,"train_dynamics.py",6770,1,"",python,content +5217,6316235,"train_dynamics.py",6769,1,"",python,content +5218,6316601,"train_dynamics.py",6769,0,"x",python,content +5219,6316602,"train_dynamics.py",6770,0,"",python,selection_keyboard +5220,6316697,"TERMINAL",0,0,"2169226:0022309",,terminal_output +5221,6316817,"train_dynamics.py",6770,0,".",python,content +5222,6316818,"train_dynamics.py",6771,0,"",python,selection_keyboard +5223,6317492,"train_dynamics.py",6770,1,"",python,content +5224,6317739,"TERMINAL",0,0,"3272033133140",,terminal_output +5225,6317978,"train_dynamics.py",6769,0,"",python,selection_command +5226,6318743,"train_dynamics.py",6757,15,"",python,content +5227,6318813,"TERMINAL",0,0,"43814424421",,terminal_output +5228,6319840,"TERMINAL",0,0,"54925535532",,terminal_output +5229,6320908,"TERMINAL",0,0,"655036646643",,terminal_output +5230,6321971,"TERMINAL",0,0,"76147757754",,terminal_output +5231,6322969,"TERMINAL",0,0,"87258868865",,terminal_output +5232,6324116,"TERMINAL",0,0,"98369979976",,terminal_output +5233,6324492,"genie.py",0,0,"",python,tab +5234,6325075,"TERMINAL",0,0,"6:0094730108203087",,terminal_output +5235,6326163,"TERMINAL",0,0,"150581191198",,terminal_output +5236,6326830,"train_dynamics.py",0,0,"",python,tab +5237,6327169,"TERMINAL",0,0,"2169221022409",,terminal_output +5238,6327620,"train_dynamics.py",6757,0,"\n print(nnx)",python,content +5239,6327627,"train_dynamics.py",6762,0,"",python,selection_command +5240,6328212,"TERMINAL",0,0,"3273033133150",,terminal_output +5241,6329183,"train_dynamics.py",6758,15,"",python,content +5242,6329196,"train_dynamics.py",6762,0,"",python,selection_command +5243,6329255,"TERMINAL",0,0,"44925535532",,terminal_output 
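The sbatch script passes `--wandb_id $SLURM_JOB_ID`, and the log above shows the run syncing as dynamics-causal-8-node-3415275. Reusing the job ID as the run ID lets a requeued job attach to the same W&B run; a minimal sketch with the public wandb API (project and entity values taken from the log):

# Sketch: resume the same W&B run across Slurm requeues by reusing the job ID.
import os
import wandb

job_id = os.environ.get("SLURM_JOB_ID", "local")
run = wandb.init(
    project="jafar",
    entity="instant-uv",
    id=job_id,                              # stable across requeues
    name=f"dynamics-causal-8-node-{job_id}",
    resume="allow",                         # attach to the run if it exists
)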
+5244,6329291,"train_dynamics.py",6814,0,"",python,selection_command +5245,6330623,"train_dynamics.py",6866,0,"\n print(nnx)",python,content +5246,6330634,"train_dynamics.py",6871,0,"",python,selection_command +5247,6331411,"train_dynamics.py",6872,0,"",python,selection_command +5248,6331901,"train_dynamics.py",6873,0,"",python,selection_command +5249,6331955,"train_dynamics.py",6874,0,"",python,selection_command +5250,6331957,"train_dynamics.py",6875,0,"",python,selection_command +5251,6332019,"train_dynamics.py",6876,0,"",python,selection_command +5252,6332019,"train_dynamics.py",6877,0,"",python,selection_command +5253,6332137,"TERMINAL",0,0,"663:0147757754",,terminal_output +5254,6333138,"TERMINAL",0,0,"87258868865",,terminal_output +5255,6334185,"TERMINAL",0,0,"98369979976",,terminal_output +5256,6334805,"train_dynamics.py",6867,15,"",python,content +5257,6334933,"train_dynamics.py",6810,0,"",python,selection_command +5258,6335262,"TERMINAL",0,0,"102:005841219314198",,terminal_output +5259,6335288,"train_dynamics.py",6758,0,"",python,selection_command +5260,6335449,"train_dynamics.py",6809,0,"\n print(nnx)",python,content +5261,6335451,"train_dynamics.py",6814,0,"",python,selection_command +5262,6336276,"TERMINAL",0,0,"2169222022509",,terminal_output +5263,6336609,"train_dynamics.py",6810,15,"",python,content +5264,6336623,"train_dynamics.py",6814,0,"",python,selection_command +5265,6336800,"train_dynamics.py",6867,0,"",python,selection_command +5266,6337388,"TERMINAL",0,0,"327403313317:00",,terminal_output +5267,6338401,"TERMINAL",0,0,"43814424421",,terminal_output +5268,6338497,"train_dynamics.py",6867,0,"\n",python,content +5269,6339448,"TERMINAL",0,0,"54925535532",,terminal_output +5270,6340385,"train_dynamics.py",6868,0,"k",python,content +5271,6340386,"train_dynamics.py",6869,0,"",python,selection_keyboard +5272,6340481,"TERMINAL",0,0,"651036646643",,terminal_output +5273,6340884,"train_dynamics.py",6868,1,"",python,content +5274,6341459,"train_dynamics.py",6868,0,"\n print(nnx)",python,content +5275,6341463,"train_dynamics.py",6873,0,"",python,selection_command +5276,6341525,"TERMINAL",0,0,"76147757754",,terminal_output +5277,6342560,"TERMINAL",0,0,"87258868865",,terminal_output +5278,6343608,"TERMINAL",0,0,"98369979976",,terminal_output +5279,6343747,"train_dynamics.py",6868,0,"\n",python,content +5280,6343758,"train_dynamics.py",6869,0," ",python,content +5281,6344658,"TERMINAL",0,0,"2094750308405087",,terminal_output +5282,6345689,"TERMINAL",0,0,"110581191198",,terminal_output +5283,6345768,"train_dynamics.py",6873,0,"s",python,content +5284,6345770,"train_dynamics.py",6874,0,"",python,selection_keyboard +5285,6345924,"train_dynamics.py",6874,0,"t",python,content +5286,6345925,"train_dynamics.py",6875,0,"",python,selection_keyboard +5287,6345980,"train_dynamics.py",6875,0,"a",python,content +5288,6345982,"train_dynamics.py",6876,0,"",python,selection_keyboard +5289,6346098,"train_dynamics.py",6876,0,"t",python,content +5290,6346099,"train_dynamics.py",6877,0,"",python,selection_keyboard +5291,6346203,"train_dynamics.py",6877,0,"e",python,content +5292,6346204,"train_dynamics.py",6878,0,"",python,selection_keyboard +5293,6346600,"train_dynamics.py",6878,0," ",python,content +5294,6346600,"train_dynamics.py",6879,0,"",python,selection_keyboard +5295,6346756,"TERMINAL",0,0,"21692230227:009",,terminal_output +5296,6346781,"train_dynamics.py",6879,0,"=",python,content +5297,6346781,"train_dynamics.py",6880,0,"",python,selection_keyboard +5298,6346860,"train_dynamics.py",6880,0," 
",python,content +5299,6346861,"train_dynamics.py",6881,0,"",python,selection_keyboard +5300,6347743,"train_dynamics.py",6881,0,"n",python,content +5301,6347744,"train_dynamics.py",6882,0,"",python,selection_keyboard +5302,6347800,"TERMINAL",0,0,"3275033133110",,terminal_output +5303,6347866,"train_dynamics.py",6882,0,"n",python,content +5304,6347867,"train_dynamics.py",6883,0,"",python,selection_keyboard +5305,6348015,"train_dynamics.py",6883,0,"x",python,content +5306,6348016,"train_dynamics.py",6884,0,"",python,selection_keyboard +5307,6348127,"train_dynamics.py",6884,0,".",python,content +5308,6348128,"train_dynamics.py",6885,0,"",python,selection_keyboard +5309,6348749,"train_dynamics.py",6885,0,"s",python,content +5310,6348750,"train_dynamics.py",6886,0,"",python,selection_keyboard +5311,6348840,"TERMINAL",0,0,"43814424421",,terminal_output +5312,6348848,"train_dynamics.py",6886,0,"t",python,content +5313,6348849,"train_dynamics.py",6887,0,"",python,selection_keyboard +5314,6349063,"train_dynamics.py",6887,0,"t",python,content +5315,6349064,"train_dynamics.py",6888,0,"",python,selection_keyboard +5316,6349146,"train_dynamics.py",6888,0,"a",python,content +5317,6349147,"train_dynamics.py",6889,0,"",python,selection_keyboard +5318,6349461,"train_dynamics.py",6888,1,"",python,content +5319,6349566,"train_dynamics.py",6887,1,"",python,content +5320,6349665,"train_dynamics.py",6887,0,"a",python,content +5321,6349666,"train_dynamics.py",6888,0,"",python,selection_keyboard +5322,6349830,"train_dynamics.py",6888,0,"t",python,content +5323,6349831,"train_dynamics.py",6889,0,"",python,selection_keyboard +5324,6349884,"TERMINAL",0,0,"54925535532",,terminal_output +5325,6349909,"train_dynamics.py",6889,0,"e",python,content +5326,6349911,"train_dynamics.py",6890,0,"",python,selection_keyboard +5327,6350557,"train_dynamics.py",6890,0,".",python,content +5328,6350560,"train_dynamics.py",6891,0,"",python,selection_keyboard +5329,6350928,"TERMINAL",0,0,"652036646643",,terminal_output +5330,6351634,"train_dynamics.py",6890,1,"",python,content +5331,6351974,"TERMINAL",0,0,"76147757754",,terminal_output +5332,6352199,"train_dynamics.py",6890,0,"()",python,content +5333,6352200,"train_dynamics.py",6891,0,"",python,selection_keyboard +5334,6352473,"train_dynamics.py",6891,0,"g",python,content +5335,6352474,"train_dynamics.py",6892,0,"",python,selection_keyboard +5336,6352536,"train_dynamics.py",6892,0,"e",python,content +5337,6352538,"train_dynamics.py",6893,0,"",python,selection_keyboard +5338,6352716,"train_dynamics.py",6893,0,"n",python,content +5339,6352717,"train_dynamics.py",6894,0,"",python,selection_keyboard +5340,6352845,"train_dynamics.py",6894,0,"i",python,content +5341,6352846,"train_dynamics.py",6895,0,"",python,selection_keyboard +5342,6352896,"train_dynamics.py",6895,0,"e",python,content +5343,6352897,"train_dynamics.py",6896,0,"",python,selection_keyboard +5344,6353018,"TERMINAL",0,0,"87258868865",,terminal_output +5345,6353615,"train_dynamics.py",6895,0,"",python,selection_command +5346,6353949,"train_dynamics.py",6911,0,"",python,selection_command +5347,6354086,"TERMINAL",0,0,"98369979976",,terminal_output +5348,6354595,"train_dynamics.py",6910,0,"",python,selection_command +5349,6354788,"train_dynamics.py",6909,0,"",python,selection_command +5350,6354928,"train_dynamics.py",6908,0,"",python,selection_command +5351,6355108,"TERMINAL",0,0,"309475:00408507:0087",,terminal_output +5352,6355452,"train_dynamics.py",6908,3,"",python,content 
+5353,6356170,"TERMINAL",0,0,"120581191198",,terminal_output +5354,6356873,"train_dynamics.py",6908,0,"n",python,content +5355,6356874,"train_dynamics.py",6909,0,"",python,selection_keyboard +5356,6356994,"train_dynamics.py",6909,0,"n",python,content +5357,6356995,"train_dynamics.py",6910,0,"",python,selection_keyboard +5358,6357082,"train_dynamics.py",6910,0,"x",python,content +5359,6357083,"train_dynamics.py",6911,0,"",python,selection_keyboard +5360,6357212,"TERMINAL",0,0,"2169224022109",,terminal_output +5361,6357231,"train_dynamics.py",6911,0,".",python,content +5362,6357232,"train_dynamics.py",6912,0,"",python,selection_keyboard +5363,6358250,"TERMINAL",0,0,"3383:0144244221",,terminal_output +5364,6359326,"train_dynamics.py",6911,0,"",python,selection_command +5365,6359345,"TERMINAL",0,0,"54925535532",,terminal_output +5366,6359456,"train_dynamics.py",6913,0,"\n ",python,content +5367,6359928,"train_dynamics.py",6918,0,"k",python,content +5368,6359929,"train_dynamics.py",6919,0,"",python,selection_keyboard +5369,6360379,"TERMINAL",0,0,"653036646643",,terminal_output +5370,6360440,"train_dynamics.py",6918,1,"",python,content +5371,6360576,"train_dynamics.py",6917,0,"",python,selection_command +5372,6360710,"train_dynamics.py",6901,0,"",python,selection_command +5373,6361097,"train_dynamics.py",6913,0,"",python,selection_command +5374,6361388,"TERMINAL",0,0,"76147757754",,terminal_output +5375,6361583,"train_dynamics.py",6912,0,"",python,selection_command +5376,6362141,"train_dynamics.py",6912,0,"d",python,content +5377,6362142,"train_dynamics.py",6913,0,"",python,selection_keyboard +5378,6362262,"train_dynamics.py",6913,0,"i",python,content +5379,6362263,"train_dynamics.py",6914,0,"",python,selection_keyboard +5380,6362357,"train_dynamics.py",6914,0,"s",python,content +5381,6362358,"train_dynamics.py",6915,0,"",python,selection_keyboard +5382,6362442,"TERMINAL",0,0,"87258868865",,terminal_output +5383,6362472,"train_dynamics.py",6915,0,"p",python,content +5384,6362472,"train_dynamics.py",6916,0,"",python,selection_keyboard +5385,6362603,"train_dynamics.py",6916,0,"l",python,content +5386,6362604,"train_dynamics.py",6917,0,"",python,selection_keyboard +5387,6362615,"train_dynamics.py",6917,0,"a",python,content +5388,6362616,"train_dynamics.py",6918,0,"",python,selection_keyboard +5389,6363134,"train_dynamics.py",6912,6,"display",python,content +5390,6363500,"TERMINAL",0,0,"98369979976",,terminal_output +5391,6364569,"TERMINAL",0,0,"40947105087:001087",,terminal_output +5392,6364766,"train_dynamics.py",6919,0,"/",python,content +5393,6364767,"train_dynamics.py",6920,0,"",python,selection_keyboard +5394,6365617,"TERMINAL",0,0,"130581191198",,terminal_output +5395,6365741,"train_dynamics.py",6919,1,"",python,content +5396,6366207,"train_dynamics.py",6919,0,"()",python,content +5397,6366208,"train_dynamics.py",6920,0,"",python,selection_keyboard +5398,6366523,"train_dynamics.py",6920,0,"s",python,content +5399,6366524,"train_dynamics.py",6921,0,"",python,selection_keyboard +5400,6366579,"train_dynamics.py",6921,0,"t",python,content +5401,6366580,"train_dynamics.py",6922,0,"",python,selection_keyboard +5402,6366641,"train_dynamics.py",6922,0,"a",python,content +5403,6366642,"train_dynamics.py",6923,0,"",python,selection_keyboard +5404,6366666,"TERMINAL",0,0,"2169225022209",,terminal_output +5405,6366729,"train_dynamics.py",6923,0,"t",python,content +5406,6366730,"train_dynamics.py",6924,0,"",python,selection_keyboard +5407,6366813,"train_dynamics.py",6924,0,"e",python,content 
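The edits above assemble `nnx.display(state)` in train_dynamics.py; `display` is Flax NNX's pretty-printer for modules and state trees. A minimal sketch on a toy module, assuming the `flax.nnx` API:

# Sketch: inspect an NNX state tree the way the edited script does.
from flax import nnx

model = nnx.Linear(in_features=4, out_features=2, rngs=nnx.Rngs(0))
graphdef, state = nnx.split(model)  # separate structure from parameters
nnx.display(state)                  # pretty-prints the parameter tree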
+5408,6366814,"train_dynamics.py",6925,0,"",python,selection_keyboard +5409,6367134,"train_dynamics.py",6924,0,"",python,selection_command +5410,6367730,"TERMINAL",0,0,"3271033133130",,terminal_output +5411,6368767,"TERMINAL",0,0,"43814424421",,terminal_output +5412,6369789,"TERMINAL",0,0,"54925535532",,terminal_output +5413,6370838,"TERMINAL",0,0,"654036646643",,terminal_output +5414,6371909,"TERMINAL",0,0,"76147757754",,terminal_output +5415,6372924,"TERMINAL",0,0,"87258868865",,terminal_output +5416,6373961,"TERMINAL",0,0,"98369979976",,terminal_output +5417,6375115,"TERMINAL",0,0,"50947205:008102087",,terminal_output +5418,6376139,"TERMINAL",0,0,"140581191198",,terminal_output +5419,6377164,"TERMINAL",0,0,"2169227:0022309",,terminal_output +5420,6378186,"TERMINAL",0,0,"3272033133140",,terminal_output +5421,6379210,"TERMINAL",0,0,"43814424421",,terminal_output +5422,6380243,"TERMINAL",0,0,"555036646643",,terminal_output +5423,6381363,"TERMINAL",0,0,"76147757754",,terminal_output +5424,6382340,"TERMINAL",0,0,"87258868865",,terminal_output +5425,6383409,"TERMINAL",0,0,"98369979976",,terminal_output +5426,6384538,"TERMINAL",0,0,"7:0094730108203087",,terminal_output +5427,6385478,"TERMINAL",0,0,"150581191198",,terminal_output +5428,6386583,"TERMINAL",0,0,"2169221022409",,terminal_output +5429,6387608,"TERMINAL",0,0,"3273033133150",,terminal_output +5430,6388734,"TERMINAL",0,0,"43814424421",,terminal_output +5431,6389675,"TERMINAL",0,0,"54925535532",,terminal_output +5432,6389867,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\n",,terminal_output +5433,6389940,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\n",,terminal_output +5434,6389998,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\n",,terminal_output +5435,6390050,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples (shard 
32).\r\n",,terminal_output +5436,6390730,"TERMINAL",0,0,"654:0036646643",,terminal_output +5437,6391806,"TERMINAL",0,0,"76147757754",,terminal_output +5438,6392818,"TERMINAL",0,0,"87258868865",,terminal_output +5439,6393886,"TERMINAL",0,0,"",,terminal_focus +5440,6393953,"TERMINAL",0,0,"98369979976",,terminal_output +5441,6394980,"TERMINAL",0,0,"1094740208304087",,terminal_output +5442,6395651,"TERMINAL",0,0,"source /home/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/bin/activate",,terminal_command +5443,6395659,"TERMINAL",0,0,"]633;E;2025-08-11 17:37:11 source /home/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/bin/activate;686e78ec-8807-43f4-8f60-628f6dc2af90]633;C]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D;0",,terminal_output +5444,6395960,"TERMINAL",0,0,"13:00581191198",,terminal_output +5445,6396636,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\n",,terminal_output +5446,6396752,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\n",,terminal_output +5447,6397012,"TERMINAL",0,0,"2169222022509",,terminal_output +5448,6398058,"TERMINAL",0,0,"327403313318:00",,terminal_output +5449,6399178,"TERMINAL",0,0,"43814424421",,terminal_output +5450,6400209,"TERMINAL",0,0,"54925535532",,terminal_output +5451,6400274,"TERMINAL",0,0,"WARNING:absl:Dropping 18 examples of 89394 examples (shard 32).\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 78000\r\nWARNING:absl:Missing metrics for step 60000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/078000/metrics/metrics not found.\r\nWARNING:absl:Missing metrics for step 40000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/060000/metrics/metrics not found.\r\nERROR:absl:File 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/040000/metrics/metrics not found.\r\n[the same WARNING:absl:Missing metrics for step N / ERROR:absl:File .../N/metrics/metrics not found. pair repeats many more times, interleaved across processes, for steps 020000, 040000, 060000, 078000, 079000, and 080000]\r\n",,terminal_output
+5452,6400369,"TERMINAL",0,0,"WARNING:absl:Missing metrics for step 79000\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401/079000/metrics/metrics not found.\r\n[the same pair repeats several more times for steps 20000 and 79000]\r\n",,terminal_output
+5453,6400883,"TERMINAL",0,0,"^Clloc --time=01:00:00 --partition=dev_cpuonly --nodes=1 --ntasks-per-node=1 --cpus-per-task=5",,terminal_command
+5454,6400918,"TERMINAL",0,0,"^C\r\n\r[?2004l\r[?2004h[?2004l\r\r\n]633;E;;686e78ec-8807-43f4-8f60-628f6dc2af90]633;C]0;tum_cte0515@hkn1993:~/Projects/jafar]633;D",,terminal_output
+5455,6400994,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n[the same UserWarning repeats about twenty more times, once per restoring process]\r\n",,terminal_output
+5456,6401054,"TERMINAL",0,0,"[two more copies of the same orbax UserWarning]\r\n",,terminal_output
+5457,6401110,"TERMINAL",0,0,"[one more copy of the same orbax UserWarning]\r\n",,terminal_output
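The orbax UserWarning above fires when a checkpoint is restored without explicit sharding metadata, so orbax falls back to the sharding file on disk. A minimal sketch of silencing it, assuming orbax's PyTreeCheckpointer API; the toy state, mesh layout, and temp path are illustrative assumptions, not taken from the recorded run, and exact keyword names vary across orbax versions:

```python
# Minimal sketch, assuming orbax's PyTreeCheckpointer API; the toy state,
# mesh layout, and temp path are illustrative, not from the recorded run.
import os
import tempfile

import jax
import jax.numpy as jnp
import numpy as np
import orbax.checkpoint as ocp

state = {"w": jnp.zeros((4, 4))}  # hypothetical stand-in for the model state
mesh = jax.sharding.Mesh(np.array(jax.devices()), ("data",))
replicated = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec())

path = os.path.join(tempfile.mkdtemp(), "ckpt")
ckptr = ocp.PyTreeCheckpointer()
ckptr.save(path, state)

# Restoring without restore_args reproduces the UserWarning in the log:
# orbax populates sharding from the sharding file on disk. Supplying
# ArrayRestoreArgs with an explicit sharding avoids that fallback.
restore_args = jax.tree_util.tree_map(
    lambda _: ocp.ArrayRestoreArgs(sharding=replicated), state
)
restored = ckptr.restore(path, item=state, restore_args=restore_args)
```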
+5458,6401214,"TERMINAL",0,0,"651036646643",,terminal_output
+5459,6402287,"TERMINAL",0,0,"77258868865",,terminal_output
+5460,6403286,"TERMINAL",0,0,"98369979976",,terminal_output
+5461,6404175,"TERMINAL",0,0,"Starting training from step 0...\r\n[the same line repeats 24 times, once per process]",,terminal_output
+5462,6404233,"TERMINAL",0,0,"[4 more copies of the same line]",,terminal_output
+5463,6404291,"TERMINAL",0,0,"[3 more copies of the same line]",,terminal_output
+5464,6404352,"TERMINAL",0,0,"2094750308405087",,terminal_output
+5465,6404352,"TERMINAL",0,0,"Starting training from step 0...\r\n",,terminal_output
+5466,6405373,"TERMINAL",0,0,"110581191198",,terminal_output
+5467,6406452,"TERMINAL",0,0,"21692230228:009",,terminal_output
+5468,6407460,"TERMINAL",0,0,"3275033133110",,terminal_output
+5469,6408600,"TERMINAL",0,0,"43814424421",,terminal_output
+5470,6409624,"TERMINAL",0,0,"54925535532",,terminal_output
+5471,6410648,"TERMINAL",0,0,"652036646643",,terminal_output
+5472,6411640,"TERMINAL",0,0,"76147757754",,terminal_output
+5473,6412719,"TERMINAL",0,0,"87258868865",,terminal_output
+5474,6413768,"TERMINAL",0,0,"98369979976",,terminal_output
+5475,6414773,"TERMINAL",0,0,"309476:00408508:0087",,terminal_output
+5476,6415820,"TERMINAL",0,0,"120581191198",,terminal_output
+5477,6416867,"TERMINAL",0,0,"2169224022109",,terminal_output
+5478,6417918,"TERMINAL",0,0,"3274:0033133120",,terminal_output
+5479,6418741,"TERMINAL",0,0,"Filtering out episode with length 14, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+5480,6418896,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 12, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+5481,6419002,"TERMINAL",0,0,"43814424421",,terminal_output
+5482,6419003,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+5483,6419112,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+5484,6419350,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 9, which 
is shorter than the requested sequence length 16.\r\n",,terminal_output +5485,6419455,"TERMINAL",0,0,"Filtering out episode with length 4, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5486,6419514,"TERMINAL",0,0,"Filtering out episode with length 12, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5487,6419720,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5488,6420034,"TERMINAL",0,0,"54925535532",,terminal_output +5489,6420099,"TERMINAL",0,0,"Filtering out episode with length 11, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5490,6420152,"TERMINAL",0,0,"Filtering out episode with length 15, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5491,6420444,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5492,6420833,"TERMINAL",0,0,"Filtering out episode with length 3, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5493,6421056,"TERMINAL",0,0,"653036646643",,terminal_output +5494,6421203,"TERMINAL",0,0,"Filtering out episode with length 14, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5495,6421320,"TERMINAL",0,0,"Filtering out episode with length 6, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5496,6421711,"TERMINAL",0,0,"Filtering out episode with length 2, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5497,6421815,"TERMINAL",0,0,"Filtering out episode with length 14, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5498,6422118,"TERMINAL",0,0,"76147757754",,terminal_output +5499,6422273,"TERMINAL",0,0,"Filtering out episode with length 15, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5500,6422800,"TERMINAL",0,0,"Filtering out episode with length 12, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5501,6423091,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5",,terminal_command +5502,6423142,"TERMINAL",0,0,"Filtering out episode with length 10, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5503,6423142,"TERMINAL",0,0,"]633;E;2025-08-11 17:37:38 salloc --time=01:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5;686e78ec-8807-43f4-8f60-628f6dc2af90]633;Csalloc: Granted job allocation 3415322\r\n",,terminal_output +5504,6423154,"TERMINAL",0,0,"83220:00\t 1 hkn0533275interact 3:27\t 8 hkn[0522-0527,0530-0531]11104:326105079dy1:14:0564046 5:46:08\t 1 hkn07282401 accelerat train_to1-01:55:48\t 8 hkn[0802,0804-0806,0808,0810,0813-0814]4461037:580309208511616623414280 cpuonly wrap tum_cte0 R 3:58:25\t 1 hkn0238",,terminal_output +5505,6423270,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output +5506,6423416,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5507,6423526,"TERMINAL",0,0,"Filtering out episode with length 2, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5508,6424063,"TERMINAL",0,0,"Filtering 
out episode with length 1, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 15, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5509,6424191,"TERMINAL",0,0,"918369979976",,terminal_output +5510,6424191,"TERMINAL",0,0,"Filtering out episode with length 6, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5511,6424320,"TERMINAL",0,0,"Filtering out episode with length 6, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5512,6424678,"TERMINAL",0,0,"Filtering out episode with length 12, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 4, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5513,6425290,"TERMINAL",0,0,"4033058115198:011198",,terminal_output +5514,6425817,"TERMINAL",0,0,"Filtering out episode with length 4, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5515,6426108,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5516,6426291,"TERMINAL",0,0,"24169225022209",,terminal_output +5517,6426467,"TERMINAL",0,0,"Filtering out episode with length 12, which is shorter than the requested sequence length 16.\r\nFiltering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5518,6426620,"TERMINAL",0,0,"Filtering out episode with length 15, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5519,6426682,"TERMINAL",0,0,"Filtering out episode with length 8, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5520,6426927,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5521,6427082,"TERMINAL",0,0,"Filtering out episode with length 12, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5522,6427296,"TERMINAL",0,0,"s",,terminal_output +5523,6427306,"TERMINAL",0,0,"Filtering out episode with length 12, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5524,6427362,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +5525,6427478,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +5526,6427585,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +5527,6427741,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +5528,6427786,"TERMINAL",0,0,"35271033133130",,terminal_output +5529,6427902,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +5530,6427965,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +5531,6428030,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +5532,6428183,"TERMINAL",0,0,"Filtering out episode with length 13, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5533,6428194,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +5534,6428536,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +5535,6428879,"TERMINAL",0,0,"463814424421",,terminal_output +5536,6429095,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +5537,6429155,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +5538,6429839,"TERMINAL",0,0,"[?25l/[?25h",,terminal_output +5539,6429839,"TERMINAL",0,0,"Filtering out episode with length 6, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5540,6429901,"TERMINAL",0,0,"574925535532",,terminal_output +5541,6430134,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +5542,6430237,"TERMINAL",0,0,"[?25li[?25h",,terminal_output 
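The "Filtering out episode ..." lines above come from the dataloader dropping clips shorter than the requested sequence length (16 in this run). A minimal sketch of that filter, assuming a generator-style loader; only the message wording and the threshold of 16 come from the log, while the function name and episode representation are assumptions:

```python
# Minimal sketch of the length filter implied by the messages above; the
# function name and episode representation are assumptions, not jafar's code.
from typing import Iterable, Iterator, Sequence

def filter_short_episodes(
    episodes: Iterable[Sequence], seq_len: int = 16
) -> Iterator[Sequence]:
    """Yield only episodes long enough to cut a training sequence from."""
    for episode in episodes:
        if len(episode) < seq_len:
            print(
                f"Filtering out episode with length {len(episode)}, which is "
                f"shorter than the requested sequence length {seq_len}."
            )
            continue
        yield episode

# Example: the length-3 episode is dropped, the length-20 episode survives.
kept = list(filter_short_episodes([list(range(3)), list(range(20))]))
assert [len(ep) for ep in kept] == [20]
```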
+5543,6430299,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +5544,6430375,"TERMINAL",0,0,"Filtering out episode with length 2, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5545,6430615,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5546,6430969,"TERMINAL",0,0,"6854036646643",,terminal_output +5547,6431033,"TERMINAL",0,0,"[?25l/[?25h",,terminal_output +5548,6431093,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +5549,6431297,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +5550,6431358,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +5551,6431549,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +5552,6431717,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5553,6431748,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +5554,6432022,"TERMINAL",0,0,"796147757754",,terminal_output +5555,6432072,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +5556,6432540,"TERMINAL",0,0,"Filtering out episode with length 14, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5557,6432826,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +5558,6432960,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +5559,6433048,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +5560,6433079,"TERMINAL",0,0,"8107258868865",,terminal_output +5561,6433201,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +5562,6433412,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output +5563,6433910,"TERMINAL",0,0,"srun",,terminal_focus +5564,6434092,"TERMINAL",0,0,"918369979976",,terminal_output +5565,6435158,"TERMINAL",0,0,"502947206:008102087",,terminal_output +5566,6436186,"TERMINAL",0,0,"1340581191198",,terminal_output +5567,6437216,"TERMINAL",0,0,"24169228:0022309",,terminal_output +5568,6438294,"TERMINAL",0,0,"46382144244241",,terminal_output +5569,6439311,"TERMINAL",0,0,"574925535532",,terminal_output +5570,6439447,"TERMINAL",0,0,"salloc",,terminal_focus +5571,6440383,"TERMINAL",0,0,"6855036646643",,terminal_output +5572,6441471,"TERMINAL",0,0,"796147757754",,terminal_output +5573,6442500,"TERMINAL",0,0,"8207258868865",,terminal_output +5574,6443312,"TERMINAL",0,0,"2025-08-11 17:37:58.991489: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5575,6443508,"TERMINAL",0,0,"918369979976",,terminal_output +5576,6443623,"TERMINAL",0,0,"2025-08-11 17:37:59.373780: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5577,6443998,"TERMINAL",0,0,"2025-08-11 17:37:59.780296: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-08-11 17:37:59.780338: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5578,6444325,"TERMINAL",0,0,"2025-08-11 17:38:00.108611: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5579,6444619,"TERMINAL",0,0,"8:00294730108203087",,terminal_output +5580,6445717,"TERMINAL",0,0,"1350581191198",,terminal_output +5581,6446729,"TERMINAL",0,0,"24169221022409",,terminal_output +5582,6447039,"TERMINAL",0,0,"2025-08-11 17:38:02.822115: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5583,6447819,"TERMINAL",0,0,"35273033133150",,terminal_output +5584,6448801,"TERMINAL",0,0,"463814424421",,terminal_output +5585,6449847,"TERMINAL",0,0,"574925535532",,terminal_output +5586,6450380,"TERMINAL",0,0,"salloc: Nodes hkn0533 are ready for job\r\n",,terminal_output +5587,6450492,"TERMINAL",0,0,"source .venv/bin/activate\r\n",,terminal_output +5588,6450898,"TERMINAL",0,0,"6855:0036646643",,terminal_output +5589,6451535,"TERMINAL",0,0,"]0;tum_cte0515@hkn0533:~/Projects/jafar[?2004h[tum_cte0515@hkn0533 jafar]$ source .venv/bin/activate\r\n[?2004l\r]0;tum_cte0515@hkn0533:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0533 jafar]$ ",,terminal_output +5590,6451935,"TERMINAL",0,0,"796147757754",,terminal_output +5591,6452986,"TERMINAL",0,0,"8307258868865",,terminal_output +5592,6454034,"TERMINAL",0,0,"918369979976",,terminal_output +5593,6455189,"TERMINAL",0,0,"10294740208304087",,terminal_output +5594,6456215,"TERMINAL",0,0,"134:00581191198",,terminal_output +5595,6457175,"TERMINAL",0,0,"24169222022509",,terminal_output +5596,6458266,"TERMINAL",0,0,"3638414424429:01",,terminal_output +5597,6459288,"TERMINAL",0,0,"574925535532",,terminal_output +5598,6460380,"TERMINAL",0,0,"6851036646643",,terminal_output +5599,6461437,"TERMINAL",0,0,"796147757754",,terminal_output +5600,6462420,"TERMINAL",0,0,"8407258868865",,terminal_output +5601,6463459,"TERMINAL",0,0,"918369979976",,terminal_output +5602,6464510,"TERMINAL",0,0,"20294750308405087",,terminal_output +5603,6465638,"TERMINAL",0,0,"1310581191198",,terminal_output +5604,6466662,"TERMINAL",0,0,"241692230229:009",,terminal_output +5605,6467684,"TERMINAL",0,0,"35275033133110",,terminal_output +5606,6468692,"TERMINAL",0,0,"463814424421",,terminal_output +5607,6469836,"TERMINAL",0,0,"574925535532",,terminal_output +5608,6470790,"TERMINAL",0,0,"6852036646643",,terminal_output +5609,6471837,"TERMINAL",0,0,"796147757754",,terminal_output +5610,6472896,"TERMINAL",0,0,"8507258868865",,terminal_output +5611,6474033,"TERMINAL",0,0,"918369979976",,terminal_output +5612,6475060,"TERMINAL",0,0,"3029477:00408509:0087",,terminal_output 
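At this point `salloc --time=01:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5` has been granted and the session is waiting for the node. A quick way to confirm, once inside the allocation with the project's `.venv` activated, that the four requested GPUs are actually visible to JAX is a one-off device check; a minimal sketch (not part of the recorded session; `jax.distributed.initialize()` is only needed for multi-process runs, as in `train_dynamics.py`):

```python
# Sanity check, assuming it runs inside the salloc'd job with .venv active.
import jax

print("process index:", jax.process_index())
print("local devices:", jax.local_device_count())  # expect 4 with --gres=gpu:4
print("global devices:", jax.device_count())       # 4 per node * number of nodes
for d in jax.devices():
    print(d.platform, d.id)
```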
+5613,6476081,"TERMINAL",0,0,"1320581191198",,terminal_output +5614,6477078,"TERMINAL",0,0,"24169224022109",,terminal_output +5615,6478130,"TERMINAL",0,0,"35275:0033133120",,terminal_output +5616,6479257,"TERMINAL",0,0,"463814424421",,terminal_output +5617,6480243,"TERMINAL",0,0,"574925535532",,terminal_output +5618,6481309,"TERMINAL",0,0,"6963147757754",,terminal_output +5619,6482480,"TERMINAL",0,0,"81:007258868865",,terminal_output +5620,6483357,"TERMINAL",0,0,"918369979976",,terminal_output +5621,6484476,"TERMINAL",0,0,"402947105089:001087",,terminal_output +5622,6484997,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,0,"",shellscript,tab +5623,6485433,"TERMINAL",0,0,"1330581191198",,terminal_output +5624,6486268,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",2007,0,"",shellscript,selection_mouse +5625,6486269,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",2006,0,"",shellscript,selection_command +5626,6486665,"TERMINAL",0,0,"24169225022209",,terminal_output +5627,6486769,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",2002,0,"",shellscript,selection_mouse +5628,6487759,"TERMINAL",0,0,"35271033133130",,terminal_output +5629,6488782,"TERMINAL",0,0,"463814424421",,terminal_output +5630,6489805,"TERMINAL",0,0,"574925535532",,terminal_output +5631,6490840,"TERMINAL",0,0,"6854036646643",,terminal_output +5632,6491882,"TERMINAL",0,0,"796147757754",,terminal_output +5633,6492948,"TERMINAL",0,0,"8107258868865",,terminal_output +5634,6494002,"TERMINAL",0,0,"918369979976",,terminal_output +5635,6495037,"TERMINAL",0,0,"502947207:008102087",,terminal_output +5636,6496162,"TERMINAL",0,0,"1340581191198",,terminal_output +5637,6497182,"TERMINAL",0,0,"24169229:0022309",,terminal_output +5638,6498201,"TERMINAL",0,0,"35272033133140",,terminal_output +5639,6499204,"TERMINAL",0,0,"463814424421",,terminal_output +5640,6500380,"TERMINAL",0,0,"5855036646643",,terminal_output +5641,6501383,"TERMINAL",0,0,"796147757754",,terminal_output +5642,6502400,"TERMINAL",0,0,"8207258868865",,terminal_output +5643,6503424,"TERMINAL",0,0,"918369979976",,terminal_output +5644,6503910,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",2070,0,"",shellscript,selection_mouse +5645,6504447,"TERMINAL",0,0,"9:00294730108203087",,terminal_output +5646,6504634,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",2048,0,"",shellscript,selection_mouse +5647,6505481,"TERMINAL",0,0,"1350581191198",,terminal_output +5648,6506304,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1985,0,"",shellscript,selection_mouse +5649,6506401,"TERMINAL",0,0,"2025-08-11 17:39:02.167137: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_1_bfc) ran out of memory trying to allocate 17.79GiB (rounded to 19101157120)requested by op \r\nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \r\nCurrent allocation summary follows.\r\nCurrent allocation summary follows.\r\n2025-08-11 17:39:02.171960: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] ****************************************____________________________________________________________\r\nE0811 17:39:02.172691 847991 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 19101156952 bytes. 
[tf-allocator-allocation-error='']\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 365, in \r\n for videos in dataloader:\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\r\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 19101156952 bytes.\r\n",,terminal_output +5650,6506525,"TERMINAL",0,0,"24169221022409",,terminal_output +5651,6506826,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1983,0,"",shellscript,selection_mouse +5652,6506862,"TERMINAL",0,0,"2025-08-11 17:39:02.629680: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_1_bfc) ran out of memory trying to allocate 17.79GiB (rounded to 19101157120)requested by op \r\nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \r\nCurrent allocation summary follows.\r\nCurrent allocation summary follows.\r\n2025-08-11 17:39:02.634388: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] ****************************************____________________________________________________________\r\nE0811 17:39:02.635104 2028451 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 19101156952 bytes. [tf-allocator-allocation-error='']\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 365, in \r\n for videos in dataloader:\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\r\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 19101156952 bytes.\r\n",,terminal_output +5653,6507576,"TERMINAL",0,0,"35273033133150",,terminal_output +5654,6508555,"TERMINAL",0,0,"2025-08-11 17:39:04.255942: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_0_bfc) ran out of memory trying to allocate 17.79GiB (rounded to 19101157120)requested by op \r\nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \r\nCurrent allocation summary follows.\r\nCurrent allocation summary follows.\r\n2025-08-11 17:39:04.260808: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] ****************************************____________________________________________________________\r\nE0811 17:39:04.261589 3343067 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 19101156952 bytes. 
[tf-allocator-allocation-error='']\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 365, in \r\n for videos in dataloader:\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\r\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 19101156952 bytes.\r\n2025-08-11 17:39:04.319350: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_0_bfc) ran out of memory trying to allocate 17.79GiB (rounded to 19101157120)requested by op \r\nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \r\nCurrent allocation summary follows.\r\nCurrent allocation summary follows.\r\n2025-08-11 17:39:04.324194: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] ****************************************____________________________________________________________\r\nE0811 17:39:04.324900 987015 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 19101156952 bytes. [tf-allocator-allocation-error='']\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 365, in \r\n for videos in dataloader:\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\r\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 19101156952 bytes.\r\n",,terminal_output +5655,6508629,"TERMINAL",0,0,"463814424421",,terminal_output +5656,6509055,"TERMINAL",0,0,"2025-08-11 17:39:04.789041: W external/xla/xla/tsl/framework/bfc_allocator.cc:501] Allocator (GPU_3_bfc) ran out of memory trying to allocate 17.79GiB (rounded to 19101157120)requested by op \r\nIf the cause is memory fragmentation maybe the environment variable 'TF_GPU_ALLOCATOR=cuda_malloc_async' will improve the situation. \r\nCurrent allocation summary follows.\r\nCurrent allocation summary follows.\r\n2025-08-11 17:39:04.793870: W external/xla/xla/tsl/framework/bfc_allocator.cc:512] ****************************************____________________________________________________________\r\nE0811 17:39:04.794601 2028453 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 19101156952 bytes. [tf-allocator-allocation-error='']\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 365, in \r\n for videos in dataloader:\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\r\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\r\njaxlib._jax.XlaRuntimeError: RESOURCE_EXHAUSTED: Out of memory while trying to allocate 19101156952 bytes.\r\n",,terminal_output +5657,6509771,"TERMINAL",0,0,"574925535532",,terminal_output +5658,6510712,"TERMINAL",0,0,"6856:0036646643",,terminal_output +5659,6510727,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1666,0,"",shellscript,selection_mouse +5660,6511821,"TERMINAL",0,0,"796147757754",,terminal_output +5661,6512045,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1666,115,"",shellscript,content +5662,6512058,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1665,0,"",shellscript,selection_command +5663,6512256,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run dynamics-causal-8-node-3415275 at: https://wandb.ai/instant-uv/jafar/runs/3415275\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250811_173547-3415275/logs\r\n",,terminal_output +5664,6512429,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1666,0,"",shellscript,selection_command +5665,6512845,"TERMINAL",0,0,"8307258868865",,terminal_output +5666,6513092,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1666,0,"""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/ali/tokenizer/train_tokenizer_33M_minimal/3414553""",shellscript,content +5667,6513849,"TERMINAL",0,0,"918369979976",,terminal_output +5668,6514894,"TERMINAL",0,0,"10294740208304087",,terminal_output +5669,6515152,"TERMINAL",0,0,"python",,terminal_focus +5670,6515947,"TERMINAL",0,0,"135:00581191198",,terminal_output +5671,6516528,"TERMINAL",0,0,"6",,terminal_output +5672,6516786,"TERMINAL",0,0,"[?25l*[?25h",,terminal_output +5673,6517042,"TERMINAL",0,0,"24169222022509",,terminal_output +5674,6517343,"TERMINAL",0,0,"[?25l8[?25h",,terminal_output +5675,6517532,"TERMINAL",0,0,"[?25l*[?25h",,terminal_output +5676,6518029,"TERMINAL",0,0,"[?25l4[?25h",,terminal_output +5677,6518046,"TERMINAL",0,0,"3527403313314:00:00",,terminal_output +5678,6518376,"TERMINAL",0,0,"\r\n192\r\n>>> ",,terminal_output +5679,6519193,"TERMINAL",0,0,"463814424421",,terminal_output +5680,6520369,"TERMINAL",0,0,"574925535532",,terminal_output +5681,6521241,"TERMINAL",0,0,"6851036646643",,terminal_output +5682,6522222,"TERMINAL",0,0,"7407258868865",,terminal_output +5683,6523288,"TERMINAL",0,0,"918369979976",,terminal_output +5684,6524412,"TERMINAL",0,0,"20294750308405087",,terminal_output +5685,6525378,"TERMINAL",0,0,"1310581191198",,terminal_output +5686,6526463,"TERMINAL",0,0,"241692230224:00:009",,terminal_output +5687,6527488,"TERMINAL",0,0,"35275033133110",,terminal_output +5688,6528614,"TERMINAL",0,0,"463814424421",,terminal_output +5689,6529546,"TERMINAL",0,0,"574925535532",,terminal_output +5690,6530637,"TERMINAL",0,0,"6852036646643",,terminal_output +5691,6531685,"TERMINAL",0,0,"796147757754",,terminal_output 
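The RESOURCE_EXHAUSTED traces above show each `GPU_n_bfc` allocator failing on the same ~17.79 GiB buffer, and the `6*8*4` evaluated to `192` at the Python prompt reads like a global-batch calculation (plausibly per-device batch 6 across 8 nodes with 4 GPUs each, matching the `train_dynamics_8_nodes_dev.sh` script open in the editor). Since `train_dynamics.py` requires the global `batch_size` to divide the device count, the usual first response to this OOM is shrinking the global batch. A back-of-the-envelope sketch mirroring that divisibility check, with the byte figure copied from the trace:

```python
# Mirrors the batch-size validation in train_dynamics.py; the 19101156952-byte
# figure is copied from the OOM trace above. The node/GPU counts are inferred.
def per_device_batch(global_batch: int, num_devices: int) -> int:
    if global_batch % num_devices != 0:
        raise ValueError(
            f"Global batch size {global_batch} must be divisible by "
            f"number of devices {num_devices}."
        )
    return global_batch // num_devices


failed_alloc_bytes = 19_101_156_952
print(f"failed allocation: {failed_alloc_bytes / 2**30:.2f} GiB")  # ~17.79 GiB

# 8 nodes * 4 GPUs = 32 devices; per-device batch 6 -> global batch 192.
print(per_device_batch(192, 32))  # -> 6
# Halving the global batch roughly halves per-step activation memory.
print(per_device_batch(96, 32))   # -> 3
```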
+5692,6532682,"TERMINAL",0,0,"8507258868865",,terminal_output +5693,6533733,"TERMINAL",0,0,"918369979976",,terminal_output +5694,6534161,"TERMINAL",0,0,"srun",,terminal_focus +5695,6534777,"TERMINAL",0,0,"3029478:00408504:00:0087",,terminal_output +5696,6535820,"TERMINAL",0,0,"1320581191198",,terminal_output +5697,6536908,"TERMINAL",0,0,"24169224022109",,terminal_output +5698,6537902,"TERMINAL",0,0,"35276:0033133120",,terminal_output +5699,6538941,"TERMINAL",0,0,"463814424421",,terminal_output +5700,6539981,"TERMINAL",0,0,"574925535532",,terminal_output +5701,6541105,"TERMINAL",0,0,"6853036646643",,terminal_output +5702,6542129,"TERMINAL",0,0,"796147757754",,terminal_output +5703,6543127,"TERMINAL",0,0,"82:007258868865",,terminal_output +5704,6544166,"TERMINAL",0,0,"918369979976",,terminal_output +5705,6545303,"TERMINAL",0,0,"402947105084:00:001087",,terminal_output +5706,6546328,"TERMINAL",0,0,"143169225022209",,terminal_output +5707,6547351,"TERMINAL",0,0,"35271033133130",,terminal_output +5708,6548376,"TERMINAL",0,0,"463814424421",,terminal_output +5709,6549502,"TERMINAL",0,0,"574925535532",,terminal_output +5710,6550443,"TERMINAL",0,0,"6854036646643",,terminal_output +5711,6551549,"TERMINAL",0,0,"796147757754",,terminal_output +5712,6552565,"TERMINAL",0,0,"8107258868865",,terminal_output +5713,6553599,"TERMINAL",0,0,"918369979976",,terminal_output +5714,6554726,"TERMINAL",0,0,"502947208:008102087",,terminal_output +5715,6555676,"TERMINAL",0,0,"1340581191198",,terminal_output +5716,6556742,"TERMINAL",0,0,"24169224:00:0022309",,terminal_output +5717,6557822,"TERMINAL",0,0,"35272033133140",,terminal_output +5718,6558831,"TERMINAL",0,0,"463814424421",,terminal_output +5719,6559867,"TERMINAL",0,0,"574925535532",,terminal_output +5720,6560974,"TERMINAL",0,0,"6855036646643",,terminal_output +5721,6561959,"TERMINAL",0,0,"796147757754",,terminal_output +5722,6563007,"TERMINAL",0,0,"8207258868865",,terminal_output +5723,6564145,"TERMINAL",0,0,"918369979976",,terminal_output +5724,6565169,"TERMINAL",0,0,"40:00294730108203087",,terminal_output +5725,6566096,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n 
tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_type: str = ""maskgit"" # supported options: maskgit, causal\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(\n model: Genie, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n """"""Compute masked dynamics loss""""""\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@nnx.jit\ndef train_step(\n model: Genie, optimizer: nnx.Optimizer, inputs: dict\n) -> tuple[jax.Array, jax.Array, dict]:\n """"""Update state and compute metrics""""""\n\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n return dynamics_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(model)\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on 
{num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n decode=False,\n rngs=rngs,\n )\n\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n\n state = nnx.state(genie)\n print(nnx.display(state))\n \n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n optimizer = nnx.Optimizer(genie, tx)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n 
handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n # NOTE: We have to remove the (unused) tokenizer vq dropout due flax.nnx lazily initializing modules.\n # Specifically, the first dynamics model checkpoint will contain the vq dropout module,\n # but the first full restore will fail due to nnx not initializing the module when\n # dropout is set to 0.0.\n del optimizer.model.tokenizer.vq.drop\n\n # --- TRAIN LOOP ---\n dataloader = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in grain_iterator\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq 
* 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +5726,6566193,"TERMINAL",0,0,"1350581191198",,terminal_output +5727,6567068,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12361,0,"",python,selection_mouse +5728,6567203,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12360,6,"videos",python,selection_mouse +5729,6567213,"TERMINAL",0,0,"24169221022409",,terminal_output +5730,6567423,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12360,20,"videos in dataloader",python,selection_mouse +5731,6567504,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12360,21,"videos in dataloader:",python,selection_mouse +5732,6568228,"TERMINAL",0,0,"36383144244251",,terminal_output +5733,6568887,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12549,0,"",python,selection_mouse +5734,6569042,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12548,5,"recon",python,selection_mouse +5735,6569115,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12548,6,"recon,",python,selection_mouse +5736,6569137,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12548,14,"recon, metrics",python,selection_mouse +5737,6569152,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12548,27,"recon, metrics = train_step",python,selection_mouse +5738,6569206,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12548,37,"recon, metrics = train_step(optimizer",python,selection_mouse +5739,6569275,"TERMINAL",0,0,"574925535532",,terminal_output +5740,6570335,"TERMINAL",0,0,"6857:0036646643",,terminal_output +5741,6571526,"TERMINAL",0,0,"796147757754",,terminal_output +5742,6571618,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py",0,0,"# Copyright 2024 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the ""License"");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an ""AS IS"" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# pytype: skip-file\nfrom __future__ import annotations\n\nimport dataclasses\nimport functools\nimport typing as tp\n\nimport 
jax\nimport jax.experimental\nimport jax.experimental.shard_map\nfrom jax.sharding import AbstractMesh, Mesh, PartitionSpec\n\nfrom flax.nnx import (\n extract,\n filterlib,\n graph,\n statelib,\n variablelib,\n)\nfrom flax.typing import Missing\n\nF = tp.TypeVar('F', bound=tp.Callable[..., tp.Any])\nSpecs = tp.Any\nAxisName = tp.Hashable\n\n# -------------------------------\n# jit\n# -------------------------------\n\n\nclass StateSharding(extract.PrefixMapping):\n def __init__(\n self,\n filter_sharding: statelib.State\n | tp.Mapping[filterlib.Filter, tp.Any]\n | tp.Iterable[tuple[filterlib.Filter, tp.Any]],\n /,\n ):\n if isinstance(filter_sharding, statelib.State):\n filter_sharding = statelib.create_path_filters(filter_sharding) # type: ignore\n\n iterable = tuple(\n filter_sharding.items()\n if isinstance(filter_sharding, tp.Mapping)\n else filter_sharding\n )\n self._filters = tuple(filter for filter, _ in iterable)\n self._shardings = tuple(axis for _, axis in iterable)\n\n @property\n def filters(self) -> tuple[filterlib.Filter, ...]:\n return self._filters\n\n @property\n def shardings(self) -> tuple[tp.Any, ...]:\n return self._shardings\n\n def map_prefix(\n self, path: variablelib.PathParts, variable: variablelib.Variable\n ) -> tp.Any:\n for filter, sharding in zip(self.filters, self.shardings):\n predicate = filterlib.to_predicate(filter)\n if predicate(path, variable):\n return sharding\n raise ValueError(f'No axis found for {path=}, {variable=}')\n\n def __repr__(self):\n return f'StateSharding({dict(zip(self.filters, self.shardings))})'\n\n def __eq__(self, other):\n return (\n isinstance(other, StateSharding)\n and self.filters == other.filters\n and self.shardings == other.shardings\n )\n\n def __hash__(self):\n return hash((self.filters, self.shardings))\n\n\ndef _jit_split_fn(ctx: graph.SplitContext, path, prefix, x):\n if isinstance(prefix, StateSharding):\n graphdef, *states = ctx.flatten(x, *prefix.filters)\n return extract.NodeStates.from_split(graphdef, *states, metadata=prefix)\n return extract.NodeStates.from_split(*ctx.flatten(x, with_paths=False))\n\n\ndef _jit_merge_fn(ctx: graph.MergeContext, path, prefix, leaf) -> tp.Any:\n if not isinstance(leaf, extract.NodeStates):\n raise ValueError(f'Expected TreeNode, got {type(leaf)} at path {path}')\n return ctx.unflatten(leaf.graphdef, *leaf.states)\n\n\n@dataclasses.dataclass(eq=False)\nclass JitFn:\n f: tp.Callable[..., tp.Any]\n in_shardings: tp.Any\n out_shardings: tp.Any\n kwarg_shardings: tp.Any\n ctxtag: tp.Hashable\n\n def __post_init__(self):\n functools.update_wrapper(self, self.f)\n\n def __call__(self, *pure_args, **pure_kwargs):\n args, kwargs = extract.from_tree(\n (pure_args, pure_kwargs),\n merge_fn=_jit_merge_fn,\n ctxtag=self.ctxtag,\n is_inner=True,\n )\n\n out = self.f(*args, **kwargs)\n\n args_out, kwargs_out = extract.clear_non_graph_nodes((args, kwargs))\n pure_args_out, pure_kwargs_out, pure_out = extract.to_tree(\n (args_out, kwargs_out, out),\n prefix=(self.in_shardings, self.kwarg_shardings, self.out_shardings),\n ctxtag=self.ctxtag,\n split_fn=_jit_split_fn,\n )\n\n return pure_args_out, pure_kwargs_out, pure_out\n\n\n@tp.overload\ndef jit(\n *,\n in_shardings: tp.Any = None,\n out_shardings: tp.Any = None,\n static_argnums: int | tp.Sequence[int] | None = None,\n static_argnames: str | tp.Iterable[str] | None = None,\n donate_argnums: int | tp.Sequence[int] | None = None,\n donate_argnames: str | tp.Iterable[str] | None = None,\n keep_unused: bool = False,\n device: 
tp.Optional[jax.Device] = None,\n backend: tp.Optional[str] = None,\n inline: bool = False,\n abstracted_axes: tp.Optional[tp.Any] = None,\n) -> tp.Callable[[tp.Callable[..., tp.Any]], JitWrapped]: ...\n@tp.overload\ndef jit(\n fun: tp.Callable[..., tp.Any],\n *,\n in_shardings: tp.Any = None,\n out_shardings: tp.Any = None,\n static_argnums: int | tp.Sequence[int] | None = None,\n static_argnames: str | tp.Iterable[str] | None = None,\n donate_argnums: int | tp.Sequence[int] | None = None,\n donate_argnames: str | tp.Iterable[str] | None = None,\n keep_unused: bool = False,\n device: tp.Optional[jax.Device] = None,\n backend: tp.Optional[str] = None,\n inline: bool = False,\n abstracted_axes: tp.Optional[tp.Any] = None,\n) -> JitWrapped: ...\ndef jit(\n fun: tp.Callable[..., tp.Any] | type[Missing] = Missing,\n *,\n in_shardings: tp.Any = None,\n out_shardings: tp.Any = None,\n static_argnums: int | tp.Sequence[int] | None = None,\n static_argnames: str | tp.Iterable[str] | None = None,\n donate_argnums: int | tp.Sequence[int] | None = None,\n donate_argnames: str | tp.Iterable[str] | None = None,\n keep_unused: bool = False,\n device: tp.Optional[jax.Device] = None,\n backend: tp.Optional[str] = None,\n inline: bool = False,\n abstracted_axes: tp.Optional[tp.Any] = None,\n) -> JitWrapped | tp.Callable[[tp.Callable[..., tp.Any]], JitWrapped]:\n """"""\n Lifted version of ``jax.jit`` that can handle Modules / graph nodes as\n arguments.\n\n Args:\n fun: Function to be jitted. ``fun`` should be a pure function, as\n side-effects may only be executed once.\n\n The arguments and return value of ``fun`` should be arrays,\n scalars, or (nested) standard Python containers (tuple/list/dict) thereof.\n Positional arguments indicated by ``static_argnums`` can be anything at\n all, provided they are hashable and have an equality operation defined.\n Static arguments are included as part of a compilation cache key, which is\n why hash and equality operators must be defined.\n\n JAX keeps a weak reference to ``fun`` for use as a compilation cache key,\n so the object ``fun`` must be weakly-referenceable. Most :class:`Callable`\n objects will already satisfy this requirement.\n in_shardings: Pytree of structure matching that of arguments to ``fun``,\n with all actual arguments replaced by resource assignment specifications.\n It is also valid to specify a pytree prefix (e.g. one value in place of a\n whole subtree), in which case the leaves get broadcast to all values in\n that subtree.\n\n The ``in_shardings`` argument is optional. JAX will infer the shardings\n from the input :py:class:`jax.Array`'s and defaults to replicating the input\n if the sharding cannot be inferred.\n\n The valid resource assignment specifications are:\n - :py:class:`Sharding`, which will decide how the value\n will be partitioned. With this, using a mesh context manager is not\n required.\n - :py:obj:`None`, will give JAX the freedom to choose whatever sharding\n it wants.\n For in_shardings, JAX will mark is as replicated but this behavior\n can change in the future.\n For out_shardings, we will rely on the XLA GSPMD partitioner to\n determine the output shardings.\n\n The size of every dimension has to be a multiple of the total number of\n resources assigned to it. This is similar to pjit's in_shardings.\n out_shardings: Like ``in_shardings``, but specifies resource\n assignment for function outputs. This is similar to pjit's\n out_shardings.\n\n The ``out_shardings`` argument is optional. 
If not specified, :py:func:`jax.jit`\n will use GSPMD's sharding propagation to figure out what the sharding of the\n output(s) should be.\n static_argnums: An optional int or collection of ints that specify which\n positional arguments to treat as static (compile-time constant).\n Operations that only depend on static arguments will be constant-folded in\n Python (during tracing), and so the corresponding argument values can be\n any Python object.\n\n Static arguments should be hashable, meaning both ``__hash__`` and\n ``__eq__`` are implemented, and immutable. Calling the jitted function\n with different values for these constants will trigger recompilation.\n Arguments that are not arrays or containers thereof must be marked as\n static.\n\n If neither ``static_argnums`` nor ``static_argnames`` is provided, no\n arguments are treated as static. If ``static_argnums`` is not provided but\n ``static_argnames`` is, or vice versa, JAX uses\n :code:`inspect.signature(fun)` to find any positional arguments that\n correspond to ``static_argnames``\n (or vice versa). If both ``static_argnums`` and ``static_argnames`` are\n provided, ``inspect.signature`` is not used, and only actual\n parameters listed in either ``static_argnums`` or ``static_argnames`` will\n be treated as static.\n static_argnames: An optional string or collection of strings specifying\n which named arguments to treat as static (compile-time constant). See the\n comment on ``static_argnums`` for details. If not\n provided but ``static_argnums`` is set, the default is based on calling\n ``inspect.signature(fun)`` to find corresponding named arguments.\n donate_argnums: Specify which positional argument buffers are ""donated"" to\n the computation. It is safe to donate argument buffers if you no longer\n need them once the computation has finished. In some cases XLA can make\n use of donated buffers to reduce the amount of memory needed to perform a\n computation, for example recycling one of your input buffers to store a\n result. You should not reuse buffers that you donate to a computation, JAX\n will raise an error if you try to. By default, no argument buffers are\n donated.\n\n If neither ``donate_argnums`` nor ``donate_argnames`` is provided, no\n arguments are donated. If ``donate_argnums`` is not provided but\n ``donate_argnames`` is, or vice versa, JAX uses\n :code:`inspect.signature(fun)` to find any positional arguments that\n correspond to ``donate_argnames``\n (or vice versa). If both ``donate_argnums`` and ``donate_argnames`` are\n provided, ``inspect.signature`` is not used, and only actual\n parameters listed in either ``donate_argnums`` or ``donate_argnames`` will\n be donated.\n\n For more details on buffer donation see the\n `FAQ `_.\n donate_argnames: An optional string or collection of strings specifying\n which named arguments are donated to the computation. See the\n comment on ``donate_argnums`` for details. If not\n provided but ``donate_argnums`` is set, the default is based on calling\n ``inspect.signature(fun)`` to find corresponding named arguments.\n keep_unused: If `False` (the default), arguments that JAX determines to be\n unused by `fun` *may* be dropped from resulting compiled XLA executables.\n Such arguments will not be transferred to the device nor provided to the\n underlying executable. If `True`, unused arguments will not be pruned.\n device: This is an experimental feature and the API is likely to change.\n Optional, the Device the jitted function will run on. 
(Available devices\n can be retrieved via :py:func:`jax.devices`.) The default is inherited\n from XLA's DeviceAssignment logic and is usually to use\n ``jax.devices()[0]``.\n backend: This is an experimental feature and the API is likely to change.\n Optional, a string representing the XLA backend: ``'cpu'``, ``'gpu'``, or\n ``'tpu'``.\n inline: Specify whether this function should be inlined into enclosing\n jaxprs (rather than being represented as an application of the xla_call\n primitive with its own subjaxpr). Default False.\n\n Returns:\n A wrapped version of ``fun``, set up for just-in-time compilation.\n """"""\n\n if fun is Missing:\n return functools.partial(\n jit,\n in_shardings=in_shardings,\n out_shardings=out_shardings,\n static_argnums=static_argnums,\n static_argnames=static_argnames,\n donate_argnums=donate_argnums,\n donate_argnames=donate_argnames,\n keep_unused=keep_unused,\n device=device,\n backend=backend,\n inline=inline,\n abstracted_axes=abstracted_axes,\n ) # type: ignore[return-value]\n\n return JitWrapped(\n fun,\n in_shardings=in_shardings,\n out_shardings=out_shardings,\n static_argnums=static_argnums,\n static_argnames=static_argnames,\n donate_argnums=donate_argnums,\n donate_argnames=donate_argnames,\n keep_unused=keep_unused,\n device=device,\n backend=backend,\n inline=inline,\n abstracted_axes=abstracted_axes,\n )\n\n\nclass JitWrapped:\n """"""A function ready to be traced, lowered, and compiled.\n\n This protocol reflects the output of functions such as\n ``jax.jit``. Calling it results in JIT (just-in-time) lowering,\n compilation, and execution. It can also be explicitly lowered prior\n to compilation, and the result compiled prior to execution.\n """"""\n\n def __init__(\n self,\n fun: tp.Callable[..., tp.Any],\n in_shardings: tp.Any,\n out_shardings: tp.Any,\n static_argnums: int | tp.Sequence[int] | None = None,\n static_argnames: str | tp.Iterable[str] | None = None,\n donate_argnums: int | tp.Sequence[int] | None = None,\n donate_argnames: str | tp.Iterable[str] | None = None,\n keep_unused: bool = False,\n device: tp.Optional[jax.Device] = None,\n backend: tp.Optional[str] = None,\n inline: bool = False,\n abstracted_axes: tp.Optional[tp.Any] = None,\n ):\n functools.update_wrapper(self, fun)\n kwarg_shardings = None\n self.jax_in_shardings = jax.tree.map(\n lambda x: extract.NodeStates.from_prefixes(x.shardings, metadata=x)\n if isinstance(x, StateSharding)\n else x,\n in_shardings,\n )\n self.jax_out_shardings = jax.tree.map(\n lambda x: extract.NodeStates.from_prefixes(x.shardings, metadata=x)\n if isinstance(x, StateSharding)\n else x,\n out_shardings,\n )\n\n self.jitted_fn = jax.jit(\n JitFn(fun, in_shardings, out_shardings, kwarg_shardings, self),\n in_shardings=self.jax_in_shardings,\n out_shardings=(\n self.jax_in_shardings,\n kwarg_shardings,\n self.jax_out_shardings,\n ),\n static_argnums=static_argnums,\n static_argnames=static_argnames,\n donate_argnums=donate_argnums,\n donate_argnames=donate_argnames,\n keep_unused=keep_unused,\n device=device,\n backend=backend,\n inline=inline,\n abstracted_axes=abstracted_axes,\n )\n self.in_shardings = in_shardings\n self.out_shardings = out_shardings\n self.kwarg_shardings = kwarg_shardings\n self.static_argnums = static_argnums\n\n # implement descriptor protocol so that we can use this as a method\n def __get__(self, obj, objtype=None):\n if obj is None:\n return self\n return functools.partial(self, obj)\n\n def _get_pure_args_kwargs(self, args, kwargs):\n pure_args, pure_kwargs = 
extract.to_tree(\n (args, kwargs),\n prefix=(self.in_shardings, self.kwarg_shardings)\n if self.in_shardings is not None or self.kwarg_shardings is not None\n else None,\n split_fn=_jit_split_fn,\n check_aliasing=self.in_shardings is not None\n or self.kwarg_shardings is not None,\n ctxtag=self,\n )\n return pure_args, pure_kwargs\n\n def _get_non_pure_out(self, pure_args_out, pure_kwargs_out, pure_out, /):\n _args_out, _kwargs_out, out = extract.from_tree(\n (pure_args_out, pure_kwargs_out, pure_out),\n merge_fn=_jit_merge_fn,\n is_inner=False,\n ctxtag=self,\n )\n return out\n\n def __call__(self, *args, **kwargs):\n # run dynamic_cache_context before update_context\n with graph.update_context(self):\n pure_args, pure_kwargs = self._get_pure_args_kwargs(args, kwargs)\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\n *pure_args, **pure_kwargs\n )\n out = self._get_non_pure_out(pure_args_out, pure_kwargs_out, pure_out)\n return out\n\n def eval_shape(self, *args, **kwargs):\n """"""See ``jax.eval_shape``.""""""\n args, kwargs = graph.clone((args, kwargs))\n with graph.update_context(self):\n pure_args, pure_kwargs = self._get_pure_args_kwargs(args, kwargs)\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn.eval_shape(\n *pure_args, **pure_kwargs\n )\n out = self._get_non_pure_out(pure_args_out, pure_kwargs_out, pure_out)\n return out\n\n def trace(self, *args, **kwargs) -> Traced:\n """"""Trace this function explicitly for the given arguments.\n\n A traced function is staged out of Python and translated to a jaxpr. It is\n ready for lowering but not yet lowered.\n\n Returns:\n A ``Traced`` instance representing the tracing.\n """"""\n with graph.update_context(self):\n pure_args, pure_kwargs = self._get_pure_args_kwargs(args, kwargs)\n traced = self.jitted_fn.trace(*pure_args, **pure_kwargs)\n return Traced(traced, self)\n\n def lower(self, *args, **kwargs) -> Lowered:\n """"""Lower this function explicitly for the given arguments.\n\n This is a shortcut for ``self.trace(*args, **kwargs).lower()``.\n\n A lowered function is staged out of Python and translated to a\n compiler's input language, possibly in a backend-dependent\n manner. It is ready for compilation but not yet compiled.\n\n Returns:\n A ``Lowered`` instance representing the lowering.\n """"""\n with graph.update_context(self):\n pure_args, pure_kwargs = self._get_pure_args_kwargs(args, kwargs)\n lowered = self.jitted_fn.lower(*pure_args, **pure_kwargs)\n return Lowered(lowered, self)\n\n\nclass Stage:\n args_info: tp.Any # PyTree of ArgInfo\n\n @property\n def _inner_obj(self) -> tp.Any:\n raise NotImplementedError\n\n @property\n def in_tree(self) -> jax.tree_util.PyTreeDef:\n return self._inner_obj.in_tree\n\n @property\n def in_avals(self):\n return self._inner_obj.in_avals\n\n @property\n def donate_argnums(self):\n return self._inner_obj.donate_argnums\n\n@dataclasses.dataclass(frozen=True, slots=True)\nclass Compiled(Stage):\n """"""Compiled representation of a function specialized to types/values.\n\n A compiled computation is associated with an executable and the\n remaining information needed to execute it. 
It also provides a\n common API for querying properties of compiled computations across\n JAX's various compilation paths and backends.\n """"""\n\n compiled: jax.stages.Compiled\n jit_wrapped: JitWrapped\n\n @property\n def _inner_obj(self):\n return self.compiled\n\n @property\n def args_info(self) -> tp.Any: # PyTree of ArgInfo\n raise self.compiled.args_info\n\n @staticmethod\n def call(*args, **kwargs):\n raise NotImplementedError\n\n def __call__(self, *args, **kwargs):\n with graph.update_context(self.jit_wrapped):\n pure_args, pure_kwargs = self.jit_wrapped._get_pure_args_kwargs(\n args, kwargs\n )\n pure_args_out, pure_kwargs_out, pure_out = self.compiled(\n *pure_args, **pure_kwargs\n )\n out = self.jit_wrapped._get_non_pure_out(\n pure_args_out, pure_kwargs_out, pure_out\n )\n return out\n\n @property\n def out_tree(self) -> jax.tree_util.PyTreeDef:\n return self.compiled.out_tree\n\n def as_text(self) -> str | None:\n """"""A human-readable text representation of this executable.\n\n Intended for visualization and debugging purposes. This is not a valid nor\n reliable serialization.\n\n Returns ``None`` if unavailable, e.g. based on backend, compiler, or\n runtime.\n """"""\n return self.compiled.as_text()\n\n def cost_analysis(self) -> tp.Any | None:\n """"""A summary of execution cost estimates.\n\n Intended for visualization and debugging purposes. The object output by\n this is some simple data structure that can easily be printed or serialized\n (e.g. nested dicts, lists, and tuples with numeric leaves). However, its\n structure can be arbitrary: it may be inconsistent across versions of JAX\n and jaxlib, or even across invocations.\n\n Returns ``None`` if unavailable, e.g. based on backend, compiler, or\n runtime.\n """"""\n return self.compiled.cost_analysis()\n\n def memory_analysis(self) -> tp.Any | None:\n """"""A summary of estimated memory requirements.\n\n Intended for visualization and debugging purposes. The object output by\n this is some simple data structure that can easily be printed or serialized\n (e.g. nested dicts, lists, and tuples with numeric leaves). However, its\n structure can be arbitrary: it may be inconsistent across versions of JAX\n and jaxlib, or even across invocations.\n\n Returns ``None`` if unavailable, e.g. based on backend, compiler, or\n runtime.\n """"""\n return self.compiled.memory_analysis()\n\n def runtime_executable(self) -> tp.Any | None:\n """"""An arbitrary object representation of this executable.\n\n Intended for debugging purposes. This is not valid nor reliable\n serialization. The output has no guarantee of consistency across\n invocations.\n\n Returns ``None`` if unavailable, e.g. based on backend, compiler, or\n runtime.\n """"""\n return self.compiled.runtime_executable()\n\n @property\n def input_shardings(self): # PyTree[sharding.Sharding]\n return self.compiled.input_shardings\n\n @property\n def output_shardings(self): # PyTree[sharding.Sharding]\n return self.compiled.output_shardings\n\n @property\n def input_layouts(self):\n return self.compiled.input_formats\n\n\n@dataclasses.dataclass(frozen=True, slots=True)\nclass Lowered(Stage):\n """"""Lowering of a function specialized to argument types and values.\n\n A lowering is a computation ready for compilation. This class\n carries a lowering together with the remaining information needed to\n later compile and execute it. 
It also provides a common API for\n querying properties of lowered computations across JAX's various\n lowering paths (:func:`~jax.jit`, :func:`~jax.pmap`, etc.).\n """"""\n\n lowered: jax.stages.Lowered\n jit_wrapped: JitWrapped\n\n @property\n def _inner_obj(self):\n return self.lowered\n\n @property\n def args_info(self) -> tp.Any: # PyTree of ArgInfo\n return self.lowered.args_info\n\n @property\n def out_tree(self):\n return self.lowered.out_tree\n\n @classmethod\n def from_flat_info(\n cls,\n lowering: tp.Any, # type: ignore[name-defined]\n in_tree: jax.tree_util.PyTreeDef,\n in_avals,\n donate_argnums: tuple[int, ...],\n out_tree: jax.tree_util.PyTreeDef,\n no_kwargs: bool = False,\n ):\n raise NotImplementedError\n\n def compile(\n self, compiler_options: jax.stages.CompilerOptions | None = None\n ) -> Compiled:\n """"""Compile, returning a corresponding ``Compiled`` instance.""""""\n compiled = self.lowered.compile(compiler_options)\n return Compiled(compiled, self.jit_wrapped)\n\n def as_text(\n self, dialect: str | None = None, *, debug_info: bool = False\n ) -> str:\n """"""A human-readable text representation of this lowering.\n\n Intended for visualization and debugging purposes. This need not be a valid\n nor reliable serialization.\n Use `jax.export` if you want reliable and portable serialization.\n\n Args:\n dialect: Optional string specifying a lowering dialect (e.g. ""stablehlo"",\n or ""hlo"").\n debug_info: Whether to include debugging information,\n e.g., source location.\n """"""\n return self.lowered.as_text(dialect=dialect, debug_info=debug_info)\n\n def compiler_ir(self, dialect: str | None = None) -> tp.Any | None:\n """"""An arbitrary object representation of this lowering.\n\n Intended for debugging purposes. This is not a valid nor reliable\n serialization. The output has no guarantee of consistency across\n invocations.\n Use `jax.export` if you want reliable and portable serialization.\n\n Returns ``None`` if unavailable, e.g. based on backend, compiler, or\n runtime.\n\n Args:\n dialect: Optional string specifying a lowering dialect (e.g. ""stablehlo"",\n or ""hlo"").\n """"""\n return self.lowered.compiler_ir(dialect=dialect)\n\n def cost_analysis(self) -> tp.Any | None:\n """"""A summary of execution cost estimates.\n\n Intended for visualization and debugging purposes. The object output by\n this is some simple data structure that can easily be printed or serialized\n (e.g. nested dicts, lists, and tuples with numeric leaves). However, its\n structure can be arbitrary: it may be inconsistent across versions of JAX\n and jaxlib, or even across invocations.\n\n Returns ``None`` if unavailable, e.g. based on backend, compiler, or\n runtime.\n """"""\n return self.lowered.cost_analysis()\n\n@dataclasses.dataclass(frozen=True, slots=True)\nclass Traced(Stage):\n """"""Traced form of a function specialized to argument types and values.\n\n A traced computation is ready for lowering. This class carries the\n traced representation with the remaining information needed to later\n lower, compile, and execute it.\n """"""\n\n traced: jax.stages.Traced\n jit_wrapped: JitWrapped\n\n @property\n def _inner_obj(self):\n return self.traced\n\n @property\n def out_info(self):\n return self.traced.out_info\n\n def lower(\n self, *, lowering_platforms: tuple[str, ...] 
| None = None\n ) -> Lowered:\n """"""Lower to compiler input, returning a ``Lowered`` instance.""""""\n lowered = self.traced.lower(lowering_platforms=lowering_platforms)\n return Lowered(lowered, self.jit_wrapped)\n\n\n# -------------------------------\n# shard_map\n# -------------------------------\n\n# TODO: create StateSpec and consider enabling a mode that does\n# not use filters during split for performance. Overall there might\n# be performance limitations for using shard_map at a top-level\n\n\n@dataclasses.dataclass(eq=False)\nclass ShardMapFn:\n f: tp.Callable[..., tp.Any]\n in_specs: tp.Any\n out_specs: tp.Any\n kwarg_specs: tp.Any\n ctxtag: tp.Hashable\n\n def __post_init__(self):\n functools.update_wrapper(self, self.f)\n\n def __call__(self, *pure_args, **pure_kwargs):\n args, kwargs = extract.from_tree(\n (pure_args, pure_kwargs),\n merge_fn=_jit_merge_fn,\n ctxtag=self.ctxtag,\n is_inner=True,\n )\n\n out = self.f(*args, **kwargs)\n\n args_out, kwargs_out = extract.clear_non_graph_nodes((args, kwargs))\n pure_args_out, pure_kwargs_out, pure_out = extract.to_tree(\n (args_out, kwargs_out, out),\n prefix=(self.in_specs, self.kwarg_specs, self.out_specs),\n ctxtag=self.ctxtag,\n split_fn=_jit_split_fn,\n )\n\n return pure_args_out, pure_kwargs_out, pure_out\n\n\n@tp.overload\ndef shard_map(\n f: F,\n *,\n mesh: Mesh | AbstractMesh,\n in_specs: Specs,\n out_specs: Specs,\n check_rep: bool = True,\n auto: frozenset[AxisName] = frozenset(),\n) -> F: ...\n@tp.overload\ndef shard_map(\n *,\n mesh: Mesh | AbstractMesh,\n in_specs: Specs,\n out_specs: Specs,\n check_rep: bool = True,\n auto: frozenset[AxisName] = frozenset(),\n) -> tp.Callable[[F], F]: ...\ndef shard_map(\n f: F | type[Missing] = Missing,\n *,\n mesh: Mesh | AbstractMesh,\n in_specs: Specs,\n out_specs: Specs,\n check_rep: bool = True,\n auto: frozenset[AxisName] = frozenset(),\n) -> F | tp.Callable[[F], F]:\n """"""\n Lifted version of\n `jax.experimental.shard_map.shard_map `_\n that can handle Modules / graph nodes as arguments.\n\n Simple data parallel example::\n\n import jax\n import jax.numpy as jnp\n from flax import nnx\n from jax.sharding import PartitionSpec as P\n\n mesh = jax.sharding.Mesh(jax.local_devices(), ('data',))\n\n m = nnx.Linear(2, 3, rngs=nnx.Rngs(0))\n x = jnp.ones((32, 2))\n\n @nnx.shard_map(\n mesh=mesh, in_specs=(P(None), P('data')), out_specs=P('data')\n )\n def f(m, x):\n return m(x)\n\n y = f(m, x)\n\n jax.debug.visualize_array_sharding(y)\n\n Notice that here we simply used some ``PartitionSpec`` to define the spec\n for the whole model and data. This works for simple cases but if we need\n to assign different ``PartitionSpec`` to different parts of the model we\n need to use ``StateSharding`` and create some filters that allow us to target\n specific parts of the model.
Here's an example of how to do tensor parallelism\n for a simple MLP block using ``StateSharding`` and filters::\n\n mesh = jax.sharding.Mesh(jax.local_devices(), ('model',))\n\n class MLP(nnx.Module):\n def __init__(self, din, dhidden, dout, *, rngs: nnx.Rngs):\n self.linear1 = nnx.Linear(din, dhidden, use_bias=False, rngs=rngs)\n self.linear2 = nnx.Linear(dhidden, dout, use_bias=False, rngs=rngs)\n\n def __call__(self, x):\n return self.linear2(jax.nn.relu(self.linear1(x)))\n\n m = MLP(2, 64, 3, rngs=nnx.Rngs(0))\n x = jnp.ones((32, 2))\n\n def path_ends_with(*path_suffix): # custom filter\n return lambda path, value: path[-len(path_suffix):] == path_suffix\n\n model_spec = nnx.StateSharding({\n path_ends_with('linear1', 'kernel'): P(None, 'model'),\n path_ends_with('linear2', 'kernel'): P('model', None),\n })\n\n @nnx.shard_map(mesh=mesh, in_specs=(model_spec, P(None)), out_specs=P(None))\n def f(m, x):\n y = m(x)\n return jax.lax.psum(y, 'model')\n\n y = f(m, x)\n\n jax.debug.visualize_array_sharding(m.linear1.kernel.value)\n jax.debug.visualize_array_sharding(m.linear2.kernel.value)\n\n\n Alternatively, a ``State`` object with the exact ``PartitionSpec`` for each\n state can be passed to ``StateSharding``::\n\n mesh = jax.sharding.Mesh(jax.local_devices(), ('model',))\n\n class MLP(nnx.Module):\n def __init__(self, din, dhidden, dout, *, rngs: nnx.Rngs):\n self.linear1 = nnx.Linear(din, dhidden, use_bias=False, rngs=rngs)\n self.linear2 = nnx.Linear(dhidden, dout, use_bias=False, rngs=rngs)\n\n def __call__(self, x):\n return self.linear2(jax.nn.relu(self.linear1(x)))\n\n m = MLP(2, 64, 3, rngs=nnx.Rngs(0))\n x = jnp.ones((32, 2))\n\n model_spec = nnx.State(\n {\n 'linear1': {'kernel': P(None, 'model')},\n 'linear2': {'kernel': P('model', None)},\n }\n )\n\n @nnx.shard_map(\n mesh=mesh,\n in_specs=(nnx.StateSharding(model_spec), P(None)),\n out_specs=P(None),\n )\n def f(m, x):\n y = m(x)\n return jax.lax.psum(y, 'model')\n\n y = f(m, x)\n\n jax.debug.visualize_array_sharding(m.linear1.kernel.value)\n jax.debug.visualize_array_sharding(m.linear2.kernel.value)\n\n Here ``model_spec`` was created manually but you can also automate\n this process by using ``nnx.get_partition_spec`` to\n create it for you (see\n `Scale up on multiple devices `_\n ).\n\n Args:\n f: callable to be mapped. Each application of ``f``, or ""instance"" of ``f``,\n takes as input a shard of the mapped-over arguments and produces a shard\n of the output.\n mesh: a ``jax.sharding.Mesh`` representing the array of devices over which\n to shard the data and on which to execute instances of ``f``. The names of\n the ``Mesh`` can be used in collective communication operations in ``f``.\n This is typically created by a utility function like\n :func:`jax.experimental.mesh_utils.create_device_mesh`.\n in_specs: a pytree with ``jax.sharding.PartitionSpec`` or ``nnx.StateSharding``\n (mapping substates to ``PartitionSpec``s) instances as leaves,\n with a tree structure that is a tree prefix of the\n args tuple to be mapped over. Similar to ``jax.sharding.NamedSharding``,\n each ``PartitionSpec`` represents how the corresponding argument (or subtree\n of arguments) should be sharded along the named axes of ``mesh``. In each\n ``PartitionSpec``, mentioning a ``mesh`` axis name at a position expresses sharding\n the corresponding argument array axis along that positional axis; not\n mentioning an axis name expresses replication.
If an argument, or argument\n subtree, has a corresponding spec of None, that argument is not sharded.\n out_specs: a pytree with ``jax.sharding.PartitionSpec`` or ``nnx.StateSharding``\n (mapping substates to ``PartitionSpec``s) instances as leaves, with a tree structure\n that is a tree prefix of the output of ``f``.\n Each ``PartitionSpec`` represents how the corresponding output shards should be\n concatenated. In each ``PartitionSpec``, mentioning a ``mesh`` axis name at\n a position expresses concatenation of that mesh axis's shards along the\n corresponding positional axis. Not mentioning a ``mesh`` axis name\n expresses a promise that the output values are equal along that mesh axis,\n and that rather than concatenating, only a single value should be produced.\n check_rep: If True (default), enable additional validity checks and automatic\n differentiation optimizations. The validity checks concern whether any mesh\n axis names not mentioned in ``out_specs`` are consistent with how the outputs\n of ``f`` are replicated. Must be set to False if using a Pallas kernel in ``f``.\n auto: (experimental) an optional set of axis names from ``mesh`` over which we\n do not shard the data or map the function, but rather we allow the\n compiler to control sharding. These names cannot be used in ``in_specs``,\n ``out_specs``, or in communication collectives in ``f``.\n\n Returns:\n A callable that applies the input function ``f`` across data sharded according to\n the ``mesh`` and ``in_specs``.\n """"""\n if f is Missing:\n return functools.partial(\n shard_map,\n mesh=mesh,\n in_specs=in_specs,\n out_specs=out_specs,\n check_rep=check_rep,\n auto=auto,\n ) # type: ignore[return-value]\n assert not isinstance(f, type)\n\n kwarg_specs = PartitionSpec()\n jax_in_specs = jax.tree.map(\n lambda x: extract.NodeStates(\n _graphdef=PartitionSpec(), # type: ignore[arg-type]\n states=x.shardings,\n metadata=x,\n )\n if isinstance(x, StateSharding)\n else x,\n in_specs,\n )\n jax_out_specs = jax.tree.map(\n lambda x: extract.NodeStates(\n _graphdef=PartitionSpec(), # type: ignore[arg-type]\n states=x.shardings,\n metadata=x,\n )\n if isinstance(x, StateSharding)\n else x,\n out_specs,\n )\n\n @functools.wraps(f)\n def shard_map_wrapper(*args, **kwargs):\n # run dynamic_cache_context before update_context\n with graph.update_context(shard_map_wrapper):\n pure_args, pure_kwargs = extract.to_tree(\n (args, kwargs),\n prefix=(in_specs, kwarg_specs)\n if in_specs is not None or kwarg_specs is not None\n else None,\n split_fn=_jit_split_fn,\n check_aliasing=in_specs is not None or kwarg_specs is not None,\n ctxtag=shard_map_wrapper,\n )\n pure_args_out, pure_kwargs_out, pure_out = shard_map_fn(\n *pure_args, **pure_kwargs\n )\n _args_out, _kwargs_out, out = extract.from_tree(\n (pure_args_out, pure_kwargs_out, pure_out),\n merge_fn=_jit_merge_fn,\n is_inner=False,\n ctxtag=shard_map_wrapper,\n )\n return out\n\n shard_map_fn = jax.experimental.shard_map.shard_map(\n ShardMapFn(f, in_specs, out_specs, kwarg_specs, shard_map_wrapper),\n mesh=mesh,\n in_specs=jax_in_specs,\n out_specs=(jax_in_specs, kwarg_specs, jax_out_specs), # type: ignore\n check_rep=check_rep,\n auto=auto,\n )\n\n shard_map_wrapper.inner = shard_map_fn # type: ignore\n\n return shard_map_wrapper # type: ignore\n",python,tab +5743,6572424,"TERMINAL",0,0,"8307258868865",,terminal_output +5744,6573469,"TERMINAL",0,0,"918369979976",,terminal_output +5745,6574515,"TERMINAL",0,0,"10294740208304087",,terminal_output
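The staging classes in this captured file mirror the ``jax.stages`` AOT pipeline: ``JitWrapped.trace`` produces a ``Traced``, ``Traced.lower`` a ``Lowered``, ``Lowered.compile`` a ``Compiled``, and every hop funnels arguments through the same pure-args round trip as ``__call__``. A minimal sketch of walking that pipeline, assuming a flax version where ``nnx.jit`` returns the ``JitWrapped`` defined above (``model`` and ``forward`` are illustrative names, not part of the recording):

# Sketch only: assumes nnx.jit returns the JitWrapped object from the
# captured compilation.py; names below are illustrative.
import jax.numpy as jnp
from flax import nnx

model = nnx.Linear(2, 3, rngs=nnx.Rngs(0))

@nnx.jit
def forward(m, x):
  return m(x)

x = jnp.ones((4, 2))
traced = forward.trace(model, x)  # Traced: staged out to a jaxpr
lowered = traced.lower()          # Lowered: compiler input (e.g. StableHLO)
compiled = lowered.compile()      # Compiled: executable specialized to these types
y = compiled(model, x)            # same pure-args round trip as forward(model, x)
text = compiled.as_text()         # human-readable dump; may be None on some backends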
+5746,6574910,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py",16501,0,"",python,selection_mouse +5747,6575091,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py",16497,4,"self",python,selection_mouse +5748,6575372,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py",16511,0,"",python,selection_mouse +5749,6575514,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py",16502,9,"jitted_fn",python,selection_mouse +5750,6575565,"TERMINAL",0,0,"136:00581191198",,terminal_output +5751,6576606,"TERMINAL",0,0,"24169222022509",,terminal_output +5752,6577474,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py",0,0,"",python,tab +5753,6577651,"TERMINAL",0,0,"3527403313311:00",,terminal_output +5754,6577811,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",0,0,"",python,tab +5755,6578789,"TERMINAL",0,0,"463814424421",,terminal_output +5756,6579135,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12570,0,"",python,selection_mouse +5757,6579822,"TERMINAL",0,0,"574925535532",,terminal_output +5758,6580840,"TERMINAL",0,0,"6851036646643",,terminal_output +5759,6580904,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12393,0,"",python,selection_mouse +5760,6581052,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12382,12," ",python,selection_mouse +5761,6581198,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12382,48," # --- Train step ---\n rng",python,selection_mouse +5762,6581219,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12382,49," # --- Train step ---\n rng,",python,selection_mouse +5763,6581232,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12382,87," # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n",python,selection_mouse +5764,6581289,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12382,108," # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = ",python,selection_mouse +5765,6581290,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12382,112," # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict",python,selection_mouse +5766,6581290,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12382,119," # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos",python,selection_mouse +5767,6581346,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12382,180," # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics",python,selection_mouse +5768,6581347,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12382,182," # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics 
=",python,selection_mouse +5769,6581353,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12382,193," # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics = train_step",python,selection_mouse +5770,6581410,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12382,274," # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n metrics[""lr""] = lr_schedule(step",python,selection_mouse +5771,6581430,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12382,275," # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n metrics[""lr""] = lr_schedule(step)",python,selection_mouse +5772,6581834,"TERMINAL",0,0,"796147757754",,terminal_output +5773,6581979,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12569,0,"",python,selection_mouse +5774,6582081,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12565,10,"train_step",python,selection_mouse +5775,6582894,"TERMINAL",0,0,"8407258868865",,terminal_output +5776,6583935,"TERMINAL",0,0,"918369979976",,terminal_output +5777,6585036,"TERMINAL",0,0,"20294750308405087",,terminal_output +5778,6586060,"TERMINAL",0,0,"1310581191198",,terminal_output +5779,6587084,"TERMINAL",0,0,"241692230221:009",,terminal_output +5780,6588209,"TERMINAL",0,0,"35275033133110",,terminal_output +5781,6589238,"TERMINAL",0,0,"463814424421",,terminal_output +5782,6590260,"TERMINAL",0,0,"574925535532",,terminal_output +5783,6591284,"TERMINAL",0,0,"6962147757754",,terminal_output +5784,6592308,"TERMINAL",0,0,"8507258868865",,terminal_output +5785,6593436,"TERMINAL",0,0,"918369979976",,terminal_output +5786,6594457,"TERMINAL",0,0,"3029479:00408501:0087",,terminal_output +5787,6595430,"TERMINAL",0,0,"1320581191198",,terminal_output +5788,6596504,"TERMINAL",0,0,"24169224022109",,terminal_output +5789,6597516,"TERMINAL",0,0,"35277:0033133120",,terminal_output +5790,6598663,"TERMINAL",0,0,"463814424421",,terminal_output +5791,6599687,"TERMINAL",0,0,"574925535532",,terminal_output +5792,6600714,"TERMINAL",0,0,"6853036646643",,terminal_output +5793,6601730,"TERMINAL",0,0,"796147757754",,terminal_output +5794,6602753,"TERMINAL",0,0,"83:007258868865",,terminal_output +5795,6603802,"TERMINAL",0,0,"918369979976",,terminal_output +5796,6604596,"TERMINAL",0,0,"[?25lG[?25h",,terminal_output +5797,6604832,"TERMINAL",0,0,"402947105081:001087",,terminal_output +5798,6605879,"TERMINAL",0,0,"1330581191198",,terminal_output +5799,6606951,"TERMINAL",0,0,"24169225022209",,terminal_output +5800,6607982,"TERMINAL",0,0,"35271033133130",,terminal_output +5801,6609098,"TERMINAL",0,0,"463814424421",,terminal_output +5802,6610132,"TERMINAL",0,0,"574925535532",,terminal_output +5803,6611150,"TERMINAL",0,0,"6854036646643",,terminal_output +5804,6612174,"TERMINAL",0,0,"796147757754",,terminal_output +5805,6613216,"TERMINAL",0,0,"8107258868865",,terminal_output +5806,6613288,"TERMINAL",0,0,"^Csrun: interrupt (one more within 1 sec to abort)\r\nsrun: StepId=3415275.0 tasks 0-31: running\r\n\r\n]0;tum_cte0515@hkn0522:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0522 jafar]$ ",,terminal_output 
+5807,6613526,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0522:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0522 jafar]$ ",,terminal_output +5808,6614324,"TERMINAL",0,0,"92947209:008102087",,terminal_output +5809,6615298,"TERMINAL",0,0,"51340581191198",,terminal_output +5810,6616341,"TERMINAL",0,0,"24169221:0022309",,terminal_output +5811,6616970,"TERMINAL",0,0,"srun",,terminal_focus +5812,6617430,"TERMINAL",0,0,"35272033133140",,terminal_output +5813,6618540,"TERMINAL",0,0,"srun",,terminal_focus +5814,6618557,"TERMINAL",0,0,"463814424421",,terminal_output +5815,6619481,"TERMINAL",0,0,"574925535532",,terminal_output +5816,6620531,"TERMINAL",0,0,"6855036646643",,terminal_output +5817,6621595,"TERMINAL",0,0,"796147757754",,terminal_output +5818,6622723,"TERMINAL",0,0,"8207258868865",,terminal_output +5819,6623677,"TERMINAL",0,0,"918369979976",,terminal_output +5820,6624770,"TERMINAL",0,0,"1:00294730108203087",,terminal_output +5821,6625758,"TERMINAL",0,0,"1350581191198",,terminal_output +5822,6626821,"TERMINAL",0,0,"24169221022409",,terminal_output +5823,6627849,"TERMINAL",0,0,"35273033133150",,terminal_output +5824,6628893,"TERMINAL",0,0,"463814424421",,terminal_output +5825,6629937,"TERMINAL",0,0,"574925535532",,terminal_output +5826,6630980,"TERMINAL",0,0,"6858:0036646643",,terminal_output +5827,6631456,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",0,0,"",python,tab +5828,6632039,"TERMINAL",0,0,"796147757754",,terminal_output +5829,6633098,"TERMINAL",0,0,"8307258868865",,terminal_output +5830,6633275,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12373,0,"",python,selection_mouse +5831,6633447,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12370,10,"dataloader",python,selection_mouse +5832,6634113,"TERMINAL",0,0,"918369979976",,terminal_output +5833,6635213,"TERMINAL",0,0,"10294740208304087",,terminal_output +5834,6635846,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12176,0,"",python,selection_mouse +5835,6636028,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12164,34,"make_array_from_process_local_data",python,selection_mouse +5836,6636210,"TERMINAL",0,0,"137:00581191198",,terminal_output +5837,6637262,"TERMINAL",0,0,"252740332133512:00",,terminal_output +5838,6637478,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12176,0,"",python,selection_mouse +5839,6637640,".venv/lib/python3.10/site-packages/jax/_src/array.py",0,0,"# Copyright 2021 The JAX Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the ""License"");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an ""AS IS"" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom collections import defaultdict\nfrom collections.abc import Callable, Sequence\nimport enum\nimport functools\nimport math\nimport operator as op\nfrom typing import Any, TYPE_CHECKING, cast\n\nfrom jax._src import api\nfrom jax._src import basearray\nfrom jax._src import config\nfrom 
jax._src import core\nfrom jax._src import deprecations\nfrom jax._src import dispatch\nfrom jax._src import dtypes\nfrom jax._src import errors\nfrom jax._src import profiler\nfrom jax._src import util\nfrom jax._src import xla_bridge\nfrom jax._src.interpreters import mlir\nfrom jax._src.interpreters import pxla\nfrom jax._src.interpreters import xla\nfrom jax._src.layout import AutoLayout, DeviceLocalLayout, Format\nfrom jax._src.lib import xla_client as xc\nfrom jax._src.lib import _jax\nfrom jax._src.sharding import Sharding\nfrom jax._src.sharding_impls import (\n PmapSharding, SingleDeviceSharding,\n device_replica_id_map, hashed_index, num_addressable_indices,\n local_to_global_shape, _internal_use_concrete_mesh) # pyformat: disable\nfrom jax._src.typing import ArrayLike, DLDeviceType, DTypeLike\nfrom jax._src.util import safe_zip, unzip3, use_cpp_class, use_cpp_method, cache\nimport numpy as np\n\n\nShape = tuple[int, ...]\nDevice = xc.Device\nIndex = tuple[slice, ...]\nPRNGKeyArray = Any # TODO(jakevdp): fix cycles and import this.\n\ndef _get_device(a: ArrayImpl) -> Device:\n devices = a.sharding._internal_device_list # pytype: disable=attribute-error\n if len(devices) != 1:\n raise ValueError(\n ""When making an array from single-device arrays the input arrays must ""\n f""have one shard each. An argument array had {len(devices)} shard(s)."")\n return devices[0]\n\n\nclass Shard:\n """"""A single data shard of an Array.\n\n Attributes:\n device : Which device this shard resides on.\n index : The index into the global array of this shard.\n replica_id : Integer id indicating which replica of the global array this\n shard is part of. Always 0 for fully sharded data\n (i.e. when there’s only 1 replica).\n data : The data of this shard. None if ``device`` is non-local.\n """"""\n\n def __init__(self, device: Device, sharding: Sharding, global_shape: Shape,\n data: None | ArrayImpl | PRNGKeyArray = None):\n self._device = device\n self._sharding = sharding\n self._global_shape = global_shape\n self._data = data\n\n def __repr__(self):\n try:\n return (f'Shard(device={self.device!r}, index={self.index}, '\n f'replica_id={self.replica_id}, data={self.data})')\n except ValueError:\n return f'Shard(device={self.device!r}, data={self.data})'\n\n @functools.cached_property\n def index(self) -> Index:\n try:\n device_indices_map_fn = self._sharding.devices_indices_map\n except AttributeError:\n raise ValueError('Cannot calculate indices from sharding: '\n f'{self._sharding}. Please create a device to index '\n 'mapping for your sharding.') from None\n index = device_indices_map_fn(self._global_shape)[self.device]\n assert index is not None\n return index\n\n @functools.cached_property\n def replica_id(self) -> int:\n return device_replica_id_map(self._sharding, self._global_shape)[self.device]\n\n @property\n def device(self):\n return self._device\n\n @property\n def data(self):\n return self._data\n\n\ndef _reconstruct_array(fun, args, arr_state, aval_state):\n """"""Method to reconstruct a device array from a serialized state.""""""\n np_value = fun(*args)\n np_value.__setstate__(arr_state)\n jnp_value = api.device_put(np_value)\n # TODO(slebedev): Remove this branch after December 10th 2024.\n if ""named_shape"" in aval_state:\n deprecations.warn(\n ""jax-aval-named-shape"",\n ""Pickled array contains an aval with a named_shape attribute. 
This is""\n "" deprecated and the code path supporting such avals will be removed.""\n "" Please re-pickle the array."",\n stacklevel=2,\n )\n del aval_state[""named_shape""]\n jnp_value.aval = jnp_value.aval.update(**aval_state)\n return jnp_value\n\n\n@cache(max_size=4096, trace_context_in_key=False)\ndef _cached_index_calc(s, shape):\n map_ = s.addressable_devices_indices_map(shape)\n seen_h_indices = set()\n l = []\n for array_index, index in enumerate(map_.values()):\n h_index = hashed_index(index)\n if h_index not in seen_h_indices:\n seen_h_indices.add(h_index)\n l.append((array_index, index))\n return l\n\n\n@cache(max_size=4096, trace_context_in_key=False)\ndef _process_has_full_value_in_mcjax(s, shape):\n # Return False for single host as a fast path.\n if xla_bridge.process_count() == 1:\n return False\n\n num_unique_indices = len(\n {hashed_index(v) for v in s.devices_indices_map(shape).values()})\n num_addressable_unique_indices = len(\n {hashed_index(v) for v in s.addressable_devices_indices_map(shape).values()})\n return num_unique_indices == num_addressable_unique_indices\n\n\ndef _validate_shape_and_dtype_for_per_device_arrays(\n arrays: Sequence[ArrayImpl | np.ndarray],\n sharding: Sharding,\n aval: core.ShapedArray,\n expected_shape: Shape,\n):\n """"""Validates that per-device arrays are valid and consistent.""""""\n expected_dtype = aval.dtype\n for db in arrays:\n if db.dtype != expected_dtype:\n raise ValueError(\n ""Input buffers to `Array` must have matching dtypes. ""\n f""Got {db.dtype}, expected {expected_dtype} for buffer: {db}""\n )\n if db.shape != expected_shape:\n raise ValueError(\n f""Expected shard shape {expected_shape} doesn't match the single ""\n f""device array shape {db.shape}. Shape of Array is ""\n f""{aval.str_short()} with sharding {sharding}""\n )\n\n\nclass ArrayImpl(basearray.Array):\n # TODO(yashkatariya): Add __slots__ here.\n\n aval: core.ShapedArray\n _sharding: Sharding\n _arrays: list[ArrayImpl]\n _committed: bool\n _skip_checks: bool\n _npy_value: np.ndarray | None\n\n @use_cpp_method()\n def __init__(self, aval: core.ShapedArray, sharding: Sharding,\n arrays: Sequence[ArrayImpl],\n committed: bool, _skip_checks: bool = False):\n # NOTE: the actual implementation of the constructor is moved to C++.\n\n self.aval = aval\n self._sharding = sharding\n self._committed = committed\n self._npy_value = None\n arrays = [a._arrays[0] for a in arrays]\n\n # Don't rearrange if skip_checks is enabled because this assumes that the\n # input buffers are already arranged properly. 
This usually happens when\n # Array's are created as output of a JAX transformation\n # (like pjit, etc).\n if not _skip_checks or config.enable_checks.value:\n arrays = self._check_and_rearrange(arrays, self._sharding, self.aval)\n self._arrays = arrays\n\n def _check_and_rearrange(self, arrays, sharding, aval):\n device_id_to_buffer = {_get_device(db).id: db for db in arrays}\n\n addressable_dev = sharding.addressable_devices\n if len(arrays) != len(addressable_dev):\n raise ValueError(\n f""Expected {len(addressable_dev)} per-device arrays ""\n ""(this is how many devices are addressable by the sharding), but ""\n f""got {len(arrays)}"")\n\n array_device_ids = set(device_id_to_buffer.keys())\n addressable_device_ids = {d.id for d in addressable_dev}\n if len(array_device_ids) != len(arrays):\n buffer_device_ids = [_get_device(db).id for db in arrays]\n raise ValueError(\n ""When making an array from single-device arrays, the input arrays""\n "" must be from distinct devices, but got device IDs""\n f"" {buffer_device_ids}"")\n\n # Calculate a symmetric difference because the device ids between sharding\n # and _arrays should match.\n diff = array_device_ids ^ addressable_device_ids\n if diff:\n dev_in_sharding_not_in_arrays = addressable_device_ids - array_device_ids\n dev_in_arrays_not_in_sharding = array_device_ids - addressable_device_ids\n err_msg = (\n ""Addressable devices and per-device arrays devices do not match."")\n if dev_in_sharding_not_in_arrays:\n err_msg += (f"" Sharding contains devices {dev_in_sharding_not_in_arrays} ""\n ""that are not present in per-device arrays."")\n if dev_in_arrays_not_in_sharding:\n err_msg += (f"" Per-device arrays contain devices {dev_in_arrays_not_in_sharding} ""\n ""that are not present in the sharding."")\n raise ValueError(err_msg)\n\n _validate_shape_and_dtype_for_per_device_arrays(\n arrays,\n sharding=sharding,\n aval=aval,\n expected_shape=sharding.shard_shape(aval.shape),\n )\n\n # Rearrange arrays based on the device assignment.\n addressable_da = sharding._addressable_device_assignment\n return [device_id_to_buffer[device.id] for device in addressable_da]\n\n @property\n def shape(self) -> Shape:\n return self.aval.shape\n\n @property\n def dtype(self):\n return self.aval.dtype\n\n @property\n def ndim(self):\n return len(self.shape)\n\n @property\n def size(self):\n return math.prod(self.shape)\n\n @property\n def sharding(self):\n return self._sharding\n\n @property\n def device(self):\n self._check_if_deleted()\n if isinstance(self.sharding, SingleDeviceSharding):\n return list(self.sharding.device_set)[0]\n return self.sharding\n\n @property\n def weak_type(self):\n return self.aval.weak_type\n\n @property\n def committed(self) -> bool:\n return self._committed\n\n def __str__(self):\n return str(self._value)\n\n def __len__(self):\n try:\n return self.shape[0]\n except IndexError as err:\n raise TypeError(""len() of unsized object"") from err # same as numpy error\n\n def __bool__(self):\n core.check_bool_conversion(self)\n return bool(self._value)\n\n def __float__(self):\n core.check_scalar_conversion(self)\n return self._value.__float__()\n\n def __int__(self):\n core.check_scalar_conversion(self)\n return self._value.__int__()\n\n def __complex__(self):\n core.check_scalar_conversion(self)\n return self._value.__complex__()\n\n def __hex__(self):\n core.check_integer_conversion(self)\n return hex(self._value)\n\n def __oct__(self):\n core.check_integer_conversion(self)\n return oct(self._value)\n\n def __index__(self):\n 
core.check_integer_conversion(self)\n return op.index(self._value)\n\n def tobytes(self, order=""C""):\n return self._value.tobytes(order)\n\n def tolist(self):\n return self._value.tolist()\n\n def __format__(self, format_spec):\n # Simulates behavior of https://github.com/numpy/numpy/pull/9883\n if self.ndim == 0:\n return format(self._value[()], format_spec)\n else:\n return format(self._value, format_spec)\n\n def __getitem__(self, idx):\n from jax._src.lax import lax # pytype: disable=import-error\n from jax._src.numpy import indexing # pytype: disable=import-error\n self._check_if_deleted()\n\n if isinstance(self.sharding, PmapSharding):\n if config.pmap_no_rank_reduction.value:\n cidx = idx if isinstance(idx, tuple) else (idx,)\n\n padded_cidx = tuple(\n slice(i, i + 1, None) if isinstance(i, int) else i for i in cidx\n ) + (slice(None),) * (len(self.shape) - len(cidx))\n else:\n if not isinstance(idx, tuple):\n padded_cidx = (idx,) + (slice(None),) * (len(self.shape) - 1)\n else:\n padded_cidx = idx + (slice(None),) * (len(self.shape) - len(idx))\n\n indices = tuple(self.sharding.devices_indices_map(self.shape).values())\n try:\n arr_idx = indices.index(padded_cidx)\n except ValueError:\n arr_idx = None\n if arr_idx is not None:\n out = self._arrays[arr_idx]\n sharding = SingleDeviceSharding(_get_device(out))\n\n if config.pmap_no_rank_reduction.value:\n # If cidx was the index of a single shard, then it corresponds to one\n # shard of the chunked dimension.\n dims = tuple(i for i, x in enumerate(cidx) if isinstance(x, int))\n # Squeeze on committed arrays to avoid data movement to shard 0.\n out = lax.squeeze(out, dimensions=dims)\n\n return ArrayImpl(\n out.aval, sharding, [out], committed=False, _skip_checks=True)\n\n return indexing.rewriting_take(self, idx)\n\n def __iter__(self):\n if self.ndim == 0:\n raise TypeError(""iteration over a 0-d array"") # same as numpy error\n else:\n assert self.is_fully_replicated or self.is_fully_addressable\n if dispatch.is_single_device_sharding(self.sharding) or self.is_fully_replicated:\n return (sl for chunk in self._chunk_iter(100) for sl in chunk._unstack())\n elif isinstance(self.sharding, PmapSharding):\n return (self[i] for i in range(self.shape[0]))\n else:\n # TODO(yashkatariya): Don't bounce to host and use `_chunk_iter` path\n # here after uneven partitioning support is added.\n return (api.device_put(self._value[i]) for i in range(self.shape[0]))\n\n @property\n def is_fully_replicated(self) -> bool:\n return self.sharding.is_fully_replicated\n\n def __repr__(self):\n prefix = 'Array('\n if self.aval is not None and self.aval.weak_type:\n dtype_str = f'dtype={self.dtype.name}, weak_type=True)'\n else:\n dtype_str = f'dtype={self.dtype.name})'\n\n if self.is_fully_addressable or self.is_fully_replicated:\n line_width = np.get_printoptions()[""linewidth""]\n if self.size == 0:\n s = f""[], shape={self.shape}""\n else:\n s = np.array2string(self._value, prefix=prefix, suffix=',',\n separator=', ', max_line_width=line_width)\n last_line_len = len(s) - s.rfind('\n') + 1\n sep = ' '\n if last_line_len + len(dtype_str) + 1 > line_width:\n sep = ' ' * len(prefix)\n return f""{prefix}{s},{sep}{dtype_str}""\n else:\n return f""{prefix}{self.shape}, {dtype_str}""\n\n @property\n def is_fully_addressable(self) -> bool:\n """"""Is this Array fully addressable?\n\n A jax.Array is fully addressable if the current process can address all of\n the devices named in the :class:`Sharding`. 
``is_fully_addressable`` is\n equivalent to ""is_local"" in multi-process JAX.\n\n Note that fully replicated is not equal to fully addressable i.e.\n a jax.Array which is fully replicated can span across multiple hosts and is\n not fully addressable.\n """"""\n return self.sharding.is_fully_addressable\n\n def __array__(self, dtype=None, context=None, copy=None):\n # copy argument is supported by np.asarray starting in numpy 2.0\n kwds = {} if copy is None else {'copy': copy}\n return np.asarray(self._value, dtype=dtype, **kwds)\n\n def __dlpack__(self, *, stream: int | Any | None = None,\n max_version: tuple[int, int] | None = None,\n dl_device: tuple[DLDeviceType, int] | None = None,\n copy: bool | None = None):\n from jax._src.dlpack import to_dlpack # pytype: disable=import-error # pylint: disable=g-import-not-at-top\n\n device_set = self.sharding.device_set\n if len(device_set) > 1:\n raise BufferError(\n ""to_dlpack can only pack a dlpack tensor from an array on a singular ""\n f""device, but an array with a Sharding over {len(device_set)} devices ""\n ""was provided.""\n )\n device, = device_set\n return to_dlpack(self, stream=stream,\n max_version=max_version,\n src_device=device,\n dl_device=dl_device,\n copy=copy)\n\n def __dlpack_device__(self) -> tuple[enum.Enum, int]:\n if len(self._arrays) != 1:\n raise BufferError(""__dlpack__ only supported for unsharded arrays."")\n\n from jax._src.dlpack import DLDeviceType # pytype: disable=import-error # pylint: disable=g-import-not-at-top\n\n if self.platform() == ""cpu"":\n return DLDeviceType.kDLCPU, 0\n\n elif self.platform() == ""gpu"":\n platform_version = _get_device(self).client.platform_version\n if ""cuda"" in platform_version:\n dl_device_type = DLDeviceType.kDLCUDA\n elif ""rocm"" in platform_version:\n dl_device_type = DLDeviceType.kDLROCM\n else:\n raise BufferError(""Unknown GPU platform for __dlpack__: ""\n f""{platform_version}"")\n\n local_hardware_id = _get_device(self).local_hardware_id\n if local_hardware_id is None:\n raise BufferError(""Couldn't get local_hardware_id for __dlpack__"")\n\n return dl_device_type, local_hardware_id\n\n else:\n raise BufferError(\n ""__dlpack__ device only supported for CPU and GPU, got platform: ""\n f""{self.platform()}""\n )\n\n def __reduce__(self):\n fun, args, arr_state = self._value.__reduce__()\n aval_state = {'weak_type': self.aval.weak_type}\n return (_reconstruct_array, (fun, args, arr_state, aval_state))\n\n @use_cpp_method()\n def unsafe_buffer_pointer(self):\n if len(self._arrays) != 1:\n raise ValueError(""unsafe_buffer_pointer() is supported only for unsharded""\n "" arrays."")\n return self._arrays[0].unsafe_buffer_pointer()\n\n @property\n @use_cpp_method()\n def __cuda_array_interface__(self):\n if len(self._arrays) != 1:\n raise ValueError(""__cuda_array_interface__() is supported only for ""\n ""unsharded arrays."")\n return self._arrays[0].__cuda_array_interface__ # pytype: disable=attribute-error # bind-properties\n\n @use_cpp_method()\n def on_device_size_in_bytes(self):\n """"""Returns the total global on-device size of the array in bytes.""""""\n arr = self._arrays[0]\n per_shard_size = arr.on_device_size_in_bytes()\n return per_shard_size * self.sharding.num_devices\n\n def devices(self) -> set[Device]:\n self._check_if_deleted()\n return self.sharding.device_set\n\n @property\n def device_buffer(self):\n raise AttributeError(\n ""arr.device_buffer has been deprecated. 
Use arr.addressable_data(0)"")\n\n @property\n def device_buffers(self):\n raise AttributeError(\n ""arr.device_buffers has been deprecated. Use [x.data for x in arr.addressable_shards]"")\n\n def addressable_data(self, index: int) -> ArrayImpl:\n self._check_if_deleted()\n if self.is_fully_replicated:\n return self._fully_replicated_shard()\n return self._arrays[index]\n\n @functools.cached_property\n def addressable_shards(self) -> Sequence[Shard]:\n self._check_if_deleted()\n out = []\n for a in self._arrays:\n out.append(Shard(_get_device(a), self.sharding, self.shape, a))\n return out\n\n @property\n def format(self):\n # TODO(yashkatariya): Remove the deleted check from here.\n if self.is_deleted():\n return Format(None, self.sharding)\n try:\n return Format(DeviceLocalLayout.from_pjrt_layout(self._pjrt_layout),\n self.sharding)\n except _jax.XlaRuntimeError as e:\n msg, *_ = e.args\n if type(msg) is str and msg.startswith(""UNIMPLEMENTED""):\n return Format(None, self.sharding)\n else:\n raise\n\n # TODO(frostig, yashkatariya): remove\n layout = format\n\n @property\n def global_shards(self) -> Sequence[Shard]:\n """"""Returns list of all `Shard`s of the Array across all devices.\n\n The result includes shards that are not addressable by the current process.\n If a `Shard` is not addressable, then its `data` will be `None`.\n """"""\n self._check_if_deleted()\n if self.is_fully_addressable: # pylint: disable=using-constant-test\n return self.addressable_shards\n\n out = []\n device_id_to_buffer = {_get_device(a).id: a for a in self._arrays}\n for global_d in self.sharding.device_set:\n if device_id_to_buffer.get(global_d.id, None) is not None:\n array = device_id_to_buffer[global_d.id]\n else:\n array = None\n out.append(Shard(global_d, self.sharding, self.shape, array))\n return out\n\n @use_cpp_method()\n def delete(self):\n if self._arrays is None:\n return\n for buf in self._arrays:\n buf.delete()\n self._arrays = None\n self._npy_value = None\n\n @use_cpp_method()\n def is_deleted(self):\n if self._arrays is None:\n return True\n # This path is taken when a view of `Array` is created and the original\n # Array is deleted. In that case, the buffers the view represents also get\n # deleted.\n return any(buf.is_deleted() for buf in self._arrays)\n\n def _check_if_deleted(self):\n if self.is_deleted():\n raise RuntimeError(\n f""Array has been deleted with shape={self.aval.str_short()}."")\n\n @use_cpp_method()\n def block_until_ready(self):\n self._check_if_deleted()\n for db in self._arrays:\n db.block_until_ready()\n return self\n\n @use_cpp_method()\n def _single_device_array_to_np_array_did_copy(self) -> tuple[np.ndarray, bool]: # type: ignore\n ... 
# pytype: disable=bad-return-type\n\n @use_cpp_method()\n def _copy_single_device_array_to_host_async(self):\n self._arrays[0].copy_to_host_async()\n\n @profiler.annotate_function\n def copy_to_host_async(self):\n self._check_if_deleted()\n if self._npy_value is None:\n if self.is_fully_replicated:\n self._copy_single_device_array_to_host_async()\n return\n for i, _ in _cached_index_calc(self.sharding, self.shape):\n self._arrays[i]._copy_single_device_array_to_host_async()\n\n @property\n @functools.partial(profiler.annotate_function, name=""np.asarray(jax.Array)"")\n def _value(self) -> np.ndarray:\n self._check_if_deleted()\n\n if self._npy_value is None:\n if (self.is_fully_replicated and\n self.sharding._internal_device_list.addressable_device_list): # type: ignore\n npy_value, did_copy = self._single_device_array_to_np_array_did_copy()\n npy_value.flags.writeable = False\n if did_copy:\n self._npy_value = npy_value\n return npy_value\n\n # TODO(yashkatariya): Merge `_process_has_full_value_in_mcjax` with\n # is_fully_addressable.\n if (not self.is_fully_addressable and\n not _process_has_full_value_in_mcjax(self.sharding, self.shape)):\n raise RuntimeError(\n ""Fetching value for `jax.Array` that spans non-addressable""\n "" (non process local) devices is not possible. You can use""\n "" `jax.experimental.multihost_utils.process_allgather` to print the""\n "" global array or use `.addressable_shards` method of jax.Array to""\n "" inspect the addressable (process local) shards.""\n )\n\n for i, _ in _cached_index_calc(self.sharding, self.shape):\n self._arrays[i]._copy_single_device_array_to_host_async()\n\n npy_value = np.empty(self.shape, self.dtype)\n for i, ind in _cached_index_calc(self.sharding, self.shape):\n npy_value[ind], _ = self._arrays[i]._single_device_array_to_np_array_did_copy()\n self._npy_value = npy_value\n self._npy_value.flags.writeable = False\n return self._npy_value\n\n\n# TODO(b/273265390): ideally we would write this as a decorator on the ArrayImpl\n# class, however this triggers a pytype bug. Workaround: apply the decorator\n# after the fact.\nif not TYPE_CHECKING:\n ArrayImpl = use_cpp_class(xc.ArrayImpl)(ArrayImpl)\n\n\ndef _get_shape_from_index(slc: Index, shape: Shape) -> Shape:\n return tuple(\n (s.stop or dim) - (s.start or 0)\n for s, dim in safe_zip(slc, shape)\n if isinstance(s, slice) # If element is int, this dimension is reduced\n )\n\ndef _get_and_check_dtype(arrays: Sequence[basearray.Array | np.ndarray],\n dtype: DTypeLike | None, fname: str):\n if arrays:\n if dtype is None:\n dtype = arrays[0].dtype\n else:\n if arrays[0].dtype != dtype:\n raise ValueError(\n f""If `dtype` is provided to `jax.{fname}`, it must match the dtype ""\n f""of the addressable shards. 
Got dtype={dtype} and shard ""\n f""dtype={arrays[0].dtype}`."")\n else:\n if not config.enable_empty_arrays.value:\n raise ValueError(\n f""Building an Array with no addressable shards with `jax.{fname}` is ""\n ""supported only if `jax.config.enable_empty_arrays` is set to True.""\n )\n if dtype is None:\n raise ValueError(\n ""If the Array has no addressable shards, `dtype` must be provided ""\n f""via the `dtype` argument to `jax.{fname}`."")\n return dtype\n\n# explicitly set to be unhashable.\nsetattr(ArrayImpl, ""__hash__"", None)\nsetattr(ArrayImpl, ""__array_priority__"", 100)\n\n# TODO(yashkatariya): Remove None from callback input type.\n\ndef make_array_from_callback(\n shape: Shape, sharding: Sharding | Format,\n data_callback: Callable[[Index | None], ArrayLike],\n dtype: DTypeLike | None = None) -> ArrayImpl:\n # pyformat: disable\n """"""Returns a ``jax.Array`` via data fetched from ``data_callback``.\n\n ``data_callback`` is used to fetch the data for each addressable shard of the\n returned ``jax.Array``. This function must return concrete arrays, meaning that\n ``make_array_from_callback`` has limited compatibility with JAX transformations\n like :func:`jit` or :func:`vmap`.\n\n Args:\n shape : Shape of the ``jax.Array``.\n sharding: A ``Sharding`` instance which describes how the ``jax.Array`` is\n laid out across devices.\n data_callback : Callback that takes indices into the global array value as\n input and returns the corresponding data of the global array value.\n The data can be returned as any array-like object, e.g. a ``numpy.ndarray``.\n dtype: The dtype of the output ``jax.Array``. If not provided, the dtype of\n the data for the first addressable shard is used. If there are no\n addressable shards, the ``dtype`` argument must be provided.\n\n Returns:\n A ``jax.Array`` via data fetched from ``data_callback``.\n\n Examples:\n\n >>> import math\n >>> from jax.sharding import Mesh\n >>> from jax.sharding import PartitionSpec as P\n >>> import numpy as np\n ...\n >>> input_shape = (8, 8)\n >>> global_input_data = np.arange(math.prod(input_shape)).reshape(input_shape)\n >>> global_mesh = Mesh(np.array(jax.devices()).reshape(2, 4), ('x', 'y'))\n >>> inp_sharding = jax.sharding.NamedSharding(global_mesh, P('x', 'y'))\n ...\n >>> def cb(index):\n ... return global_input_data[index]\n ...\n >>> arr = jax.make_array_from_callback(input_shape, inp_sharding, cb)\n >>> arr.addressable_data(0).shape\n (4, 2)\n """"""\n # pyformat: enable\n dll = sharding.device_local_layout if isinstance(sharding, Format) else None\n if isinstance(dll, AutoLayout):\n raise TypeError(\n ""`DeviceLocalLayout.AUTO` cannot be used in place of a device-local""\n f"" layout when calling `jax.make_array_from_callback`. Got {sharding}"")\n sharding = sharding.sharding if isinstance(sharding, Format) else sharding\n if not isinstance(sharding, Sharding):\n raise TypeError(\n f""sharding should be an instance of `jax.sharding`. 
Got {sharding} of""\n f"" type {type(sharding)}"")\n\n def get_data(index: Index | None) -> ArrayImpl | np.ndarray:\n # Perhaps cache on index here, then we can unify fully_replicated\n # and non-fully_replicated cases below and become faster for\n # partially replicated cases.\n assert index is not None\n r = data_callback(index)\n if isinstance(r, core.Tracer):\n raise errors.UnexpectedTracerError(\n ""jax.make_array_from_callback cannot be called within a traced""\n "" context.""\n )\n # Value can be python scalar, resolve it into something with dtype.\n return xla.canonicalize_dtype(r)\n\n if sharding.is_fully_replicated:\n devices = list(sharding._internal_device_list.addressable_device_list) # type: ignore\n # Only compute data once.\n per_device_values = [get_data((slice(None),) * len(shape))] * len(devices)\n else:\n device_to_index_map = sharding.addressable_devices_indices_map(shape)\n devices = list(device_to_index_map.keys())\n per_device_values = [\n get_data(device_to_index_map[device]) for device in devices\n ]\n\n dtype = _get_and_check_dtype(\n per_device_values, dtype, ""make_array_from_callback"")\n expected_shape = sharding.shard_shape(shape)\n aval = core.update_aval_with_sharding(\n core.ShapedArray(shape, dtype), sharding)\n\n _validate_shape_and_dtype_for_per_device_arrays(\n per_device_values,\n expected_shape=expected_shape,\n aval=aval,\n sharding=sharding,\n )\n first_value = None\n if per_device_values:\n first_value = per_device_values[0]\n if (isinstance(first_value, ArrayImpl)\n and first_value._committed\n and sharding.is_fully_replicated\n and first_value.is_fully_replicated\n and first_value.sharding._device_assignment == tuple(devices)\n and first_value.format.device_local_layout == dll):\n return first_value\n\n if dtypes.issubdtype(aval.dtype, dtypes.extended):\n # TODO(yashkatariya): Can this also use batched_device_put?\n arrays = api.device_put(per_device_values, devices)\n return aval.dtype._rules.make_sharded_array(\n aval, sharding, arrays, committed=True\n )\n\n if dll is not None:\n devices = [Format(dll, SingleDeviceSharding(d)) for d in devices]\n # pxla.batched_device_put doesn't support Layout... Take the slow route\n arrays = api.device_put(per_device_values, devices)\n return ArrayImpl(aval, sharding, arrays, committed=True)\n\n if isinstance(first_value, ArrayImpl) and len(first_value.devices()) > 1:\n # The output of the callback is already a sharded array, move it to\n # the target device.\n per_device_values = api.device_put(per_device_values, devices)\n\n return pxla.batched_device_put(aval, sharding, per_device_values, devices)\n\n\ndef make_array_from_process_local_data(\n sharding: Sharding,\n local_data: np.ndarray,\n global_shape: Shape | None = None,\n) -> ArrayImpl:\n # pyformat: disable\n """"""Creates distributed tensor using the data available in process.\n\n This function is a common special case of `make_array_from_callback`. It\n assumes that the data is available in the process and takes care of the\n index wrangling.\n\n The most common case is when the sharding is sharded across the batch\n dimension and each host just loads its corresponding sub-batch.
This function\n supports more general cases as well, such as mixed multi-host and multi-axis\n replication and sharding, but you would need to compute the size and the\n contents of process-local data correctly to satisfy the sharding constraints.\n\n In particular, if any two hosts are replicas, host_local_data should be\n identical as well.\n\n The global_shape is optional. If not provided it will be inferred from\n the local_data and sharding, under the assumption that\n each host represents only its own data for uniform sharding. If sharding\n is non-uniform (see note below), an exception will be raised.\n\n Setting global_shape explicitly allows for finer-grained control and works with\n non-uniform shardings. Each dimension of global_shape must either match\n host_local_data, or match the inferred global shape of the sharding (in which\n case it is equivalent to setting it to None, but is more explicit).\n\n For example if dimension `i` is fully sharded then this size would be\n `per_device_shape[i] * jax.local_device_count()`. Each device will be mapped\n into a local slice of the `local_data` array. For example, if a given process\n addresses slices (8, 12) and (24, 28), then these slices will be mapped\n into (0, 4) and (4, 8) of the `local_data`.\n\n For each dimension where global_shape matches local_shape, each device\n will look up the slice in the local_data. For example if\n global_shape == local_data.shape, the local data is assumed to be the\n actual target array that will be sharded across devices.\n\n If global_shape is the same as local_data.shape, then the data must\n be the same across all hosts.\n\n Examples:\n >>> from jax.sharding import PartitionSpec as P\n >>> mesh_rows = 2\n >>> mesh_cols = jax.device_count() // 2\n ...\n >>> mesh = jax.sharding.Mesh(np.array(jax.devices()).reshape(mesh_rows, mesh_cols), ('x', 'y'))\n\n >>> sharding = jax.sharding.NamedSharding(mesh, P(('x', 'y'),))\n >>> rows_per_device = 2\n >>> feature_length = 32\n >>> per_device_shape = (rows_per_device, feature_length)\n >>> per_host_shape = (rows_per_device * len(mesh.local_devices), feature_length)\n >>> per_host_generator = lambda : np.arange(np.prod(per_host_shape)).reshape(per_host_shape)\n >>> per_host_data = per_host_generator() # replace with your own per-host data pipeline that outputs numpy arrays\n >>> global_shape = (rows_per_device * len(sharding.device_set), ) + per_device_shape[1:]\n >>> output_global_array = jax.make_array_from_process_local_data(sharding, per_host_data, global_shape)\n ...\n >>> assert output_global_array.addressable_data(0).shape == per_device_shape\n >>> assert output_global_array.shape == global_shape\n\n NB: While most shardings are uniform, it is possible to design an exotic\n sharding mesh where each process's devices will be arranged in a non-grid-like\n pattern in some dimensions, or for indices to overlap non-trivially.\n Such sharding is called ""non-uniform"" in those dimensions. In that case,\n the global shape along those directions must match local shape as there is\n no meaningful way to represent all needed\n per-process data in non-overlapping fashion. For example, for global_shape 4x4\n if sharding looks like this:\n\n 0123\n 2103\n 4675\n 4567\n\n with 4 processes, containing devices (0,1), (2, 3), (4, 5), (6, 7) respectively.\n Then the data for each host looks like\n\n xx.. ..xx .... ....\n .xx. x..x .... ....\n .... .... x..x .xx.\n .... .... xx..
..xx\n\n the sharding is uniform on rows (each host requires either rows 1-2 or rows 3-4)\n and non-uniform on columns (hosts require overlapping but not matching\n sets of columns). Thus local data must have the shape 2x4 or 4x4\n for all hosts, even though each host can potentially fit into a 2x2 shape.\n In this case the user must provide global_shape explicitly and for\n local_shape=(2, 4), potentially valid global shapes are (2, 4) and (4, 4).\n\n On the other hand for sharding:\n\n 0213 x.x. .x.x .... ....\n 0213 x.x. .x.x .... ....\n 4657 .... .... .x.x x.x.\n 4657 .... .... .x.x x.x.\n\n for local_shape=(2, 2) this function can accept a choice of 2x2, 2x4, 4x2\n and 4x4 global shapes. Setting global_shape to None is equivalent to\n setting it to (4, 4) in this case.\n\n Args:\n sharding: Sharding of the global array.\n local_data: Data on the host to be placed on local devices. Each\n dimension should either match global_shape, or match\n num_addressable_indices(dim).\n global_shape: The target shape of the global array. If None,\n will infer from local_data and sharding.\n\n Returns:\n A tensor with sharding=sharding and shape global_shape.\n """"""\n # pyformat: enable\n if xla_bridge.process_count() == 1:\n return api.device_put(local_data, sharding)\n\n # TODO(sandler): consider supporting partially specified global_shape or\n # making local_to_global_shape available in the api.\n local_shape = local_data.shape\n if global_shape is None:\n global_shape = local_to_global_shape(sharding, local_shape) # type: ignore[assignment]\n assert global_shape is not None\n if None in global_shape:\n raise ValueError(\n ""Unable to compute global_shape due to non-uniform sharding.""\n f"" Specify global shape directly. Partially computed {global_shape=}.""\n )\n elif None in global_shape:\n raise ValueError(f""{global_shape=} has Nones. This is not supported."")\n full_dim = []\n for i, (data_dim, global_dim) in enumerate(\n zip(local_data.shape, global_shape)\n ):\n full_dim.append(data_dim == global_dim)\n if data_dim != global_dim:\n process_slice = num_addressable_indices(sharding, i, global_shape)\n if process_slice != data_dim:\n raise ValueError(\n ""Invalid host data, each dimension should match either global or ""\n f""process shape. In dimension {i=}, the process data has {data_dim}""\n f"" elements.
Process addresses {process_slice} elements and ""\n f""{global_shape=}.""\n )\n addressable_shards = sharding.addressable_devices_indices_map(global_shape)\n shard = next(iter(addressable_shards.values()))\n assert shard is not None\n shard_shape = _get_shape_from_index(shard, global_shape)\n slices_for_each_dim: list[list[int]] = [[] for _ in global_shape]\n for shard_index in addressable_shards.values():\n assert shard_index is not None\n for i, slc in enumerate(shard_index):\n slices_for_each_dim[i].append(slc.start or 0)\n for i in range(len(global_shape)):\n slices_for_each_dim[i] = sorted(set(slices_for_each_dim[i]))\n\n @functools.lru_cache(maxsize=4096)\n def local_slice(i, start):\n # Looks up the index of this slice in the list of slices for this dimension.\n # This determines the corresponding slice in host_local_data.\n start = slices_for_each_dim[i].index(start or 0) * shard_shape[i]\n end = start + shard_shape[i]\n return slice(start, end)\n\n def cb(index: Index | None) -> ArrayLike:\n assert index is not None\n data_slice = (\n slc if full_dim[i] else local_slice(i, slc.start)\n for i, slc in enumerate(index)\n )\n return local_data[tuple(data_slice)]\n\n return make_array_from_callback(global_shape, sharding, cb)\n\n\ndef make_array_from_single_device_arrays(\n shape: Shape, sharding: Sharding, arrays: Sequence[basearray.Array], *,\n dtype: DTypeLike | None = None,\n) -> ArrayImpl:\n r""""""Returns a ``jax.Array`` from a sequence of ``jax.Array``\s each on a single device.\n\n Every device in the input ``sharding``\'s mesh must have an array in ``arrays``.\n\n Args:\n shape: Shape of the output ``jax.Array``. This conveys information already included with\n ``sharding`` and ``arrays`` and serves as a double check.\n sharding: A global Sharding instance which describes how the output jax.Array is laid out across devices.\n arrays: `list` or `tuple` of ``jax.Array``\s that are each single-device addressable. ``len(arrays)``\n must equal ``len(sharding.addressable_devices)`` and the shape of each array must be the same. For multiprocess code,\n each process will call with a different ``arrays`` argument that corresponds to that process's data.\n These arrays are commonly created via ``jax.device_put``.\n dtype: The dtype of the output ``jax.Array``. If not provided, the dtype of the first array in\n ``arrays`` is used. If ``arrays`` is empty, the ``dtype`` argument must be provided.\n\n Returns:\n A global ``jax.Array``, sharded as ``sharding``, with shape equal to ``shape``, and with per-device\n contents matching ``arrays``.\n\n Examples:\n\n >>> import math\n >>> from jax.sharding import Mesh\n >>> from jax.sharding import PartitionSpec as P\n >>> import numpy as np\n ...\n >>> mesh_rows = 2\n >>> mesh_cols = jax.device_count() // 2\n ...\n >>> global_shape = (8, 8)\n >>> mesh = Mesh(np.array(jax.devices()).reshape(mesh_rows, mesh_cols), ('x', 'y'))\n >>> sharding = jax.sharding.NamedSharding(mesh, P('x', 'y'))\n >>> inp_data = np.arange(math.prod(global_shape)).reshape(global_shape)\n ...\n >>> arrays = [\n ... jax.device_put(inp_data[index], d)\n ... 
for d, index in sharding.addressable_devices_indices_map(global_shape).items()]\n ...\n >>> arr = jax.make_array_from_single_device_arrays(global_shape, sharding, arrays)\n >>> assert arr.shape == (8,8) # arr.shape is (8,8) regardless of jax.device_count()\n\n For cases where you have a local array and want to convert it to a global\n jax.Array, use ``jax.make_array_from_process_local_data``.\n """"""\n if isinstance(arrays, Sequence):\n dtype = _get_and_check_dtype(\n arrays, dtype, ""make_array_from_single_device_arrays"")\n\n # All input arrays should be committed. Checking it is expensive on\n # single-controller systems.\n aval = core.update_aval_with_sharding(\n core.ShapedArray(shape, dtype, weak_type=False), sharding)\n if dtypes.issubdtype(aval.dtype, dtypes.extended):\n return aval.dtype._rules.make_sharded_array(aval, sharding, arrays,\n committed=True)\n arrays = list(arrays) if isinstance(arrays, tuple) else arrays\n # TODO(phawkins): ideally the cast() could be checked.\n try:\n return ArrayImpl(aval, sharding, cast(Sequence[ArrayImpl], arrays),\n committed=True)\n except TypeError:\n if not isinstance(arrays, list):\n raise TypeError(""jax.make_array_from_single_device_arrays `arrays` ""\n ""argument must be a list or tuple, but got ""\n f""{type(arrays)}."")\n if any(isinstance(arr, core.Tracer) for arr in arrays):\n raise ValueError(\n ""jax.make_array_from_single_device_arrays requires a list of concrete""\n f"" arrays as input, but got types {set(map(type, arrays))}"")\n raise\n\nxla.canonicalize_dtype_handlers[ArrayImpl] = pxla.identity\n\ndef _get_aval_array(self):\n return core.update_aval_with_sharding(self.aval, self.sharding)\ncore.pytype_aval_mappings[ArrayImpl] = _get_aval_array\n\n\ndef _array_mlir_constant_handler(val):\n try:\n return mlir.ir_constant(val._value)\n except RuntimeError as e:\n # TODO(yashkatariya): Ideally we would catch a custom exception from\n # `_value` function in ArrayImpl instead of checking the error string.\n if 'Fetching value for `jax.Array` that spans non-addressable' in str(e):\n raise RuntimeError(\n ""Closing over jax.Array that spans non-addressable (non process""\n "" local) devices is not allowed. Please pass such arrays as arguments""\n f"" to the function. Got jax.Array: {val.aval.str_short()}"") from e\n raise\n\nmlir.register_constant_handler(ArrayImpl, _array_mlir_constant_handler)\n\n\n# NOTE(skye): we could refactor to generate _multi_slice parameters directly\n# from the input ShardingSpec, rather than the indices. 
However, this would\n# require duplicating the ordering logic of spec_to_indices, which is more\n# subtle and more likely to change than the index logic we have to support here.\ndef as_slice_indices(arr: Any, idx: Index) -> tuple[\n tuple[int, ...], tuple[int, ...], tuple[int, ...]]:\n """"""Returns start_indices, limit_indices, removed_dims""""""\n start_indices = [0] * arr.ndim\n limit_indices = list(arr.shape)\n removed_dims: list[int] = []\n\n tuple_idx = idx if isinstance(idx, tuple) else (idx,)\n for dim, sub_idx in enumerate(tuple_idx):\n if isinstance(sub_idx, int):\n start_indices[dim] = sub_idx\n limit_indices[dim] = sub_idx + 1\n removed_dims.append(dim)\n elif sub_idx == slice(None):\n continue\n else:\n assert isinstance(sub_idx, slice), sub_idx\n assert isinstance(sub_idx.start, int), sub_idx\n assert isinstance(sub_idx.stop, int), sub_idx\n start_indices[dim] = sub_idx.start\n limit_indices[dim] = sub_idx.stop\n\n return tuple(start_indices), tuple(limit_indices), tuple(removed_dims)\n\n\ndef shard_device_array(x, devices, indices, sharding):\n start_indices, limit_indices, removed_dims = unzip3(\n as_slice_indices(x, idx) for idx in indices)\n if sharding.is_fully_replicated:\n shards = [x] * len(devices)\n else:\n # TODO(yashkatariya): Maybe this should be set when we call the handler in\n # InputsHandler.__call__?\n with _internal_use_concrete_mesh(None):\n shards = x._multi_slice(start_indices, limit_indices, removed_dims)\n aval = core.shaped_abstractify(x)\n return pxla.batched_device_put(aval, sharding, shards, devices)\n\n\ndef shard_sharded_device_array_slow_path(x, devices, indices, sharding):\n candidates = defaultdict(list)\n bufs = [buf.data for buf in x.addressable_shards]\n arr_indices = tuple(x.sharding.devices_indices_map(x.shape).values())\n for buf, idx in safe_zip(bufs, arr_indices):\n candidates[hashed_index(idx)].append(buf)\n\n bufs = []\n for idx, device in safe_zip(indices, devices):\n # Look up all buffers that contain the correct slice of the logical array.\n candidates_list = candidates[hashed_index(idx)]\n if not candidates_list:\n return pxla.shard_args([sharding], [None], [None], [x._value],\n canonicalize=False)[0]\n # Try to find a candidate buffer already on the correct device,\n # otherwise copy one of them.\n for buf in candidates_list:\n if buf.devices() == {device}:\n bufs.append(buf)\n break\n else:\n bufs.append(candidates_list[-1])\n return pxla.batched_device_put(x.aval, sharding, bufs, devices)\n\n\n@cache(max_size=4096, trace_context_in_key=False)\ndef _sharding_indices_and_eq(src_sharding, shape, dst_sharding):\n src_indices = src_sharding.addressable_devices_indices_map(shape).values()\n dst_indices = dst_sharding.addressable_devices_indices_map(shape).values()\n return dst_indices, tuple(src_indices) == tuple(dst_indices)\n\n\ndef _array_shard_arg(xs, shardings, layouts, copy_semantics):\n util.test_event(""_array_shard_arg"")\n results = []\n batch_xs, batch_devs, batch_shardings, batch_indices = [], [], [], []\n batch_cs = []\n\n for i, (x, sharding, layout, cs) in enumerate(\n safe_zip(xs, shardings, layouts, copy_semantics)):\n x._check_if_deleted()\n indices, same_indices = _sharding_indices_and_eq(x.sharding, x.shape, sharding)\n same_layout = (True if layout is None else\n x.format.device_local_layout == layout)\n\n if not x.is_fully_addressable:\n if same_indices and same_layout:\n results.append(x)\n else:\n raise NotImplementedError(\n ""Cannot reshard an input that is not fully addressable"")\n else:\n devices = 
sharding._addressable_device_assignment\n if same_indices and same_layout:\n # Add a placeholder result that will be filled in later.\n results.append(None)\n # Accumulate arguments to `batched_copy_array_to_devices_with_sharding`.\n batch_xs.append(x)\n batch_devs.append(list(devices))\n batch_shardings.append(sharding)\n batch_indices.append(i)\n batch_cs.append(cs)\n # Resharding starts here:\n elif not same_layout:\n results.append(api.device_put(x, Format(layout, sharding)))\n elif dispatch.is_single_device_sharding(x.sharding):\n results.append(shard_device_array(x, devices, indices, sharding))\n else:\n results.append(\n shard_sharded_device_array_slow_path(x, devices, indices, sharding))\n\n util.test_event(""batched_copy_array"")\n copy_outs = xc.batched_copy_array_to_devices_with_sharding(\n batch_xs, batch_devs, batch_shardings, batch_cs)\n for i, copy_out in safe_zip(batch_indices, copy_outs):\n assert results[i] is None\n results[i] = copy_out\n return results\npxla.shard_arg_handlers[ArrayImpl] = _array_shard_arg\n\n\ndef _array_global_result_handler(global_aval, out_sharding, committed):\n global_aval = core.update_aval_with_sharding(global_aval, out_sharding)\n if global_aval.dtype == dtypes.float0:\n return lambda _: np.zeros(global_aval.shape, dtypes.float0)\n if dtypes.issubdtype(global_aval.dtype, dtypes.extended):\n return global_aval.dtype._rules.global_sharded_result_handler(\n global_aval, out_sharding, committed)\n return xc.array_result_handler(\n global_aval, out_sharding, committed=committed, _skip_checks=True\n )\npxla.global_result_handlers[core.ShapedArray] = _array_global_result_handler\n\n# Only used for Arrays that come out of pmap.\ndef _array_local_result_handler(aval, sharding, indices):\n if aval.dtype == dtypes.float0:\n return lambda _: np.zeros(aval.shape, dtypes.float0)\n if dtypes.issubdtype(aval.dtype, dtypes.extended):\n return aval.dtype._rules.local_sharded_result_handler(\n aval, sharding, indices)\n return xc.array_result_handler(\n aval, sharding, committed=True, _skip_checks=True\n )\npxla.local_result_handlers[core.ShapedArray] = _array_local_result_handler\n\n\n# Token handlers\n\ndef _token_shard_arg(xs, shardings, layouts, copy_semantics):\n results = []\n for x, sharding, layout in safe_zip(xs, shardings, layouts):\n assert layout is None\n x.block_until_ready()\n x = np.array([], dtype=bool)\n aval = core.get_aval(x)\n devices = sharding._addressable_device_assignment\n results.append(pxla.batched_device_put(\n aval, sharding, [x] * len(devices), devices))\n return results\npxla.shard_arg_handlers[core.Token] = _token_shard_arg\n\n\ndef _token_global_result_handler(global_aval, out_sharding, committed):\n array_handler = _array_global_result_handler(\n core.get_token_aval(), out_sharding, committed)\n\n def wrapper(*args, **kwargs):\n out_buf = array_handler(*args, **kwargs)\n return core.Token(out_buf)\n return wrapper\npxla.global_result_handlers[core.AbstractToken] = _token_global_result_handler\n",python,tab +5840,6638292,"TERMINAL",0,0,"463814424421",,terminal_output +5841,6639414,"TERMINAL",0,0,"574925535532",,terminal_output +5842,6640384,"TERMINAL",0,0,"6851036646643",,terminal_output +5843,6641424,"TERMINAL",0,0,"796147757754",,terminal_output +5844,6642465,"TERMINAL",0,0,"8407258868865",,terminal_output +5845,6643609,"TERMINAL",0,0,"918369979976",,terminal_output +5846,6644635,"TERMINAL",0,0,"20294750308405087",,terminal_output +5847,6645659,"TERMINAL",0,0,"1310581191198",,terminal_output 
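Aside: the (8, 12)/(24, 28) example in the make_array_from_process_local_data docstring captured above can be checked with a few lines of plain Python. This is a minimal sketch of the local_slice lookup, with the shard extent and the global slice starts taken from the docstring's example rather than from a real mesh:

shard_len = 4
starts_this_process = [8, 24]  # global slice starts addressed by this process

def local_slice(global_start):
    # Rank of this start among the process's sorted unique starts, times the
    # shard extent, gives the offset into the host-local buffer.
    rank = sorted(set(starts_this_process)).index(global_start)
    return slice(rank * shard_len, (rank + 1) * shard_len)

assert local_slice(8) == slice(0, 4)    # global (8, 12)  -> local (0, 4)
assert local_slice(24) == slice(4, 8)   # global (24, 28) -> local (4, 8)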
+5848,6646682,"TERMINAL",0,0,"241692230222:009",,terminal_output +5849,6647707,"TERMINAL",0,0,"35275033133110",,terminal_output +5850,6648876,"TERMINAL",0,0,"463814424421",,terminal_output +5851,6649855,"TERMINAL",0,0,"574925535532",,terminal_output +5852,6650822,"TERMINAL",0,0,"6852036646643",,terminal_output +5853,6651525,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py",0,0,"",python,tab +5854,6651898,"TERMINAL",0,0,"796147757754",,terminal_output +5855,6652918,"TERMINAL",0,0,"8507258868865",,terminal_output +5856,6653974,"TERMINAL",0,0,"918369979976",,terminal_output +5857,6655016,"TERMINAL",0,0,"30294750:00408502:0087",,terminal_output +5858,6656102,"TERMINAL",0,0,"1320581191198",,terminal_output +5859,6657105,"TERMINAL",0,0,"24169224022109",,terminal_output +5860,6658151,"TERMINAL",0,0,"35278:0033133120",,terminal_output +5861,6659275,"TERMINAL",0,0,"463814424421",,terminal_output +5862,6660240,"TERMINAL",0,0,"5853036646643",,terminal_output +5863,6661325,"TERMINAL",0,0,"796147757754",,terminal_output +5864,6662348,"TERMINAL",0,0,"84:007258868865",,terminal_output +5865,6663479,"TERMINAL",0,0,"918369979976",,terminal_output +5866,6664421,"TERMINAL",0,0,"402947105082:001087",,terminal_output +5867,6665469,"TERMINAL",0,0,"1330581191198",,terminal_output +5868,6666551,"TERMINAL",0,0,"24169225022209",,terminal_output +5869,6667572,"TERMINAL",0,0,"35271033133130",,terminal_output +5870,6668699,"TERMINAL",0,0,"463814424421",,terminal_output +5871,6669724,"TERMINAL",0,0,"574925535532",,terminal_output +5872,6670713,"TERMINAL",0,0,"6854036646643",,terminal_output +5873,6671771,"TERMINAL",0,0,"796147757754",,terminal_output +5874,6672896,"TERMINAL",0,0,"8107258868865",,terminal_output +5875,6673923,"TERMINAL",0,0,"918369979976",,terminal_output +5876,6674891,"TERMINAL",0,0,"502947202:00:008102087",,terminal_output +5877,6676076,"TERMINAL",0,0,"1340581191198",,terminal_output +5878,6676986,"TERMINAL",0,0,"24169222:0022309",,terminal_output +5879,6678036,"TERMINAL",0,0,"35272033133140",,terminal_output +5880,6679093,"TERMINAL",0,0,"463814424421",,terminal_output +5881,6680168,"TERMINAL",0,0,"574925535532",,terminal_output +5882,6680224,"TERMINAL",0,0,"srun",,terminal_focus +5883,6681177,"TERMINAL",0,0,"6855036646643",,terminal_output +5884,6681432,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",0,0,"",python,tab +5885,6682318,"TERMINAL",0,0,"7207258868865",,terminal_output +5886,6683299,"TERMINAL",0,0,"918369979976",,terminal_output +5887,6684371,"TERMINAL",0,0,"2:00294730108203087",,terminal_output +5888,6685384,"TERMINAL",0,0,"1350581191198",,terminal_output +5889,6686426,"TERMINAL",0,0,"24169221022409",,terminal_output +5890,6687460,"TERMINAL",0,0,"35273033133150",,terminal_output +5891,6688562,"TERMINAL",0,0,"463814424421",,terminal_output +5892,6689080,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",6932,0,"",python,selection_mouse +5893,6689081,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",6931,0,"",python,selection_command +5894,6689223,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",6931,1," ",python,selection_mouse +5895,6689226,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",6932,0,"",python,selection_command 
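The contract of as_slice_indices recorded above (an integer index becomes a unit-width slice and marks its dimension as removed, slice(None) keeps the full extent, and a concrete slice maps to start/limit) can be exercised standalone. A minimal sketch, assuming only a plain shape tuple:

shape = (4, 8, 16)
idx = (2, slice(None), slice(0, 8))

start, limit, removed = [0] * len(shape), list(shape), []
for dim, sub in enumerate(idx):
    if isinstance(sub, int):
        start[dim], limit[dim] = sub, sub + 1  # unit-width slice; dim is removed
        removed.append(dim)
    elif sub != slice(None):
        start[dim], limit[dim] = sub.start, sub.stop

assert (tuple(start), tuple(limit), tuple(removed)) == ((2, 0, 0), (3, 8, 8), (0,))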
+5896,6689282,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",6907,25,"(nnx.display(state))\n ",python,selection_mouse +5897,6689294,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",6904,28,"int(nnx.display(state))\n ",python,selection_mouse +5898,6689328,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",6901,31," print(nnx.display(state))\n ",python,selection_mouse +5899,6689329,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",6900,32," print(nnx.display(state))\n ",python,selection_mouse +5900,6689386,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",6899,33," print(nnx.display(state))\n ",python,selection_mouse +5901,6689386,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",6898,34," print(nnx.display(state))\n ",python,selection_mouse +5902,6689387,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",6869,63," state = nnx.state(genie)\n print(nnx.display(state))\n ",python,selection_mouse +5903,6689499,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",6868,64,"\n state = nnx.state(genie)\n print(nnx.display(state))\n ",python,selection_mouse +5904,6689555,"TERMINAL",0,0,"574925535532",,terminal_output +5905,6690611,"TERMINAL",0,0,"6859:0036646643",,terminal_output +5906,6691665,"TERMINAL",0,0,"796147757754",,terminal_output +5907,6692761,"TERMINAL",0,0,"8307258868865",,terminal_output +5908,6693323,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12353,0,"",python,selection_mouse +5909,6693508,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12353,35," for videos in dataloader:\n ",python,selection_mouse +5910,6693569,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12353,69," for videos in dataloader:\n # --- Train step ---\n ",python,selection_mouse +5911,6693576,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12353,123," for videos in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n ",python,selection_mouse +5912,6693635,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12353,185," for videos in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n ",python,selection_mouse +5913,6693722,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12353,267," for videos in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n ",python,selection_mouse +5914,6693735,"TERMINAL",0,0,"918369979976",,terminal_output +5915,6693939,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12353,186," for videos in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n ",python,selection_mouse +5916,6693999,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12353,187," for videos in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, 
mask_rng=_rng_mask)\n ",python,selection_mouse +5917,6693999,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12353,126," for videos in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n ",python,selection_mouse +5918,6694812,"TERMINAL",0,0,"10294740208304087",,terminal_output +5919,6695262,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12353,187," for videos in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n ",python,selection_mouse +5920,6695837,"TERMINAL",0,0,"138:00581191198",,terminal_output +5921,6696544,"TERMINAL",0,0,"srun",,terminal_focus +5922,6696866,"TERMINAL",0,0,"24169222022509",,terminal_output +5923,6697869,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",,terminal_output +5924,6697920,"TERMINAL",0,0,"3527403313313:00",,terminal_output +5925,6698967,"TERMINAL",0,0,"463814424421",,terminal_output +5926,6700015,"TERMINAL",0,0,"574925535532",,terminal_output +5927,6700546,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +5928,6700847,"TERMINAL",0,0,"[?25l>[?25h[?25l [?25h",,terminal_output +5929,6701159,"TERMINAL",0,0,"6851036646643",,terminal_output +5930,6702189,"TERMINAL",0,0,"796147757754",,terminal_output +5931,6702251,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +5932,6702461,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +5933,6702577,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +5934,6702686,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +5935,6702949,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +5936,6703165,"TERMINAL",0,0,"8407258868865",,terminal_output +5937,6703268,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +5938,6703330,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +5939,6703622,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +5940,6703954,"TERMINAL",0,0,"srun: error: Unable to create step for job 3415275: Invalid generic resource (gres) specification\r\n]0;tum_cte0515@hkn0522:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0522 jafar]$ ",,terminal_output +5941,6704250,"TERMINAL",0,0,"918369979976",,terminal_output +5942,6705369,"TERMINAL",0,0,"213105851319415198",,terminal_output +5943,6706386,"TERMINAL",0,0,"241692230223:009",,terminal_output +5944,6707408,"TERMINAL",0,0,"35275033133110",,terminal_output +5945,6708431,"TERMINAL",0,0,"463814424421",,terminal_output +5946,6709439,"TERMINAL",0,0,"574925535532",,terminal_output +5947,6710485,"TERMINAL",0,0,"6852036646643",,terminal_output +5948,6711253,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh > log.log",,terminal_output +5949,6711604,"TERMINAL",0,0,"796147757754",,terminal_output +5950,6712577,"TERMINAL",0,0,"8507258868865",,terminal_output +5951,6713593,"TERMINAL",0,0,"\r\n[?2004l\r#!/usr/bin/env bash\r\n\r\n#SBATCH --nodes=8\r\n#SBATCH --ntasks-per-node=4\r\n#SBATCH --time=48:00:00\r\n#SBATCH --partition=accelerated\r\n#SBATCH --cpus-per-task=5\r\n#SBATCH --gres=gpu:4\r\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\r\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\r\n#SBATCH --job-name=train_dynamics_causal_8_node\r\n#SBATCH --requeue\r\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\r\n\r\n# --- signal trap to requeue job before timeout ---\r\nrequeue_job() {\r\n echo 
""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\r\n # optional: trigger checkpoint saving here\r\n # e.g., touch $checkpoint_dir/requeue_trigger\r\n scontrol requeue $SLURM_JOB_ID\r\n exit 0\r\n}\r\n\r\ntrap requeue_job sigusr1\r\n\r\n# set checkpoint flag based on restart count\r\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\r\n\r\nif [ $restart_count -eq 0 ]; then\r\n restore_ckpt_flag=""--no-restore-ckpt""\r\nelse\r\n restore_ckpt_flag=""--restore-ckpt""\r\nfi\r\n\r\n\r\n\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\nsource .venv/bin/activate\r\n\r\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\n# tokenizer with the new structure supporting larger ffn_dim\r\ntokenizer_ckpt_dir=""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/ali/tokenizer/train_tokenizer_33M_minimal/3414553""\r\n\r\nenv | grep SLURM\r\n\r\nexport PYTHONUNBUFFERED=1\r\n\r\nsrun python train_dynamics.py \\r\n --save_ckpt \\r\n $restore_ckpt_flag \\r\n --wandb_id $SLURM_JOB_ID \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=256 \\r\n --init_lr=0 \\r\n --dyna_type=causal \\r\n --max_lr=8e-5 \\r\n --log_image_interval=1000 \\r\n --log \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-causal-8-node-$slurm_job_id \\r\n --tags dynamics causal 8-node post-launch-main \\r\n --entity instant-uv \\r\n --project jafar \\r\n --dyna_dim=1024 \\r\n --dyna_num_blocks=16 \\r\n --dyna_num_heads=16 \\r\n --dyna_ffn_dim=4096 \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $array_records_dir &\r\n\r\nchild_pid=$!\r\n\r\nwait $child_pid\r\n",,terminal_output +5952,6713654,"TERMINAL",0,0,"918369979976",,terminal_output 
+5953,6713728,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x8)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=986737\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0522\r\nSLURM_JOB_START_TIME=1754926451\r\nSLURM_STEP_NODELIST=hkn0522\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1754928251\r\nSLURM_PMI2_SRUN_PORT=42681\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x8)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=8\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3415275\r\nSLURM_PTY_PORT=35785\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_PTY_WIN_ROW=41\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=32\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e11.hkn0522\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=129\r\nSLURM_NODELIST=hkn[0522-0527,0530-0531]\r\nSLURM_SRUN_COMM_PORT=42393\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=32\r\nSLURM_NNODES=8\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3415275\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0522\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=42393\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0522-0527,0530-0531]\r\nsrun: error: Unable to create step for job 3415275: Invalid generic resource (gres) specification\r\n]0;tum_cte0515@hkn0522:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0522 jafar]$ ",,terminal_output +5954,6714672,"TERMINAL",0,0,"3029471:00408503:0087",,terminal_output +5955,6715252,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",,terminal_output +5956,6715755,"TERMINAL",0,0,"1320581191198",,terminal_output +5957,6716761,"TERMINAL",0,0,"24169224022109",,terminal_output +5958,6717809,"TERMINAL",0,0,"35279:0033133120",,terminal_output +5959,6718589,"TERMINAL",0,0,"srun",,terminal_focus +5960,6718893,"TERMINAL",0,0,"463814424421",,terminal_output +5961,6719903,"TERMINAL",0,0,"574925535532",,terminal_output +5962,6720329,"TERMINAL",0,0,"srun",,terminal_focus +5963,6720952,"TERMINAL",0,0,"6853036646643",,terminal_output +5964,6721871,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",0,0,"",python,tab +5965,6722019,"TERMINAL",0,0,"796147757754",,terminal_output +5966,6723072,"TERMINAL",0,0,"85:007258868865",,terminal_output +5967,6723563,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12727,0,"",python,selection_mouse +5968,6723573,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",12726,0,"",python,selection_command +5969,6724092,"TERMINAL",0,0,"918369979976",,terminal_output +5970,6725136,"TERMINAL",0,0,"402947105083:001087",,terminal_output +5971,6726184,"TERMINAL",0,0,"1330581191198",,terminal_output +5972,6726424,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,0,"",shellscript,tab +5973,6727273,"TERMINAL",0,0,"2527103351332130",,terminal_output 
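The SLURM_* variables dumped above (SLURM_NTASKS=32, SLURM_PROCID, the launch-node address) are exactly what jax.distributed needs on each of the 32 tasks. A minimal sketch of the standard initialization; the port is an arbitrary assumption, using the launch node as coordinator is likewise an assumption (the first entry of SLURM_JOB_NODELIST is the usual choice), and under SLURM a bare jax.distributed.initialize() can usually auto-detect all of these values:

import os
import jax

jax.distributed.initialize(
    # Hypothetical port; every task must agree on the same address:port.
    coordinator_address=f"{os.environ['SLURM_LAUNCH_NODE_IPADDR']}:12345",
    num_processes=int(os.environ["SLURM_NTASKS"]),
    process_id=int(os.environ["SLURM_PROCID"]),
)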
+5974,6728297,"TERMINAL",0,0,"463814424421",,terminal_output +5975,6729422,"TERMINAL",0,0,"574925535532",,terminal_output +5976,6729433,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1685,0,"",shellscript,selection_mouse +5977,6730294,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1786,0,"",shellscript,selection_mouse +5978,6730380,"TERMINAL",0,0,"6854036646643",,terminal_output +5979,6731223,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1666,119,"",shellscript,content +5980,6731423,"TERMINAL",0,0,"796147757754",,terminal_output +5981,6731579,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1666,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401",shellscript,content +5982,6732492,"TERMINAL",0,0,"8107258868865",,terminal_output +5983,6733617,"TERMINAL",0,0,"918369979976",,terminal_output +5984,6734643,"TERMINAL",0,0,"502947201:008102087",,terminal_output +5985,6735670,"TERMINAL",0,0,"1340581191198",,terminal_output +5986,6736694,"TERMINAL",0,0,"24169223:0022309",,terminal_output +5987,6736848,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +5988,6737043,"TERMINAL",0,0,"[?25l>[?25h",,terminal_output +5989,6737262,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +5990,6737476,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +5991,6737649,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +5992,6737765,"TERMINAL",0,0,"35272033133140",,terminal_output +5993,6737826,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +5994,6738054,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +5995,6738633,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +5996,6738790,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +5997,6738790,"TERMINAL",0,0,"463814424421",,terminal_output +5998,6738842,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +5999,6739866,"TERMINAL",0,0,"574925535532",,terminal_output +6000,6740099,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",0,0,"",python,tab +6001,6740890,"TERMINAL",0,0,"6855036646643",,terminal_output +6002,6741927,"TERMINAL",0,0,"796147757754",,terminal_output +6003,6742918,"genie.py",0,0,"",python,tab +6004,6742992,"TERMINAL",0,0,"8207258868865",,terminal_output +6005,6744012,"TERMINAL",0,0,"918369979976",,terminal_output +6006,6745061,"TERMINAL",0,0,"3:00294730108203087",,terminal_output +6007,6746219,"TERMINAL",0,0,"1350581191198",,terminal_output +6008,6747155,"TERMINAL",0,0,"24169221022409",,terminal_output +6009,6747916,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +6010,6748037,"TERMINAL",0,0,"srun: error: Unable to create step for job 3415275: Invalid generic resource (gres) specification\r\n]0;tum_cte0515@hkn0522:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0522 jafar]$ ",,terminal_output +6011,6748205,"TERMINAL",0,0,"35273033133150",,terminal_output +6012,6749291,"TERMINAL",0,0,"474925535532",,terminal_output +6013,6749552,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh > log.log",,terminal_output +6014,6750313,"TERMINAL",0,0,"68510:0036646643",,terminal_output +6015,6751439,"TERMINAL",0,0,"796147757754",,terminal_output +6016,6752463,"TERMINAL",0,0,"8307258868865",,terminal_output +6017,6753453,"TERMINAL",0,0,"918369979976",,terminal_output +6018,6754511,"TERMINAL",0,0,"10294740208304087",,terminal_output +6019,6755262,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,0,"",shellscript,tab 
+6020,6755555,"TERMINAL",0,0,"139:00581191198",,terminal_output +6021,6756574,"TERMINAL",0,0,"24169222022509",,terminal_output +6022,6757685,"TERMINAL",0,0,"3527403313314:00",,terminal_output +6023,6758025,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",871,0,"",shellscript,selection_mouse +6024,6758170,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",845,26,"\ntrap requeue_job sigusr1\n",shellscript,selection_mouse +6025,6758242,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",750,121," # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n",shellscript,selection_mouse +6026,6758242,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",469,402,"#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n",shellscript,selection_mouse +6027,6758243,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",123,748,"#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n",shellscript,selection_mouse +6028,6758243,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",20,851,"\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n",shellscript,selection_mouse +6029,6758270,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,871,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH 
--error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/causal/dynamics-cotraining/%x_%j.log\n#SBATCH --job-name=train_dynamics_causal_8_node\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n",shellscript,selection_mouse +6030,6758668,"TERMINAL",0,0,"463814424421",,terminal_output +6031,6758801,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,871,"",shellscript,content +6032,6759483,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,1,"",shellscript,content +6033,6759745,"TERMINAL",0,0,"574925535532",,terminal_output +6034,6760762,"TERMINAL",0,0,"6851036646643",,terminal_output +6035,6760773,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",263,0,"",shellscript,selection_mouse +6036,6760934,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",262,1,"\n",shellscript,selection_mouse +6037,6760986,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",261,2,"\n\n",shellscript,selection_mouse +6038,6760987,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",144,119,"$restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n",shellscript,selection_mouse +6039,6760987,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",48,215,"tart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n",shellscript,selection_mouse +6040,6760996,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,263,"# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n",shellscript,selection_mouse +6041,6761595,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,263,"",shellscript,content +6042,6761820,"TERMINAL",0,0,"796147757754",,terminal_output +6043,6762572,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,1,"",shellscript,content +6044,6762729,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,1,"",shellscript,content +6045,6762860,"TERMINAL",0,0,"8407258868865",,terminal_output +6046,6763937,"TERMINAL",0,0,"918369979976",,terminal_output +6047,6764118,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1365,0,"",shellscript,selection_mouse +6048,6764251,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1359,6,"d_pid\n",shellscript,selection_mouse +6049,6764268,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1348,17,"\nwait $child_pid\n",shellscript,selection_mouse +6050,6764324,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1335,30,"child_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse 
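The restart-count logic visible in the script dump above (parsing Restarts= out of scontrol show job to pick the restore flag) translates directly to Python, so train_dynamics.py could decide this itself. A minimal sketch, with the flag names taken from the script:

import os
import subprocess

out = subprocess.run(
    ["scontrol", "show", "job", os.environ["SLURM_JOB_ID"]],
    capture_output=True, text=True,
).stdout
# Extract the integer after "Restarts="; default to 0 if the field is absent.
restarts = next(
    (int(tok.split("=", 1)[1]) for tok in out.split() if tok.startswith("Restarts=")),
    0,
)
restore_ckpt_flag = "--restore-ckpt" if restarts > 0 else "--no-restore-ckpt"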
+6051,6764399,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1334,31,"\nchild_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse +6052,6764516,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1347,18,"\n\nwait $child_pid\n",shellscript,selection_mouse +6053,6764569,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1334,31,"\nchild_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse +6054,6764733,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1333,32,"\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse +6055,6764957,"TERMINAL",0,0,"20294750308405087",,terminal_output +6056,6765319,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1332,33,"&\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,selection_mouse +6057,6765847,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1332,33,"",shellscript,content +6058,6766007,"TERMINAL",0,0,"1310581191198",,terminal_output +6059,6766431,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",1331,1,"",shellscript,content +6060,6767059,"TERMINAL",0,0,"241692230224:009",,terminal_output +6061,6768098,"TERMINAL",0,0,"35275033133110",,terminal_output +6062,6769255,"TERMINAL",0,0,"463814424421",,terminal_output +6063,6770195,"TERMINAL",0,0,"574925535532",,terminal_output +6064,6771308,"TERMINAL",0,0,"6962147757754",,terminal_output +6065,6772284,"TERMINAL",0,0,"8507258868865",,terminal_output +6066,6773352,"TERMINAL",0,0,"918369979976",,terminal_output +6067,6774478,"TERMINAL",0,0,"3029472:00408504:0087",,terminal_output +6068,6775090,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +6069,6775235,"TERMINAL",0,0,"srun: error: Unable to create step for job 3415275: Invalid generic resource (gres) specification\r\n]0;tum_cte0515@hkn0522:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0522 jafar]$ ",,terminal_output +6070,6775427,"TERMINAL",0,0,"1320581191198",,terminal_output +6071,6776493,"TERMINAL",0,0,"24169224022109",,terminal_output +6072,6777522,"TERMINAL",0,0,"352720:0033133120",,terminal_output +6073,6778674,"TERMINAL",0,0,"463814424421",,terminal_output +6074,6779703,"TERMINAL",0,0,"574925535532",,terminal_output +6075,6780743,"TERMINAL",0,0,"6853036646643",,terminal_output +6076,6781714,"TERMINAL",0,0,"796147757754",,terminal_output +6077,6782776,"TERMINAL",0,0,"86:007258868865",,terminal_output +6078,6783900,"TERMINAL",0,0,"918369979976",,terminal_output +6079,6784859,"TERMINAL",0,0,"402947105084:001087",,terminal_output +6080,6785948,"TERMINAL",0,0,"1330581191198",,terminal_output +6081,6786977,"TERMINAL",0,0,"24169225022209",,terminal_output +6082,6787988,"TERMINAL",0,0,"35271033133130",,terminal_output +6083,6789029,"TERMINAL",0,0,"463814424421",,terminal_output +6084,6790067,"TERMINAL",0,0,"574925535532",,terminal_output +6085,6791169,"TERMINAL",0,0,"6854036646643",,terminal_output +6086,6792193,"TERMINAL",0,0,"796147757754",,terminal_output +6087,6793190,"TERMINAL",0,0,"8107258868865",,terminal_output +6088,6794241,"TERMINAL",0,0,"92947202:008102087",,terminal_output +6089,6795375,"TERMINAL",0,0,"51340581191198",,terminal_output +6090,6796381,"TERMINAL",0,0,"24169224:0022309",,terminal_output +6091,6797421,"TERMINAL",0,0,"35272033133140",,terminal_output +6092,6798446,"TERMINAL",0,0,"463814424421",,terminal_output +6093,6799456,"TERMINAL",0,0,"574925535532",,terminal_output +6094,6800590,"TERMINAL",0,0,"6855036646643",,terminal_output 
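The run recorded below dies in the coordination service's shutdown barrier (DEADLINE_EXCEEDED with only 5/32 tasks reaching it), meaning the hosts exited out of sync. One common mitigation is an explicit cross-host sync before teardown; a minimal sketch using standard JAX helpers:

import jax
from jax.experimental import multihost_utils

def finish_run():
    # Block until every host reaches this point, then tear down cleanly, so
    # no worker leaves the shutdown barrier waiting on stragglers.
    multihost_utils.sync_global_devices("end_of_training")
    jax.distributed.shutdown()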
+6095,6801620,"TERMINAL",0,0,"796147757754",,terminal_output +6096,6802638,"TERMINAL",0,0,"8207258868865",,terminal_output +6097,6803663,"TERMINAL",0,0,"918369979976",,terminal_output +6098,6804687,"TERMINAL",0,0,"4:00294730108203087",,terminal_output +6099,6805713,"TERMINAL",0,0,"1350581191198",,terminal_output +6100,6805902,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +6101,6806038,"TERMINAL",0,0,"[?25lr[?25h[?25lu[?25h",,terminal_output +6102,6806102,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +6103,6806208,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +6104,6806838,"TERMINAL",0,0,"24169221022409",,terminal_output +6105,6807657,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh > log.log",,terminal_output +6106,6807799,"TERMINAL",0,0,"35273033133150",,terminal_output +6107,6808842,"TERMINAL",0,0,"463814424421",,terminal_output +6108,6809002,"TERMINAL",0,0,"2025-08-11 17:44:04.769118: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service.cc:1798] Shutdown barrier in coordination service has failed:\r\nDEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::12055900650741164241::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 5/32.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:9. Some timed out task names:\r\n/job:jax_worker/replica:0/task:4\r\n/job:jax_worker/replica:0/task:28\r\n [type.googleapis.com/tensorflow.CoordinationServiceError=''] [type.googleapis.com/tensorflow.BarrierError='\n\x1eShutdown::12055900650741164241']\r\nThis suggests that the workers are out of sync. Either at least one worker (a) crashed early due to program error or scheduler events (e.g. preemption, eviction), (b) was too fast in its execution, or (c) too slow / hanging. Check the logs (both the program and scheduler events) for an earlier error to identify the root cause.\r\n2025-08-11 17:44:04.769150: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service.cc:1839] Use error polling to propagate the following error to all tasks: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::12055900650741164241::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 5/32.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:9. Some timed out task names:\r\n/job:jax_worker/replica:0/task:4\r\n/job:jax_worker/replica:0/task:28\r\n [type.googleapis.com/tensorflow.CoordinationServiceError=''] [type.googleapis.com/tensorflow.BarrierError='\n\x1eShutdown::12055900650741164241'] [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n2025-08-11 17:44:04.769481: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:429] Polled an error from coordination service (this can be an error from this or another task).\r\n2025-08-11 17:44:04.769684: F external/xla/xla/pjrt/distributed/client.h:88] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. 
absl::Status: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::12055900650741164241::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 5/32.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:9. Some timed out task names:\r\n/job:jax_worker/replica:0/task:4\r\n/job:jax_worker/replica:0/task:28\r\n [type.googleapis.com/tensorflow.CoordinationServiceError=''] [type.googleapis.com/tensorflow.BarrierError='\n\x1eShutdown::12055900650741164241']\r\n\r\nRPC: /tensorflow.CoordinationService/PollForError [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n2025-08-11 17:44:04.769684: F external/xla/xla/pjrt/distributed/client.h:88] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: INTERNAL: Failed to disconnect from coordination service with status: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::12055900650741164241::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 5/32.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:9. Some timed out task names:\r\n/job:jax_worker/replica:0/task:4\r\n/job:jax_worker/replica:0/task:28\r\n [type.googleapis.com/tensorflow.CoordinationServiceError=''] [type.googleapis.com/tensorflow.BarrierError='\n\x1eShutdown::12055900650741164241']\r\n\r\nRPC: /tensorflow.CoordinationService/ShutdownTask [type.googleapis.com/tensorflow.CoordinationServiceError='']Proceeding with agent shutdown anyway. This is usually caused by an earlier error during execution. Check the logs of (a) this task, (b) the leader (usually slice 0 task 0) and (c) the scheduler (e.g. preemption, eviction) for an earlier error to debug further. [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n2025-08-11 17:44:04.769708: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:429] Polled an error from coordination service (this can be an error from this or another task).\r\n2025-08-11 17:44:04.769336: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:429] Polled an error from coordination service (this can be an error from this or another task).\r\n2025-08-11 17:44:04.769490: F external/xla/xla/pjrt/distributed/client.h:88] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::12055900650741164241::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 5/32.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:9. 
Some timed out task names:\r\n2025-08-11 17:44:04.769601: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:429] Polled an error from coordination service (this can be an error from this or another task).\r\n2025-08-11 17:44:04.769820: F external/xla/xla/pjrt/distributed/client.h:88] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::12055900650741164241::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 5/32.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:9. Some timed out task names:\r\n/job:jax_worker/replica:0/task:4\r\n/job:jax_worker/replica:0/task:28\r\n [type.googleapis.com/tensorflow.CoordinationServiceError=''] [type.googleapis.com/tensorflow.BarrierError='\n\x1eShutdown::12055900650741164241']\r\n\r\nRPC: /tensorflow.CoordinationService/PollForError [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n2025-08-11 17:44:04.769898: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:429] Polled an error from coordination service (this can be an error from this or another task).\r\n2025-08-11 17:44:04.769511: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:429] Polled an error from coordination service (this can be an error from this or another task).\r\n2025-08-11 17:44:04.769753: F external/xla/xla/pjrt/distributed/client.h:88] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::12055900650741164241::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 5/32.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:9. Some timed out task names:\r\n/job:jax_worker/replica:0/task:4\r\n/job:jax_worker/replica:0/task:28\r\n [type.googleapis.com/tensorflow.CoordinationServiceError=''] [type.googleapis.com/tensorflow.BarrierError='\n\x1eShutdown::12055900650741164241']\r\n\r\nRPC: /tensorflow.CoordinationService/PollForError [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n2025-08-11 17:44:04.769777: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:429] Polled an error from coordination service (this can be an error from this or another task).\r\n2025-08-11 17:44:04.769926: F external/xla/xla/pjrt/distributed/client.h:88] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. 
absl::Status: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::12055900650741164241::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 5/32.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:9. Some timed out task names:\r\n/job:jax_worker/replica:0/task:4\r\n/job:jax_worker/replica:0/task:28\r\n [type.googleapis.com/tensorflow.CoordinationServiceError=''] [type.googleapis.com/tensorflow.BarrierError='\n\x1eShutdown::12055900650741164241']\r\n\r\nRPC: /tensorflow.CoordinationService/PollForError [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n2025-08-11 17:44:04.769982: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:429] Polled an error from coordination service (this can be an error from this or another task).\r\n2025-08-11 17:44:04.769757: F external/xla/xla/pjrt/distributed/client.h:88] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: INTERNAL: Failed to disconnect from coordination service with status: INTERNAL: Shutdown barrier has failed.\r\nBarrier result: 'DEADLINE_EXCEEDED: Barrier timed out. Id: Shutdown::12055900650741164241::0. This usually happens because a task triggered the barrier too early or too slowly. Please look at the task logs (both timed out and first task) to debug further.\r\n# of tasks that reached the barrier: 5/32.\r\nThe first task at the barrier: /job:jax_worker/replica:0/task:9. Some timed out task names:\r\n/job:jax_worker/replica:0/task:4\r\n/job:jax_worker/replica:0/task:28\r\n [type.googleapis.com/tensorflow.CoordinationServiceError=''] [type.googleapis.com/tensorflow.BarrierError='\n\x1eShutdown::12055900650741164241']\r\n\r\nRPC: /tensorflow.CoordinationService/ShutdownTask [type.googleapis.com/tensorflow.CoordinationServiceError='']Proceeding with agent shutdown anyway. This is usually caused by an earlier error during execution. Check the logs of (a) this task, (b) the leader (usually slice 0 task 0) and (c) the scheduler (e.g. preemption, eviction) for an earlier error to debug further. [type.googleapis.com/tensorflow.CoordinationServiceError='']\r\n",,terminal_output
+6109,6809111,"TERMINAL",0,0,"E0811 17:44:04.842143 1784848 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: INTERNAL: NCCL operation ncclGroupEnd() failed: remote process exited or there was a network error. Last NCCL warning(error) log entry (may be unrelated) 'socketProgress: Connection closed by remote peer hkn0525-i.localdomain<38882>'.\r\nE0811 17:44:04.844449 782350 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: INTERNAL: NCCL operation ncclGroupEnd() failed: remote process exited or there was a network error. Last NCCL warning(error) log entry (may be unrelated) 'socketProgress: Connection closed by remote peer hkn0525-i.localdomain<51020>'.\r\nE0811 17:44:04.848515 2204511 pjrt_stream_executor_client.cc:2916] Execution of replica 0 failed: INTERNAL: NCCL operation ncclGroupEnd() failed: remote process exited or there was a network error. Last NCCL warning(error) log entry (may be unrelated) 'socketProgress: Connection closed by remote peer hkn0530-i.localdomain<41242>'.\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception.
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 365, in <module>\r\n for videos in dataloader:\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/nnx/transforms/compilation.py"", line 431, in __call__\r\n pure_args_out, pure_kwargs_out, pure_out = self.jitted_fn(\r\njaxlib._jax.XlaRuntimeError: INTERNAL: NCCL operation ncclGroupEnd() failed: remote process exited or there was a network error. Last NCCL warning(error) log entry (may be unrelated) 'socketProgress: Connection closed by remote peer hkn0525-i.localdomain<38882>'.\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 10 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 11 leaked shared_memory objects to clean up at shutdown\r\n warnings.warn('resource_tracker: There appear to be %d '\r\n",,terminal_output
+6110,6809262,"TERMINAL",0,0,"",,terminal_output
+6111,6809322,"TERMINAL",0,0,"2025-08-11 17:44:05.094574: E external/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc:429] Polled an error from coordination service (this can be an error from this or another task).\r\n2025-08-11 17:44:05.094618: F external/xla/xla/pjrt/distributed/client.h:88] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: UNAVAILABLE: Failed to send RPC to coordination service.
Either the leader task was preempted/died/restarted unexpectedly or this task is experiencing network issues. Check earlier logs from 1) this task, 2) the leader (usually slice 0 task 0), and 3) cluster scheduler to debug further.\r\nAdditional GRPC error information from remote target coordination_service while calling /tensorflow.CoordinationService/PollForError:\r\n:{""created"":""@1754927045.094460499"",""description"":""Error received from peer ipv4:10.0.1.90:64747"",""file"":""external/com_github_grpc_grpc/src/core/lib/surface/call.cc"",""file_line"":1056,""grpc_message"":""Socket closed"",""grpc_status"":14}\r\n",,terminal_output
+6112,6809602,"TERMINAL",0,0,"Filtering out episode with length 1, which is shorter than the requested sequence length 16.\r\n",,terminal_output
+6113,6809784,"TERMINAL",0,0,"srun: error: hkn0524: task 9: Aborted (core dumped)\r\n",,terminal_output
+6114,6809845,"TERMINAL",0,0,"srun: error: hkn0527: task 21: Aborted (core dumped)\r\n",,terminal_output
+6116,6809963,"TERMINAL",0,0,"Filtering out episode with length 13, which is shorter than the requested sequence length 16.\r\nsrun: error: hkn0523: task 6: Aborted (core dumped)\r\nsrun: error: hkn0530: tasks 24,26: Aborted (core dumped)\r\n",,terminal_output
+6117,6810068,"TERMINAL",0,0,"srun: error: hkn0522: tasks 1-2: Aborted (core dumped)\r\n",,terminal_output
+6118,6810180,"TERMINAL",0,0,"srun: error: hkn0531: tasks 29-31: Aborted (core dumped)\r\n",,terminal_output
+6119,6810240,"TERMINAL",0,0,"srun: error: hkn0526: tasks 17-19: Aborted (core dumped)\r\n",,terminal_output
+6121,6811390,"TERMINAL",0,0,"srun: error: hkn0524: tasks 8,10-11: Aborted (core dumped)\r\nsrun: error: hkn0530: tasks 25,27: Aborted (core dumped)\r\n",,terminal_output
+6122,6811526,"TERMINAL",0,0,"srun: error: hkn0522: tasks 0,3: Aborted (core dumped)\r\nsrun: error: hkn0527: tasks 20,22-23: Aborted (core dumped)\r\nsrun: error: 
hkn0525: tasks 12-15: Aborted (core dumped)\r\nsrun: error: hkn0526: task 16: Aborted (core dumped)\r\n",,terminal_output
+6123,6811649,"TERMINAL",0,0,"srun: error: hkn0531: task 28: Aborted (core dumped)\r\nsrun: error: hkn0523: tasks 4-5,7: Aborted (core dumped)\r\n",,terminal_output
+6140,6828085,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0522:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0522 jafar]$ ",,terminal_output
+6142,6829503,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh > log.log",,terminal_output
+6144,6830099,"TERMINAL",0,0,"\r",,terminal_output
+6153,6839200,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output
+6155,6839348,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output
+6156,6839456,"TERMINAL",0,0,"[?25li[?25h",,terminal_output
+6157,6839696,"TERMINAL",0,0,"[?25li[?25h",,terminal_output
+6158,6840153,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output
+6160,6840269,"TERMINAL",0,0,"[?25li[?25h",,terminal_output
+6161,6840321,"TERMINAL",0,0,"[?25la[?25h",,terminal_output
+6162,6840583,"TERMINAL",0,0,"[?25l-[?25h",,terminal_output
+6164,6841870,"TERMINAL",0,0,"",,terminal_output
+6165,6842122,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output
+6166,6842184,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output
+6168,6842328,"TERMINAL",0,0,"[?25li[?25h",,terminal_output
+6169,6842514,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output
+6171,6844071,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: nvidia-smihkn0522.localdomain: Mon Aug 11 17:44:38 2025Mon Aug 11 17:44:38 
2025\r+-----------------------------------------------------------------------------------------+\r| NVIDIA-SMI 570.133.20Driver Version: 570.133.20 CUDA Version: 12.8 |\r|-----------------------------------------+------------------------+----------------------+\r| GPU NamePersistence-M | Bus-IdDisp.A | Volatile Uncorr. ECC |\r| Fan Temp PerfPwr:Usage/Cap |Memory-Usage | GPU-Util Compute M. |\r|||MIG M. |\r|=========================================+========================+======================|\r| 0 NVIDIA A100-SXM4-40GBOn | 00000000:31:00.0 Off |0 |\r| N/A 44C P054W / 300W |\t 27MiB / 40960MiB |\t 0%\t Default |\r|||Disabled |\r+-----------------------------------------+------------------------+----------------------+\r| 1 NVIDIA A100-SXM4-40GBOn | 00000000:4B:00.0 Off |0 |\r| N/A 44C P053W / 300W |\t 27MiB / 40960MiB |\t 0%\t Default |\r|||Disabled |\r+-----------------------------------------+------------------------+----------------------+\r| 2 NVIDIA A100-SXM4-40GBOn | 00000000:CA:00.0 Off |0 |\r| N/A 43C P056W / 300W |\t 27MiB / 40960MiB |\t 0%\t Default |\r|||Disabled |\r+-----------------------------------------+------------------------+----------------------+\r| 3 NVIDIA A100-SXM4-40GBOn | 00000000:E3:00.0 Off |0 |\r| N/A 43C P051W / 300W |\t 27MiB / 40960MiB |\t 0%\t Default |\r|||Disabled |\r+-----------------------------------------+------------------------+----------------------+\r+-----------------------------------------------------------------------------------------+\r| Processes:|\r| GPU GI CIPID Type Process nameGPU Memory |\r|ID IDUsage\t |\r|=========================================================================================|\r| 0 N/A N/A2571G /usr/libexec/Xorg17MiB |\r| 1 N/A N/A2571G /usr/libexec/Xorg17MiB |\r| 2 N/A N/A2571G /usr/libexec/Xorg17MiB |\r| 3 N/A N/A2571G /usr/libexec/Xorg17MiB |\r+-----------------------------------------------------------------------------------------+",,terminal_output
+6183,6851940,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0522:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0522 jafar]$ ",,terminal_output
+6184,6852520,"TERMINAL",0,0,"smi",,terminal_output
+6185,6852718,"TERMINAL",0,0,"h slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh > log.log",,terminal_output
+6190,6856549,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output
+6191,6856710,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output
+6193,6857213,"TERMINAL",0,0,"[?25l [1@s[?25h",,terminal_output
+6194,6857374,"TERMINAL",0,0,"[?25ls [1@r[?25h[1@u",,terminal_output
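The shutdown-barrier failure recorded above is JAX's coordination service timing out because only 5 of 32 SLURM tasks reached the exit barrier; the rest had already died on the NCCL error. A minimal sketch of the init/shutdown pairing involved (hypothetical driver code, not the project's train_dynamics.py):

    import jax

    def main():
        # Under SLURM, jax.distributed.initialize() can auto-detect the
        # coordinator address, process count, and process id from the
        # SLURM environment variables.
        jax.distributed.initialize()
        print(f"task {jax.process_index()}: {jax.local_device_count()} local devices")
        try:
            pass  # training loop goes here
        finally:
            # Every task must reach this call for the coordination service's
            # Shutdown barrier to complete; a task that crashes earlier is
            # exactly what produces the DEADLINE_EXCEEDED barrier result above.
            jax.distributed.shutdown()

    if __name__ == "__main__":
        main()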
+6195,6857436,"TERMINAL",0,0,"[?25l [1@n[?25h",,terminal_output
+6198,6860006,"TERMINAL",0,0,"",,terminal_output
+6200,6860441,"TERMINAL",0,0,"",,terminal_output
+6202,6861398,"TERMINAL",0,0,"[?25ls[1@s[?25h",,terminal_output
+6203,6861603,"TERMINAL",0,0,"[?25ls[1@r[?25h",,terminal_output
+6204,6861675,"TERMINAL",0,0,"[?25ls[1@u[?25h",,terminal_output
+6205,6861783,"TERMINAL",0,0,"[?25lls[1@n[?25h[1@ ",,terminal_output
+6211,6866968,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",0,0,"",python,tab
+6214,6868838,"TERMINAL",0,0,"srun",,terminal_focus
+6217,6870761,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,0,"",shellscript,tab
+6220,6873375,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",690,0,"",shellscript,selection_mouse
+6229,6882343,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",664,26,"export PYTHONUNBUFFERED=1\n",shellscript,selection_mouse
+6230,6882399,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",663,27,"\nexport PYTHONUNBUFFERED=1\n",shellscript,selection_mouse
+6231,6882466,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",646,44,"env | grep SLURM\n\nexport PYTHONUNBUFFERED=1\n",shellscript,selection_mouse
+6233,6883260,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",663,27,"\nexport PYTHONUNBUFFERED=1\n",shellscript,selection_mouse
+6234,6883316,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",664,26,"export PYTHONUNBUFFERED=1\n",shellscript,selection_mouse
+6236,6883704,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",666,0,"",shellscript,selection_mouse
+6237,6884122,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",690,0,"",shellscript,selection_mouse
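The "Filtering out episode with length N..." lines interleaved with the srun aborts come from the dataloader dropping episodes shorter than the requested sequence length (16). A hedged sketch of that filter (illustrative names, not the repository's implementation):

    from typing import Iterable, Iterator, Sequence

    def filter_short_episodes(episodes: Iterable[Sequence], seq_len: int = 16) -> Iterator[Sequence]:
        """Yield only episodes long enough to cut a training sequence from."""
        for episode in episodes:
            if len(episode) < seq_len:
                print(f"Filtering out episode with length {len(episode)}, "
                      f"which is shorter than the requested sequence length {seq_len}.")
                continue
            yield episode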
+6239,6885174,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",729,0,"",shellscript,selection_mouse
+6240,6885336,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",729,9,"save_ckpt",shellscript,selection_mouse
+6241,6885512,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",729,34,"save_ckpt \\n $restore_ckpt_flag",shellscript,selection_mouse
+6242,6885531,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",729,82,"save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir",shellscript,selection_mouse
+6243,6885547,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",729,179,"save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=256 \\n --init_lr=0 \\n --dyna_type=causal \\n --max_lr",shellscript,selection_mouse
+6244,6885566,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",729,211,"save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=256 \\n --init_lr=0 \\n --dyna_type=causal \\n --max_lr=8e-5 \\n --log_image_interval",shellscript,selection_mouse
+6245,6885575,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",729,230,"save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=256 \\n --init_lr=0 \\n --dyna_type=causal \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \",shellscript,selection_mouse
+6246,6885633,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",729,287,"save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=256 \\n --init_lr=0 \\n --dyna_type=causal \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics",shellscript,selection_mouse
+6248,6885635,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",729,337,"save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=256 \\n --init_lr=0 \\n --dyna_type=causal \\n --max_lr=8e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-8-node-$slurm_job_id \\n --tags dynamics",shellscript,selection_mouse
+6249,6886451,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",690,0,"",shellscript,selection_mouse
+6251,6887064,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",669,0,"",shellscript,selection_mouse
+6252,6887247,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",668,1,"r",shellscript,selection_mouse
+6253,6887259,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",667,2,"or",shellscript,selection_mouse
+6254,6887315,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",647,22,"nv | grep SLURM\n\nexpor",shellscript,selection_mouse
+6255,6887316,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",448,221,"\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n\nexpor",shellscript,selection_mouse
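The selections above highlight `export PYTHONUNBUFFERED=1` in the job script, which addresses the advice repeated in the fatal logs ("Disable Python buffering, i.e. `python -u`..."): without it, output still buffered by a worker is lost when srun reports `Aborted (core dumped)`. The same effect can be approximated from inside the program (a sketch, not the project's code):

    import sys

    # Flush stdout/stderr per line so each task's final log lines survive an
    # abrupt kill; `python -u` / PYTHONUNBUFFERED=1 go further and unbuffer
    # the binary layer as well.
    sys.stdout.reconfigure(line_buffering=True)
    sys.stderr.reconfigure(line_buffering=True)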
+6256,6887316,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",294,375,"\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n\nexpor",shellscript,selection_mouse
+6257,6887318,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",242,427,"job_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n\nexpor",shellscript,selection_mouse
+6258,6887335,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",118,551,"\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n\nexpor",shellscript,selection_mouse
+6259,6887350,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",92,577,"source .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n\nexpor",shellscript,selection_mouse
+6260,6887368,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",32,637,"module unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n\nexpor",shellscript,selection_mouse
+6262,6888070,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",31,638,"\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n",shellscript,selection_mouse
+6263,6888123,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",24,645,"cat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n\nexpor",shellscript,selection_mouse
+6264,6888515,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",0,669,"# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n\nexpor",shellscript,selection_mouse
+6266,6889192,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",24,645,"cat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n\nexpor",shellscript,selection_mouse
+6268,6890384,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",35,0,"",shellscript,selection_mouse
+6269,6890838,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",31,0,"",shellscript,selection_mouse
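The script body being inspected here derives all run identity from SLURM's environment: job_name=$SLURM_JOB_NAME and slurm_job_id=$SLURM_JOB_ID feed both CHECKPOINT_DIR and the --wandb_id flag. A minimal Python sketch of the same pattern (placeholder checkpoint root, not the real workspace path):

    import os

    job_name = os.environ.get("SLURM_JOB_NAME", "dev")
    job_id = os.environ.get("SLURM_JOB_ID", "local")
    ckpt_dir = f"/path/to/checkpoints/{job_name}/{job_id}"  # placeholder root
    os.makedirs(ckpt_dir, exist_ok=True)
    print(f"checkpointing run {job_name}/{job_id} to {ckpt_dir}")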
+6271,6891001,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",31,3,"\nmo",shellscript,selection_mouse +6272,6891016,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",31,91,"\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narr",shellscript,selection_mouse +6273,6891068,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",31,269,"\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECK",shellscript,selection_mouse +6274,6891069,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",31,425,"\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# token",shellscript,selection_mouse +6275,6891069,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",31,632,"\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n",shellscript,selection_mouse +6276,6891087,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",31,645,"\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n\nexport PYTHO",shellscript,selection_mouse +6277,6891139,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",31,659,"\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger 
ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n\nexport PYTHONUNBUFFERED=1\n",shellscript,selection_mouse +6278,6891195,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",31,674,"\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n\nexport PYTHONUNBUFFERED=1\n\nsrun python tr",shellscript,selection_mouse +6279,6891325,"slurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh",31,659,"\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer with the new structure supporting larger ffn_dim\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\n\nenv | grep SLURM\n\nexport PYTHONUNBUFFERED=1\n",shellscript,selection_mouse +6280,6891951,"TERMINAL",0,0,"796147757754",,terminal_output +6281,6892938,"TERMINAL",0,0,"8507258868865",,terminal_output +6282,6893988,"TERMINAL",0,0,"918369979976",,terminal_output +6283,6895067,"TERMINAL",0,0,"srun",,terminal_focus +6284,6895083,"TERMINAL",0,0,"3029474:00408506:0087",,terminal_output +6285,6896081,"TERMINAL",0,0,"1320581191198",,terminal_output +6286,6896452,"TERMINAL",0,0,"",,terminal_output +6287,6897129,"TERMINAL",0,0,"24169224022109",,terminal_output +6288,6897317,"TERMINAL",0,0,".log",,terminal_output +6289,6897805,"TERMINAL",0,0,"",,terminal_output +6290,6897990,"TERMINAL",0,0,"",,terminal_output +6291,6898179,"TERMINAL",0,0,"35272:0033133120",,terminal_output +6292,6898629,"TERMINAL",0,0,"\r\n\rmodule unload mpi/openmpi/5.0\r\n\rmodule unload devel/cuda/12.4\r\n\rsource .venv/bin/activate\r\n\r\n\rarray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\n\rjob_name=$SLURM_JOB_NAME\r\n\rslurm_job_id=$SLURM_JOB_ID\r\n\r\n\rCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\r\n\rmkdir -p $CHECKPOINT_DIR\r\n\r\n\r# tokenizer with the new structure supporting larger ffn_dim\r\n\rtokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\r\n\r\n\renv | grep SLURM\r\n\r\n\rexport PYTHONUNBUFFERED=1\r\n\r",,terminal_output +6293,6899309,"TERMINAL",0,0,"474925535532",,terminal_output +6294,6899628,"TERMINAL",0,0,"module unload mpi/openmpi/5.0\r\n\rmodule unload devel/cuda/12.4\r\n\rsource 
.venv/bin/activate\r\n\r\n\rarray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\r\n\r\n\rjob_name=$SLURM_JOB_NAME\r\n\rslurm_job_id=$SLURM_JOB_ID\r\n\r\n\rCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/holiday/causal/$job_name/$slurm_job_id\r\n\rmkdir -p $CHECKPOINT_DIR\r\n\r\n\r# tokenizer with the new structure supporting larger ffn_dim\r\n\rtokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer/train_tokenizer_1e-4/3412401\r\n\r\n\renv | grep SLURM\r\n\r\n\rexport PYTHONUNBUFFERED=1\r\n\r\r\n[?2004l\r",,terminal_output +6295,6899793,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=4(x8)\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=986737\r\nSLURM_JOB_GPUS=0,1,2,3\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0522\r\nSLURM_JOB_START_TIME=1754926451\r\nSLURM_STEP_NODELIST=hkn0522\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1754928251\r\nSLURM_PMI2_SRUN_PORT=42681\r\nSLURM_CPUS_ON_NODE=24\r\nSLURM_JOB_CPUS_PER_NODE=24(x8)\r\nSLURM_GPUS_ON_NODE=4\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=8\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3415275\r\nSLURM_PTY_PORT=35785\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.201\r\nSLURM_JOB_QOS=normal\r\nSLURM_PTY_WIN_ROW=41\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=32\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e11.hkn0522\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.201\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_PTY_WIN_COL=129\r\nSLURM_NODELIST=hkn[0522-0527,0530-0531]\r\nSLURM_SRUN_COMM_PORT=42393\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=32\r\nSLURM_NNODES=8\r\nSLURM_SUBMIT_HOST=hkn1993.localdomain\r\nSLURM_JOB_ID=3415275\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0522\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=4\r\nSLURM_STEP_LAUNCHER_PORT=42393\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn[0522-0527,0530-0531]\r\n]0;tum_cte0515@hkn0522:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0522 jafar]$ ",,terminal_output diff --git a/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-ccfcea3b-1c9e-4890-9689-b396fb4abdb61751316192050-2025_06_30-22.43.47.442/source.csv b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-ccfcea3b-1c9e-4890-9689-b396fb4abdb61751316192050-2025_06_30-22.43.47.442/source.csv new file mode 100644 index 0000000000000000000000000000000000000000..c6fd43e1cb4d7eaf42eb379be21f29284dcdca12 --- /dev/null +++ b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-ccfcea3b-1c9e-4890-9689-b396fb4abdb61751316192050-2025_06_30-22.43.47.442/source.csv @@ -0,0 +1,5633 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +2,407,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"10:43:47 PM [info] Activating crowd-code\n10:43:47 PM [info] Recording started\n10:43:47 PM [info] Initializing git provider using file system watchers...\n10:43:47 PM [info] Git repository 
found\n10:43:47 PM [info] Git provider initialized successfully\n10:43:47 PM [info] Initial git state: [object Object]\n",Log,tab +3,3608,"TERMINAL",0,0,"/bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt",,terminal_command +4,3657,"TERMINAL",0,0,"]633;E;2025-06-30 22:43:50 /bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt;65516b1d-7ebc-420e-a49b-b7dc283e7281]633;C",,terminal_output +5,16851,"TERMINAL",0,0,"bash",,terminal_focus +6,18737,"TERMINAL",0,0,"queue",,terminal_command +7,18815,"TERMINAL",0,0,"]633;E;2025-06-30 22:44:06 queue;4d11dbdc-690b-4257-b927-bbd493ebfa56]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Mon Jun 30 22:44:06 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)",,terminal_output +8,19842,"TERMINAL",0,0,"7",,terminal_output +9,20172,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +10,150122,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5 --mem=50G",,terminal_command +11,150180,"TERMINAL",0,0,"]633;E;2025-06-30 22:46:17 salloc --time=01:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5 --mem=50G;4d11dbdc-690b-4257-b927-bbd493ebfa56]633;C",,terminal_output +12,150224,"TERMINAL",0,0,"salloc: Pending job allocation 3307524\r\nsalloc: job 3307524 queued and waiting for resources\r\n",,terminal_output +13,158100,"TERMINAL",0,0,"salloc: job 3307524 has been allocated resources\r\nsalloc: Granted job allocation 3307524\r\n",,terminal_output +14,158155,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output +15,185257,"TERMINAL",0,0,"salloc: Nodes hkn0732 are ready for job\r\n",,terminal_output +16,185985,"TERMINAL",0,0,"]0;tum_cte0515@hkn0732:~/Projects/jafar[?2004h[tum_cte0515@hkn0732 jafar]$ ",,terminal_output +17,201359,"TERMINAL",0,0,"s",,terminal_output +18,201654,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +19,201816,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +20,201870,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +21,201969,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +22,202203,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +23,202650,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +24,202763,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +25,202926,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +26,203017,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +27,203270,"TERMINAL",0,0,"env/",,terminal_output +28,203772,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +29,203877,"TERMINAL",0,0,"in/",,terminal_output +30,204165,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +31,204219,"TERMINAL",0,0,"[?25lx[?25h",,terminal_output +32,204563,"TERMINAL",0,0,"",,terminal_output +33,205764,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +34,205966,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +35,206173,"TERMINAL",0,0,"tivate",,terminal_output +36,206631,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0732:~/Projects/jafar[?2004h(jafar) 
[tum_cte0515@hkn0732 jafar]$ ",,terminal_output +37,256983,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_output +38,257218,"TERMINAL",0,0,"run --overlap --jobid=3305684 --pty /bin/bash",,terminal_output +39,257583,"TERMINAL",0,0,"\rqueue",,terminal_output +40,258302,"TERMINAL",0,0,"srun --overlap --jobid=3305679 --pty /bin/bash",,terminal_output +41,258499,"TERMINAL",0,0,"\rqueue",,terminal_output +42,259382,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0732.localdomain: Mon Jun 30 22:48:06 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3307524 accelerat interact tum_cte0 R\t1:41\t 1 hkn0732",,terminal_output +43,260312,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0732:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0732 jafar]$ ",,terminal_output +44,264083,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0732 jafar]$ ",,terminal_output +45,264849,"sample.py",0,0,"from dataclasses import dataclass\nimport time\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\n#from utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_resolution: int = 64\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n\n\nargs = tyro.cli(Args)\nrng = jax.random.PRNGKey(args.seed)\n\n# --- Load Genie checkpoint ---\ngenie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_resolution, args.image_resolution, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\nckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n for frame_idx in range(args.start_frame + 1, args.seq_len):\n # --- Sample next frame ---\n print(""=""*100)\n print(""Frame"", frame_idx)\n 
print(""=""*100)\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=vid, latent_actions=action_batch[:, :frame_idx], rng=_rng)\n new_frame = genie.apply(\n params,\n batch,\n args.maskgit_steps,\n args.temperature,\n args.sample_argmax,\n method=Genie.sample,\n )\n vid = jnp.concatenate([vid, new_frame], axis=1)\n return vid\n\ndef _oneshot_sample(rng, video_batch, action_batch):\n # Pass the full video batch, as in training\n batch = dict(\n videos=video_batch, # full batch, not just first frame\n latent_actions=action_batch, # shape should match what was used in training\n mask_rng=rng,\n )\n outputs = genie.apply(params, batch, True) # training=False for eval\n return outputs[""recon""]\n\n# --- Get video + latent actions ---\n# dataloader = get_dataloader(args.data_dir, args.seq_len, args.batch_size)\n# video_batch = next(iter(dataloader))\nvideo_batch = np.load(""overfit_dir/single_sample_corner.npy"")\n# Get latent actions from first video only\nfirst_video = video_batch[:1, :args.seq_len]\nbatch = dict(videos=first_video)\naction_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\naction_batch = action_batch.reshape(1, args.seq_len - 1, 1)\naction_batch = jnp.zeros_like(action_batch)\n# Use actions from first video for all videos\naction_batch = jnp.repeat(action_batch, video_batch.shape[0], axis=0)\n\n# --- Sample + evaluate video ---\nvid = _autoreg_sample(rng, video_batch, action_batch)\n# vid = _oneshot_sample(rng, video_batch, action_batch)\ngt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\nrecon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\nssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\nprint(f""SSIM: {ssim}"")\n\n# --- Construct video ---\nfirst_true = (video_batch[0:1] * 255).astype(np.uint8)\nfirst_pred = (vid[0:1] * 255).astype(np.uint8)\nfirst_video_comparison = np.zeros((2, *vid.shape[1:5]), dtype=np.uint8)\nfirst_video_comparison[0] = first_true[:, : vid.shape[1]]\nfirst_video_comparison[1] = first_pred\n# For other videos, only show generated video\nother_preds = (vid[1:] * 255).astype(np.uint8)\nall_frames = np.concatenate([first_video_comparison, other_preds], axis=0)\nflat_vid = einops.rearrange(all_frames, ""n t h w c -> t h (n w) c"")\n\n# --- Save video ---\nimgs = [Image.fromarray(img) for img in flat_vid]\n# Write actions on each frame\nfor img, action in zip(imgs[1:], action_batch[0, :, 0]):\n d = ImageDraw.Draw(img)\n d.text((2, 2), f""{action}"", fill=255)\nimgs[0].save(\n f""generation_{time.time()}.gif"",\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n)\n",python,tab +46,268260,"sample.py",3856,0,"",python,selection_mouse +47,336935,"sample.py",3838,0,"",python,selection_mouse +48,336939,"sample.py",3837,0,"",python,selection_command +49,344376,"sample.py",3944,0,"",python,selection_mouse +50,344379,"sample.py",3943,0,"",python,selection_command +51,345044,"sample.py",3838,0,"",python,selection_mouse +52,345052,"sample.py",3837,0,"",python,selection_command +53,345736,"sample.py",3803,0,"",python,selection_mouse +54,345943,"sample.py",3794,11,"first_video",python,selection_mouse +55,346549,"sample.py",3698,0,"",python,selection_mouse +56,346730,"sample.py",3689,11,"video_batch",python,selection_mouse +57,351553,"sample.py",3135,0,"",python,selection_mouse +58,351712,"sample.py",3133,3,"vid",python,selection_mouse +59,359040,"sample.py",2749,0,"",python,selection_mouse +60,359790,"sample.py",2727,0,"",python,selection_mouse 
+61,359808,"sample.py",2726,0,"",python,selection_command +62,360505,"sample.py",2727,0,"",python,selection_mouse +63,360508,"sample.py",2726,0,"",python,selection_command +64,360676,"sample.py",2726,1,")",python,selection_mouse +65,360681,"sample.py",2727,0,"",python,selection_command +66,360744,"sample.py",2725,2,"0)",python,selection_mouse +67,360745,"sample.py",2723,4,"100)",python,selection_mouse +68,360853,"sample.py",2721,6,"""*100)",python,selection_mouse +69,360854,"sample.py",2685,42,"""Frame"", frame_idx)\n print(""=""*100)",python,selection_mouse +70,360854,"sample.py",2683,44,"t(""Frame"", frame_idx)\n print(""=""*100)",python,selection_mouse +71,360855,"sample.py",2682,45,"nt(""Frame"", frame_idx)\n print(""=""*100)",python,selection_mouse +72,360930,"sample.py",2681,46,"int(""Frame"", frame_idx)\n print(""=""*100)",python,selection_mouse +73,360931,"sample.py",2680,47,"rint(""Frame"", frame_idx)\n print(""=""*100)",python,selection_mouse +74,360977,"sample.py",2679,48,"print(""Frame"", frame_idx)\n print(""=""*100)",python,selection_mouse +75,360978,"sample.py",2678,49," print(""Frame"", frame_idx)\n print(""=""*100)",python,selection_mouse +76,360979,"sample.py",2654,73," print(""=""*100)\n print(""Frame"", frame_idx)\n print(""=""*100)",python,selection_mouse +77,361001,"sample.py",2653,74," print(""=""*100)\n print(""Frame"", frame_idx)\n print(""=""*100)",python,selection_mouse +78,361022,"sample.py",2652,75," print(""=""*100)\n print(""Frame"", frame_idx)\n print(""=""*100)",python,selection_mouse +79,361045,"sample.py",2651,76," print(""=""*100)\n print(""Frame"", frame_idx)\n print(""=""*100)",python,selection_mouse +80,361094,"sample.py",2650,77," print(""=""*100)\n print(""Frame"", frame_idx)\n print(""=""*100)",python,selection_mouse +81,361592,"sample.py",2650,0,"",python,selection_mouse +82,361593,"sample.py",2648,8," ",python,selection_mouse +83,361832,"sample.py",2648,13," print",python,selection_mouse +84,361903,"sample.py",2648,16," print(""=",python,selection_mouse +85,361904,"sample.py",2648,17," print(""=""",python,selection_mouse +86,361904,"sample.py",2648,18," print(""=""*",python,selection_mouse +87,361948,"sample.py",2648,21," print(""=""*100",python,selection_mouse +88,361991,"sample.py",2648,22," print(""=""*100)",python,selection_mouse +89,362228,"sample.py",2670,0,"",python,selection_mouse +90,362255,"sample.py",2669,0,"",python,selection_command +91,362381,"sample.py",2670,0,"",python,selection_mouse +92,362424,"sample.py",2669,0,"",python,selection_command +93,362548,"sample.py",2669,1,")",python,selection_mouse +94,362591,"sample.py",2670,0,"",python,selection_command +95,362592,"sample.py",2666,4,"100)",python,selection_mouse +96,362622,"sample.py",2665,5,"*100)",python,selection_mouse +97,362623,"sample.py",2663,7,"=""*100)",python,selection_mouse +98,362640,"sample.py",2662,8,"""=""*100)",python,selection_mouse +99,362666,"sample.py",2656,14,"print(""=""*100)",python,selection_mouse +100,362752,"sample.py",2655,15," print(""=""*100)",python,selection_mouse +101,362752,"sample.py",2654,16," print(""=""*100)",python,selection_mouse +102,362753,"sample.py",2653,17," print(""=""*100)",python,selection_mouse +103,362767,"sample.py",2652,18," print(""=""*100)",python,selection_mouse +104,362806,"sample.py",2651,19," print(""=""*100)",python,selection_mouse +105,363156,"sample.py",2651,0,"",python,selection_mouse +106,363157,"sample.py",2648,8," ",python,selection_mouse +107,363322,"sample.py",2648,23," print(""=""*100)\n",python,selection_mouse 
+108,364024,"sample.py",2659,0,"",python,selection_mouse +109,364025,"sample.py",2656,5,"print",python,selection_mouse +110,364591,"sample.py",2670,0,"",python,selection_mouse +111,364615,"sample.py",2669,0,"",python,selection_command +112,364724,"sample.py",2670,0,"",python,selection_mouse +113,364761,"sample.py",2669,0,"",python,selection_command +114,369325,"TERMINAL",0,0,"[?25lsh[?25h",,terminal_output +115,369364,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +116,369513,"TERMINAL",0,0,"[?25l [?25h[?25ls[?25h",,terminal_output +117,369566,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +118,369846,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +119,370005,"TERMINAL",0,0,"ipts_",,terminal_output +120,370367,"TERMINAL",0,0,"",,terminal_output +121,371218,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +122,371318,"TERMINAL",0,0,"oreka/",,terminal_output +123,371664,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +124,371716,"TERMINAL",0,0,"rain_",,terminal_output +125,374367,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +126,374508,"TERMINAL",0,0,"ynamics.sh ",,terminal_output +127,376651,"scripts_horeka/train_dynamics.sh",0,0,"#!/usr/bin/env bash\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\n\ntf_records_dir=$ws_dir/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""0000""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\n# Use checkpoints from tokenizer/lam overfit sample runs\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3299272/tokenizer_1751037678_153500/\n# lam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3299259/lam_1751036759_200000/\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/lam-1-action/lam_1751297992_5/\n\npython train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=3e-4 \\n --max_lr=3e-4 \\n --log_image_interval=500 \\n --log \\n --log_checkpoint_interval=500 \\n --name=dynamics-tiny-overfit-big-lr-$slurm_job_id \\n --tags dynamics overfit tiny \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --lam_checkpoint=$lam_ckpt_dir \\n --data_dir $tf_records_dir \\n --tokenizer_dim=384 \\n --latent_patch_dim=32 \\n --num_patch_latents=1024 \\n --patch_size=4 \\n --tokenizer_num_blocks=8 \\n --tokenizer_num_heads=8 \\n --lam_dim=384 \\n --latent_action_dim=32 \\n --lam_patch_size=16 \\n --lam_num_blocks=8 \\n --lam_num_heads=8 \\n --dyna_dim=128 \\n --dyna_num_blocks=2 \\n --dyna_num_heads=4 \\n --num_latent_actions=1 \\n --mask_limit=1.0\n",shellscript,tab +128,380137,"scripts_horeka/train_dynamics.sh",1645,0,"",shellscript,selection_mouse +129,380141,"scripts_horeka/train_dynamics.sh",1644,0,"",shellscript,selection_command +130,381354,"scripts_horeka/train_dynamics.sh",1645,0,"",shellscript,selection_command +131,381508,"scripts_horeka/train_dynamics.sh",1644,1,"",shellscript,content +132,381637,"scripts_horeka/train_dynamics.sh",1643,1,"",shellscript,content +133,381928,"scripts_horeka/train_dynamics.sh",1642,1,"",shellscript,content +134,382528,"scripts_horeka/train_dynamics.sh",1642,0,"0",shellscript,content +135,382529,"scripts_horeka/train_dynamics.sh",1643,0,"",shellscript,selection_keyboard +136,382621,"scripts_horeka/train_dynamics.sh",1643,0,".",shellscript,content 
+137,382622,"scripts_horeka/train_dynamics.sh",1644,0,"",shellscript,selection_keyboard +138,383177,"scripts_horeka/train_dynamics.sh",1644,0,"7",shellscript,content +139,383178,"scripts_horeka/train_dynamics.sh",1645,0,"",shellscript,selection_keyboard +140,385523,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +141,385726,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2303099\r\nSLURM_JOB_GPUS=2\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0732\r\nSLURM_JOB_START_TIME=1751316385\r\nSLURM_STEP_NODELIST=hkn0732\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751319985\r\nSLURM_PMI2_SRUN_PORT=35705\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3307524\r\nSLURM_PTY_PORT=32803\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=33\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0732\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_MEM_PER_NODE=51200\r\nSLURM_PTY_WIN_COL=143\r\nSLURM_NODELIST=hkn0732\r\nSLURM_SRUN_COMM_PORT=34929\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3307524\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0732\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=34929\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0732\r\n",,terminal_output +142,386434,"scripts_horeka/train_dynamics.sh",0,0,"",shellscript,tab +143,398659,"TERMINAL",0,0,"2025-06-30 22:50:25.991899: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n",,terminal_output +144,398976,"TERMINAL",0,0,"WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751316626.259103 2303927 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\n",,terminal_output +145,399023,"TERMINAL",0,0,"E0000 00:00:1751316626.318242 2303927 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\n",,terminal_output +146,399488,"TERMINAL",0,0,"W0000 00:00:1751316626.777922 2303927 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751316626.777948 2303927 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751316626.777950 2303927 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751316626.777953 2303927 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +147,438237,"TERMINAL",0,0,"W0000 00:00:1751316665.557890 2303927 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +148,438715,"TERMINAL",0,0,"Running on 1 devices.\r\n",,terminal_output +149,439908,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +150,440955,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250630_225107-qidh1q0t\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-tiny-overfit-big-lr-0000\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/qidh1q0t\r\n",,terminal_output +151,444371,"TERMINAL",0,0,"2025-06-30 22:51:11.601269: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +152,457169,"TERMINAL",0,0,"2025-06-30 22:51:24.496238: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +153,472608,"TERMINAL",0,0,"2025-06-30 22:51:39.944802: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +154,479900,"TERMINAL",0,0,"2025-06-30 22:51:47.237987: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +155,499045,"TERMINAL",0,0,"batch shape: (1, 16, 90, 160, 3)\r\n",,terminal_output +156,512471,"TERMINAL",0,0,"2025-06-30 22:52:19.807475: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-30 22:52:19.808067: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-30 22:52:19.808175: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-30 22:52:19.808814: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-30 22:52:19.809868: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +157,546851,"genie.py",0,0,"from typing import Dict, Any\n\nimport optax\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nfrom jax import NamedSharding\nfrom flax.training.train_state import TrainState\nfrom flax.training import orbax_utils\nfrom orbax.checkpoint import PyTreeCheckpointer\n\nfrom models.dynamics import DynamicsMaskGIT\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\n\nclass Genie(nn.Module):\n """"""Genie model""""""\n\n # --- Tokenizer ---\n in_dim: int\n tokenizer_dim: int\n latent_patch_dim: int\n num_patch_latents: int\n patch_size: int\n tokenizer_num_blocks: int\n tokenizer_num_heads: int\n # --- LAM ---\n lam_dim: int\n latent_action_dim: int\n num_latent_actions: int\n lam_patch_size: int\n lam_num_blocks: int\n lam_num_heads: int\n # --- Dynamics ---\n dyna_dim: int\n dyna_num_blocks: int\n dyna_num_heads: int\n dropout: float = 0.0\n mask_limit: float = 0.0\n\n def setup(self):\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n )\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n num_latents=self.num_patch_latents,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\n # FIXME @mihir\n lam_outputs = self.lam.vq_encode(batch[""videos""], training=False)\n batch_size = batch[""videos""].shape[0]\n seq_len = batch[""videos""].shape[1]\n latent_actions_mocked = jnp.zeros((batch_size, seq_len - 1, 1, self.latent_patch_dim))\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(tokenizer_outputs[""indices""]),\n latent_actions=jax.lax.stop_gradient(latent_actions_mocked),\n )\n ## \n outputs[""mask_rng""] = batch[""mask_rng""]\n 
dyna_outputs = self.dynamics(outputs, training)\n outputs.update(dyna_outputs)\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )\n outputs[""gt_debug""] = self.tokenizer.decode(\n tokenizer_outputs[""indices""], batch[""videos""].shape[2:4]\n )\n return outputs\n\n @nn.compact\n def sample(\n self,\n batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs 
= jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None\n\n\ndef restore_genie_components(\n train_state: TrainState,\n sharding: NamedSharding,\n inputs: Dict[str, jax.Array],\n rng: jax.Array,\n args,\n):\n """"""Restore pre-trained Genie components""""""\n rng, _rng = jax.random.split(rng)\n\n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n num_blocks=args.tokenizer_num_blocks,\n num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n )\n dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n )\n tokenizer_init_params = dummy_tokenizer.init(_rng, inputs)\n lam_init_params = dummy_lam.init(_rng, inputs)\n\n # dummy values since we only use tx to initialize the dummy train states\n dummy_tx = optax.adamw(\n learning_rate=optax.constant_schedule(args.max_lr),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n )\n\n dummy_tokenizer_train_state = TrainState.create(\n apply_fn=dummy_tokenizer.apply, params=tokenizer_init_params, tx=dummy_tx\n )\n dummy_lam_train_state = TrainState.create(\n apply_fn=dummy_lam.apply, params=lam_init_params, tx=dummy_tx\n )\n\n def create_abstract_sharded_pytree(pytree_template, sharding_spec):\n """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, ""shape"") and hasattr(leaf_template, ""dtype""):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)\n\n abstract_sharded_tokenizer_state = create_abstract_sharded_pytree(\n dummy_tokenizer_train_state, sharding\n )\n abstract_sharded_lam_state = create_abstract_sharded_pytree(\n dummy_lam_train_state, sharding\n )\n\n tokenizer_restore_target = {""model"": abstract_sharded_tokenizer_state}\n lam_restore_target = {""model"": abstract_sharded_lam_state}\n\n tokenizer_restore_args = orbax_utils.restore_args_from_target(\n tokenizer_restore_target\n )\n lam_restore_args = orbax_utils.restore_args_from_target(lam_restore_target)\n\n restored_tokenizer_params = (\n PyTreeCheckpointer()\n .restore(\n args.tokenizer_checkpoint,\n item=tokenizer_restore_target,\n restore_args=tokenizer_restore_args,\n )[""model""]\n .params[""params""]\n )\n restored_lam_params = (\n PyTreeCheckpointer()\n .restore(\n args.lam_checkpoint, item=lam_restore_target, restore_args=lam_restore_args\n )[""model""]\n .params[""params""]\n )\n # Genie does not initialize all LAM modules, thus we omit those extra modules during restoration\n # (f.srambical) FIXME: Currently, this is a small HBM memory crunch since the LAM's decoder is loaded into HBM and immediately dicarded.\n # A workaround would be to restore to host memory first, and only move the weights to HBM after pruning the decoder\n restored_lam_params = {\n k: v\n for k, v in restored_lam_params.items()\n 
if k in train_state.params[""params""][""lam""]\n }\n\n train_state.params[""params""][""tokenizer""].update(restored_tokenizer_params)\n train_state.params[""params""][""lam""].update(restored_lam_params)\n\n return train_state\n",python,tab +158,550189,"genie.py",2549,0,"",python,selection_mouse +159,550361,"genie.py",2545,7,"outputs",python,selection_mouse +160,551054,"genie.py",2746,0,"",python,selection_mouse +161,551197,"genie.py",2742,7,"outputs",python,selection_mouse +162,552039,"genie.py",2781,0,"",python,selection_mouse +163,552068,"genie.py",2780,0,"",python,selection_command +164,552208,"genie.py",2780,1,"]",python,selection_mouse +165,552208,"genie.py",2777,3,"ng""",python,selection_mouse +166,552251,"genie.py",2781,0,"",python,selection_command +167,552251,"genie.py",2771,10,"mask_rng""]",python,selection_mouse +168,552252,"genie.py",2765,16,"atch[""mask_rng""]",python,selection_mouse +169,552252,"genie.py",2733,48,"\n outputs[""mask_rng""] = batch[""mask_rng""]",python,selection_mouse +170,552299,"genie.py",2753,28,"sk_rng""] = batch[""mask_rng""]",python,selection_mouse +171,552328,"genie.py",2751,30,"mask_rng""] = batch[""mask_rng""]",python,selection_mouse +172,552349,"genie.py",2750,31,"""mask_rng""] = batch[""mask_rng""]",python,selection_mouse +173,552378,"genie.py",2749,32,"[""mask_rng""] = batch[""mask_rng""]",python,selection_mouse +174,552379,"genie.py",2748,33,"s[""mask_rng""] = batch[""mask_rng""]",python,selection_mouse +175,552414,"genie.py",2747,34,"ts[""mask_rng""] = batch[""mask_rng""]",python,selection_mouse +176,552414,"genie.py",2746,35,"uts[""mask_rng""] = batch[""mask_rng""]",python,selection_mouse +177,552415,"genie.py",2745,36,"puts[""mask_rng""] = batch[""mask_rng""]",python,selection_mouse +178,552460,"genie.py",2744,37,"tputs[""mask_rng""] = batch[""mask_rng""]",python,selection_mouse +179,552502,"genie.py",2743,38,"utputs[""mask_rng""] = batch[""mask_rng""]",python,selection_mouse +180,552502,"genie.py",2742,39,"outputs[""mask_rng""] = batch[""mask_rng""]",python,selection_mouse +181,552533,"genie.py",2741,40," outputs[""mask_rng""] = batch[""mask_rng""]",python,selection_mouse +182,552534,"genie.py",2740,41," outputs[""mask_rng""] = batch[""mask_rng""]",python,selection_mouse +183,552619,"genie.py",2739,42," outputs[""mask_rng""] = batch[""mask_rng""]",python,selection_mouse +184,553026,"genie.py",2739,0,"",python,selection_mouse +185,553394,"genie.py",2747,0,"",python,selection_mouse +186,553556,"genie.py",2742,7,"outputs",python,selection_mouse +187,553743,"genie.py",2742,17,"outputs[""mask_rng",python,selection_mouse +188,553792,"genie.py",2742,20,"outputs[""mask_rng""] ",python,selection_mouse +189,553793,"genie.py",2742,27,"outputs[""mask_rng""] = batch",python,selection_mouse +190,553806,"genie.py",2733,16,"\n outputs",python,selection_mouse +191,553901,"genie.py",2742,37,"outputs[""mask_rng""] = batch[""mask_rng",python,selection_mouse +192,553901,"genie.py",2742,39,"outputs[""mask_rng""] = batch[""mask_rng""]",python,selection_mouse +193,554189,"genie.py",2781,0,"",python,selection_mouse +194,554197,"genie.py",2780,0,"",python,selection_command +195,554421,"genie.py",2781,0,"",python,selection_mouse +196,554436,"genie.py",2780,0,"",python,selection_command +197,554660,"genie.py",2780,1,"]",python,selection_mouse +198,554661,"genie.py",2780,0,"",python,selection_mouse +199,554661,"genie.py",2771,9,"mask_rng""",python,selection_mouse +200,554661,"genie.py",2770,10,"""mask_rng""",python,selection_mouse 
+201,554662,"genie.py",2769,11,"[""mask_rng""",python,selection_mouse +202,554662,"genie.py",2764,16,"batch[""mask_rng""",python,selection_mouse +203,554681,"genie.py",2781,0,"",python,selection_command +204,554682,"genie.py",2764,17,"batch[""mask_rng""]",python,selection_mouse +205,554682,"genie.py",2762,19,"= batch[""mask_rng""]",python,selection_mouse +206,554685,"genie.py",2760,21,"] = batch[""mask_rng""]",python,selection_mouse +207,554702,"genie.py",2751,30,"mask_rng""] = batch[""mask_rng""]",python,selection_mouse +208,554857,"genie.py",2750,31,"""mask_rng""] = batch[""mask_rng""]",python,selection_mouse +209,555218,"genie.py",2750,0,"",python,selection_mouse +210,559881,"TERMINAL",0,0,"Step 0, loss: 9.043804168701172, step time: 60575.29163360596ms\r\n",,terminal_output +211,559991,"TERMINAL",0,0,"Step 1, loss: 8.33558177947998, step time: 29.198169708251953ms\r\n",,terminal_output +212,560102,"TERMINAL",0,0,"Step 2, loss: 7.727107524871826, step time: 22.667407989501953ms\r\n",,terminal_output +213,560168,"TERMINAL",0,0,"Step 3, loss: 7.4470624923706055, step time: 20.58720588684082ms\r\n",,terminal_output +214,560301,"TERMINAL",0,0,"Step 4, loss: 7.165591239929199, step time: 20.083904266357422ms\r\n",,terminal_output +215,560389,"TERMINAL",0,0,"Step 5, loss: 6.940353870391846, step time: 20.55191993713379ms\r\n",,terminal_output +216,560440,"TERMINAL",0,0,"Step 6, loss: 6.839682579040527, step time: 19.740819931030273ms\r\n",,terminal_output +217,560538,"TERMINAL",0,0,"Step 7, loss: 6.669307708740234, step time: 19.685983657836914ms\r\n",,terminal_output +218,560593,"TERMINAL",0,0,"Step 8, loss: 6.532555103302002, step time: 20.073413848876953ms\r\n",,terminal_output +219,560691,"TERMINAL",0,0,"Step 9, loss: 6.52276086807251, step time: 20.633459091186523ms\r\n",,terminal_output +220,560771,"TERMINAL",0,0,"Step 10, loss: 6.323750972747803, step time: 22.101402282714844ms\r\n",,terminal_output +221,560882,"TERMINAL",0,0,"Step 11, loss: 6.25666618347168, step time: 24.46126937866211ms\r\n",,terminal_output +222,560931,"TERMINAL",0,0,"Step 12, loss: 6.147627830505371, step time: 24.161815643310547ms\r\n",,terminal_output +223,561030,"TERMINAL",0,0,"Step 13, loss: 6.052901268005371, step time: 20.546674728393555ms\r\n",,terminal_output +224,561125,"TERMINAL",0,0,"Step 14, loss: 5.967831134796143, step time: 19.95253562927246ms\r\n",,terminal_output +225,561196,"TERMINAL",0,0,"Step 15, loss: 5.922191619873047, step time: 20.230531692504883ms\r\n",,terminal_output +226,561321,"TERMINAL",0,0,"Step 16, loss: 5.83481502532959, step time: 20.3857421875ms\r\n",,terminal_output +227,561411,"TERMINAL",0,0,"Step 17, loss: 5.8044114112854, step time: 21.952390670776367ms\r\n",,terminal_output +228,561588,"TERMINAL",0,0,"Step 18, loss: 5.922501087188721, step time: 20.748376846313477ms\r\nStep 19, loss: 5.66389274597168, step time: 19.98591423034668ms\r\n",,terminal_output +229,561640,"TERMINAL",0,0,"Step 20, loss: 5.623478889465332, step time: 20.39313316345215ms\r\n",,terminal_output +230,561806,"TERMINAL",0,0,"Step 21, loss: 5.589746952056885, step time: 20.190000534057617ms\r\n",,terminal_output +231,561848,"TERMINAL",0,0,"Step 22, loss: 5.541235446929932, step time: 20.07889747619629ms\r\n",,terminal_output +232,561901,"TERMINAL",0,0,"Step 23, loss: 5.464114189147949, step time: 21.505355834960938ms\r\n",,terminal_output +233,562011,"TERMINAL",0,0,"Step 24, loss: 5.382187843322754, step time: 20.084857940673828ms\r\n",,terminal_output +234,562068,"TERMINAL",0,0,"Step 25, loss: 
5.3534464836120605, step time: 20.013093948364258ms\r\n",,terminal_output +235,562157,"TERMINAL",0,0,"Step 26, loss: 5.318455696105957, step time: 20.262956619262695ms\r\n",,terminal_output +236,562247,"TERMINAL",0,0,"Step 27, loss: 5.276896953582764, step time: 19.814491271972656ms\r\n",,terminal_output +237,562326,"TERMINAL",0,0,"Step 28, loss: 5.203678131103516, step time: 19.985198974609375ms\r\n",,terminal_output +238,562418,"TERMINAL",0,0,"Step 29, loss: 5.158354759216309, step time: 20.198345184326172ms\r\n",,terminal_output +239,562521,"TERMINAL",0,0,"Step 30, loss: 5.112368106842041, step time: 19.906282424926758ms\r\n",,terminal_output +240,562609,"TERMINAL",0,0,"Step 31, loss: 5.122479438781738, step time: 19.29187774658203ms\r\n",,terminal_output +241,562676,"TERMINAL",0,0,"Step 32, loss: 5.09807014465332, step time: 20.226240158081055ms\r\n",,terminal_output +242,562843,"TERMINAL",0,0,"^CTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 241, in \r\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/array.py"", line 341, in __format__\r\n return format(self._value[()], format_spec)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/profiler.py"", line 354, in wrapper\r\n return func(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/array.py"", line 641, in _value\r\n npy_value, did_copy = self._single_device_array_to_np_array_did_copy()\r\nKeyboardInterrupt\r\n",,terminal_output +243,562982,"TERMINAL",0,0,"^C",,terminal_output +244,563133,"TERMINAL",0,0,"^C",,terminal_output +245,563278,"TERMINAL",0,0,"Exception ignored in atexit callback: .teardown_atexit at 0x1458d4086680>\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/service_connection.py"", line 94, in teardown_atexit\r\n conn.teardown(hooks.exit_code)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/service_connection.py"", line 236, in teardown\r\n return self._proc.join()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/service/service.py"", line 251, in join\r\n ret = self._internal_proc.wait()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/subprocess.py"", line 1222, in wait\r\n self._wait(timeout=sigint_timeout)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/subprocess.py"", line 1953, in _wait\r\n time.sleep(delay)\r\nKeyboardInterrupt: \r\n",,terminal_output +246,563416,"TERMINAL",0,0,"^CException ignored in: .remove at 0x1459af9ca710>\r\nTraceback (most recent call last):\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/weakref.py"", line 370, in remove\r\n def remove(k, selfref=ref(self)):\r\nKeyboardInterrupt: \r\n",,terminal_output +247,564289,"TERMINAL",0,0,"^Cwandb: \r\nwandb: 🚀 View run dynamics-tiny-overfit-big-lr-0000 at: 
https://wandb.ai/instant-uv/jafar/runs/qidh1q0t\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250630_225107-qidh1q0t/logs\r\nException ignored in atexit callback: \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/api.py"", line 3172, in clean_up\r\n distributed.shutdown()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/distributed.py"", line 291, in shutdown\r\n global_state.shutdown()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/distributed.py"", line 170, in shutdown\r\n self.service.shutdown()\r\nKeyboardInterrupt: \r\n",,terminal_output +248,565270,"scripts_horeka/train_dynamics.sh",0,0,"",shellscript,tab +249,565906,"TERMINAL",0,0,"\r\n]0;tum_cte0515@hkn0732:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0732 jafar]$ ",,terminal_output +250,566132,"sample.py",0,0,"",python,tab +251,567992,"scripts_horeka/train_dynamics.sh",0,0,"",shellscript,tab +252,594293,"sample.py",0,0,"",python,tab +253,595100,"genie.py",0,0,"",python,tab +254,597284,"genie.py",2781,0,"\n ",python,content +255,598504,"genie.py",2790,0,"j",python,content +256,598506,"genie.py",2791,0,"",python,selection_keyboard +257,598656,"genie.py",2791,0,"a",python,content +258,598658,"genie.py",2792,0,"",python,selection_keyboard +259,598862,"genie.py",2792,0,"x",python,content +260,598863,"genie.py",2793,0,"",python,selection_keyboard +261,598979,"genie.py",2793,0,".",python,content +262,598981,"genie.py",2794,0,"",python,selection_keyboard +263,599347,"genie.py",2794,0,"d",python,content +264,599348,"genie.py",2795,0,"",python,selection_keyboard +265,599439,"genie.py",2795,0,"b",python,content +266,599441,"genie.py",2796,0,"",python,selection_keyboard +267,600094,"genie.py",2795,1,"",python,content +268,600195,"genie.py",2795,0,"e",python,content +269,600196,"genie.py",2796,0,"",python,selection_keyboard +270,600268,"genie.py",2796,0,"b",python,content +271,600269,"genie.py",2797,0,"",python,selection_keyboard +272,600370,"genie.py",2797,0,"u",python,content +273,600371,"genie.py",2798,0,"",python,selection_keyboard +274,600677,"genie.py",2794,4,"debug",python,content +275,601109,"genie.py",2799,0,".",python,content +276,601111,"genie.py",2800,0,"",python,selection_keyboard +277,601409,"genie.py",2800,0,"b",python,content +278,601411,"genie.py",2801,0,"",python,selection_keyboard +279,601506,"genie.py",2801,0,"r",python,content +280,601508,"genie.py",2802,0,"",python,selection_keyboard +281,601843,"genie.py",2800,2,"breakpoint",python,content +282,602642,"genie.py",2810,0,"()",python,content +283,602643,"genie.py",2811,0,"",python,selection_keyboard +284,602762,"genie.py",2811,1,")",python,content +285,602763,"genie.py",2812,0,"",python,selection_keyboard +286,605134,"TERMINAL",0,0,"sh scripts_horeka/train_dynamics.sh ",,terminal_output +287,605635,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output 
+288,605776,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2303099\r\nSLURM_JOB_GPUS=2\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0732\r\nSLURM_JOB_START_TIME=1751316385\r\nSLURM_STEP_NODELIST=hkn0732\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751319985\r\nSLURM_PMI2_SRUN_PORT=35705\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3307524\r\nSLURM_PTY_PORT=32803\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=33\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0732\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_MEM_PER_NODE=51200\r\nSLURM_PTY_WIN_COL=143\r\nSLURM_NODELIST=hkn0732\r\nSLURM_SRUN_COMM_PORT=34929\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3307524\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0732\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=34929\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0732\r\n",,terminal_output
+289,606624,"genie.py",0,0,"",python,tab
+290,607111,"genie.py",2701,0,"",python,selection_mouse
+291,607297,"genie.py",2700,0,"",python,selection_command
+292,613504,"TERMINAL",0,0,"2025-06-30 22:54:00.805451: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751316840.818524 2306324 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751316840.822551 2306324 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1751316840.834201 2306324 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751316840.834220 2306324 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751316840.834222 2306324 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751316840.834224 2306324 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output
+293,624310,"TERMINAL",0,0,"W0000 00:00:1751316851.630037 2306324 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output
+294,624693,"TERMINAL",0,0,"Running on 1 devices.\r\n",,terminal_output
+295,625455,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output
+296,626034,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250630_225412-by3qou6j\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-tiny-overfit-big-lr-0000\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/by3qou6j\r\n",,terminal_output
+297,627539,"TERMINAL",0,0,"2025-06-30 22:54:14.863373: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output
+298,640195,"TERMINAL",0,0,"2025-06-30 22:54:27.531453: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output
+299,651444,"TERMINAL",0,0,"Entering jdb:\r\n(jdb) ",,terminal_output
+300,659620,"genie.py",2672,0,"",python,selection_mouse
+301,665837,"TERMINAL",0,0,"l",,terminal_output
+302,665944,"TERMINAL",0,0,"\r\n> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py(87)\r\n outputs = dict(\r\n video_tokens=jax.lax.stop_gradient(tokenizer_outputs[""indices""]),\r\n latent_actions=jax.lax.stop_gradient(latent_actions_mocked),\r\n )\r\n ## \r\n outputs[""mask_rng""] = batch[""mask_rng""]\r\n-> jax.debug.breakpoint()\r\n dyna_outputs = self.dynamics(outputs, training)\r\n outputs.update(dyna_outputs)\r\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\r\n outputs[""recon""] = self.tokenizer.decode(\r\n mle_indices, batch[""videos""].shape[2:4]\r\n(jdb) ",,terminal_output
+303,668278,"TERMINAL",0,0,"[?25lba[?25h",,terminal_output
+304,668374,"TERMINAL",0,0,"[?25la[?25h",,terminal_output
+305,668586,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output
+306,668725,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output
+307,668978,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output
+308,670263,"TERMINAL",0,0,"[?25l[[?25h",,terminal_output
+309,670559,"TERMINAL",0,0,"[?25l""[?25h",,terminal_output
+310,670917,"TERMINAL",0,0,"[?25lm[?25h[?25la[?25h",,terminal_output
+311,671017,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output
+312,671154,"TERMINAL",0,0,"[?25lk[?25h",,terminal_output
+313,671669,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output
+314,671987,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output
+315,672090,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output
+316,672341,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output
+317,673191,"TERMINAL",0,0,"[?25l""[?25h",,terminal_output
+318,673436,"TERMINAL",0,0,"[?25l][?25h",,terminal_output
+319,673636,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output
+320,673801,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output
+321,673950,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output
+322,674053,"TERMINAL",0,0,"[?25la[?25h",,terminal_output
+323,674134,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output
+324,674273,"TERMINAL",0,0,"[?25le[?25h\r\n(2,)\r\n(jdb) ",,terminal_output
+325,675418,"TERMINAL",0,0,"\rbatch[""mask_rng""].shape",,terminal_output
+326,675757,"TERMINAL",0,0,"[?25le\r[?25h",,terminal_output
+327,676007,"TERMINAL",0,0,"[?25lp\r[?25h",,terminal_output
+328,676105,"TERMINAL",0,0,"[?25la\r[?25h",,terminal_output
+329,676281,"TERMINAL",0,0,"[?25lh\r[?25h",,terminal_output
+330,676388,"TERMINAL",0,0,"[?25ls\r[?25h",,terminal_output
+331,676762,"TERMINAL",0,0,"[?25l.\r[?25h",,terminal_output
+332,676914,"TERMINAL",0,0,"\r\nArray([ 928981903, 3453687069], dtype=uint32)\r\n(jdb) ",,terminal_output
+333,682799,"scripts_horeka/train_dynamics.sh",0,0,"",shellscript,tab
+334,684081,"sample.py",0,0,"",python,tab
+335,686728,"genie.py",0,0,"",python,tab
+336,689761,"genie.py",2846,0,"",python,selection_mouse
+337,691966,"genie.py",1813,0,"",python,selection_mouse
+338,692364,"models/dynamics.py",0,0,"from typing import Dict, Any\n\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\n\nfrom utils.nn import STTransformer\n\n\nclass DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(self.model_dim)\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:\n mask = None\n\n # --- Predict transition ---\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n logits = self.dynamics(vid_embed)\n return dict(token_logits=logits, mask=mask)\n",python,tab
+339,697581,"models/dynamics.py",997,0,"",python,selection_mouse
+340,697715,"models/dynamics.py",995,8,"training",python,selection_mouse
+341,697914,"models/dynamics.py",995,26,"training:\n rng1",python,selection_mouse
+342,697959,"models/dynamics.py",995,28,"training:\n rng1, ",python,selection_mouse
+343,698018,"models/dynamics.py",995,32,"training:\n rng1, rng2",python,selection_mouse
+344,698351,"models/dynamics.py",1024,0,"",python,selection_mouse
+345,698352,"models/dynamics.py",1023,4,"rng2",python,selection_mouse
+346,698559,"models/dynamics.py",1022,5," rng2",python,selection_mouse
+347,698601,"models/dynamics.py",1021,6,", rng2",python,selection_mouse
+348,698634,"models/dynamics.py",1017,10,"rng1, rng2",python,selection_mouse
+349,699096,"models/dynamics.py",1019,0,"",python,selection_mouse
+350,699097,"models/dynamics.py",1017,4,"rng1",python,selection_mouse
+351,699351,"models/dynamics.py",1017,5,"rng1,",python,selection_mouse
+352,699411,"models/dynamics.py",1017,6,"rng1, ",python,selection_mouse
+353,699412,"models/dynamics.py",1017,10,"rng1, rng2",python,selection_mouse
+354,699860,"models/dynamics.py",1027,0,"",python,selection_mouse
+355,699861,"models/dynamics.py",1023,4,"rng2",python,selection_mouse
+356,700104,"models/dynamics.py",1021,6,", rng2",python,selection_mouse
+357,700144,"models/dynamics.py",1017,10,"rng1, rng2",python,selection_mouse
+358,701917,"models/dynamics.py",1026,0,"",python,selection_mouse
+359,703314,"models/dynamics.py",1029,0,"",python,selection_mouse
+360,704094,"models/dynamics.py",1297,0,"",python,selection_mouse
+361,720885,"models/dynamics.py",1350,0,"",python,selection_mouse
+362,720902,"models/dynamics.py",1349,0,"",python,selection_command
+363,721069,"models/dynamics.py",1349,1,")",python,selection_mouse
+364,721069,"models/dynamics.py",1349,15,")\n else:",python,selection_mouse
+365,721070,"models/dynamics.py",1349,39,")\n else:\n mask = None",python,selection_mouse
+366,721109,"models/dynamics.py",1350,0,"",python,selection_command
+367,721118,"models/dynamics.py",1350,14,"\n else:",python,selection_mouse
+368,721119,"models/dynamics.py",1287,63,"np.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)",python,selection_mouse
+369,721119,"models/dynamics.py",1233,117,"k = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)",python,selection_mouse
+370,721133,"models/dynamics.py",1148,202," mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)",python,selection_mouse
+371,721156,"models/dynamics.py",1145,205," mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)",python,selection_mouse
+372,721167,"models/dynamics.py",1141,209," mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)",python,selection_mouse
+373,721203,"models/dynamics.py",1140,210," mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)",python,selection_mouse
+374,721203,"models/dynamics.py",1139,211," mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)",python,selection_mouse
+375,721292,"models/dynamics.py",1066,284," mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)",python,selection_mouse
+376,721598,"models/dynamics.py",1005,345," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)",python,selection_mouse
+377,721723,"models/dynamics.py",984,366," if training:\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)",python,selection_mouse
+378,722046,"models/dynamics.py",1005,345," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)",python,selection_mouse
+379,812812,"models/dynamics.py",1426,0,"",python,selection_mouse
+380,812867,"models/dynamics.py",1425,0,"",python,selection_command
+381,813369,"models/dynamics.py",1364,0,"",python,selection_mouse
+382,813375,"models/dynamics.py",1363,0,"",python,selection_command
+383,831063,"models/dynamics.py",1253,0,"",python,selection_mouse
+384,831612,"models/dynamics.py",1256,0,"",python,selection_mouse
+385,953075,"models/dynamics.py",1518,0,"",python,selection_mouse
+386,953645,"models/dynamics.py",1458,0,"",python,selection_mouse
+387,966026,"models/dynamics.py",1602,0,"",python,selection_mouse
+388,966033,"models/dynamics.py",1601,0,"",python,selection_command
+389,969219,"models/dynamics.py",1585,0,"",python,selection_mouse
+390,971563,"models/dynamics.py",371,0,"",python,selection_mouse
+391,972006,"utils/nn.py",0,0,"import math\nfrom typing import Dict, Tuple\n\nfrom flax import linen as nn\nimport jax\nimport jax.numpy as jnp\n\n\nclass PositionalEncoding(nn.Module):\n """"""https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/JAX/tutorial6/Transformers_and_MHAttention.html""""""\n\n d_model: int # Hidden dimensionality of the input.\n max_len: int = 5000 # Maximum length of a sequence to expect.\n\n def setup(self):\n # Create matrix of [SeqLen, HiddenDim] representing the positional encoding for max_len inputs\n self.pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n self.pe = self.pe.at[:, 0::2].set(jnp.sin(position * div_term))\n self.pe = self.pe.at[:, 1::2].set(jnp.cos(position * div_term))\n\n def __call__(self, x):\n x = x + self.pe[: x.shape[2]]\n return x\n\n\nclass STBlock(nn.Module):\n dim: int\n num_heads: int\n dropout: float\n\n @nn.remat\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm()(z)\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n )(z)\n x = x + z\n\n # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm()(z)\n causal_mask = jnp.tri(z.shape[-2])\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n )(z, mask=causal_mask)\n x = x + z\n x = x.swapaxes(1, 2)\n\n # --- Feedforward ---\n z = nn.LayerNorm()(x)\n z = nn.Dense(self.dim)(z)\n z = nn.gelu(z)\n x = x + z\n\n return x\n\n\nclass STTransformer(nn.Module):\n model_dim: int\n out_dim: int\n num_blocks: int\n num_heads: int\n dropout: float\n\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n x = nn.Sequential(\n [\n nn.LayerNorm(),\n nn.Dense(self.model_dim),\n nn.LayerNorm(),\n ]\n )(x)\n for _ in range(self.num_blocks):\n x = STBlock(\n dim=self.model_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n )(x)\n x = nn.Dense(self.out_dim)(x)\n return x # (B, T, E)\n\n\ndef normalize(x):\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nn.Module):\n latent_dim: int\n num_latents: int\n dropout: float\n\n def setup(self):\n self.codebook = normalize(\n self.param(\n ""codebook"",\n nn.initializers.lecun_uniform(),\n (self.num_latents, self.latent_dim),\n )\n )\n self.drop = nn.Dropout(self.dropout, deterministic=False)\n\n def __call__(\n self, x: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x = normalize(x)\n codebook = normalize(self.codebook)\n distance = -jnp.matmul(x, codebook.T)\n if training:\n dropout_key = self.make_rng(""dropout"")\n distance = self.drop(distance, rng=dropout_key)\n\n # --- Get indices and embeddings ---\n indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]\n\n # --- Straight through estimator ---\n z_q = x + jax.lax.stop_gradient(z - x)\n return z_q, z, x, indices\n\n def get_codes(self, indices: jax.Array):\n return self.codebook[indices]\n",python,tab
+392,977942,"utils/nn.py",2685,0,"",python,selection_mouse
+393,978076,"utils/nn.py",2685,1," ",python,selection_mouse
+394,978280,"utils/nn.py",2685,2," (",python,selection_mouse
+395,978281,"utils/nn.py",2685,5," (jnp",python,selection_mouse
+396,978281,"utils/nn.py",2685,12," (jnp.linalg",python,selection_mouse
+397,978320,"utils/nn.py",2685,17," (jnp.linalg.norm",python,selection_mouse
+398,978810,"utils/nn.py",2709,0,"",python,selection_mouse
+399,979033,"utils/nn.py",2706,3,"ord",python,selection_mouse
+400,979272,"utils/nn.py",2704,5,", ord",python,selection_mouse
+401,979273,"utils/nn.py",2702,7,"(x, ord",python,selection_mouse
+402,979273,"utils/nn.py",2698,11,"norm(x, ord",python,selection_mouse
+403,979318,"utils/nn.py",2691,18,"linalg.norm(x, ord",python,selection_mouse
+404,979459,"utils/nn.py",2690,19,".linalg.norm(x, ord",python,selection_mouse
+405,979538,"utils/nn.py",2687,22,"jnp.linalg.norm(x, ord",python,selection_mouse
+406,979655,"utils/nn.py",2706,39,"ord=2, axis=-1, keepdims=True) + 1e-8)\n",python,selection_mouse
+407,979910,"utils/nn.py",2745,0,"",python,selection_mouse
+408,980586,"utils/nn.py",2688,0,"",python,selection_mouse
+409,980687,"utils/nn.py",2687,3,"jnp",python,selection_mouse
+410,980892,"utils/nn.py",2687,10,"jnp.linalg",python,selection_mouse
+411,980948,"utils/nn.py",2687,15,"jnp.linalg.norm",python,selection_mouse
+412,980949,"utils/nn.py",2687,17,"jnp.linalg.norm(x",python,selection_mouse
+413,980975,"utils/nn.py",2687,22,"jnp.linalg.norm(x, ord",python,selection_mouse
+414,980976,"utils/nn.py",2670,20,"\n return x / (jnp",python,selection_mouse
+415,981346,"utils/nn.py",2687,57,"jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)",python,selection_mouse
+416,981519,"utils/nn.py",2744,0,"",python,selection_mouse
+417,981567,"utils/nn.py",2743,0,"",python,selection_command
+418,1014897,"utils/nn.py",3619,0,"",python,selection_mouse
+419,1015417,"utils/nn.py",3665,0,"",python,selection_mouse
+420,1015580,"utils/nn.py",3658,8,"codebook",python,selection_mouse
+421,1016934,"utils/nn.py",3655,0,"",python,selection_mouse
+422,1017100,"utils/nn.py",3653,4,"self",python,selection_mouse
+423,1017335,"utils/nn.py",3653,5,"self.",python,selection_mouse
+424,1017335,"utils/nn.py",3653,13,"self.codebook",python,selection_mouse
+425,1017413,"utils/nn.py",3653,21,"self.codebook[indices",python,selection_mouse
+426,1017544,"utils/nn.py",3653,22,"self.codebook[indices]",python,selection_mouse
+427,1017994,"utils/nn.py",3675,0,"",python,selection_mouse
+428,1018050,"utils/nn.py",3674,0,"",python,selection_command
+429,1018357,"utils/nn.py",3675,0,"",python,selection_mouse
+430,1018361,"utils/nn.py",3674,0,"",python,selection_command
+431,1018561,"utils/nn.py",3674,1,"]",python,selection_mouse
+432,1018587,"utils/nn.py",3675,0,"",python,selection_command
+433,1018590,"utils/nn.py",3622,53,"distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+434,1018622,"utils/nn.py",3615,60,"argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+435,1018706,"utils/nn.py",3611,64,"jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+436,1018742,"utils/nn.py",3609,66,"= jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+437,1018742,"utils/nn.py",3608,67," = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+438,1018762,"utils/nn.py",3601,74,"indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+439,1018911,"utils/nn.py",3600,75," indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+440,1019477,"utils/nn.py",3600,0,"",python,selection_mouse
+441,1019477,"utils/nn.py",3593,8," ",python,selection_mouse
+442,1019705,"utils/nn.py",3593,15," indices",python,selection_mouse
+443,1019779,"utils/nn.py",3593,16," indices ",python,selection_mouse
+444,1019780,"utils/nn.py",3593,18," indices = ",python,selection_mouse
+445,1019780,"utils/nn.py",3593,21," indices = jnp",python,selection_mouse
+446,1019820,"utils/nn.py",3593,22," indices = jnp.",python,selection_mouse
+447,1019832,"utils/nn.py",3593,28," indices = jnp.argmin",python,selection_mouse
+448,1019924,"utils/nn.py",3593,29," indices = jnp.argmin(",python,selection_mouse
+449,1019924,"utils/nn.py",3593,37," indices = jnp.argmin(distance",python,selection_mouse
+450,1020039,"utils/nn.py",3593,38," indices = jnp.argmin(distance,",python,selection_mouse
+451,1020040,"utils/nn.py",3593,43," indices = jnp.argmin(distance, axis",python,selection_mouse
+452,1020105,"utils/nn.py",3593,44," indices = jnp.argmin(distance, axis=",python,selection_mouse
+453,1020483,"utils/nn.py",3637,0,"",python,selection_mouse
+454,1021028,"utils/nn.py",3675,0,"",python,selection_mouse
+455,1021046,"utils/nn.py",3674,0,"",python,selection_command
+456,1021148,"utils/nn.py",3675,0,"",python,selection_mouse
+457,1021191,"utils/nn.py",3674,0,"",python,selection_command
+458,1021321,"utils/nn.py",3674,1,"]",python,selection_mouse
+459,1021363,"utils/nn.py",3675,0,"",python,selection_command
+460,1021372,"utils/nn.py",3674,1,"]",python,selection_mouse
+461,1021406,"utils/nn.py",3675,1,"\n",python,selection_mouse
+462,1021532,"utils/nn.py",3650,25," = self.codebook[indices]",python,selection_mouse
+463,1021533,"utils/nn.py",3649,26,"z = self.codebook[indices]",python,selection_mouse
+464,1021533,"utils/nn.py",3648,27," z = self.codebook[indices]",python,selection_mouse
+465,1021534,"utils/nn.py",3647,28," z = self.codebook[indices]",python,selection_mouse
+466,1021577,"utils/nn.py",3646,29," z = self.codebook[indices]",python,selection_mouse
+467,1021630,"utils/nn.py",3645,30," z = self.codebook[indices]",python,selection_mouse
+468,1021886,"utils/nn.py",3597,78," indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+469,1021979,"utils/nn.py",3596,79," indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+470,1022382,"utils/nn.py",3644,31," z = self.codebook[indices]",python,selection_mouse
+471,1022507,"utils/nn.py",3643,32," z = self.codebook[indices]",python,selection_mouse
+472,1022562,"utils/nn.py",3642,33," z = self.codebook[indices]",python,selection_mouse
+473,1022618,"utils/nn.py",3641,34," z = self.codebook[indices]",python,selection_mouse
+474,1023437,"utils/nn.py",3594,0,"",python,selection_mouse
+475,1023599,"utils/nn.py",3593,8," ",python,selection_mouse
+476,1023848,"utils/nn.py",3593,15," indices",python,selection_mouse
+477,1023933,"utils/nn.py",3593,18," indices = ",python,selection_mouse
+478,1023934,"utils/nn.py",3593,21," indices = jnp",python,selection_mouse
+479,1023934,"utils/nn.py",3593,28," indices = jnp.argmin",python,selection_mouse
+480,1024020,"utils/nn.py",3593,37," indices = jnp.argmin(distance",python,selection_mouse
+481,1024198,"utils/nn.py",3593,82," indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+482,1024774,"utils/nn.py",3675,0,"",python,selection_mouse
+483,1024801,"utils/nn.py",3674,0,"",python,selection_command
+484,1025277,"utils/nn.py",3675,0,"",python,selection_mouse
+485,1025294,"utils/nn.py",3674,0,"",python,selection_command
+486,1025435,"utils/nn.py",3674,1,"]",python,selection_mouse
+487,1025472,"utils/nn.py",3675,0,"",python,selection_command
+488,1025472,"utils/nn.py",3673,2,"s]",python,selection_mouse
+489,1025487,"utils/nn.py",3669,6,"dices]",python,selection_mouse
+490,1025510,"utils/nn.py",3661,14,"ebook[indices]",python,selection_mouse
+491,1025534,"utils/nn.py",3658,17,"codebook[indices]",python,selection_mouse
+492,1025549,"utils/nn.py",3657,18,".codebook[indices]",python,selection_mouse
+493,1025580,"utils/nn.py",3655,20,"lf.codebook[indices]",python,selection_mouse
+494,1025603,"utils/nn.py",3654,21,"elf.codebook[indices]",python,selection_mouse
+495,1025604,"utils/nn.py",3653,22,"self.codebook[indices]",python,selection_mouse
+496,1025617,"utils/nn.py",3652,23," self.codebook[indices]",python,selection_mouse
+497,1025650,"utils/nn.py",3651,24,"= self.codebook[indices]",python,selection_mouse
+498,1025698,"utils/nn.py",3650,25," = self.codebook[indices]",python,selection_mouse
+499,1025699,"utils/nn.py",3649,26,"z = self.codebook[indices]",python,selection_mouse
+500,1025766,"utils/nn.py",3601,74,"indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+501,1025767,"utils/nn.py",3600,75," indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+502,1025768,"utils/nn.py",3599,76," indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+503,1025801,"utils/nn.py",3598,77," indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+504,1025840,"utils/nn.py",3597,78," indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+505,1025846,"utils/nn.py",3596,79," indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+506,1025927,"utils/nn.py",3595,80," indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+507,1026239,"utils/nn.py",3594,81," indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+508,1026278,"utils/nn.py",3593,82," indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]",python,selection_mouse
+509,1033664,"models/dynamics.py",0,0,"",python,tab
+510,1041788,"models/dynamics.py",893,0,"",python,selection_mouse
+511,1041803,"models/dynamics.py",892,0,"",python,selection_command
+512,1054576,"models/dynamics.py",845,0,"",python,selection_mouse
+513,1057168,"models/dynamics.py",1655,0,"",python,selection_mouse
+514,1057793,"models/dynamics.py",1644,0,"",python,selection_mouse
+515,1059426,"models/dynamics.py",1549,0,"",python,selection_mouse
+516,1059586,"models/dynamics.py",1548,1,"0",python,selection_mouse
+517,1059729,"models/dynamics.py",1487,74," vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n",python,selection_mouse
+518,1124903,"TERMINAL",0,0,"^D",,terminal_output
+519,1124979,"TERMINAL",0,0,"ERROR:2025-06-30 23:02:32,237:jax._src.debugging:96: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 94, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 334, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py"", line 36, in exit\r\n self._orig_exit(orig_code) # type: ignore\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 94, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 334, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py"", line 36, in exit\r\n self._orig_exit(orig_code) # type: ignore\r\nSystemExit: 0\r\n",,terminal_output
+520,1126153,"sample.py",0,0,"",python,tab
+521,1126453,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run dynamics-tiny-overfit-big-lr-0000 at: https://wandb.ai/instant-uv/jafar/runs/by3qou6j\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250630_225412-by3qou6j/logs\r\n",,terminal_output
+522,1128130,"TERMINAL",0,0,"]0;tum_cte0515@hkn0732:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0732 jafar]$ ",,terminal_output
+523,1128448,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"#!/usr/bin/env bash\n\n# Unload modules that may interfere\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n\n# Activate virtual environment\nsource .venv/bin/activate\n\n# Set workspace and checkpoint directory (update slurm_job_id as needed)\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared'\n# Replace the following with the actual job id/checkpoint you want to sample from\nslurm_job_id=3301029\n\n# job_name=train_dynamics_minecraft_overfit_sample_tiny\nCHECKPOINT_DIR=$ws_dir/checkpoints/${slurm_job_id}\n\n# Example: If you want to use a specific checkpoint, set it here\n# CHECKPOINT_PATH=$ws_dir/checkpoints/3299272/dynamics-tiny-overfit-big-lr-3299272_50000/\n# Or use the latest in the directory\n# CHECKPOINT_PATH=$(ls -d $CHECKPOINT_DIR/*/ | sort | tail -n 1)\nCHECKPOINT_PATH=$CHECKPOINT_DIR/genie_1751067601_200000/\nCHECKPOINT_PATH=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/0000/genie_1751301068_2000/\necho ""Sampling from checkpoint: $CHECKPOINT_PATH""\n\npython sample.py \\n --checkpoint ""$CHECKPOINT_PATH"" \\n --tokenizer_dim=384 \\n --latent_patch_dim=32 \\n --num_patch_latents=1024 \\n --patch_size=4 \\n --tokenizer_num_blocks=8 \\n --tokenizer_num_heads=8 \\n --lam_dim=384 \\n --latent_action_dim=32 \\n --lam_patch_size=16 \\n --lam_num_blocks=8 \\n --lam_num_heads=8 \\n --dyna_dim=128 \\n --dyna_num_blocks=2 \\n --dyna_num_heads=4 \\n --maskgit_steps=1 \\n --num_latent_actions=1 \\n --seq_len=5 \\n --start_frame=0\n",shellscript,tab
+524,1134400,"scripts_horeka/train_dynamics.sh",0,0,"",shellscript,tab
+525,1135450,"scripts_horeka/train_dynamics.sh",1644,1,"",shellscript,content
+526,1136163,"scripts_horeka/train_dynamics.sh",1644,0,"0",shellscript,content
+527,1136164,"scripts_horeka/train_dynamics.sh",1645,0,"",shellscript,selection_keyboard
+528,1138612,"TERMINAL",0,0,"sh scripts_horeka/train_dynamics.sh ",,terminal_output
+529,1143277,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output
+530,1143422,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2303099\r\nSLURM_JOB_GPUS=2\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0732\r\nSLURM_JOB_START_TIME=1751316385\r\nSLURM_STEP_NODELIST=hkn0732\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751319985\r\nSLURM_PMI2_SRUN_PORT=35705\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3307524\r\nSLURM_PTY_PORT=32803\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=33\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0732\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_MEM_PER_NODE=51200\r\nSLURM_PTY_WIN_COL=143\r\nSLURM_NODELIST=hkn0732\r\nSLURM_SRUN_COMM_PORT=34929\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3307524\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0732\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=34929\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0732\r\n",,terminal_output
+531,1145303,"TERMINAL",0,0,"2025-06-30 23:02:52.608682: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751317372.621381 2308811 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751317372.627996 2308811 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1751317372.640820 2308811 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751317372.640839 2308811 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751317372.640841 2308811 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751317372.640843 2308811 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output
+532,1147371,"TERMINAL",0,0,"W0000 00:00:1751317374.709561 2308811 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output
+533,1147671,"TERMINAL",0,0,"Running on 1 devices.\r\n",,terminal_output
+534,1148473,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output
+535,1149195,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250630_230255-uxgsrfh0\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-tiny-overfit-big-lr-0000\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/uxgsrfh0\r\n",,terminal_output
+536,1150560,"TERMINAL",0,0,"2025-06-30 23:02:57.896720: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output
+537,1163308,"TERMINAL",0,0,"2025-06-30 23:03:10.646971: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output
+538,1174855,"TERMINAL",0,0,"Entering jdb:\r\n(jdb) ",,terminal_output
+539,1176803,"TERMINAL",0,0,"l",,terminal_output
+540,1176962,"TERMINAL",0,0,"\r\n> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py(87)\r\n outputs = dict(\r\n video_tokens=jax.lax.stop_gradient(tokenizer_outputs[""indices""]),\r\n latent_actions=jax.lax.stop_gradient(latent_actions_mocked),\r\n )\r\n ## \r\n outputs[""mask_rng""] = batch[""mask_rng""]\r\n-> jax.debug.breakpoint()\r\n dyna_outputs = self.dynamics(outputs, training)\r\n outputs.update(dyna_outputs)\r\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\r\n outputs[""recon""] = self.tokenizer.decode(\r\n mle_indices, batch[""videos""].shape[2:4]\r\n(jdb) ",,terminal_output
+541,1182466,"TERMINAL",0,0,"^DERROR:2025-06-30 23:03:29,767:jax._src.debugging:96: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 94, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 334, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py"", line 36, in exit\r\n self._orig_exit(orig_code) # type: ignore\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 94, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 334, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py"", line 36, in exit\r\n self._orig_exit(orig_code) # type: ignore\r\nSystemExit: 0\r\n",,terminal_output
+542,1183401,"scripts_horeka/train_dynamics.sh",0,0,"",shellscript,tab
+543,1183858,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run dynamics-tiny-overfit-big-lr-0000 at: https://wandb.ai/instant-uv/jafar/runs/uxgsrfh0\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250630_230255-uxgsrfh0/logs\r\n",,terminal_output
+544,1184149,"scripts_horeka/train_dynamics.sh",1154,0,"",shellscript,selection_mouse
+545,1185040,"TERMINAL",0,0,"]0;tum_cte0515@hkn0732:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0732 jafar]$ ",,terminal_output
+546,1185309,"genie.py",0,0,"",python,tab
+547,1185734,"genie.py",1953,0,"",python,selection_mouse
+548,1185766,"genie.py",1952,0,"",python,selection_command
+549,1191880,"genie.py",2800,0,"",python,selection_command
+550,1192459,"genie.py",2782,31,"",python,content
+551,1192479,"genie.py",2790,0,"",python,selection_command
+552,1194679,"TERMINAL",0,0,"sh scripts_horeka/train_dynamics.sh ",,terminal_output
+553,1195283,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output
+554,1195428,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2303099\r\nSLURM_JOB_GPUS=2\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0732\r\nSLURM_JOB_START_TIME=1751316385\r\nSLURM_STEP_NODELIST=hkn0732\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751319985\r\nSLURM_PMI2_SRUN_PORT=35705\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3307524\r\nSLURM_PTY_PORT=32803\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=33\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0732\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_MEM_PER_NODE=51200\r\nSLURM_PTY_WIN_COL=143\r\nSLURM_NODELIST=hkn0732\r\nSLURM_SRUN_COMM_PORT=34929\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3307524\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0732\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=34929\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0732\r\n",,terminal_output
+555,1197365,"TERMINAL",0,0,"2025-06-30 23:03:44.648025: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751317424.661402 2309498 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751317424.666207 2309498 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1751317424.679418 2309498 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751317424.679436 2309498 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751317424.679438 2309498 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751317424.679440 2309498 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output
+556,1199675,"TERMINAL",0,0,"W0000 00:00:1751317427.004840 2309498 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output
+557,1199960,"TERMINAL",0,0,"Running on 1 devices.\r\n",,terminal_output
+558,1200789,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output
+559,1201293,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250630_230348-qn4u3nx6\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-tiny-overfit-big-lr-0000\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/qn4u3nx6\r\n",,terminal_output
+560,1202721,"TERMINAL",0,0,"2025-06-30 23:03:50.013260: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output
+561,1215619,"TERMINAL",0,0,"2025-06-30 23:04:02.956113: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output
+562,1225766,"scripts_horeka/train_dynamics.sh",0,0,"",shellscript,tab
+563,1227028,"sample.py",0,0,"",python,tab
+564,1230228,"sample.py",2960,0,"",python,selection_mouse
+565,1230231,"sample.py",2959,0,"",python,selection_command
+566,1230701,"TERMINAL",0,0,"2025-06-30 23:04:18.038921: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output
+567,1230915,"sample.py",2634,0,"",python,selection_mouse
+568,1237957,"TERMINAL",0,0,"2025-06-30 23:04:25.295946: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output
+569,1241143,"sample.py",3137,0,"",python,selection_mouse
+570,1241615,"sample.py",3200,0,"",python,selection_mouse
+571,1241790,"sample.py",3197,4,"Pass",python,selection_mouse
+572,1242257,"sample.py",3151,0,"",python,selection_mouse
+573,1242423,"sample.py",3142,15,"_oneshot_sample",python,selection_mouse
+574,1243381,"sample.py",3152,0,"",python,selection_mouse
+575,1243963,"sample.py",3345,0,"",python,selection_mouse
+576,1244112,"sample.py",3344,12,"action_batch",python,selection_mouse
+577,1244629,"sample.py",3280,0,"",python,selection_mouse
+578,1244794,"sample.py",3272,11,"video_batch",python,selection_mouse
+579,1245382,"sample.py",3331,0,"",python,selection_mouse
+580,1245535,"sample.py",3329,14,"latent_actions",python,selection_mouse
+581,1245744,"sample.py",3265,78,"videos=video_batch, # full batch, not just first frame\n latent_actions",python,selection_mouse
+582,1245744,"sample.py",3271,72,"=video_batch, # full batch, not just first frame\n latent_actions",python,selection_mouse
+583,1245745,"sample.py",3272,71,"video_batch, # full batch, not just first frame\n latent_actions",python,selection_mouse
+584,1246155,"sample.py",3276,0,"",python,selection_mouse
+585,1246156,"sample.py",3272,11,"video_batch",python,selection_mouse
+586,1257274,"TERMINAL",0,0,"batch shape: (1, 16, 90, 160, 3)\r\n",,terminal_output
+587,1270642,"TERMINAL",0,0,"2025-06-30 23:04:57.980194: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-30 23:04:57.980748: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-30 23:04:57.980860: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-30 23:04:57.981542: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-30 23:04:57.982596: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output
+588,1276710,"scripts_horeka/train_dynamics.sh",0,0,"",shellscript,tab
+589,1281517,"sample.py",0,0,"",python,tab
+590,1284238,"genie.py",0,0,"",python,tab
+591,1298570,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"",shellscript,tab
+592,1299830,"genie.py",0,0,"",python,tab
+593,1300431,"models/dynamics.py",0,0,"",python,tab
+594,1317935,"TERMINAL",0,0,"Step 0, loss: 8.689549446105957, step time: 60439.353942871094ms\r\n",,terminal_output
+595,1318049,"TERMINAL",0,0,"Step 1, loss: 8.141585350036621, step time: 29.199600219726562ms\r\n",,terminal_output
+596,1318133,"TERMINAL",0,0,"Step 2, loss: nan, step time: 22.246122360229492ms\r\n",,terminal_output
+597,1318255,"TERMINAL",0,0,"Step 3, loss: 7.575166702270508, step time: 22.265911102294922ms\r\n",,terminal_output
+598,1318318,"TERMINAL",0,0,"Step 4, loss: 7.437936305999756, step time: 20.27750015258789ms\r\n",,terminal_output
+599,1318412,"TERMINAL",0,0,"Step 5, loss: 7.1324462890625, step time: 19.919872283935547ms\r\n",,terminal_output
+600,1318512,"TERMINAL",0,0,"Step 6, loss: 6.92954683303833, step time: 20.08509635925293ms\r\n",,terminal_output
+601,1318580,"TERMINAL",0,0,"Step 7, loss: 6.766777515411377, step time: 19.71578598022461ms\r\n",,terminal_output
+602,1318669,"TERMINAL",0,0,"Step 8, loss: 6.593115329742432, step time: 19.70386505126953ms\r\n",,terminal_output
+603,1318730,"TERMINAL",0,0,"Step 9, loss: 6.6528544425964355, step time: 21.1489200592041ms\r\n",,terminal_output
+604,1318820,"TERMINAL",0,0,"Step 10, loss: 6.4016289710998535, step time: 20.355224609375ms\r\n",,terminal_output
+605,1318904,"TERMINAL",0,0,"Step 11, loss: 6.411777973175049, step time: 19.99354362487793ms\r\n",,terminal_output
+606,1319002,"TERMINAL",0,0,"Step 12, loss: 6.155959129333496, step time: 20.542144775390625ms\r\n",,terminal_output
+607,1319096,"TERMINAL",0,0,"Step 13, loss: 6.087970733642578, step time: 19.888877868652344ms\r\n",,terminal_output
+608,1319162,"models/dynamics.py",1049,0,"",python,selection_mouse
+609,1319299,"models/dynamics.py",1047,5,"batch",python,selection_mouse
+610,1319315,"TERMINAL",0,0,"Step 14, loss: 5.9847331047058105, step time: 20.003318786621094ms\r\n",,terminal_output
+611,1319318,"TERMINAL",0,0,"Step 15, loss: 5.949831008911133, step time: 21.268844604492188ms\r\n",,terminal_output
+612,1319416,"TERMINAL",0,0,"Step 16, loss: 5.889817237854004, step time: 20.215511322021484ms\r\nStep 17, loss: 5.840466499328613, step time: 19.777536392211914ms\r\n",,terminal_output
+613,1319491,"models/dynamics.py",1047,6,"batch[",python,selection_mouse
+614,1319492,"models/dynamics.py",1047,15,"batch[""mask_rng",python,selection_mouse
+615,1319661,"TERMINAL",0,0,"Step 18, loss: 6.0393571853637695, step time: 20.39170265197754ms\r\nStep 19, loss: 5.65833044052124, step time: 20.65587043762207ms\r\n",,terminal_output
+616,1319709,"TERMINAL",0,0,"Step 20, loss: 5.579617023468018, step time: 19.830942153930664ms\r\n",,terminal_output
+617,1319769,"TERMINAL",0,0,"Step 21, loss: 5.600975036621094, step time: 19.82569694519043ms\r\n",,terminal_output
+618,1319870,"TERMINAL",0,0,"Step 22, loss: 5.566799640655518, step time: 19.463062286376953ms\r\n",,terminal_output
+619,1319922,"models/dynamics.py",1047,16,"batch[""mask_rng""",python,selection_mouse
+620,1320019,"TERMINAL",0,0,"Step 23, loss: 5.4217376708984375, step time: 19.492387771606445ms\r\n",,terminal_output
+621,1320069,"TERMINAL",0,0,"Step 24, loss: 5.361861705780029, step time: 19.804000854492188ms\r\n",,terminal_output +622,1320129,"TERMINAL",0,0,"Step 25, loss: 5.370817184448242, step time: 19.557476043701172ms\r\n",,terminal_output +623,1320258,"TERMINAL",0,0,"Step 26, loss: 5.3421244621276855, step time: 19.62733268737793ms\r\n",,terminal_output +624,1320310,"TERMINAL",0,0,"Step 27, loss: 5.286179542541504, step time: 19.88387107849121ms\r\n",,terminal_output +625,1320412,"TERMINAL",0,0,"Step 28, loss: 5.165326118469238, step time: 19.555330276489258ms\r\n",,terminal_output +626,1320462,"TERMINAL",0,0,"Step 29, loss: 5.11193323135376, step time: 19.435405731201172ms\r\n",,terminal_output +627,1320559,"TERMINAL",0,0,"Step 30, loss: 5.064901828765869, step time: 20.736217498779297ms\r\n",,terminal_output +628,1320655,"models/dynamics.py",1063,0,"",python,selection_mouse +629,1320656,"models/dynamics.py",1062,3,"""])",python,selection_mouse +630,1320717,"TERMINAL",0,0,"Step 31, loss: 5.192666530609131, step time: 19.435644149780273ms\r\n",,terminal_output +631,1320764,"TERMINAL",0,0,"Step 32, loss: 5.176880836486816, step time: 19.80900764465332ms\r\n",,terminal_output +632,1320812,"models/dynamics.py",1062,3,"""])",python,selection_mouse +633,1320848,"models/dynamics.py",1054,11,"mask_rng""])",python,selection_mouse +634,1320858,"TERMINAL",0,0,"Step 33, loss: 5.023229598999023, step time: 20.035266876220703ms\r\n",,terminal_output +635,1320984,"models/dynamics.py",1053,12,"""mask_rng""])",python,selection_mouse +636,1320988,"models/dynamics.py",1052,13,"[""mask_rng""])",python,selection_mouse +637,1320989,"models/dynamics.py",1047,18,"batch[""mask_rng""])",python,selection_mouse +638,1321027,"TERMINAL",0,0,"Step 34, loss: 4.929574012756348, step time: 19.577980041503906ms\r\nStep 35, loss: 4.955499649047852, step time: 19.459962844848633ms\r\n",,terminal_output +639,1321105,"TERMINAL",0,0,"Step 36, loss: 4.8841047286987305, step time: 20.039796829223633ms\r\n",,terminal_output +640,1321203,"TERMINAL",0,0,"Step 37, loss: 4.922787189483643, step time: 19.22154426574707ms\r\n",,terminal_output +641,1321279,"TERMINAL",0,0,"Step 38, loss: 4.72947359085083, step time: 19.55246925354004ms\r\n",,terminal_output +642,1321408,"TERMINAL",0,0,"Step 39, loss: 4.814236164093018, step time: 19.680023193359375ms\r\n",,terminal_output +643,1321477,"TERMINAL",0,0,"Step 40, loss: 4.714212417602539, step time: 20.170211791992188ms\r\n",,terminal_output +644,1321616,"TERMINAL",0,0,"Step 41, loss: 4.763108730316162, step time: 19.46854591369629ms\r\nStep 42, loss: 4.662074089050293, step time: 20.12467384338379ms\r\n",,terminal_output +645,1321727,"TERMINAL",0,0,"Step 43, loss: 4.773277282714844, step time: 19.553422927856445ms\r\n",,terminal_output +646,1321775,"TERMINAL",0,0,"Step 44, loss: 4.548850059509277, step time: 19.750356674194336ms\r\n",,terminal_output +647,1321872,"TERMINAL",0,0,"Step 45, loss: 4.621326446533203, step time: 20.503997802734375ms\r\n",,terminal_output +648,1322000,"TERMINAL",0,0,"Step 46, loss: 4.533177375793457, step time: 19.841432571411133ms\r\n",,terminal_output +649,1322040,"TERMINAL",0,0,"Step 47, loss: 4.456869125366211, step time: 19.841670989990234ms\r\n",,terminal_output +650,1322180,"TERMINAL",0,0,"Step 48, loss: 4.505386829376221, step time: 20.381450653076172ms\r\n",,terminal_output +651,1322246,"TERMINAL",0,0,"Step 49, loss: 4.382498741149902, step time: 19.611835479736328ms\r\n",,terminal_output +652,1322344,"TERMINAL",0,0,"Step 50, loss: 4.244980335235596, step time: 
19.60468292236328ms\r\n",,terminal_output +653,1322432,"TERMINAL",0,0,"Step 51, loss: 4.340338706970215, step time: 19.68979835510254ms\r\n",,terminal_output +654,1322558,"TERMINAL",0,0,"Step 52, loss: 4.607419013977051, step time: 19.689083099365234ms\r\n",,terminal_output +655,1322644,"TERMINAL",0,0,"Step 53, loss: 4.538043975830078, step time: 19.44756507873535ms\r\n",,terminal_output +656,1322703,"TERMINAL",0,0,"Step 54, loss: 4.338745594024658, step time: 20.088672637939453ms\r\n",,terminal_output +657,1322756,"TERMINAL",0,0,"Step 55, loss: 4.565736770629883, step time: 19.86551284790039ms\r\n",,terminal_output +658,1322865,"TERMINAL",0,0,"Step 56, loss: 4.228820323944092, step time: 19.644975662231445ms\r\n",,terminal_output +659,1322918,"TERMINAL",0,0,"Step 57, loss: 4.443600654602051, step time: 20.264387130737305ms\r\n",,terminal_output +660,1323121,"TERMINAL",0,0,"Step 58, loss: 4.220999717712402, step time: 19.679546356201172ms\r\n",,terminal_output +661,1323177,"TERMINAL",0,0,"Step 59, loss: 4.1761651039123535, step time: 19.78325843811035ms\r\n",,terminal_output +662,1323233,"TERMINAL",0,0,"Step 60, loss: 4.422876834869385, step time: 23.047685623168945ms\r\n",,terminal_output +663,1323297,"TERMINAL",0,0,"Step 61, loss: 4.430337905883789, step time: 20.53093910217285ms\r\n",,terminal_output +664,1323341,"TERMINAL",0,0,"Step 62, loss: 4.457825183868408, step time: 19.686460494995117ms\r\n",,terminal_output +665,1323448,"TERMINAL",0,0,"Step 63, loss: 4.142486572265625, step time: 20.38884162902832ms\r\n",,terminal_output +666,1323495,"TERMINAL",0,0,"Step 64, loss: 4.222621917724609, step time: 19.565105438232422ms\r\n",,terminal_output +667,1323590,"TERMINAL",0,0,"Step 65, loss: 4.0048322677612305, step time: 19.64259147644043ms\r\n",,terminal_output +668,1323684,"TERMINAL",0,0,"Step 66, loss: 4.144190311431885, step time: 20.06840705871582ms\r\n",,terminal_output +669,1323824,"TERMINAL",0,0,"Step 67, loss: 3.9601473808288574, step time: 19.7904109954834ms\r\n",,terminal_output +670,1323919,"TERMINAL",0,0,"Step 68, loss: 4.048372745513916, step time: 19.58012580871582ms\r\n",,terminal_output +671,1324008,"TERMINAL",0,0,"Step 69, loss: 4.0442280769348145, step time: 20.12038230895996ms\r\n",,terminal_output +672,1324074,"TERMINAL",0,0,"Step 70, loss: 4.032417297363281, step time: 19.4854736328125ms\r\n",,terminal_output +673,1324130,"TERMINAL",0,0,"Step 71, loss: 3.8850653171539307, step time: 19.29306983947754ms\r\n",,terminal_output +674,1324365,"TERMINAL",0,0,"Step 72, loss: 4.062219619750977, step time: 19.76609230041504ms\r\nStep 73, loss: 3.971421241760254, step time: 20.354509353637695ms\r\nStep 74, loss: 3.9980955123901367, step time: 20.97034454345703ms\r\n",,terminal_output +675,1324469,"TERMINAL",0,0,"Step 75, loss: 4.213745594024658, step time: 20.054340362548828ms\r\n",,terminal_output +676,1324569,"TERMINAL",0,0,"Step 76, loss: 3.9539215564727783, step time: 20.787715911865234ms\r\n",,terminal_output +677,1324664,"TERMINAL",0,0,"Step 77, loss: 4.094245910644531, step time: 19.605636596679688ms\r\n",,terminal_output +678,1324785,"TERMINAL",0,0,"Step 78, loss: 3.931600570678711, step time: 20.390987396240234ms\r\n",,terminal_output +679,1324826,"TERMINAL",0,0,"Step 79, loss: 3.8395156860351562, step time: 19.40774917602539ms\r\n",,terminal_output +680,1324930,"TERMINAL",0,0,"Step 80, loss: 4.026238441467285, step time: 19.553422927856445ms\r\n",,terminal_output +681,1325076,"TERMINAL",0,0,"Step 81, loss: 3.9452438354492188, step time: 19.817352294921875ms\r\nStep 82, 
loss: 3.94710111618042, step time: 19.41967010498047ms\r\n",,terminal_output +682,1325216,"sample.py",0,0,"",python,tab +683,1325360,"TERMINAL",0,0,"Step 83, loss: 3.8663227558135986, step time: 19.44279670715332ms\r\nStep 84, loss: 3.8470680713653564, step time: 20.16448974609375ms\r\nStep 85, loss: 3.790492296218872, step time: 19.942760467529297ms\r\n",,terminal_output +684,1325411,"TERMINAL",0,0,"Step 86, loss: 4.401547431945801, step time: 20.224809646606445ms\r\n",,terminal_output +685,1325526,"TERMINAL",0,0,"Step 87, loss: 3.978795051574707, step time: 34.75189208984375ms\r\n",,terminal_output +686,1325639,"TERMINAL",0,0,"Step 88, loss: 3.840873956680298, step time: 21.436214447021484ms\r\n",,terminal_output +687,1325703,"TERMINAL",0,0,"Step 89, loss: 3.81491756439209, step time: 19.945621490478516ms\r\n",,terminal_output +688,1325832,"TERMINAL",0,0,"Step 90, loss: 3.8038806915283203, step time: 20.71380615234375ms\r\n",,terminal_output +689,1325875,"TERMINAL",0,0,"Step 91, loss: 3.727324962615967, step time: 19.932270050048828ms\r\n",,terminal_output +690,1325992,"TERMINAL",0,0,"Step 92, loss: 3.872786521911621, step time: 19.99831199645996ms\r\n",,terminal_output +691,1326060,"TERMINAL",0,0,"Step 93, loss: 4.219388484954834, step time: 20.310640335083008ms\r\n",,terminal_output +692,1326236,"TERMINAL",0,0,"Step 94, loss: 3.729146957397461, step time: 20.13707160949707ms\r\nStep 95, loss: 3.887571096420288, step time: 19.913434982299805ms\r\n",,terminal_output +693,1326312,"TERMINAL",0,0,"Step 96, loss: 3.71624755859375, step time: 20.41172981262207ms\r\n",,terminal_output +694,1326407,"TERMINAL",0,0,"Step 97, loss: 3.71356201171875, step time: 20.094871520996094ms\r\n",,terminal_output +695,1326547,"TERMINAL",0,0,"Step 98, loss: 3.6644108295440674, step time: 20.277976989746094ms\r\n",,terminal_output +696,1326669,"TERMINAL",0,0,"Step 99, loss: 3.8030941486358643, step time: 20.52927017211914ms\r\nStep 100, loss: 3.6369082927703857, step time: 19.81973648071289ms\r\n",,terminal_output +697,1326902,"TERMINAL",0,0,"Step 101, loss: 3.709862232208252, step time: 19.64735984802246ms\r\n",,terminal_output +698,1326932,"TERMINAL",0,0,"Step 102, loss: 3.6857810020446777, step time: 20.144939422607422ms\r\n",,terminal_output +699,1327030,"TERMINAL",0,0,"Step 103, loss: 3.9463818073272705, step time: 21.70562744140625ms\r\n",,terminal_output +700,1327126,"TERMINAL",0,0,"Step 104, loss: 3.690366268157959, step time: 19.405364990234375ms\r\n",,terminal_output +701,1327192,"TERMINAL",0,0,"Step 105, loss: 3.8447508811950684, step time: 19.895553588867188ms\r\n",,terminal_output +702,1327235,"TERMINAL",0,0,"Step 106, loss: 3.672186851501465, step time: 19.43683624267578ms\r\n",,terminal_output +703,1327274,"TERMINAL",0,0,"Step 107, loss: 3.8837902545928955, step time: 19.268035888671875ms\r\n",,terminal_output +704,1327436,"TERMINAL",0,0,"Step 108, loss: 4.0153961181640625, step time: 19.726037979125977ms\r\n",,terminal_output +705,1327456,"TERMINAL",0,0,"Step 109, loss: 3.862694263458252, step time: 19.49477195739746ms\r\n",,terminal_output +706,1327509,"TERMINAL",0,0,"Step 110, loss: 3.7384626865386963, step time: 19.793033599853516ms\r\n",,terminal_output +707,1327615,"TERMINAL",0,0,"Step 111, loss: 3.8446202278137207, step time: 20.292282104492188ms\r\n",,terminal_output +708,1327731,"TERMINAL",0,0,"Step 112, loss: 3.6656250953674316, step time: 20.0347900390625ms\r\n",,terminal_output +709,1327901,"TERMINAL",0,0,"Step 113, loss: 3.714189052581787, step time: 
19.76752281188965ms\r\n",,terminal_output +710,1327902,"TERMINAL",0,0,"Step 114, loss: 3.7243764400482178, step time: 20.346403121948242ms\r\n",,terminal_output +711,1328056,"TERMINAL",0,0,"Step 115, loss: 3.7540507316589355, step time: 19.79684829711914ms\r\nStep 116, loss: 3.789947509765625, step time: 19.76752281188965ms\r\n",,terminal_output +712,1328152,"TERMINAL",0,0,"Step 117, loss: 3.6017825603485107, step time: 20.136594772338867ms\r\n",,terminal_output +713,1328218,"TERMINAL",0,0,"Step 118, loss: 3.5994679927825928, step time: 23.03290367126465ms\r\n",,terminal_output +714,1328313,"TERMINAL",0,0,"Step 119, loss: 3.686802625656128, step time: 20.519733428955078ms\r\n",,terminal_output +715,1328503,"TERMINAL",0,0,"Step 120, loss: 3.5096237659454346, step time: 20.505189895629883ms\r\nStep 121, loss: 3.690826892852783, step time: 20.081043243408203ms\r\n",,terminal_output +716,1328590,"TERMINAL",0,0,"Step 122, loss: 3.7813122272491455, step time: 19.890785217285156ms\r\n",,terminal_output +717,1328937,"TERMINAL",0,0,"Step 123, loss: 3.531323194503784, step time: 366.52517318725586ms\r\n",,terminal_output +718,1329033,"TERMINAL",0,0,"Step 124, loss: 3.5179362297058105, step time: 27.69613265991211ms\r\n",,terminal_output +719,1329114,"TERMINAL",0,0,"Step 125, loss: 3.478512763977051, step time: 22.10092544555664ms\r\n",,terminal_output +720,1329285,"TERMINAL",0,0,"Step 126, loss: 3.5144429206848145, step time: 21.200180053710938ms\r\nStep 127, loss: 3.4956228733062744, step time: 20.28346061706543ms\r\n",,terminal_output +721,1329382,"TERMINAL",0,0,"Step 128, loss: 3.4462368488311768, step time: 20.081520080566406ms\r\n",,terminal_output +722,1329535,"models/dynamics.py",0,0,"",python,tab +723,1329636,"TERMINAL",0,0,"Step 129, loss: 3.6973302364349365, step time: 20.608901977539062ms\r\nStep 130, loss: 3.664034128189087, step time: 20.305395126342773ms\r\nStep 131, loss: 3.5010135173797607, step time: 19.864797592163086ms\r\n",,terminal_output +724,1329770,"TERMINAL",0,0,"Step 132, loss: 3.51615571975708, step time: 20.598173141479492ms\r\n",,terminal_output +725,1329820,"TERMINAL",0,0,"Step 133, loss: 3.743128776550293, step time: 19.8516845703125ms\r\n",,terminal_output +726,1329933,"TERMINAL",0,0,"Step 134, loss: 3.4835312366485596, step time: 22.41039276123047ms\r\n",,terminal_output +727,1330001,"TERMINAL",0,0,"Step 135, loss: 3.469942569732666, step time: 20.66802978515625ms\r\n",,terminal_output +728,1330071,"TERMINAL",0,0,"Step 136, loss: 3.4796574115753174, step time: 21.898984909057617ms\r\n",,terminal_output +729,1330179,"TERMINAL",0,0,"Step 137, loss: 3.5361945629119873, step time: 19.855976104736328ms\r\n",,terminal_output +730,1330268,"TERMINAL",0,0,"Step 138, loss: 3.8550634384155273, step time: 20.08962631225586ms\r\n",,terminal_output +731,1330329,"TERMINAL",0,0,"Step 139, loss: 3.362541913986206, step time: 19.395112991333008ms\r\n",,terminal_output +732,1330411,"TERMINAL",0,0,"Step 140, loss: 3.4656248092651367, step time: 20.17807960510254ms\r\n",,terminal_output +733,1330515,"TERMINAL",0,0,"Step 141, loss: 3.4287610054016113, step time: 20.415067672729492ms\r\n",,terminal_output +734,1330583,"TERMINAL",0,0,"Step 142, loss: 3.577120780944824, step time: 19.89006996154785ms\r\n",,terminal_output +735,1330694,"TERMINAL",0,0,"Step 143, loss: 3.3827688694000244, step time: 19.776582717895508ms\r\n",,terminal_output +736,1330784,"TERMINAL",0,0,"Step 144, loss: 3.4101600646972656, step time: 20.439624786376953ms\r\n",,terminal_output +737,1330890,"TERMINAL",0,0,"Step 
145, loss: 3.420513391494751, step time: 19.89579200744629ms\r\n",,terminal_output +738,1330971,"TERMINAL",0,0,"Step 146, loss: 3.424936294555664, step time: 19.841432571411133ms\r\n",,terminal_output +739,1331134,"TERMINAL",0,0,"Step 147, loss: 3.432586193084717, step time: 20.328521728515625ms\r\nStep 148, loss: 3.370732545852661, step time: 19.767045974731445ms\r\n",,terminal_output +740,1331283,"TERMINAL",0,0,"Step 149, loss: 3.462331771850586, step time: 19.772768020629883ms\r\nStep 150, loss: 3.353180170059204, step time: 20.44081687927246ms\r\n",,terminal_output +741,1331381,"TERMINAL",0,0,"Step 151, loss: 3.258700370788574, step time: 19.90509033203125ms\r\n",,terminal_output +742,1331509,"TERMINAL",0,0,"Step 152, loss: 3.239636182785034, step time: 19.989967346191406ms\r\n",,terminal_output +743,1331583,"TERMINAL",0,0,"Step 153, loss: 3.4568710327148438, step time: 20.772933959960938ms\r\n",,terminal_output +744,1331625,"TERMINAL",0,0,"Step 154, loss: 3.323068141937256, step time: 20.0655460357666ms\r\n",,terminal_output +745,1331766,"TERMINAL",0,0,"Step 155, loss: 3.480605125427246, step time: 19.63663101196289ms\r\n",,terminal_output +746,1331822,"TERMINAL",0,0,"Step 156, loss: 3.3582165241241455, step time: 20.126819610595703ms\r\n",,terminal_output +747,1331917,"TERMINAL",0,0,"Step 157, loss: 3.4689323902130127, step time: 19.412755966186523ms\r\n",,terminal_output +748,1331991,"TERMINAL",0,0,"Step 158, loss: 3.3808605670928955, step time: 19.381284713745117ms\r\n",,terminal_output +749,1332101,"TERMINAL",0,0,"Step 159, loss: 3.447000026702881, step time: 19.863367080688477ms\r\n",,terminal_output +750,1332218,"TERMINAL",0,0,"Step 160, loss: 3.3215343952178955, step time: 19.484758377075195ms\r\n",,terminal_output +751,1332270,"TERMINAL",0,0,"Step 161, loss: 3.5420103073120117, step time: 19.3634033203125ms\r\n",,terminal_output +752,1332327,"TERMINAL",0,0,"Step 162, loss: 3.2820849418640137, step time: 19.924163818359375ms\r\n",,terminal_output +753,1332501,"TERMINAL",0,0,"Step 163, loss: 3.3535659313201904, step time: 19.43230628967285ms\r\n",,terminal_output +754,1332540,"TERMINAL",0,0,"Step 164, loss: 3.2774970531463623, step time: 19.3636417388916ms\r\n",,terminal_output +755,1332700,"TERMINAL",0,0,"Step 165, loss: 3.87882399559021, step time: 20.27153968811035ms\r\nStep 166, loss: 3.492798328399658, step time: 20.88475227355957ms\r\n",,terminal_output +756,1332846,"TERMINAL",0,0,"Step 167, loss: 3.374270439147949, step time: 20.02406120300293ms\r\nStep 168, loss: 3.3851304054260254, step time: 20.323514938354492ms\r\n",,terminal_output +757,1333013,"TERMINAL",0,0,"Step 169, loss: 3.3242552280426025, step time: 19.65951919555664ms\r\n",,terminal_output +758,1333085,"TERMINAL",0,0,"Step 170, loss: 3.2858192920684814, step time: 19.842863082885742ms\r\n",,terminal_output +759,1333188,"TERMINAL",0,0,"Step 171, loss: 3.2570836544036865, step time: 20.277738571166992ms\r\n",,terminal_output +760,1333243,"TERMINAL",0,0,"Step 172, loss: 3.258340835571289, step time: 22.983789443969727ms\r\n",,terminal_output +761,1333288,"TERMINAL",0,0,"Step 173, loss: 3.6294503211975098, step time: 20.39957046508789ms\r\n",,terminal_output +762,1333471,"TERMINAL",0,0,"Step 174, loss: 3.1668145656585693, step time: 20.561695098876953ms\r\nStep 175, loss: 3.283113479614258, step time: 19.65951919555664ms\r\n",,terminal_output +763,1333615,"TERMINAL",0,0,"Step 176, loss: 3.1662378311157227, step time: 19.62900161743164ms\r\n",,terminal_output +764,1333659,"TERMINAL",0,0,"Step 177, loss: 
3.2616539001464844, step time: 19.956588745117188ms\r\n",,terminal_output +765,1333754,"TERMINAL",0,0,"Step 178, loss: 3.4413797855377197, step time: 19.549846649169922ms\r\n",,terminal_output +766,1333812,"TERMINAL",0,0,"Step 179, loss: 3.241896152496338, step time: 19.466638565063477ms\r\n",,terminal_output +767,1333931,"TERMINAL",0,0,"Step 180, loss: 3.082934617996216, step time: 20.021915435791016ms\r\n",,terminal_output +768,1334026,"TERMINAL",0,0,"Step 181, loss: 3.4151902198791504, step time: 19.38033103942871ms\r\n",,terminal_output +769,1334210,"TERMINAL",0,0,"Step 182, loss: 3.234752655029297, step time: 21.836519241333008ms\r\nStep 183, loss: 3.6239774227142334, step time: 20.069599151611328ms\r\n",,terminal_output +770,1334290,"TERMINAL",0,0,"Step 184, loss: 3.1991159915924072, step time: 19.34075355529785ms\r\n",,terminal_output +771,1334352,"TERMINAL",0,0,"Step 185, loss: 3.329585552215576, step time: 19.718408584594727ms\r\n",,terminal_output +772,1334493,"TERMINAL",0,0,"Step 186, loss: 3.311338424682617, step time: 20.47276496887207ms\r\n",,terminal_output +773,1334569,"TERMINAL",0,0,"Step 187, loss: 3.7223291397094727, step time: 19.720792770385742ms\r\n",,terminal_output +774,1334606,"TERMINAL",0,0,"Step 188, loss: 3.2517049312591553, step time: 19.852399826049805ms\r\n",,terminal_output +775,1334739,"TERMINAL",0,0,"Step 189, loss: 3.132556915283203, step time: 20.326852798461914ms\r\n",,terminal_output +776,1334812,"TERMINAL",0,0,"Step 190, loss: 3.2743616104125977, step time: 19.844532012939453ms\r\n",,terminal_output +777,1334889,"TERMINAL",0,0,"Step 191, loss: 3.2310831546783447, step time: 19.716978073120117ms\r\n",,terminal_output +778,1335057,"TERMINAL",0,0,"Step 192, loss: 3.190584182739258, step time: 20.290851593017578ms\r\nStep 193, loss: 3.171610116958618, step time: 19.655704498291016ms\r\n",,terminal_output +779,1335216,"TERMINAL",0,0,"Step 194, loss: 3.269489288330078, step time: 19.727230072021484ms\r\nStep 195, loss: 3.1598808765411377, step time: 19.92321014404297ms\r\n",,terminal_output +780,1335305,"TERMINAL",0,0,"Step 196, loss: 3.367492437362671, step time: 19.338607788085938ms\r\n",,terminal_output +781,1335429,"TERMINAL",0,0,"Step 197, loss: 3.2049367427825928, step time: 19.392728805541992ms\r\n",,terminal_output +782,1335515,"TERMINAL",0,0,"Step 198, loss: 3.154958724975586, step time: 20.016908645629883ms\r\n",,terminal_output +783,1335608,"TERMINAL",0,0,"Step 199, loss: 3.1447906494140625, step time: 19.339561462402344ms\r\n",,terminal_output +784,1335757,"TERMINAL",0,0,"Step 200, loss: 3.4583847522735596, step time: 19.35553550720215ms\r\nStep 201, loss: 3.09238600730896, step time: 19.839048385620117ms\r\n",,terminal_output +785,1335943,"TERMINAL",0,0,"Step 202, loss: 3.202162504196167, step time: 19.610166549682617ms\r\n",,terminal_output +786,1336034,"TERMINAL",0,0,"Step 203, loss: 3.1412785053253174, step time: 19.44422721862793ms\r\n",,terminal_output +787,1336175,"TERMINAL",0,0,"Step 204, loss: 3.153475761413574, step time: 19.875764846801758ms\r\n",,terminal_output +788,1336570,"TERMINAL",0,0,"Step 205, loss: 3.101614475250244, step time: 19.33145523071289ms\r\n",,terminal_output +789,1336571,"TERMINAL",0,0,"Step 206, loss: 3.169607639312744, step time: 19.231796264648438ms\r\n",,terminal_output +790,1336571,"TERMINAL",0,0,"Step 207, loss: 3.1042990684509277, step time: 19.87290382385254ms\r\n",,terminal_output +791,1336571,"TERMINAL",0,0,"Step 208, loss: 3.2059731483459473, step time: 19.375324249267578ms\r\nStep 209, loss: 
3.2044546604156494, step time: 21.2399959564209ms\r\n",,terminal_output +792,1336571,"TERMINAL",0,0,"Step 210, loss: 3.235966444015503, step time: 20.498991012573242ms\r\n",,terminal_output +793,1336624,"TERMINAL",0,0,"Step 211, loss: 3.4265425205230713, step time: 19.520282745361328ms\r\n",,terminal_output +794,1336935,"TERMINAL",0,0,"Step 212, loss: 3.084254503250122, step time: 19.40441131591797ms\r\n",,terminal_output +795,1337146,"TERMINAL",0,0,"Step 213, loss: 3.0514206886291504, step time: 19.94919776916504ms\r\nStep 214, loss: 3.1215689182281494, step time: 20.54882049560547ms\r\n",,terminal_output +796,1337146,"TERMINAL",0,0,"Step 215, loss: 3.192633628845215, step time: 19.42729949951172ms\r\n",,terminal_output +797,1337146,"TERMINAL",0,0,"Step 216, loss: 3.094531297683716, step time: 20.014286041259766ms\r\n",,terminal_output +798,1337146,"TERMINAL",0,0,"Step 217, loss: 3.117830991744995, step time: 19.490718841552734ms\r\n",,terminal_output +799,1337223,"TERMINAL",0,0,"Step 218, loss: 3.07887864112854, step time: 20.277738571166992ms\r\n",,terminal_output +800,1337385,"TERMINAL",0,0,"Step 219, loss: 3.0687294006347656, step time: 19.931554794311523ms\r\nStep 220, loss: 3.1078684329986572, step time: 19.4246768951416ms\r\n",,terminal_output +801,1337468,"TERMINAL",0,0,"Step 221, loss: 3.0797319412231445, step time: 19.512414932250977ms\r\n",,terminal_output +802,1337582,"TERMINAL",0,0,"Step 222, loss: 3.193082571029663, step time: 19.92011070251465ms\r\n",,terminal_output +803,1337693,"TERMINAL",0,0,"Step 223, loss: 2.9837498664855957, step time: 19.509077072143555ms\r\n",,terminal_output +804,1337742,"TERMINAL",0,0,"Step 224, loss: 3.117335081100464, step time: 19.66261863708496ms\r\n",,terminal_output +805,1337913,"TERMINAL",0,0,"Step 225, loss: 3.111912250518799, step time: 20.451068878173828ms\r\n",,terminal_output +806,1338005,"TERMINAL",0,0,"Step 226, loss: 2.986440658569336, step time: 19.809961318969727ms\r\n",,terminal_output +807,1338203,"TERMINAL",0,0,"Step 227, loss: 3.088261842727661, step time: 19.814729690551758ms\r\nStep 228, loss: 2.9447414875030518, step time: 20.22075653076172ms\r\n",,terminal_output +808,1338362,"TERMINAL",0,0,"Step 229, loss: 3.016594171524048, step time: 19.751787185668945ms\r\n",,terminal_output +809,1338774,"TERMINAL",0,0,"Step 230, loss: 3.0649118423461914, step time: 22.62735366821289ms\r\n",,terminal_output +810,1338774,"TERMINAL",0,0,"Step 231, loss: 3.2787132263183594, step time: 20.839214324951172ms\r\n",,terminal_output +811,1338952,"TERMINAL",0,0,"Step 232, loss: 3.0221245288848877, step time: 19.938945770263672ms\r\n",,terminal_output +812,1338953,"TERMINAL",0,0,"Step 233, loss: 3.0814545154571533, step time: 19.742965698242188ms\r\n",,terminal_output +813,1338953,"TERMINAL",0,0,"Step 234, loss: 2.9854540824890137, step time: 23.432493209838867ms\r\n",,terminal_output +814,1338954,"TERMINAL",0,0,"Step 235, loss: 2.989588975906372, step time: 20.048856735229492ms\r\n",,terminal_output +815,1338954,"TERMINAL",0,0,"Step 236, loss: 2.9660768508911133, step time: 19.834041595458984ms\r\n",,terminal_output +816,1339122,"TERMINAL",0,0,"Step 237, loss: 3.25596022605896, step time: 20.401477813720703ms\r\n",,terminal_output +817,1339123,"TERMINAL",0,0,"Step 238, loss: 3.088088274002075, step time: 19.77086067199707ms\r\n",,terminal_output +818,1339217,"TERMINAL",0,0,"Step 239, loss: 2.951716899871826, step time: 19.79684829711914ms\r\n",,terminal_output +819,1339316,"TERMINAL",0,0,"Step 240, loss: 2.9688305854797363, step time: 
19.911766052246094ms\r\n",,terminal_output +820,1339325,"TERMINAL",0,0,"Step 241, loss: 2.976384162902832, step time: 19.388675689697266ms\r\n",,terminal_output +821,1339393,"TERMINAL",0,0,"Step 242, loss: 3.0114331245422363, step time: 19.367456436157227ms\r\n",,terminal_output +822,1339440,"TERMINAL",0,0,"Step 243, loss: 2.946223735809326, step time: 19.982337951660156ms\r\n",,terminal_output +823,1339496,"TERMINAL",0,0,"Step 244, loss: 2.9339916706085205, step time: 19.470691680908203ms\r\n",,terminal_output +824,1339661,"TERMINAL",0,0,"Step 245, loss: 2.932732582092285, step time: 19.460201263427734ms\r\nStep 246, loss: 3.12707257270813, step time: 19.871234893798828ms\r\n",,terminal_output +825,1339776,"TERMINAL",0,0,"Step 247, loss: 2.9433209896087646, step time: 19.331932067871094ms\r\n",,terminal_output +826,1339820,"TERMINAL",0,0,"Step 248, loss: 3.023536443710327, step time: 19.33121681213379ms\r\n",,terminal_output +827,1339926,"TERMINAL",0,0,"Step 249, loss: 2.94248366355896, step time: 19.82879638671875ms\r\n",,terminal_output +828,1340562,"TERMINAL",0,0,"Step 250, loss: 3.0146801471710205, step time: 370.8910942077637ms\r\nStep 251, loss: 3.0928502082824707, step time: 32.218217849731445ms\r\n",,terminal_output +829,1340649,"TERMINAL",0,0,"Step 252, loss: 2.944918155670166, step time: 23.29874038696289ms\r\n",,terminal_output +830,1340735,"TERMINAL",0,0,"Step 253, loss: 2.8875725269317627, step time: 20.670413970947266ms\r\n",,terminal_output +831,1340760,"TERMINAL",0,0,"Step 254, loss: 3.1834161281585693, step time: 20.496129989624023ms\r\n",,terminal_output +832,1340890,"TERMINAL",0,0,"Step 255, loss: 2.970304250717163, step time: 20.258426666259766ms\r\n",,terminal_output +833,1340940,"TERMINAL",0,0,"Step 256, loss: 3.047882556915283, step time: 20.31564712524414ms\r\n",,terminal_output +834,1341051,"TERMINAL",0,0,"Step 257, loss: 3.052515745162964, step time: 19.771099090576172ms\r\n",,terminal_output +835,1341153,"TERMINAL",0,0,"Step 258, loss: 2.862179756164551, step time: 19.85335350036621ms\r\n",,terminal_output +836,1341270,"TERMINAL",0,0,"Step 259, loss: 2.8526298999786377, step time: 20.0498104095459ms\r\n",,terminal_output +837,1341321,"TERMINAL",0,0,"Step 260, loss: 2.9277684688568115, step time: 19.801855087280273ms\r\n",,terminal_output +838,1341454,"TERMINAL",0,0,"Step 261, loss: 3.2014224529266357, step time: 20.226478576660156ms\r\n",,terminal_output +839,1341510,"TERMINAL",0,0,"Step 262, loss: 3.30601167678833, step time: 20.57671546936035ms\r\n",,terminal_output +840,1341611,"TERMINAL",0,0,"Step 263, loss: 2.9030091762542725, step time: 19.974946975708008ms\r\n",,terminal_output +841,1341907,"TERMINAL",0,0,"Step 264, loss: 2.949496030807495, step time: 20.074129104614258ms\r\nStep 265, loss: 2.9039306640625, step time: 19.94013786315918ms\r\n",,terminal_output +842,1341908,"TERMINAL",0,0,"Step 266, loss: 3.0143754482269287, step time: 19.846677780151367ms\r\n",,terminal_output +843,1341930,"TERMINAL",0,0,"Step 267, loss: 3.256375312805176, step time: 19.65785026550293ms\r\n",,terminal_output +844,1341976,"TERMINAL",0,0,"Step 268, loss: 2.8811943531036377, step time: 19.798994064331055ms\r\n",,terminal_output +845,1342037,"TERMINAL",0,0,"Step 269, loss: 2.862563133239746, step time: 19.451141357421875ms\r\n",,terminal_output +846,1342158,"TERMINAL",0,0,"Step 270, loss: 2.837202310562134, step time: 19.630908966064453ms\r\n",,terminal_output +847,1342229,"TERMINAL",0,0,"Step 271, loss: 2.8480312824249268, step time: 19.9129581451416ms\r\n",,terminal_output 
+848,1342293,"TERMINAL",0,0,"Step 272, loss: 2.936004400253296, step time: 19.550561904907227ms\r\n",,terminal_output +849,1342418,"TERMINAL",0,0,"Step 273, loss: 2.8588690757751465, step time: 19.4399356842041ms\r\n",,terminal_output +850,1342484,"TERMINAL",0,0,"Step 274, loss: 2.8481945991516113, step time: 19.8972225189209ms\r\n",,terminal_output +851,1342577,"TERMINAL",0,0,"Step 275, loss: 2.856309413909912, step time: 19.587039947509766ms\r\n",,terminal_output +852,1342664,"TERMINAL",0,0,"Step 276, loss: 3.1180224418640137, step time: 19.6075439453125ms\r\n",,terminal_output +853,1342702,"TERMINAL",0,0,"Step 277, loss: 3.1649630069732666, step time: 19.727706909179688ms\r\n",,terminal_output +854,1342745,"TERMINAL",0,0,"Step 278, loss: 2.855541944503784, step time: 19.712448120117188ms\r\n",,terminal_output +855,1342944,"TERMINAL",0,0,"Step 279, loss: 3.0818276405334473, step time: 19.4246768951416ms\r\nStep 280, loss: 2.979748249053955, step time: 19.783973693847656ms\r\n",,terminal_output +856,1343086,"TERMINAL",0,0,"Step 281, loss: 2.8480286598205566, step time: 19.399166107177734ms\r\n",,terminal_output +857,1343218,"TERMINAL",0,0,"Step 282, loss: 3.0045759677886963, step time: 19.39558982849121ms\r\n",,terminal_output +858,1343276,"TERMINAL",0,0,"Step 283, loss: 2.84612774848938, step time: 19.612550735473633ms\r\n",,terminal_output +859,1343396,"TERMINAL",0,0,"Step 284, loss: 3.515535354614258, step time: 22.122621536254883ms\r\n",,terminal_output +860,1343477,"TERMINAL",0,0,"Step 285, loss: 2.849360942840576, step time: 20.31087875366211ms\r\n",,terminal_output +861,1343551,"TERMINAL",0,0,"Step 286, loss: 2.9443271160125732, step time: 20.041942596435547ms\r\n",,terminal_output +862,1343646,"TERMINAL",0,0,"Step 287, loss: 2.8226888179779053, step time: 19.46568489074707ms\r\n",,terminal_output +863,1343751,"TERMINAL",0,0,"Step 288, loss: 2.9337384700775146, step time: 19.3784236907959ms\r\n",,terminal_output +864,1343815,"TERMINAL",0,0,"Step 289, loss: 3.024909019470215, step time: 19.649505615234375ms\r\n",,terminal_output +865,1343926,"TERMINAL",0,0,"Step 290, loss: 2.822242259979248, step time: 19.504070281982422ms\r\n",,terminal_output +866,1344083,"TERMINAL",0,0,"Step 291, loss: 2.8283724784851074, step time: 21.737098693847656ms\r\nStep 292, loss: 2.9554741382598877, step time: 20.429134368896484ms\r\n",,terminal_output +867,1344182,"TERMINAL",0,0,"Step 293, loss: 3.034527540206909, step time: 19.570112228393555ms\r\n",,terminal_output +868,1344292,"TERMINAL",0,0,"Step 294, loss: 2.9136147499084473, step time: 19.49310302734375ms\r\n",,terminal_output +869,1344355,"TERMINAL",0,0,"Step 295, loss: 2.8219921588897705, step time: 19.85335350036621ms\r\n",,terminal_output +870,1344439,"TERMINAL",0,0,"Step 296, loss: 3.0924417972564697, step time: 19.435405731201172ms\r\n",,terminal_output +871,1344560,"TERMINAL",0,0,"Step 297, loss: 2.8223328590393066, step time: 19.570350646972656ms\r\n",,terminal_output +872,1344623,"TERMINAL",0,0,"Step 298, loss: 2.804008960723877, step time: 19.70362663269043ms\r\n",,terminal_output +873,1344691,"TERMINAL",0,0,"Step 299, loss: 2.762672185897827, step time: 19.441843032836914ms\r\n",,terminal_output +874,1344767,"TERMINAL",0,0,"Step 300, loss: 2.8021950721740723, step time: 19.683122634887695ms\r\n",,terminal_output +875,1344910,"TERMINAL",0,0,"Step 301, loss: 2.854135751724243, step time: 19.791603088378906ms\r\n",,terminal_output +876,1344977,"TERMINAL",0,0,"Step 302, loss: 2.806257963180542, step time: 
19.38605308532715ms\r\n",,terminal_output +877,1345078,"TERMINAL",0,0,"Step 303, loss: 2.845193862915039, step time: 19.382953643798828ms\r\n",,terminal_output +878,1345123,"TERMINAL",0,0,"Step 304, loss: 2.8479323387145996, step time: 19.917011260986328ms\r\n",,terminal_output +879,1345235,"TERMINAL",0,0,"Step 305, loss: 2.950181722640991, step time: 19.37723159790039ms\r\n",,terminal_output +880,1345303,"TERMINAL",0,0,"Step 306, loss: 2.7230193614959717, step time: 19.449234008789062ms\r\n",,terminal_output +881,1345420,"TERMINAL",0,0,"Step 307, loss: 2.9250645637512207, step time: 19.64426040649414ms\r\n",,terminal_output +882,1345478,"TERMINAL",0,0,"Step 308, loss: 2.726747989654541, step time: 19.488811492919922ms\r\n",,terminal_output +883,1345558,"TERMINAL",0,0,"Step 309, loss: 2.897491455078125, step time: 21.23427391052246ms\r\n",,terminal_output +884,1345693,"TERMINAL",0,0,"Step 310, loss: 2.6752431392669678, step time: 20.35808563232422ms\r\n",,terminal_output +885,1345772,"TERMINAL",0,0,"Step 311, loss: 2.612635374069214, step time: 19.772052764892578ms\r\n",,terminal_output +886,1345903,"TERMINAL",0,0,"Step 312, loss: 2.6799700260162354, step time: 19.838809967041016ms\r\nStep 313, loss: 2.978158473968506, step time: 19.983530044555664ms\r\n",,terminal_output +887,1346042,"TERMINAL",0,0,"Step 314, loss: 3.1537203788757324, step time: 19.952774047851562ms\r\n",,terminal_output +888,1346098,"TERMINAL",0,0,"Step 315, loss: 2.880211114883423, step time: 19.75107192993164ms\r\n",,terminal_output +889,1346172,"TERMINAL",0,0,"Step 316, loss: 2.8314883708953857, step time: 19.983291625976562ms\r\n",,terminal_output +890,1346246,"TERMINAL",0,0,"Step 317, loss: 2.7534520626068115, step time: 19.762516021728516ms\r\n",,terminal_output +891,1346265,"TERMINAL",0,0,"Step 318, loss: 2.9054408073425293, step time: 19.751310348510742ms\r\n",,terminal_output +892,1346323,"TERMINAL",0,0,"Step 319, loss: 2.758772611618042, step time: 19.985675811767578ms\r\n",,terminal_output +893,1346415,"TERMINAL",0,0,"Step 320, loss: 3.1917009353637695, step time: 19.989967346191406ms\r\n",,terminal_output +894,1346515,"TERMINAL",0,0,"Step 321, loss: 2.8247313499450684, step time: 19.71125602722168ms\r\n",,terminal_output +895,1346629,"TERMINAL",0,0,"Step 322, loss: 2.666032075881958, step time: 20.020008087158203ms\r\n",,terminal_output +896,1346709,"TERMINAL",0,0,"Step 323, loss: 2.6740121841430664, step time: 19.623994827270508ms\r\n",,terminal_output +897,1346842,"TERMINAL",0,0,"Step 324, loss: 2.751080274581909, step time: 19.667387008666992ms\r\n",,terminal_output +898,1346969,"TERMINAL",0,0,"Step 325, loss: 2.676928758621216, step time: 19.951343536376953ms\r\n",,terminal_output +899,1347054,"TERMINAL",0,0,"Step 326, loss: 3.3436336517333984, step time: 19.74201202392578ms\r\n",,terminal_output +900,1347149,"TERMINAL",0,0,"Step 327, loss: 2.681183338165283, step time: 19.77372169494629ms\r\n",,terminal_output +901,1347223,"TERMINAL",0,0,"Step 328, loss: 2.6503827571868896, step time: 19.993305206298828ms\r\n",,terminal_output +902,1347330,"TERMINAL",0,0,"Step 329, loss: 2.879173994064331, step time: 20.4925537109375ms\r\n",,terminal_output +903,1347401,"TERMINAL",0,0,"Step 330, loss: 3.0552804470062256, step time: 19.896268844604492ms\r\n",,terminal_output +904,1347496,"TERMINAL",0,0,"Step 331, loss: 2.6817729473114014, step time: 20.032167434692383ms\r\n",,terminal_output +905,1347603,"TERMINAL",0,0,"Step 332, loss: 2.6861913204193115, step time: 19.837379455566406ms\r\n",,terminal_output 
+906,1347657,"TERMINAL",0,0,"Step 333, loss: 2.753173351287842, step time: 19.718647003173828ms\r\n",,terminal_output +907,1347761,"TERMINAL",0,0,"Step 334, loss: 2.7350637912750244, step time: 19.913911819458008ms\r\n",,terminal_output +908,1347864,"TERMINAL",0,0,"Step 335, loss: 2.8082833290100098, step time: 19.852638244628906ms\r\n",,terminal_output +909,1347954,"TERMINAL",0,0,"Step 336, loss: 2.6485021114349365, step time: 20.04241943359375ms\r\n",,terminal_output +910,1348037,"TERMINAL",0,0,"Step 337, loss: 2.9789624214172363, step time: 20.038127899169922ms\r\n",,terminal_output +911,1348126,"TERMINAL",0,0,"Step 338, loss: 2.6259522438049316, step time: 19.73748207092285ms\r\n",,terminal_output +912,1348227,"TERMINAL",0,0,"Step 339, loss: 2.734635829925537, step time: 19.794940948486328ms\r\n",,terminal_output +913,1348299,"TERMINAL",0,0,"Step 340, loss: 2.9333677291870117, step time: 20.055294036865234ms\r\n",,terminal_output +914,1348395,"TERMINAL",0,0,"Step 341, loss: 2.7213075160980225, step time: 20.790576934814453ms\r\n",,terminal_output +915,1348473,"TERMINAL",0,0,"Step 342, loss: 2.6002309322357178, step time: 21.223783493041992ms\r\n",,terminal_output +916,1348566,"TERMINAL",0,0,"Step 343, loss: 2.7423946857452393, step time: 20.39361000061035ms\r\n",,terminal_output +917,1348616,"TERMINAL",0,0,"Step 344, loss: 2.665848731994629, step time: 19.833803176879883ms\r\n",,terminal_output +918,1348728,"TERMINAL",0,0,"Step 345, loss: 2.8353564739227295, step time: 19.773244857788086ms\r\n",,terminal_output +919,1349002,"TERMINAL",0,0,"Step 346, loss: 2.9878618717193604, step time: 20.04098892211914ms\r\nStep 347, loss: 2.5840907096862793, step time: 19.828081130981445ms\r\nStep 348, loss: 2.7928154468536377, step time: 22.269010543823242ms\r\n",,terminal_output +920,1349057,"TERMINAL",0,0,"Step 349, loss: 2.728196620941162, step time: 19.415616989135742ms\r\n",,terminal_output +921,1349141,"TERMINAL",0,0,"Step 350, loss: 2.671746253967285, step time: 18.801450729370117ms\r\n",,terminal_output +922,1349268,"TERMINAL",0,0,"Step 351, loss: 2.642375946044922, step time: 18.77570152282715ms\r\n",,terminal_output +923,1349328,"TERMINAL",0,0,"Step 352, loss: 2.6808929443359375, step time: 19.504070281982422ms\r\n",,terminal_output +924,1349425,"TERMINAL",0,0,"Step 353, loss: 2.7112302780151367, step time: 18.995046615600586ms\r\n",,terminal_output +925,1349493,"TERMINAL",0,0,"Step 354, loss: 2.5904388427734375, step time: 19.19269561767578ms\r\n",,terminal_output +926,1349615,"TERMINAL",0,0,"Step 355, loss: 2.9916131496429443, step time: 19.347429275512695ms\r\n",,terminal_output +927,1349680,"TERMINAL",0,0,"Step 356, loss: 2.8330302238464355, step time: 18.795013427734375ms\r\n",,terminal_output +928,1349764,"TERMINAL",0,0,"Step 357, loss: 2.6451337337493896, step time: 19.07491683959961ms\r\n",,terminal_output +929,1349833,"TERMINAL",0,0,"Step 358, loss: 2.8144819736480713, step time: 19.35410499572754ms\r\n",,terminal_output +930,1349950,"TERMINAL",0,0,"Step 359, loss: 2.721980333328247, step time: 19.29020881652832ms\r\n",,terminal_output +931,1350012,"TERMINAL",0,0,"Step 360, loss: 2.8273813724517822, step time: 19.22893524169922ms\r\n",,terminal_output +932,1350124,"TERMINAL",0,0,"Step 361, loss: 2.669771671295166, step time: 19.2108154296875ms\r\n",,terminal_output +933,1350239,"TERMINAL",0,0,"Step 362, loss: 2.6230437755584717, step time: 19.17123794555664ms\r\n",,terminal_output +934,1350315,"TERMINAL",0,0,"Step 363, loss: 2.722095012664795, step time: 
19.52815055847168ms\r\n",,terminal_output +935,1350401,"TERMINAL",0,0,"Step 364, loss: 2.583611488342285, step time: 19.240379333496094ms\r\n",,terminal_output +936,1350445,"TERMINAL",0,0,"Step 365, loss: 2.5219783782958984, step time: 19.28997039794922ms\r\n",,terminal_output +937,1350532,"TERMINAL",0,0,"Step 366, loss: 2.6950361728668213, step time: 18.482208251953125ms\r\n",,terminal_output +938,1350653,"TERMINAL",0,0,"Step 367, loss: 2.7091941833496094, step time: 18.80359649658203ms\r\n",,terminal_output +939,1350721,"TERMINAL",0,0,"Step 368, loss: 2.5749473571777344, step time: 19.00005340576172ms\r\n",,terminal_output +940,1350820,"TERMINAL",0,0,"Step 369, loss: 2.565413475036621, step time: 18.46790313720703ms\r\n",,terminal_output +941,1350892,"TERMINAL",0,0,"Step 370, loss: 3.3432507514953613, step time: 19.62137222290039ms\r\n",,terminal_output +942,1351011,"TERMINAL",0,0,"Step 371, loss: 2.596367597579956, step time: 19.023895263671875ms\r\nStep 372, loss: 2.8143906593322754, step time: 18.227577209472656ms\r\n",,terminal_output +943,1351091,"TERMINAL",0,0,"Step 373, loss: 2.9087424278259277, step time: 18.573284149169922ms\r\n",,terminal_output +944,1351188,"TERMINAL",0,0,"Step 374, loss: 3.0883302688598633, step time: 18.431901931762695ms\r\n",,terminal_output +945,1351308,"TERMINAL",0,0,"Step 375, loss: 3.21968412399292, step time: 18.525123596191406ms\r\n",,terminal_output +946,1351402,"TERMINAL",0,0,"Step 376, loss: 2.863358497619629, step time: 19.429683685302734ms\r\n",,terminal_output +947,1351457,"TERMINAL",0,0,"Step 377, loss: 2.708643913269043, step time: 18.243789672851562ms\r\n",,terminal_output +948,1351568,"TERMINAL",0,0,"Step 378, loss: 2.6682841777801514, step time: 18.521785736083984ms\r\n",,terminal_output +949,1351649,"TERMINAL",0,0,"Step 379, loss: 2.6737327575683594, step time: 19.32835578918457ms\r\n",,terminal_output +950,1351758,"TERMINAL",0,0,"Step 380, loss: 2.647716760635376, step time: 18.528461456298828ms\r\n",,terminal_output +951,1351809,"TERMINAL",0,0,"Step 381, loss: 3.0268242359161377, step time: 19.296646118164062ms\r\n",,terminal_output +952,1351882,"TERMINAL",0,0,"Step 382, loss: 2.576516628265381, step time: 19.16193962097168ms\r\n",,terminal_output +953,1351966,"TERMINAL",0,0,"Step 383, loss: 3.0395824909210205, step time: 18.82195472717285ms\r\n",,terminal_output +954,1352017,"TERMINAL",0,0,"Step 384, loss: 2.687333106994629, step time: 18.527507781982422ms\r\n",,terminal_output +955,1352129,"TERMINAL",0,0,"Step 385, loss: 2.699012517929077, step time: 19.33741569519043ms\r\n",,terminal_output +956,1352269,"TERMINAL",0,0,"Step 386, loss: 2.6719584465026855, step time: 18.36109161376953ms\r\n",,terminal_output +957,1352343,"TERMINAL",0,0,"Step 387, loss: 2.9720451831817627, step time: 19.066572189331055ms\r\n",,terminal_output +958,1352442,"TERMINAL",0,0,"Step 388, loss: 2.6439414024353027, step time: 19.34361457824707ms\r\n",,terminal_output +959,1352536,"models/dynamics.py",945,0,"",python,selection_mouse +960,1352551,"TERMINAL",0,0,"Step 389, loss: 2.7584686279296875, step time: 18.26310157775879ms\r\n",,terminal_output +961,1352601,"TERMINAL",0,0,"Step 390, loss: 2.72880220413208, step time: 18.259286880493164ms\r\n",,terminal_output +962,1352666,"TERMINAL",0,0,"Step 391, loss: 2.535008192062378, step time: 19.203901290893555ms\r\n",,terminal_output +963,1352764,"TERMINAL",0,0,"Step 392, loss: 2.5864148139953613, step time: 18.143415451049805ms\r\n",,terminal_output +964,1352875,"TERMINAL",0,0,"Step 393, loss: 2.631338119506836, step 
time: 18.12148094177246ms\r\n",,terminal_output +965,1352929,"TERMINAL",0,0,"Step 394, loss: 2.4825499057769775, step time: 18.36085319519043ms\r\n",,terminal_output +966,1353024,"TERMINAL",0,0,"Step 395, loss: 2.819103717803955, step time: 18.140554428100586ms\r\n",,terminal_output +967,1353136,"TERMINAL",0,0,"Step 396, loss: 2.5391387939453125, step time: 18.11504364013672ms\r\n",,terminal_output +968,1353191,"TERMINAL",0,0,"Step 397, loss: 2.4070146083831787, step time: 18.619298934936523ms\r\n",,terminal_output +969,1353294,"TERMINAL",0,0,"Step 398, loss: 2.4994919300079346, step time: 18.084287643432617ms\r\n",,terminal_output +970,1353400,"TERMINAL",0,0,"Step 399, loss: 2.5282530784606934, step time: 18.34273338317871ms\r\n",,terminal_output +971,1353498,"TERMINAL",0,0,"Step 400, loss: 2.514636516571045, step time: 19.978046417236328ms\r\n",,terminal_output +972,1353569,"TERMINAL",0,0,"Step 401, loss: 2.505814790725708, step time: 18.221139907836914ms\r\n",,terminal_output +973,1353607,"TERMINAL",0,0,"Step 402, loss: 2.5578114986419678, step time: 17.774105072021484ms\r\n",,terminal_output +974,1353733,"TERMINAL",0,0,"Step 403, loss: 2.6223011016845703, step time: 18.046855926513672ms\r\n",,terminal_output +975,1353797,"TERMINAL",0,0,"Step 404, loss: 2.5354044437408447, step time: 17.667055130004883ms\r\n",,terminal_output +976,1353888,"TERMINAL",0,0,"Step 405, loss: 2.6920151710510254, step time: 17.73238182067871ms\r\n",,terminal_output +977,1353996,"TERMINAL",0,0,"Step 406, loss: 2.7881734371185303, step time: 17.96555519104004ms\r\n",,terminal_output +978,1354056,"TERMINAL",0,0,"Step 407, loss: 2.524221658706665, step time: 17.706871032714844ms\r\n",,terminal_output +979,1354173,"TERMINAL",0,0,"Step 408, loss: 2.492345094680786, step time: 17.664432525634766ms\r\n",,terminal_output +980,1354280,"TERMINAL",0,0,"Step 409, loss: 2.5717933177948, step time: 19.751548767089844ms\r\n",,terminal_output +981,1354340,"TERMINAL",0,0,"Step 410, loss: 2.4726407527923584, step time: 18.611431121826172ms\r\n",,terminal_output +982,1354444,"TERMINAL",0,0,"Step 411, loss: 3.034902334213257, step time: 20.70474624633789ms\r\n",,terminal_output +983,1354501,"TERMINAL",0,0,"Step 412, loss: 2.532377004623413, step time: 19.12975311279297ms\r\n",,terminal_output +984,1354525,"TERMINAL",0,0,"Step 413, loss: 3.221187114715576, step time: 19.267797470092773ms\r\n",,terminal_output +985,1354569,"TERMINAL",0,0,"Step 414, loss: 2.5478577613830566, step time: 18.29838752746582ms\r\n",,terminal_output +986,1354699,"TERMINAL",0,0,"Step 415, loss: 2.4962925910949707, step time: 18.262624740600586ms\r\n",,terminal_output +987,1354800,"TERMINAL",0,0,"Step 416, loss: 2.552525043487549, step time: 17.94576644897461ms\r\n",,terminal_output +988,1354913,"TERMINAL",0,0,"Step 417, loss: 2.8971056938171387, step time: 17.90928840637207ms\r\n",,terminal_output +989,1354992,"TERMINAL",0,0,"Step 418, loss: 2.7768239974975586, step time: 18.268108367919922ms\r\n",,terminal_output +990,1355128,"TERMINAL",0,0,"Step 419, loss: 2.504396438598633, step time: 17.902612686157227ms\r\n",,terminal_output +991,1355222,"TERMINAL",0,0,"Step 420, loss: 2.551774501800537, step time: 18.297672271728516ms\r\n",,terminal_output +992,1355295,"TERMINAL",0,0,"Step 421, loss: 2.6426126956939697, step time: 18.598079681396484ms\r\n",,terminal_output +993,1355382,"TERMINAL",0,0,"Step 422, loss: 2.6802690029144287, step time: 18.227338790893555ms\r\n",,terminal_output +994,1355497,"TERMINAL",0,0,"Step 423, loss: 2.768214225769043, step time: 
18.228530883789062ms\r\n",,terminal_output +995,1355565,"TERMINAL",0,0,"Step 424, loss: 2.652296781539917, step time: 18.853425979614258ms\r\n",,terminal_output +996,1355646,"TERMINAL",0,0,"Step 425, loss: 2.5712850093841553, step time: 18.03445816040039ms\r\n",,terminal_output +997,1355721,"TERMINAL",0,0,"Step 426, loss: 2.7600390911102295, step time: 17.9293155670166ms\r\n",,terminal_output +998,1355813,"TERMINAL",0,0,"Step 427, loss: 2.587632656097412, step time: 18.556594848632812ms\r\n",,terminal_output +999,1355945,"TERMINAL",0,0,"Step 428, loss: 2.4307429790496826, step time: 18.18680763244629ms\r\n",,terminal_output +1000,1355993,"TERMINAL",0,0,"Step 429, loss: 2.4339938163757324, step time: 18.219470977783203ms\r\n",,terminal_output +1001,1356090,"TERMINAL",0,0,"Step 430, loss: 2.455540418624878, step time: 18.103599548339844ms\r\n",,terminal_output +1002,1356146,"TERMINAL",0,0,"Step 431, loss: 2.442493200302124, step time: 17.850160598754883ms\r\n",,terminal_output +1003,1356261,"TERMINAL",0,0,"Step 432, loss: 2.370122194290161, step time: 28.07760238647461ms\r\n",,terminal_output +1004,1356338,"TERMINAL",0,0,"Step 433, loss: 2.524892568588257, step time: 21.616458892822266ms\r\n",,terminal_output +1005,1356409,"TERMINAL",0,0,"Step 434, loss: 2.382476568222046, step time: 18.14723014831543ms\r\n",,terminal_output +1006,1356532,"TERMINAL",0,0,"Step 435, loss: 2.4658472537994385, step time: 17.948627471923828ms\r\n",,terminal_output +1007,1356663,"models/dynamics.py",983,0,"\n ",python,content +1008,1356675,"TERMINAL",0,0,"Step 436, loss: 2.697021961212158, step time: 18.23878288269043ms\r\n",,terminal_output +1009,1356767,"TERMINAL",0,0,"Step 437, loss: 2.400611639022827, step time: 18.12124252319336ms\r\nStep 438, loss: 2.3627915382385254, step time: 17.892122268676758ms\r\n",,terminal_output +1010,1356848,"TERMINAL",0,0,"Step 439, loss: 2.4565370082855225, step time: 18.148422241210938ms\r\n",,terminal_output +1011,1356956,"TERMINAL",0,0,"Step 440, loss: 3.2913970947265625, step time: 18.270254135131836ms\r\n",,terminal_output +1012,1357066,"TERMINAL",0,0,"Step 441, loss: 2.3342559337615967, step time: 18.254756927490234ms\r\n",,terminal_output +1013,1357223,"models/dynamics.py",992,0,"j",python,content +1014,1357224,"models/dynamics.py",993,0,"",python,selection_keyboard +1015,1357338,"models/dynamics.py",993,0,"a",python,content +1016,1357339,"models/dynamics.py",994,0,"",python,selection_keyboard +1017,1357423,"TERMINAL",0,0,"Step 442, loss: 2.4828970432281494, step time: 367.65074729919434ms\r\n",,terminal_output +1018,1357478,"TERMINAL",0,0,"Step 443, loss: 3.042501449584961, step time: 27.035951614379883ms\r\n",,terminal_output +1019,1357598,"TERMINAL",0,0,"Step 444, loss: 2.5491209030151367, step time: 20.855188369750977ms\r\n",,terminal_output +1020,1357640,"TERMINAL",0,0,"Step 445, loss: 2.398603677749634, step time: 19.33121681213379ms\r\n",,terminal_output +1021,1357734,"TERMINAL",0,0,"Step 446, loss: 2.4360733032226562, step time: 18.453598022460938ms\r\n",,terminal_output +1022,1357811,"TERMINAL",0,0,"Step 447, loss: 2.3536312580108643, step time: 18.39137077331543ms\r\n",,terminal_output +1023,1357851,"TERMINAL",0,0,"Step 448, loss: 2.403773307800293, step time: 18.72420310974121ms\r\n",,terminal_output +1024,1357966,"TERMINAL",0,0,"Step 449, loss: 2.4293510913848877, step time: 18.213510513305664ms\r\n",,terminal_output +1025,1358055,"TERMINAL",0,0,"Step 450, loss: 2.313063383102417, step time: 18.38397979736328ms\r\n",,terminal_output 
+1026,1358084,"models/dynamics.py",994,0,"x",python,content +1027,1358086,"models/dynamics.py",995,0,"",python,selection_keyboard +1028,1358140,"TERMINAL",0,0,"Step 451, loss: 2.583186149597168, step time: 18.7528133392334ms\r\n",,terminal_output +1029,1358182,"TERMINAL",0,0,"Step 452, loss: 2.391303777694702, step time: 18.324851989746094ms\r\n",,terminal_output +1030,1358233,"models/dynamics.py",995,0,".",python,content +1031,1358234,"models/dynamics.py",996,0,"",python,selection_keyboard +1032,1358316,"TERMINAL",0,0,"Step 453, loss: 2.4012451171875, step time: 18.32127571105957ms\r\n",,terminal_output +1033,1358403,"TERMINAL",0,0,"Step 454, loss: 2.638627529144287, step time: 20.64800262451172ms\r\n",,terminal_output +1034,1358489,"TERMINAL",0,0,"Step 455, loss: 2.422029495239258, step time: 19.083499908447266ms\r\n",,terminal_output +1035,1358539,"TERMINAL",0,0,"Step 456, loss: 2.5538225173950195, step time: 18.578529357910156ms\r\n",,terminal_output +1036,1358672,"TERMINAL",0,0,"Step 457, loss: 2.640064001083374, step time: 18.703699111938477ms\r\n",,terminal_output +1037,1358753,"TERMINAL",0,0,"Step 458, loss: 2.467543601989746, step time: 18.19586753845215ms\r\n",,terminal_output +1038,1358825,"TERMINAL",0,0,"Step 459, loss: 2.3037195205688477, step time: 18.21303367614746ms\r\n",,terminal_output +1039,1358869,"models/dynamics.py",996,0,"b",python,content +1040,1358871,"models/dynamics.py",997,0,"",python,selection_keyboard +1041,1358927,"TERMINAL",0,0,"Step 460, loss: 2.3475382328033447, step time: 18.606185913085938ms\r\n",,terminal_output +1042,1358988,"TERMINAL",0,0,"Step 461, loss: 2.5106866359710693, step time: 19.80304718017578ms\r\n",,terminal_output +1043,1359097,"TERMINAL",0,0,"Step 462, loss: 2.385831117630005, step time: 18.254995346069336ms\r\n",,terminal_output +1044,1359184,"TERMINAL",0,0,"Step 463, loss: 2.3678810596466064, step time: 18.58234405517578ms\r\n",,terminal_output +1045,1359214,"TERMINAL",0,0,"Step 464, loss: 2.3541252613067627, step time: 18.17607879638672ms\r\n",,terminal_output +1046,1359382,"models/dynamics.py",996,1,"",python,content +1047,1359397,"TERMINAL",0,0,"Step 465, loss: 2.561129570007324, step time: 18.198013305664062ms\r\n",,terminal_output +1048,1359413,"models/dynamics.py",996,0,"d",python,content +1049,1359414,"models/dynamics.py",997,0,"",python,selection_keyboard +1050,1359444,"TERMINAL",0,0,"Step 466, loss: 2.342907428741455, step time: 18.599987030029297ms\r\n",,terminal_output +1051,1359519,"TERMINAL",0,0,"Step 467, loss: 2.3077964782714844, step time: 18.31984519958496ms\r\n",,terminal_output +1052,1359679,"models/dynamics.py",997,0,"e",python,content +1053,1359680,"models/dynamics.py",998,0,"",python,selection_keyboard +1054,1359726,"TERMINAL",0,0,"Step 468, loss: 2.4365897178649902, step time: 18.335580825805664ms\r\nStep 469, loss: 2.459993839263916, step time: 18.60356330871582ms\r\n",,terminal_output +1055,1359763,"models/dynamics.py",998,0,"b",python,content +1056,1359764,"models/dynamics.py",999,0,"",python,selection_keyboard +1057,1359821,"models/dynamics.py",999,0,"u",python,content +1058,1359822,"models/dynamics.py",1000,0,"",python,selection_keyboard +1059,1359847,"TERMINAL",0,0,"Step 470, loss: 2.340980052947998, step time: 17.896413803100586ms\r\nStep 471, loss: 2.2651476860046387, step time: 17.794370651245117ms\r\n",,terminal_output +1060,1359882,"models/dynamics.py",1000,0,"g",python,content +1061,1359883,"models/dynamics.py",1001,0,"",python,selection_keyboard +1062,1359884,"TERMINAL",0,0,"Step 472, loss: 
2.4398791790008545, step time: 18.17011833190918ms\r\n",,terminal_output +1063,1360046,"TERMINAL",0,0,"Step 473, loss: 2.295151710510254, step time: 17.79913902282715ms\r\n",,terminal_output +1064,1360135,"TERMINAL",0,0,"Step 474, loss: 2.24497127532959, step time: 17.766952514648438ms\r\n",,terminal_output +1065,1360175,"models/dynamics.py",1001,0,"-",python,content +1066,1360176,"models/dynamics.py",1002,0,"",python,selection_keyboard +1067,1360195,"TERMINAL",0,0,"Step 475, loss: 2.777810573577881, step time: 18.58830451965332ms\r\n",,terminal_output +1068,1360283,"TERMINAL",0,0,"Step 476, loss: 2.4903998374938965, step time: 18.387556076049805ms\r\n",,terminal_output +1069,1360466,"models/dynamics.py",1002,0,"b",python,content +1070,1360467,"models/dynamics.py",1003,0,"",python,selection_keyboard +1071,1360467,"TERMINAL",0,0,"Step 477, loss: 2.2142908573150635, step time: 18.233299255371094ms\r\n",,terminal_output +1072,1360534,"models/dynamics.py",1003,0,"r",python,content +1073,1360535,"models/dynamics.py",1004,0,"",python,selection_keyboard +1074,1360535,"TERMINAL",0,0,"Step 478, loss: 2.574084520339966, step time: 18.63718032836914ms\r\n",,terminal_output +1075,1360652,"TERMINAL",0,0,"Step 479, loss: 2.6253912448883057, step time: 18.291950225830078ms\r\nStep 480, loss: 2.3413009643554688, step time: 18.303394317626953ms\r\n",,terminal_output +1076,1360745,"TERMINAL",0,0,"Step 481, loss: 2.621366500854492, step time: 18.54252815246582ms\r\n",,terminal_output +1077,1360824,"TERMINAL",0,0,"Step 482, loss: 2.5636587142944336, step time: 18.080711364746094ms\r\n",,terminal_output +1078,1360854,"models/dynamics.py",1003,1,"",python,content +1079,1360923,"TERMINAL",0,0,"Step 483, loss: 2.318803548812866, step time: 18.116235733032227ms\r\n",,terminal_output +1080,1360992,"models/dynamics.py",1002,1,"",python,content +1081,1361020,"TERMINAL",0,0,"Step 484, loss: 2.3892621994018555, step time: 18.4326171875ms\r\n",,terminal_output +1082,1361088,"TERMINAL",0,0,"Step 485, loss: 2.608354091644287, step time: 18.193483352661133ms\r\n",,terminal_output +1083,1361137,"models/dynamics.py",1001,1,"",python,content +1084,1361265,"TERMINAL",0,0,"Step 486, loss: 2.4664552211761475, step time: 18.100261688232422ms\r\nStep 487, loss: 2.2845256328582764, step time: 18.526554107666016ms\r\n",,terminal_output +1085,1361371,"TERMINAL",0,0,"Step 488, loss: 2.2248077392578125, step time: 18.052101135253906ms\r\n",,terminal_output +1086,1361439,"TERMINAL",0,0,"Step 489, loss: 2.4334805011749268, step time: 18.11075210571289ms\r\n",,terminal_output +1087,1361513,"TERMINAL",0,0,"Step 490, loss: 2.597905397415161, step time: 18.503904342651367ms\r\n",,terminal_output +1088,1361595,"TERMINAL",0,0,"Step 491, loss: 2.303550958633423, step time: 18.31364631652832ms\r\n",,terminal_output +1089,1361662,"TERMINAL",0,0,"Step 492, loss: 2.276592254638672, step time: 18.159151077270508ms\r\n",,terminal_output +1090,1361743,"TERMINAL",0,0,"Step 493, loss: 2.1811790466308594, step time: 18.637895584106445ms\r\n",,terminal_output +1091,1361817,"TERMINAL",0,0,"Step 494, loss: 2.3230655193328857, step time: 18.11838150024414ms\r\n",,terminal_output +1092,1361878,"models/dynamics.py",1001,0,".",python,content +1093,1361879,"models/dynamics.py",1002,0,"",python,selection_keyboard +1094,1361928,"TERMINAL",0,0,"Step 495, loss: 2.3417086601257324, step time: 18.035411834716797ms\r\n",,terminal_output +1095,1362123,"TERMINAL",0,0,"Step 496, loss: 2.266763210296631, step time: 18.21589469909668ms\r\nStep 497, loss: 2.4475884437561035, 
step time: 17.84515380859375ms\r\n",,terminal_output +1096,1362199,"models/dynamics.py",1002,0,"b",python,content +1097,1362201,"models/dynamics.py",1003,0,"",python,selection_keyboard +1098,1362235,"TERMINAL",0,0,"Step 498, loss: 2.1600229740142822, step time: 17.839908599853516ms\r\n",,terminal_output +1099,1362243,"models/dynamics.py",1003,0,"r",python,content +1100,1362245,"models/dynamics.py",1004,0,"",python,selection_keyboard +1101,1362306,"TERMINAL",0,0,"Step 499, loss: 2.3211817741394043, step time: 18.27836036682129ms\r\n",,terminal_output +1102,1362469,"models/dynamics.py",1004,0,"e",python,content +1103,1362470,"models/dynamics.py",1005,0,"",python,selection_keyboard +1104,1362745,"models/dynamics.py",1005,0,"a",python,content +1105,1362746,"models/dynamics.py",1006,0,"",python,selection_keyboard +1106,1362910,"models/dynamics.py",1002,4,"breakpoint",python,content +1107,1363939,"models/dynamics.py",1012,0,"()",python,content +1108,1363940,"models/dynamics.py",1013,0,"",python,selection_keyboard +1109,1363951,"models/dynamics.py",1013,1,")",python,content +1110,1363952,"models/dynamics.py",1014,0,"",python,selection_keyboard +1111,1365333,"sample.py",0,0,"",python,tab +1112,1365714,"TERMINAL",0,0,"Step 500, loss: 2.2660348415374756, step time: 26.0164737701416ms\r\n",,terminal_output +1113,1365916,"TERMINAL",0,0,"Step 501, loss: 2.2250378131866455, step time: 26.9320011138916ms\r\n",,terminal_output +1114,1366530,"TERMINAL",0,0,"Step 502, loss: 2.515218734741211, step time: 21.901607513427734ms\r\nStep 503, loss: 2.9290387630462646, step time: 19.968509674072266ms\r\n",,terminal_output +1115,1366531,"TERMINAL",0,0,"Step 504, loss: 2.2664473056793213, step time: 20.002126693725586ms\r\n",,terminal_output +1116,1366706,"TERMINAL",0,0,"Step 505, loss: 2.401737689971924, step time: 19.6533203125ms\r\nStep 506, loss: 2.16792893409729, step time: 19.394874572753906ms\r\nStep 507, loss: 2.421999931335449, step time: 18.677949905395508ms\r\nStep 508, loss: 2.40836763381958, step time: 17.99178123474121ms\r\nStep 509, loss: 2.6202244758605957, step time: 18.327713012695312ms\r\nStep 510, loss: 2.3773770332336426, step time: 18.862009048461914ms\r\nStep 511, loss: 2.3612542152404785, step time: 18.494606018066406ms\r\n",,terminal_output +1117,1366771,"TERMINAL",0,0,"Step 512, loss: 2.216806173324585, step time: 18.290042877197266ms\r\n",,terminal_output +1118,1367032,"TERMINAL",0,0,"Step 513, loss: 2.2056970596313477, step time: 18.15652847290039ms\r\n",,terminal_output +1119,1367110,"TERMINAL",0,0,"Step 514, loss: 2.2270421981811523, step time: 18.273353576660156ms\r\nStep 515, loss: 2.877382516860962, step time: 18.280982971191406ms\r\n",,terminal_output +1120,1367160,"TERMINAL",0,0,"Step 516, loss: 2.208111524581909, step time: 18.386363983154297ms\r\n",,terminal_output +1121,1367521,"TERMINAL",0,0,"Step 517, loss: 2.954993724822998, step time: 341.20821952819824ms\r\n",,terminal_output +1122,1367521,"TERMINAL",0,0,"Step 518, loss: 2.4315192699432373, step time: 25.301456451416016ms\r\n",,terminal_output +1123,1367695,"TERMINAL",0,0,"Step 519, loss: 2.2856791019439697, step time: 20.537137985229492ms\r\nStep 520, loss: 2.3952479362487793, step time: 19.194602966308594ms\r\n",,terminal_output +1124,1367807,"TERMINAL",0,0,"Step 521, loss: 2.2606215476989746, step time: 18.505334854125977ms\r\n",,terminal_output +1125,1367926,"TERMINAL",0,0,"Step 522, loss: 2.2463958263397217, step time: 18.650054931640625ms\r\n",,terminal_output +1126,1367999,"TERMINAL",0,0,"Step 523, loss: 
2.239377021789551, step time: 18.58043670654297ms\r\n",,terminal_output +1127,1368040,"TERMINAL",0,0,"Step 524, loss: 2.3454842567443848, step time: 18.441200256347656ms\r\n",,terminal_output +1128,1368146,"TERMINAL",0,0,"Step 525, loss: 2.6227645874023438, step time: 18.248558044433594ms\r\n",,terminal_output +1129,1368236,"TERMINAL",0,0,"Step 526, loss: 2.680223226547241, step time: 18.30267906188965ms\r\n",,terminal_output +1130,1368299,"TERMINAL",0,0,"Step 527, loss: 2.3013293743133545, step time: 18.079280853271484ms\r\n",,terminal_output +1131,1368405,"TERMINAL",0,0,"Step 528, loss: 2.2122347354888916, step time: 18.278121948242188ms\r\n",,terminal_output +1132,1368517,"TERMINAL",0,0,"Step 529, loss: 2.272183418273926, step time: 22.4609375ms\r\n",,terminal_output +1133,1368618,"TERMINAL",0,0,"Step 530, loss: 2.312736988067627, step time: 19.42133903503418ms\r\n",,terminal_output +1134,1368734,"TERMINAL",0,0,"Step 531, loss: 2.161836862564087, step time: 18.406152725219727ms\r\nStep 532, loss: 2.4163360595703125, step time: 18.44167709350586ms\r\n",,terminal_output +1135,1368847,"TERMINAL",0,0,"Step 533, loss: 2.2528512477874756, step time: 18.05567741394043ms\r\n",,terminal_output +1136,1368936,"TERMINAL",0,0,"Step 534, loss: 2.1028342247009277, step time: 18.260955810546875ms\r\n",,terminal_output +1137,1369036,"TERMINAL",0,0,"Step 535, loss: 2.5630125999450684, step time: 18.5701847076416ms\r\nStep 536, loss: 2.268728733062744, step time: 18.239498138427734ms\r\n",,terminal_output +1138,1369104,"TERMINAL",0,0,"Step 537, loss: 2.2670695781707764, step time: 18.094778060913086ms\r\n",,terminal_output +1139,1369275,"TERMINAL",0,0,"Step 538, loss: 2.2455127239227295, step time: 18.21422576904297ms\r\n",,terminal_output +1140,1370080,"TERMINAL",0,0,"Step 539, loss: 2.2326085567474365, step time: 17.972707748413086ms\r\n",,terminal_output +1141,1370108,"TERMINAL",0,0,"Step 540, loss: 2.191760540008545, step time: 18.263578414916992ms\r\n",,terminal_output +1142,1370108,"TERMINAL",0,0,"Step 541, loss: 2.301858425140381, step time: 18.384695053100586ms\r\n",,terminal_output +1143,1370108,"TERMINAL",0,0,"Step 542, loss: 2.18163800239563, step time: 18.453359603881836ms\r\n",,terminal_output +1144,1370108,"TERMINAL",0,0,"Step 543, loss: 2.145538568496704, step time: 18.114805221557617ms\r\n",,terminal_output +1145,1370180,"TERMINAL",0,0,"Step 544, loss: 2.1901628971099854, step time: 18.21136474609375ms\r\nStep 545, loss: 2.196000814437866, step time: 19.367456436157227ms\r\n",,terminal_output +1146,1370389,"TERMINAL",0,0,"Step 546, loss: 2.1434929370880127, step time: 18.418312072753906ms\r\nStep 547, loss: 2.677196502685547, step time: 18.449068069458008ms\r\nStep 548, loss: 2.1895174980163574, step time: 18.26000213623047ms\r\nStep 549, loss: 2.390092372894287, step time: 18.06354522705078ms\r\nStep 550, loss: 2.6073715686798096, step time: 18.76235008239746ms\r\n",,terminal_output +1147,1370527,"TERMINAL",0,0,"Step 551, loss: 2.147319793701172, step time: 18.26024055480957ms\r\n",,terminal_output +1148,1370719,"TERMINAL",0,0,"Step 552, loss: 2.3218557834625244, step time: 18.314599990844727ms\r\nStep 553, loss: 2.0927159786224365, step time: 18.492698669433594ms\r\n",,terminal_output +1149,1370792,"TERMINAL",0,0,"Step 554, loss: 2.4763293266296387, step time: 26.4284610748291ms\r\nStep 555, loss: 2.08743953704834, step time: 18.42021942138672ms\r\n",,terminal_output +1150,1370919,"TERMINAL",0,0,"Step 556, loss: 2.0618295669555664, step time: 18.926620483398438ms\r\n",,terminal_output 
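The models/dynamics.py keystrokes interleaved with the training log above build up a `jax.debug.breakpoint()` call, the statement the later jdb listing shows at dynamics.py line 39. A minimal, self-contained sketch of the technique (not the project's code): unlike the builtin `breakpoint()`, `jax.debug.breakpoint()` is staged as a debug callback, so it works inside jit-compiled functions and drops into the `(jdb)` prompt only when the traced computation actually executes.

```python
# Minimal sketch, not the project's code: pausing inside a jitted function.
import jax
import jax.numpy as jnp

@jax.jit
def masked_step(x, rng):
    mask = jax.random.bernoulli(rng, 0.5, x.shape)
    jax.debug.breakpoint()  # prints "Entering jdb:" and blocks for commands
    return jnp.where(mask, x, 0.0)

masked_step(jnp.ones(4), jax.random.PRNGKey(0))
# At the (jdb) prompt: `l` lists the frame, bare expressions like `mask`
# are evaluated and printed, and `c` continues execution.
```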
+1151,1371010,"TERMINAL",0,0,"Step 557, loss: 2.113741874694824, step time: 18.55921745300293ms\r\n",,terminal_output +1152,1371091,"TERMINAL",0,0,"Step 558, loss: 2.2820823192596436, step time: 18.512487411499023ms\r\n",,terminal_output +1153,1371176,"TERMINAL",0,0,"Step 559, loss: 2.273077964782715, step time: 18.537044525146484ms\r\n",,terminal_output +1154,1371278,"TERMINAL",0,0,"Step 560, loss: 2.1255950927734375, step time: 18.49675178527832ms\r\n",,terminal_output +1155,1371392,"TERMINAL",0,0,"Step 561, loss: 2.3845419883728027, step time: 18.181562423706055ms\r\n",,terminal_output +1156,1371440,"TERMINAL",0,0,"Step 562, loss: 2.1162455081939697, step time: 18.256425857543945ms\r\n",,terminal_output +1157,1371530,"TERMINAL",0,0,"Step 563, loss: 2.2289798259735107, step time: 18.06473731994629ms\r\n",,terminal_output +1158,1371662,"TERMINAL",0,0,"Step 564, loss: 2.2594168186187744, step time: 18.10598373413086ms\r\nStep 565, loss: 2.2014997005462646, step time: 18.474578857421875ms\r\n",,terminal_output +1159,1371748,"TERMINAL",0,0,"Step 566, loss: 2.0871901512145996, step time: 18.443584442138672ms\r\n",,terminal_output +1160,1371818,"TERMINAL",0,0,"Step 567, loss: 2.101443290710449, step time: 18.114805221557617ms\r\n",,terminal_output +1161,1371862,"TERMINAL",0,0,"Step 568, loss: 2.240459442138672, step time: 18.360614776611328ms\r\n",,terminal_output +1162,1371940,"TERMINAL",0,0,"Step 569, loss: 2.1354830265045166, step time: 18.1734561920166ms\r\n",,terminal_output +1163,1372029,"TERMINAL",0,0,"Step 570, loss: 2.273563861846924, step time: 18.36991310119629ms\r\n",,terminal_output +1164,1372127,"TERMINAL",0,0,"Step 571, loss: 2.1532435417175293, step time: 18.415212631225586ms\r\n",,terminal_output +1165,1372269,"TERMINAL",0,0,"Step 572, loss: 2.0843756198883057, step time: 18.19467544555664ms\r\nStep 573, loss: 2.401937484741211, step time: 18.103361129760742ms\r\n",,terminal_output +1166,1372340,"TERMINAL",0,0,"Step 574, loss: 2.1210968494415283, step time: 18.257856369018555ms\r\n",,terminal_output +1167,1372477,"TERMINAL",0,0,"Step 575, loss: 2.03918194770813, step time: 19.25349235534668ms\r\n",,terminal_output +1168,1372644,"TERMINAL",0,0,"Step 576, loss: 2.7354750633239746, step time: 18.338441848754883ms\r\n",,terminal_output +1169,1372773,"TERMINAL",0,0,"Step 577, loss: 2.0766608715057373, step time: 18.526792526245117ms\r\n",,terminal_output +1170,1372835,"TERMINAL",0,0,"Step 578, loss: 2.3654839992523193, step time: 18.210411071777344ms\r\n",,terminal_output +1171,1372905,"TERMINAL",0,0,"Step 579, loss: 2.0361456871032715, step time: 18.024682998657227ms\r\nStep 580, loss: 2.056401014328003, step time: 23.256540298461914ms\r\n",,terminal_output +1172,1372968,"TERMINAL",0,0,"Step 581, loss: 2.42836594581604, step time: 18.291950225830078ms\r\n",,terminal_output +1173,1373095,"TERMINAL",0,0,"Step 582, loss: 2.0269904136657715, step time: 18.487930297851562ms\r\nStep 583, loss: 2.022831439971924, step time: 18.464326858520508ms\r\n",,terminal_output +1174,1373266,"TERMINAL",0,0,"Step 584, loss: 2.038447856903076, step time: 18.29838752746582ms\r\n",,terminal_output +1175,1373303,"TERMINAL",0,0,"Step 585, loss: 2.4450156688690186, step time: 18.166303634643555ms\r\n",,terminal_output +1176,1373353,"TERMINAL",0,0,"Step 586, loss: 2.0881974697113037, step time: 18.227338790893555ms\r\n",,terminal_output +1177,1373411,"TERMINAL",0,0,"Step 587, loss: 1.8618409633636475, step time: 19.75274085998535ms\r\n",,terminal_output +1178,1373539,"TERMINAL",0,0,"Step 588, loss: 
2.1290197372436523, step time: 19.7751522064209ms\r\n",,terminal_output +1179,1373585,"TERMINAL",0,0,"Step 589, loss: 2.3351521492004395, step time: 18.725872039794922ms\r\n",,terminal_output +1180,1373698,"TERMINAL",0,0,"Step 590, loss: 2.2447948455810547, step time: 18.409013748168945ms\r\n",,terminal_output +1181,1373799,"TERMINAL",0,0,"Step 591, loss: 1.8805972337722778, step time: 17.78125762939453ms\r\n",,terminal_output +1182,1373852,"TERMINAL",0,0,"Step 592, loss: 2.1197900772094727, step time: 18.01276206970215ms\r\n",,terminal_output +1183,1373942,"TERMINAL",0,0,"Step 593, loss: 2.2854201793670654, step time: 17.96412467956543ms\r\n",,terminal_output +1184,1374001,"TERMINAL",0,0,"Step 594, loss: 2.5257983207702637, step time: 17.952680587768555ms\r\n",,terminal_output +1185,1374106,"TERMINAL",0,0,"Step 595, loss: 2.2133069038391113, step time: 18.091201782226562ms\r\n",,terminal_output +1186,1374224,"TERMINAL",0,0,"Step 596, loss: 2.268489122390747, step time: 18.020153045654297ms\r\n",,terminal_output +1187,1374352,"TERMINAL",0,0,"Step 597, loss: 2.091630458831787, step time: 17.75360107421875ms\r\nStep 598, loss: 2.4111545085906982, step time: 17.889022827148438ms\r\n",,terminal_output +1188,1374491,"TERMINAL",0,0,"Step 599, loss: 2.415388584136963, step time: 17.727136611938477ms\r\n",,terminal_output +1189,1374542,"TERMINAL",0,0,"Step 600, loss: 1.9961057901382446, step time: 18.123149871826172ms\r\n",,terminal_output +1190,1374615,"TERMINAL",0,0,"Step 601, loss: 2.0543181896209717, step time: 18.102645874023438ms\r\n",,terminal_output +1191,1374764,"TERMINAL",0,0,"Step 602, loss: 2.2169346809387207, step time: 17.8830623626709ms\r\n",,terminal_output +1192,1374834,"TERMINAL",0,0,"Step 603, loss: 2.4026591777801514, step time: 17.72785186767578ms\r\n",,terminal_output +1193,1375212,"TERMINAL",0,0,"Step 604, loss: 2.102069139480591, step time: 17.912864685058594ms\r\n",,terminal_output +1194,1375302,"TERMINAL",0,0,"Step 605, loss: 2.0573818683624268, step time: 18.093585968017578ms\r\nStep 606, loss: 1.9875017404556274, step time: 20.157575607299805ms\r\nStep 607, loss: 2.225372552871704, step time: 18.445253372192383ms\r\nStep 608, loss: 2.168712615966797, step time: 18.21160316467285ms\r\nStep 609, loss: 2.444406509399414, step time: 18.051624298095703ms\r\n",,terminal_output +1195,1375355,"TERMINAL",0,0,"Step 610, loss: 2.1280081272125244, step time: 18.17941665649414ms\r\n",,terminal_output +1196,1375513,"TERMINAL",0,0,"Step 611, loss: 2.0435879230499268, step time: 18.471240997314453ms\r\n",,terminal_output +1197,1375646,"TERMINAL",0,0,"Step 612, loss: 2.043351411819458, step time: 18.33176612854004ms\r\nStep 613, loss: 2.208116054534912, step time: 18.318891525268555ms\r\n",,terminal_output +1198,1375716,"TERMINAL",0,0,"Step 614, loss: 2.0310144424438477, step time: 18.23139190673828ms\r\n",,terminal_output +1199,1375844,"TERMINAL",0,0,"Step 615, loss: 2.405442476272583, step time: 18.04351806640625ms\r\n",,terminal_output +1200,1375931,"TERMINAL",0,0,"Step 616, loss: 2.0543291568756104, step time: 18.231868743896484ms\r\n",,terminal_output +1201,1376049,"TERMINAL",0,0,"Step 617, loss: 2.043231964111328, step time: 18.029212951660156ms\r\n",,terminal_output +1202,1376101,"TERMINAL",0,0,"Step 618, loss: 2.0562736988067627, step time: 18.22972297668457ms\r\n",,terminal_output +1203,1376168,"TERMINAL",0,0,"Step 619, loss: 1.989710807800293, step time: 18.291950225830078ms\r\n",,terminal_output +1204,1376229,"TERMINAL",0,0,"Step 620, loss: 2.118176221847534, step time: 
18.220186233520508ms\r\n",,terminal_output +1205,1376353,"TERMINAL",0,0,"Step 621, loss: 2.1197118759155273, step time: 18.009662628173828ms\r\n",,terminal_output +1206,1376438,"TERMINAL",0,0,"Step 622, loss: 1.912993311882019, step time: 18.175125122070312ms\r\n",,terminal_output +1207,1376515,"TERMINAL",0,0,"Step 623, loss: 3.283238410949707, step time: 18.06020736694336ms\r\n",,terminal_output +1208,1376571,"TERMINAL",0,0,"Step 624, loss: 2.213366985321045, step time: 18.142223358154297ms\r\n",,terminal_output +1209,1376671,"TERMINAL",0,0,"Step 625, loss: 2.2009198665618896, step time: 18.297195434570312ms\r\n",,terminal_output +1210,1376822,"TERMINAL",0,0,"Step 626, loss: 2.0831000804901123, step time: 18.090248107910156ms\r\n",,terminal_output +1211,1376959,"TERMINAL",0,0,"Step 627, loss: 2.1391100883483887, step time: 17.95816421508789ms\r\nStep 628, loss: 1.8599061965942383, step time: 18.172502517700195ms\r\n",,terminal_output +1212,1377003,"TERMINAL",0,0,"Step 629, loss: 2.386446952819824, step time: 17.879247665405273ms\r\n",,terminal_output +1213,1377141,"TERMINAL",0,0,"Step 630, loss: 1.9951950311660767, step time: 18.13483238220215ms\r\n",,terminal_output +1214,1377213,"TERMINAL",0,0,"Step 631, loss: 2.1135237216949463, step time: 18.290042877197266ms\r\n",,terminal_output +1215,1377241,"TERMINAL",0,0,"Step 632, loss: 2.0173799991607666, step time: 18.044233322143555ms\r\n",,terminal_output +1216,1377419,"TERMINAL",0,0,"Step 633, loss: 2.326913595199585, step time: 17.894744873046875ms\r\n",,terminal_output +1217,1377464,"TERMINAL",0,0,"Step 634, loss: 1.9848976135253906, step time: 18.17178726196289ms\r\n",,terminal_output +1218,1377544,"TERMINAL",0,0,"Step 635, loss: 1.967524766921997, step time: 18.06330680847168ms\r\n",,terminal_output +1219,1377656,"TERMINAL",0,0,"Step 636, loss: 2.0000815391540527, step time: 18.108129501342773ms\r\n",,terminal_output +1220,1377718,"TERMINAL",0,0,"Step 637, loss: 1.924073576927185, step time: 18.247365951538086ms\r\n",,terminal_output +1221,1377764,"TERMINAL",0,0,"Step 638, loss: 2.1519558429718018, step time: 18.144845962524414ms\r\n",,terminal_output +1222,1377920,"TERMINAL",0,0,"Step 639, loss: 1.9262961149215698, step time: 17.975568771362305ms\r\n",,terminal_output +1223,1378064,"TERMINAL",0,0,"Step 640, loss: 2.251024007797241, step time: 18.247127532958984ms\r\nStep 641, loss: 2.1670966148376465, step time: 17.910480499267578ms\r\n",,terminal_output +1224,1378152,"TERMINAL",0,0,"Step 642, loss: 2.240734100341797, step time: 18.11075210571289ms\r\n",,terminal_output +1225,1378225,"TERMINAL",0,0,"Step 643, loss: 1.8922475576400757, step time: 18.274545669555664ms\r\n",,terminal_output +1226,1378350,"TERMINAL",0,0,"Step 644, loss: 2.222580909729004, step time: 18.04041862487793ms\r\n",,terminal_output +1227,1378482,"TERMINAL",0,0,"Step 645, loss: 2.9796218872070312, step time: 17.980098724365234ms\r\nStep 646, loss: 2.188338279724121, step time: 21.533966064453125ms\r\n",,terminal_output +1228,1378611,"TERMINAL",0,0,"Step 647, loss: 2.5105602741241455, step time: 18.78046989440918ms\r\n",,terminal_output +1229,1378654,"TERMINAL",0,0,"Step 648, loss: 2.0541698932647705, step time: 18.379688262939453ms\r\n",,terminal_output +1230,1378731,"TERMINAL",0,0,"Step 649, loss: 2.031346321105957, step time: 18.404483795166016ms\r\n",,terminal_output +1231,1378846,"TERMINAL",0,0,"Step 650, loss: 1.8933383226394653, step time: 18.25737953186035ms\r\nStep 651, loss: 2.0287842750549316, step time: 18.006563186645508ms\r\n",,terminal_output 
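Step times in this log sit around 18 ms, with occasional spikes (e.g. step 517 above at ~341 ms). The loop measures them with `time.time()` around `train_step`; printing the loss each step already forces a device sync, since formatting the array pulls its value to host. A hedged sketch making the synchronization explicit; `timed_step` is a hypothetical helper, not code from the repo:

```python
# Hypothetical helper (not in the repo): time a jitted step accurately.
# JAX dispatch is asynchronous, so without blocking, time.time() can
# measure dispatch cost rather than device execution.
import time
import jax

def timed_step(train_step, state, inputs):
    t0 = time.time()
    out = train_step(state, inputs)
    out = jax.block_until_ready(out)  # wait for the device work to finish
    return out, (time.time() - t0) * 1000.0  # elapsed milliseconds
```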
+1232,1378926,"TERMINAL",0,0,"Step 652, loss: 2.0112578868865967, step time: 18.130064010620117ms\r\n",,terminal_output +1233,1379089,"TERMINAL",0,0,"Step 653, loss: 2.2121407985687256, step time: 17.971515655517578ms\r\n",,terminal_output +1234,1379230,"TERMINAL",0,0,"Step 654, loss: 2.0539236068725586, step time: 18.16391944885254ms\r\n",,terminal_output +1235,1379262,"TERMINAL",0,0,"Step 655, loss: 1.8931832313537598, step time: 18.3258056640625ms\r\n",,terminal_output +1236,1379312,"TERMINAL",0,0,"Step 656, loss: 2.499210834503174, step time: 19.13595199584961ms\r\n",,terminal_output +1237,1379476,"TERMINAL",0,0,"Step 657, loss: 2.053678035736084, step time: 18.303871154785156ms\r\n",,terminal_output +1238,1379580,"TERMINAL",0,0,"Step 658, loss: 1.9447702169418335, step time: 18.612146377563477ms\r\nStep 659, loss: 2.057004690170288, step time: 18.1732177734375ms\r\n",,terminal_output +1239,1379715,"TERMINAL",0,0,"Step 660, loss: 2.200038433074951, step time: 18.33057403564453ms\r\n",,terminal_output +1240,1379824,"TERMINAL",0,0,"Step 661, loss: 2.019087076187134, step time: 18.457412719726562ms\r\n",,terminal_output +1241,1379948,"TERMINAL",0,0,"Step 662, loss: 1.9675977230072021, step time: 18.284082412719727ms\r\nStep 663, loss: 1.9022504091262817, step time: 19.938945770263672ms\r\n",,terminal_output +1242,1380047,"TERMINAL",0,0,"Step 664, loss: 2.0172414779663086, step time: 18.31984519958496ms\r\n",,terminal_output +1243,1380131,"TERMINAL",0,0,"Step 665, loss: 1.9197670221328735, step time: 18.093109130859375ms\r\n",,terminal_output +1244,1380228,"TERMINAL",0,0,"Step 666, loss: 1.8156834840774536, step time: 18.425464630126953ms\r\n",,terminal_output +1245,1380279,"TERMINAL",0,0,"Step 667, loss: 1.878857135772705, step time: 18.73779296875ms\r\n",,terminal_output +1246,1380363,"TERMINAL",0,0,"Step 668, loss: 2.578517436981201, step time: 18.411636352539062ms\r\n",,terminal_output +1247,1380427,"TERMINAL",0,0,"Step 669, loss: 2.043527364730835, step time: 18.11504364013672ms\r\n",,terminal_output +1248,1380485,"TERMINAL",0,0,"Step 670, loss: 2.6745080947875977, step time: 18.560409545898438ms\r\n",,terminal_output +1249,1380582,"TERMINAL",0,0,"Step 671, loss: 2.965709686279297, step time: 18.33200454711914ms\r\n",,terminal_output +1250,1380679,"TERMINAL",0,0,"Step 672, loss: 1.8286486864089966, step time: 18.48316192626953ms\r\n",,terminal_output +1251,1380763,"TERMINAL",0,0,"Step 673, loss: 2.444918155670166, step time: 18.412113189697266ms\r\n",,terminal_output +1252,1380899,"TERMINAL",0,0,"Step 674, loss: 1.955784559249878, step time: 18.249034881591797ms\r\n",,terminal_output +1253,1380941,"TERMINAL",0,0,"Step 675, loss: 2.2709317207336426, step time: 18.085718154907227ms\r\n",,terminal_output +1254,1381092,"TERMINAL",0,0,"Step 676, loss: 2.5862934589385986, step time: 18.28932762145996ms\r\n",,terminal_output +1255,1381176,"TERMINAL",0,0,"Step 677, loss: 1.8462451696395874, step time: 17.987966537475586ms\r\n",,terminal_output +1256,1381193,"TERMINAL",0,0,"Step 678, loss: 1.917319655418396, step time: 18.19777488708496ms\r\n",,terminal_output +1257,1381247,"TERMINAL",0,0,"Step 679, loss: 2.2316105365753174, step time: 18.390417098999023ms\r\n",,terminal_output +1258,1381355,"TERMINAL",0,0,"Step 680, loss: 2.2544479370117188, step time: 18.319129943847656ms\r\n",,terminal_output +1259,1381410,"TERMINAL",0,0,"Step 681, loss: 2.1776115894317627, step time: 18.553495407104492ms\r\n",,terminal_output +1260,1381561,"TERMINAL",0,0,"Step 682, loss: 1.987014651298523, step time: 
18.38397979736328ms\r\n",,terminal_output +1261,1381600,"TERMINAL",0,0,"Step 683, loss: 1.8332326412200928, step time: 18.11385154724121ms\r\n",,terminal_output +1262,1381688,"TERMINAL",0,0,"Step 684, loss: 1.8753608465194702, step time: 18.341064453125ms\r\n",,terminal_output +1263,1382014,"TERMINAL",0,0,"^C",,terminal_output +1264,1382134,"TERMINAL",0,0,"Exception ignored in: \r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/lib/__init__.py"", line 128, in _xla_gc_callback\r\n def _xla_gc_callback(*args):\r\nKeyboardInterrupt: \r\nStep 685, loss: 2.1385624408721924, step time: 352.56123542785645ms\r\n",,terminal_output +1265,1382181,"TERMINAL",0,0,"Step 686, loss: 1.94075608253479, step time: 25.290727615356445ms\r\n",,terminal_output +1266,1382200,"TERMINAL",0,0,"^CTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 240, in \r\n elapsed_time = (time.time() - start_time) * 1000\r\nKeyboardInterrupt\r\n",,terminal_output +1267,1382352,"TERMINAL",0,0,"^CException ignored in atexit callback: .teardown_atexit at 0x14c54c12a7a0>\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/service_connection.py"", line 94, in teardown_atexit\r\n conn.teardown(hooks.exit_code)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/service_connection.py"", line 226, in teardown\r\n self._router.join()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/interface/router.py"", line 75, in join\r\n self._thread.join()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/threading.py"", line 1096, in join\r\n self._wait_for_tstate_lock()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/threading.py"", line 1116, in _wait_for_tstate_lock\r\n if lock.acquire(block, timeout):\r\nKeyboardInterrupt: \r\n",,terminal_output +1268,1382677,"TERMINAL",0,0,"^CException ignored in: .remove at 0x14c5a848e710>\r\nTraceback (most recent call last):\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/weakref.py"", line 370, in remove\r\n def remove(k, selfref=ref(self)):\r\nKeyboardInterrupt: \r\n",,terminal_output +1269,1383457,"TERMINAL",0,0,"^C",,terminal_output +1270,1383809,"TERMINAL",0,0,"\r\n]0;tum_cte0515@hkn0732:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0732 jafar]$ ",,terminal_output +1271,1384712,"TERMINAL",0,0,"sh scripts_horeka/train_dynamics.sh ",,terminal_output +1272,1386122,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output 
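After the ^C teardown above, the run is relaunched with `sh scripts_horeka/train_dynamics.sh`; its first output (next row) is a dump of the job's SLURM_* environment. The script itself is not captured in this recording; an equivalent dump from Python, for reference:

```python
# Equivalent of the SLURM environment dump below (the shell script is not
# part of this recording): print every SLURM_* variable of the current job.
import os

for key, value in sorted(os.environ.items()):
    if key.startswith("SLURM"):
        print(f"{key}={value}")
```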
+1273,1386275,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2303099\r\nSLURM_JOB_GPUS=2\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0732\r\nSLURM_JOB_START_TIME=1751316385\r\nSLURM_STEP_NODELIST=hkn0732\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751319985\r\nSLURM_PMI2_SRUN_PORT=35705\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3307524\r\nSLURM_PTY_PORT=32803\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=33\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0732\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_MEM_PER_NODE=51200\r\nSLURM_PTY_WIN_COL=143\r\nSLURM_NODELIST=hkn0732\r\nSLURM_SRUN_COMM_PORT=34929\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3307524\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0732\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=34929\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0732\r\n",,terminal_output +1274,1387734,"genie.py",0,0,"",python,tab +1275,1388200,"TERMINAL",0,0,"2025-06-30 23:06:55.369118: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751317615.381968 2311750 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751317615.386336 2311750 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1751317615.398718 2311750 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751317615.398735 2311750 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751317615.398737 2311750 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751317615.398739 2311750 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +1276,1390697,"TERMINAL",0,0,"W0000 00:00:1751317617.884234 2311750 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. 
Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +1277,1390961,"TERMINAL",0,0,"Running on 1 devices.\r\n",,terminal_output +1278,1391269,"models/dynamics.py",0,0,"",python,tab +1279,1391686,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +1280,1392540,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250630_230658-14wu60p4\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-tiny-overfit-big-lr-0000\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/14wu60p4\r\n",,terminal_output +1281,1393855,"TERMINAL",0,0,"2025-06-30 23:07:01.117248: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1282,1406585,"TERMINAL",0,0,"2025-06-30 23:07:13.783638: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1283,1418315,"TERMINAL",0,0,"Entering jdb:\r\n(jdb) ",,terminal_output +1284,1479247,"models/dynamics.py",1420,0,"",python,selection_mouse +1285,1480003,"models/dynamics.py",1014,0,"",python,selection_mouse +1286,1483810,"TERMINAL",0,0,"l",,terminal_output +1287,1483934,"TERMINAL",0,0,"\r\n> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/dynamics.py(39)\r\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\r\n # --- Mask videos ---\r\n vid_embed = self.patch_embed(batch[""video_tokens""])\r\n-> jax.debug.breakpoint()\r\n if training:\r\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\r\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\r\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\r\n mask = mask.at[:, 0].set(False)\r\n(jdb) ",,terminal_output +1288,1486693,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +1289,1486789,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +1290,1487487,"TERMINAL",0,0,"[?25l vi\r[?25h\r",,terminal_output +1291,1487632,"TERMINAL",0,0,"",,terminal_output +1292,1492578,"TERMINAL",0,0,"[?25lbatch[""mask_rng""][?25h",,terminal_output +1293,1493737,"TERMINAL",0,0,"\r\nArray([ 928981903, 3453687069], dtype=uint32)\r\n(jdb) ",,terminal_output +1294,1497967,"TERMINAL",0,0,"bash",,terminal_focus +1295,1500866,"TERMINAL",0,0,"# Array([ 928981903, 3453687069], dtype=uint32)",,terminal_command +1296,1500907,"TERMINAL",0,0,"\rArray([ 928981903, 3453687069], dtype=uint32)\r\n[?2004l\r]633;E;;9a6fb2c6-f596-403e-97b8-ced5ab6848ce]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +1297,1502503,"TERMINAL",0,0,"srun",,terminal_focus +1298,1516780,"Untitled-1",0,0,"",plaintext,tab +1299,1518636,"Untitled-1",0,0,"Array([ 928981903, 3453687069], dtype=uint32)",plaintext,content 
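The value echoed at the `(jdb)` prompt above, `Array([ 928981903, 3453687069], dtype=uint32)`, is a legacy JAX PRNG key: a shape-(2,) uint32 array. Seeing a distinct pair on each inspection of `batch["mask_rng"]` is what confirms a fresh key reaches the model every step. For reference:

```python
# Legacy (non-typed) JAX PRNG keys are shape-(2,) uint32 arrays, which is
# exactly the form printed at the (jdb) prompt in this session.
import jax

key = jax.random.PRNGKey(0)
print(key)                   # Array([0, 0], dtype=uint32)
print(key.shape, key.dtype)  # (2,) uint32
```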
+1300,1520113,"Untitled-1",0,0,"",plaintext,selection_mouse +1301,1520449,"Untitled-1",0,0,"\n",plaintext,content +1302,1520577,"Untitled-1",1,0,"\n",plaintext,content +1303,1520724,"Untitled-1",2,0,"\n",plaintext,content +1304,1521379,"Untitled-1",2,0,"",plaintext,selection_command +1305,1522496,"Untitled-1",2,0,"R",plaintext,content +1306,1522497,"Untitled-1",3,0,"",plaintext,selection_keyboard +1307,1522584,"Untitled-1",3,0,"N",plaintext,content +1308,1522585,"Untitled-1",4,0,"",plaintext,selection_keyboard +1309,1522729,"Untitled-1",4,0,"G",plaintext,content +1310,1522730,"Untitled-1",5,0,"",plaintext,selection_keyboard +1311,1523550,"Untitled-1",5,0,".",plaintext,content +1312,1523551,"Untitled-1",6,0,"",plaintext,selection_keyboard +1313,1524081,"Untitled-1",5,1,"",plaintext,content +1314,1524323,"Untitled-1",5,0,".",plaintext,content +1315,1524324,"Untitled-1",6,0,"",plaintext,selection_keyboard +1316,1524776,"Untitled-1",5,1,"",plaintext,content +1317,1524958,"Untitled-1",5,0,":",plaintext,content +1318,1524959,"Untitled-1",6,0,"",plaintext,selection_keyboard +1319,1525985,"Untitled-1",52,0,"",plaintext,selection_mouse +1320,1526250,"Untitled-1",52,0,"\n",plaintext,content +1321,1527998,"scripts_horeka/train_dynamics.sh",0,0,"",shellscript,tab +1322,1529000,"sample.py",0,0,"",python,tab +1323,1533410,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +1324,1535096,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +1325,1535211,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +1326,1535502,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +1327,1535672,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1328,1535741,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +1329,1536069,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +1330,1536281,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1331,1536376,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +1332,1536530,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +1333,1536746,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1334,1536959,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +1335,1537000,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +1336,1537117,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +1337,1537195,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1338,1537280,"TERMINAL",0,0,"\r\n(1, 16, 920, 128)\r\n(jdb) ",,terminal_output +1339,1563331,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1340,1563520,"TERMINAL",0,0,"\r\n*** NameError: name 's' is not defined\r\n(jdb) ",,terminal_output +1341,1564653,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +1342,1564848,"TERMINAL",0,0,"\r\n*** NameError: name 'n' is not defined\r\n(jdb) ",,terminal_output +1343,1567364,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1344,1567994,"TERMINAL",0,0,"\r\n",,terminal_output +1345,1571477,"TERMINAL",0,0,"2025-06-30 23:09:58.795403: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1346,1578642,"TERMINAL",0,0,"2025-06-30 23:10:05.969689: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1347,1598421,"TERMINAL",0,0,"batch shape: (1, 16, 90, 160, 3)\r\n",,terminal_output +1348,1612997,"TERMINAL",0,0,"2025-06-30 23:10:40.146230: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-30 23:10:40.146792: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-30 23:10:40.147305: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-30 23:10:40.147936: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-06-30 23:10:40.148995: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1349,1667004,"TERMINAL",0,0,"Entering jdb:\r\n(jdb) ",,terminal_output +1350,1718965,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +1351,1719139,"TERMINAL",0,0,"\r\n> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/dynamics.py(39)\r\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\r\n # --- Mask videos ---\r\n vid_embed = self.patch_embed(batch[""video_tokens""])\r\n-> jax.debug.breakpoint()\r\n if training:\r\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\r\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\r\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\r\n mask = mask.at[:, 0].set(False)\r\n(jdb) ",,terminal_output +1352,1723160,"TERMINAL",0,0,"\rl",,terminal_output +1353,1723471,"TERMINAL",0,0,"\rc",,terminal_output +1354,1723970,"TERMINAL",0,0,"\rn",,terminal_output +1355,1724369,"TERMINAL",0,0,"\rs",,terminal_output +1356,1724773,"TERMINAL",0,0,"\rvid_embed.shape",,terminal_output +1357,1725660,"TERMINAL",0,0,"\rbatch[""mask_rng""]",,terminal_output +1358,1726813,"TERMINAL",0,0,"\r\nArray([3678777915, 926359935], dtype=uint32)\r\n(jdb) ",,terminal_output +1359,1731789,"Untitled-1",0,0,"",plaintext,tab +1360,1732463,"Untitled-1",53,0,"Array([3678777915, 926359935], dtype=uint32)",plaintext,content +1361,1802250,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1362,1802397,"TERMINAL",0,0,"\r\n",,terminal_output +1363,1802455,"TERMINAL",0,0,"Step 0, loss: 8.689549446105957, step time: 203860.94689369202ms\r\n",,terminal_output +1364,1804585,"TERMINAL",0,0,"Entering jdb:\r\n(jdb) ",,terminal_output +1365,1833658,"TERMINAL",0,0,"\rc",,terminal_output 
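Each `c` / `batch["mask_rng"]` round trip in this debugging session prints a new key because the training loop (train_dynamics.py, reproduced further below) splits one carried rng into fresh subkeys every iteration. A self-contained sketch of that pattern:

```python
# Sketch of the per-step key handling used in train_dynamics.py: one rng is
# carried across iterations and split into fresh subkeys each step.
import jax

rng = jax.random.PRNGKey(0)
for step in range(3):
    rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)
    print(step, _rng_mask)  # a different uint32 pair every step
```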
+1366,1834230,"TERMINAL",0,0,"\rbatch[""mask_rng""]",,terminal_output +1367,1835185,"TERMINAL",0,0,"\r\nArray([4197154823, 3444956078], dtype=uint32)\r\n(jdb) ",,terminal_output +1368,1841865,"Untitled-1",0,0,"",plaintext,tab +1369,1842710,"Untitled-1",98,0,"\n",plaintext,content +1370,1842963,"Untitled-1",99,0,"Array([4197154823, 3444956078], dtype=uint32)",plaintext,content +1371,1846580,"TERMINAL",0,0,"\rbatch[""mask_rng""]",,terminal_output +1372,1847542,"TERMINAL",0,0,"\rc",,terminal_output +1373,1848161,"TERMINAL",0,0,"\r\nStep 1, loss: 8.141585350036621, step time: 45686.34629249573ms\r\n",,terminal_output +1374,1850149,"TERMINAL",0,0,"Entering jdb:\r\n(jdb) ",,terminal_output +1375,1850853,"TERMINAL",0,0,"\rc",,terminal_output +1376,1851016,"TERMINAL",0,0,"\rbatch[""mask_rng""]",,terminal_output +1377,1851854,"TERMINAL",0,0,"\r\nArray([ 440053898, 3287260784], dtype=uint32)\r\n(jdb) ",,terminal_output +1378,1856183,"TERMINAL",0,0,"\rbatch[""mask_rng""]",,terminal_output +1379,1856456,"TERMINAL",0,0,"\rc",,terminal_output +1380,1857035,"TERMINAL",0,0,"\r\n",,terminal_output +1381,1857100,"TERMINAL",0,0,"Step 2, loss: nan, step time: 8913.456439971924ms\r\n",,terminal_output +1382,1859474,"TERMINAL",0,0,"Entering jdb:\r\n(jdb) ",,terminal_output +1383,1860322,"TERMINAL",0,0,"\rc",,terminal_output +1384,1860452,"TERMINAL",0,0,"\rbatch[""mask_rng""]",,terminal_output +1385,1861209,"TERMINAL",0,0,"\r\nArray([3036565412, 2224539052], dtype=uint32)\r\n(jdb) ",,terminal_output +1386,1863945,"TERMINAL",0,0,"\rbatch[""mask_rng""]",,terminal_output +1387,1864243,"TERMINAL",0,0,"\rc",,terminal_output +1388,1865240,"TERMINAL",0,0,"\r\nStep 3, loss: 7.575166702270508, step time: 8095.523357391357ms\r\n",,terminal_output +1389,1867225,"TERMINAL",0,0,"Entering jdb:\r\n(jdb) ",,terminal_output +1390,1867935,"TERMINAL",0,0,"\rc",,terminal_output +1391,1868213,"TERMINAL",0,0,"\rbatch[""mask_rng""]",,terminal_output +1392,1869825,"TERMINAL",0,0,"\r\nArray([3557079072, 390234495], dtype=uint32)\r\n(jdb) ",,terminal_output +1393,1873523,"TERMINAL",0,0,"\rbatch[""mask_rng""]",,terminal_output +1394,1873762,"TERMINAL",0,0,"\rc",,terminal_output +1395,1874389,"TERMINAL",0,0,"\r\nStep 4, loss: 7.437936305999756, step time: 9093.08409690857ms\r\n",,terminal_output +1396,1876356,"TERMINAL",0,0,"Entering jdb:\r\n(jdb) ",,terminal_output +1397,1876929,"TERMINAL",0,0,"\rc",,terminal_output +1398,1877071,"TERMINAL",0,0,"\rbatch[""mask_rng""]",,terminal_output +1399,1877646,"TERMINAL",0,0,"\r\nArray([2630130017, 1139119405], dtype=uint32)\r\n(jdb) ",,terminal_output +1400,1883390,"TERMINAL",0,0,"^DERROR:2025-06-30 23:15:10,628:jax._src.debugging:96: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 94, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 334, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, 
**kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py"", line 36, in exit\r\n self._orig_exit(orig_code) # type: ignore\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 94, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 334, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py"", line 36, in exit\r\n self._orig_exit(orig_code) # type: ignore\r\nSystemExit: 0\r\nE0630 23:15:10.650999 2311750 pjrt_stream_executor_client.cc:2917] Execution of replica 0 failed: INTERNAL: CpuCallback error calling callback: Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 239, in \r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/traceback_util.py"", line 182, in reraise_with_filtered_traceback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 334, in cache_miss\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 195, in _python_pjit_helper\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 1884, in _pjit_call_impl_python\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/profiler.py"", line 354, in wrapper\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/interpreters/pxla.py"", line 1297, in __call__\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/callback.py"", line 782, in _wrapped_callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 200, in _callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 97, in debug_callback_impl\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 334, in _flat_callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 162, in run\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 145, in cmdloop\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py"", line 36, in exit\r\nSystemExit: 0\r\njax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 239, in \r\n train_state, loss, recon, gt_debug, metrics = train_step(train_state, inputs)\r\njaxlib._jax.XlaRuntimeError: INTERNAL: CpuCallback error calling callback: Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py"", line 239, in \r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/traceback_util.py"", line 182, in reraise_with_filtered_traceback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 334, in cache_miss\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 195, in _python_pjit_helper\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 1884, in _pjit_call_impl_python\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/profiler.py"", line 354, in wrapper\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/interpreters/pxla.py"", line 1297, in __call__\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/callback.py"", line 782, in _wrapped_callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 200, in _callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 97, in debug_callback_impl\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 334, in _flat_callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 162, in run\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 145, in cmdloop\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py"", line 36, in exit\r\nSystemExit: 0\r\n",,terminal_output +1401,1884341,"Untitled-1",0,0,"",plaintext,tab +1402,1884567,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run 
dynamics-tiny-overfit-big-lr-0000 at: https://wandb.ai/instant-uv/jafar/runs/14wu60p4\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250630_230658-14wu60p4/logs\r\n",,terminal_output +1403,1885019,"Untitled-1",101,43,"ray([4197154823, 3444956078], dtype=uint32)",plaintext,selection_mouse +1404,1885020,"Untitled-1",53,91,"Array([3678777915, 926359935], dtype=uint32)\nArray([4197154823, 3444956078], dtype=uint32)",plaintext,selection_mouse +1405,1885020,"Untitled-1",7,137,"Array([ 928981903, 3453687069], dtype=uint32)\nArray([3678777915, 926359935], dtype=uint32)\nArray([4197154823, 3444956078], dtype=uint32)",plaintext,selection_mouse +1406,1885049,"Untitled-1",2,142,"RNG:\nArray([ 928981903, 3453687069], dtype=uint32)\nArray([3678777915, 926359935], dtype=uint32)\nArray([4197154823, 3444956078], dtype=uint32)",plaintext,selection_mouse +1407,1885110,"Untitled-1",1,143,"\nRNG:\nArray([ 928981903, 3453687069], dtype=uint32)\nArray([3678777915, 926359935], dtype=uint32)\nArray([4197154823, 3444956078], dtype=uint32)",plaintext,selection_mouse +1408,1885169,"Untitled-1",0,144,"\n\nRNG:\nArray([ 928981903, 3453687069], dtype=uint32)\nArray([3678777915, 926359935], dtype=uint32)\nArray([4197154823, 3444956078], dtype=uint32)",plaintext,selection_mouse +1409,1886025,"Untitled-1",0,144,"",plaintext,content +1410,1886719,"TERMINAL",0,0,"]0;tum_cte0515@hkn0732:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0732 jafar]$ ",,terminal_output +1411,1889441,".gitignore",0,0,"*.pyc\n*.npy\n*.png\n*.gif\n\nwandb_key\ncheckpoints/\nwandb/\n__pycache__/\n*ckpt\nslurm*.out\ndata\ndata_tfrecord*\nlogs\nscripts",ignore,tab +1412,1890415,"sample.py",0,0,"",python,tab +1413,1893432,"sample.py",2746,0,"",python,selection_mouse +1414,1894302,"sample.py",3136,0,"",python,selection_mouse +1415,1894305,"sample.py",3135,0,"",python,selection_command +1416,1898596,"models/dynamics.py",0,0,"",python,tab +1417,1900395,"models/dynamics.py",1145,0,"",python,selection_mouse +1418,1900511,"models/dynamics.py",1145,1," ",python,selection_mouse +1419,1901097,"models/dynamics.py",1081,0,"",python,selection_mouse +1420,1901228,"models/dynamics.py",1078,5,"batch",python,selection_mouse +1421,1909851,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom genie import Genie, restore_genie_components\nfrom models.tokenizer import TokenizerVQVAE\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n # Optimization\n batch_size: int = 36\n min_lr: float = 3e-6\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n 
latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n )\n return ce_loss, (outputs[""recon""], outputs[""gt_debug""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, gt_debug, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, gt_debug, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args\n )\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, 
args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=jnp.float32\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Restore checkpoint ---\n train_state = restore_genie_components(\n train_state, replicated_sharding, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n # dataloader = get_dataloader(\n # # NOTE: We deliberately pass the global batch size\n # # The dataloader shards the dataset across all processes\n # tfrecord_files,\n # args.seq_len,\n # args.batch_size,\n # *image_shape,\n # )\n step = 0\n while step < args.num_steps:\n # for videos in dataloader:\n npy_path = ""overfit_dir/single_sample_corner.npy""\n # npy_path = ""overfit_dir/single_batch_3_elems.npy""\n videos = np.load(npy_path)\n print(""batch shape: "", videos.shape)\n while(True):\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n start_time = time.time()\n train_state, loss, recon, gt_debug, metrics = train_step(train_state, inputs)\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n ""step_time_ms"": elapsed_time,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n debug_seq = gt_debug[0].clip(0,1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n debug_comparison_seq = jnp.concatenate((gt_seq, debug_seq), axis=1)\n debug_comparison_seq = einops.rearrange(\n debug_comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n true_vs_debug=wandb.Image(\n np.asarray(debug_comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n 
orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""genie_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +1422,1912487,"train_dynamics.py",9006,0,"",python,selection_mouse +1423,1912515,"train_dynamics.py",9005,0,"",python,selection_command +1424,1916902,"train_dynamics.py",5481,0,"",python,selection_command +1425,1918087,"train_dynamics.py",5491,0,"",python,selection_mouse +1426,1918273,"train_dynamics.py",5490,4,"_rng",python,selection_mouse +1427,1952658,"train_dynamics.py",7544,0,"",python,selection_mouse +1428,1952792,"train_dynamics.py",7540,9,"_rng_mask",python,selection_mouse +1429,2025719,"train_dynamics.py",7851,0,"",python,selection_mouse +1430,2025762,"train_dynamics.py",7850,0,"",python,selection_command +1431,2026274,"train_dynamics.py",7794,0,"",python,selection_mouse +1432,2145949,"train_dynamics.py",8473,0,"",python,selection_mouse +1433,2152977,"train_dynamics.py",8689,0,"",python,selection_mouse +1434,2152980,"train_dynamics.py",8688,0,"",python,selection_command +1435,2166679,"scripts_horeka/train_dynamics.sh",0,0,"",shellscript,tab +1436,2167373,"sample.py",0,0,"",python,tab +1437,2168742,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"",shellscript,tab +1438,2169499,"sample.py",0,0,"",python,tab +1439,2191256,"sample.py",3256,0,"",python,selection_mouse +1440,2191270,"sample.py",3255,0,"",python,selection_command +1441,2192386,"sample.py",3536,0,"",python,selection_mouse +1442,2193191,"sample.py",3433,0,"",python,selection_mouse +1443,2193214,"sample.py",3432,0,"",python,selection_command +1444,2193709,"sample.py",3475,0,"",python,selection_mouse +1445,2196851,"genie.py",0,0,"",python,tab +1446,2223906,"genie.py",3053,0,"",python,selection_mouse +1447,2223935,"genie.py",3052,0,"",python,selection_command +1448,2225927,"genie.py",3053,0,"\n ",python,content +1449,2226319,"genie.py",3062,0,"b",python,content +1450,2226320,"genie.py",3063,0,"",python,selection_keyboard +1451,2226770,"genie.py",3062,1,"",python,content +1452,2227099,"genie.py",3062,0,"a",python,content +1453,2227100,"genie.py",3063,0,"",python,selection_keyboard +1454,2227401,"genie.py",3062,1,"",python,content +1455,2227686,"genie.py",3062,0,"a",python,content +1456,2227687,"genie.py",3063,0,"",python,selection_keyboard +1457,2228146,"genie.py",3062,1,"",python,content +1458,2228408,"genie.py",3062,0,"j",python,content +1459,2228409,"genie.py",3063,0,"",python,selection_keyboard +1460,2228569,"genie.py",3063,0,"a",python,content +1461,2228570,"genie.py",3064,0,"",python,selection_keyboard +1462,2228767,"genie.py",3064,0,"x",python,content +1463,2228769,"genie.py",3065,0,"",python,selection_keyboard +1464,2228798,"genie.py",3065,0,".",python,content +1465,2228799,"genie.py",3066,0,"",python,selection_keyboard +1466,2229126,"genie.py",3066,0,"d",python,content +1467,2229127,"genie.py",3067,0,"",python,selection_keyboard +1468,2229292,"genie.py",3067,0,"e",python,content +1469,2229294,"genie.py",3068,0,"",python,selection_keyboard +1470,2229319,"genie.py",3068,0,"b",python,content +1471,2229320,"genie.py",3069,0,"",python,selection_keyboard +1472,2229466,"genie.py",3069,0,"u",python,content +1473,2229468,"genie.py",3070,0,"",python,selection_keyboard +1474,2229564,"genie.py",3070,0,"g",python,content +1475,2229565,"genie.py",3071,0,"",python,selection_keyboard +1476,2229659,"genie.py",3071,0,".",python,content +1477,2229660,"genie.py",3072,0,"",python,selection_keyboard +1478,2229984,"genie.py",3072,0,"b",python,content 
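Annotation: the dynamics_loss_fn recorded above normalizes both the cross-entropy and the token accuracy by mask.sum(), so unmasked positions contribute nothing to the loss. A minimal, self-contained sketch of that masked-mean pattern, with toy shapes rather than the run's real ones:

```python
import jax
import jax.numpy as jnp
import optax

rng = jax.random.PRNGKey(0)
logits = jax.random.normal(rng, (2, 4, 8, 32))        # (batch, time, patches, vocab)
labels = jnp.zeros((2, 4, 8), dtype=jnp.int32)        # ground-truth token indices
mask = jax.random.bernoulli(rng, 0.5, labels.shape)   # True where a token was masked

ce = optax.softmax_cross_entropy_with_integer_labels(logits, labels)
masked_ce = (mask * ce).sum() / mask.sum()            # average over masked tokens only
masked_acc = (mask * (logits.argmax(-1) == labels)).sum() / mask.sum()
print(masked_ce, masked_acc)
```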
+1479,2229986,"genie.py",3073,0,"",python,selection_keyboard +1480,2229987,"genie.py",3073,0,"r",python,content +1481,2229988,"genie.py",3074,0,"",python,selection_keyboard +1482,2230186,"genie.py",3074,0,"e",python,content +1483,2230187,"genie.py",3075,0,"",python,selection_keyboard +1484,2230534,"genie.py",3072,3,"breakpoint",python,content +1485,2231352,"genie.py",3082,0,"()",python,content +1486,2231353,"genie.py",3083,0,"",python,selection_keyboard +1487,2231394,"genie.py",3083,1,")",python,content +1488,2231395,"genie.py",3084,0,"",python,selection_keyboard +1489,2231504,"genie.py",3083,0,"",python,selection_command +1490,2233634,"TERMINAL",0,0,"sh scripts_horeka/train_dynamics.sh ",,terminal_output +1491,2234963,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +1492,2235102,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2303099\r\nSLURM_JOB_GPUS=2\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0732\r\nSLURM_JOB_START_TIME=1751316385\r\nSLURM_STEP_NODELIST=hkn0732\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751319985\r\nSLURM_PMI2_SRUN_PORT=35705\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3307524\r\nSLURM_PTY_PORT=32803\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=33\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0732\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_MEM_PER_NODE=51200\r\nSLURM_PTY_WIN_COL=143\r\nSLURM_NODELIST=hkn0732\r\nSLURM_SRUN_COMM_PORT=34929\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3307524\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0732\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=34929\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0732\r\n",,terminal_output +1493,2237064,"TERMINAL",0,0,"2025-06-30 23:21:04.342903: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751318464.356129 2316206 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751318464.360827 2316206 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1751318464.373956 2316206 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751318464.373973 2316206 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751318464.373975 2316206 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751318464.373977 2316206 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +1494,2239133,"TERMINAL",0,0,"W0000 00:00:1751318466.472158 2316206 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +1495,2239434,"TERMINAL",0,0,"Running on 1 devices.\r\n",,terminal_output +1496,2240208,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +1497,2240767,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250630_232107-ts3e7iky\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-tiny-overfit-big-lr-0000\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/ts3e7iky\r\n",,terminal_output +1498,2242150,"TERMINAL",0,0,"2025-06-30 23:21:09.484833: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1499,2255165,"TERMINAL",0,0,"2025-06-30 23:21:22.501224: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
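Annotation: the training loop recorded above converts each process's local numpy batch into a single global array sharded over the one-dimensional "data" mesh axis via jax.make_array_from_process_local_data. A minimal sketch of that step, assuming a toy video shape (the real height/width are not asserted here):

```python
import jax
import numpy as np
from jax.experimental.mesh_utils import create_device_mesh
from jax.sharding import Mesh, NamedSharding, PartitionSpec

mesh = Mesh(create_device_mesh((jax.device_count(),)), axis_names=("data",))
videos_sharding = NamedSharding(mesh, PartitionSpec("data", None, None, None, None))
# Per-process batch; the leading axis is split across the "data" mesh axis.
local_videos = np.zeros((1, 16, 90, 160, 3), dtype=np.float32)
global_videos = jax.make_array_from_process_local_data(videos_sharding, local_videos)
print(global_videos.shape)
```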
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1500,2266993,"TERMINAL",0,0,"Entering jdb:\r\n(jdb) ",,terminal_output +1501,2272896,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +1502,2272959,"TERMINAL",0,0,"\r\n> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/dynamics.py(39)\r\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\r\n # --- Mask videos ---\r\n vid_embed = self.patch_embed(batch[""video_tokens""])\r\n-> jax.debug.breakpoint()\r\n if training:\r\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\r\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\r\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\r\n mask = mask.at[:, 0].set(False)\r\n(jdb) ",,terminal_output +1503,2283951,"models/dynamics.py",0,0,"",python,tab +1504,2285178,"models/dynamics.py",1082,0,"",python,selection_command +1505,2285491,"models/dynamics.py",1034,0,"",python,selection_command +1506,2285665,"models/dynamics.py",1013,0,"",python,selection_command +1507,2286082,"models/dynamics.py",984,31,"",python,content +1508,2286149,"models/dynamics.py",992,0,"",python,selection_command +1509,2288626,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1510,2288722,"TERMINAL",0,0,"\r\n",,terminal_output +1511,2292436,"TERMINAL",0,0,"2025-06-30 23:21:59.774104: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1512,2299641,"TERMINAL",0,0,"2025-06-30 23:22:06.975614: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1513,2302840,"TERMINAL",0,0,"Entering jdb:\r\n(jdb) ",,terminal_output +1514,2320781,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +1515,2320882,"TERMINAL",0,0,"\r\n> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py(93)\r\n dyna_outputs = self.dynamics(outputs, training)\r\n outputs.update(dyna_outputs)\r\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\r\n outputs[""recon""] = self.tokenizer.decode(\r\n mle_indices, batch[""videos""].shape[2:4]\r\n )\r\n-> jax.debug.breakpoint()\r\n outputs[""gt_debug""] = self.tokenizer.decode(\r\n tokenizer_outputs[""indices""], batch[""videos""].shape[2:4]\r\n )\r\n return outputs\r\n \r\n(jdb) ",,terminal_output +1516,2324117,"sample.py",0,0,"",python,tab +1517,2324707,"genie.py",0,0,"",python,tab +1518,2330981,"TERMINAL",0,0,"bash",,terminal_focus +1519,2337132,"genie.py",0,0,"",python,tab +1520,2337133,"genie.py",2278,0,"",python,selection_mouse +1521,2337152,"genie.py",2277,0,"",python,selection_command +1522,2338283,"TERMINAL",0,0,"srun",,terminal_focus +1523,2355588,"TERMINAL",0,0,"o",,terminal_output +1524,2355924,"TERMINAL",0,0,"u",,terminal_output +1525,2356080,"TERMINAL",0,0,"t",,terminal_output +1526,2356593,"TERMINAL",0,0,"p",,terminal_output +1527,2356694,"TERMINAL",0,0,"u",,terminal_output +1528,2357212,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1529,2357418,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1530,2358349,"TERMINAL",0,0,"[?25l[[?25h",,terminal_output +1531,2358685,"TERMINAL",0,0,"[?25l""[?25h",,terminal_output +1532,2358993,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +1533,2359134,"TERMINAL",0,0,"[?25la[?25h[?25ls[?25h",,terminal_output +1534,2359400,"TERMINAL",0,0,"[?25lk[?25h",,terminal_output +1535,2359727,"TERMINAL",0,0,"[?25l""[?25h",,terminal_output +1536,2360834,"TERMINAL",0,0,"[?25l][?25h",,terminal_output +1537,2361077,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +1538,2361348,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1539,2361441,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +1540,2361563,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +1541,2361660,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +1542,2361797,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1543,2362178,"TERMINAL",0,0,"\r\n(1, 16, 920)\r\n(jdb) ",,terminal_output +1544,2363034,"TERMINAL",0,0,"\routputs[""mask""].shape",,terminal_output +1545,2365496,"TERMINAL",0,0,")",,terminal_output +1546,2365850,"TERMINAL",0,0,"[?25l[?25h",,terminal_output +1547,2366013,"TERMINAL",0,0,"[?25l[?25h",,terminal_output +1548,2366167,"TERMINAL",0,0,"[?25l[?25h",,terminal_output +1549,2366757,"TERMINAL",0,0,"[?25lo\r[1@so[?25h",,terminal_output +1550,2366954,"TERMINAL",0,0,"[?25lo\r[1@uo[?25h",,terminal_output +1551,2367163,"TERMINAL",0,0,"[?25lo\r[1@mo[?25h",,terminal_output +1552,2367605,"TERMINAL",0,0,"[?25lo\r[1@(o[?25h",,terminal_output +1553,2368237,"TERMINAL",0,0,"\r\n937\r\n(jdb) ",,terminal_output +1554,2399257,"genie.py",0,0,"",python,tab +1555,2399259,"genie.py",2837,0,"",python,selection_mouse +1556,2399297,"genie.py",2836,0,"",python,selection_command +1557,2399980,"genie.py",2837,0,"",python,selection_mouse +1558,2399991,"genie.py",2836,0,"",python,selection_command +1559,2400611,"genie.py",2828,0,"",python,selection_mouse +1560,2401181,"genie.py",2874,0,"",python,selection_mouse +1561,2401188,"genie.py",2873,0,"",python,selection_command 
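Annotation: the `(jdb)` prompts in the transcript come from jax.debug.breakpoint(), which suspends even jit-compiled code at the call site; `l` lists the surrounding source and `c` resumes, exactly as typed above. A minimal sketch:

```python
import jax
import jax.numpy as jnp

@jax.jit
def step(x):
    y = x * 2.0
    jax.debug.breakpoint()  # opens the (jdb) prompt; inspect y, then `c` to continue
    return y + 1.0

step(jnp.arange(4.0))
```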
+1562,2401745,"genie.py",2823,0,"",python,selection_mouse +1563,2401876,"genie.py",2819,7,"outputs",python,selection_mouse +1564,2402562,"genie.py",2814,0,"",python,selection_mouse +1565,2402729,"genie.py",2810,8,"dynamics",python,selection_mouse +1566,2405265,"genie.py",2822,0,"",python,selection_mouse +1567,2405424,"genie.py",2819,7,"outputs",python,selection_mouse +1568,2427693,"TERMINAL",0,0,"1",,terminal_output +1569,2428076,"TERMINAL",0,0,"[?25l6[?25h",,terminal_output +1570,2428496,"TERMINAL",0,0,"[?25l0[?25h",,terminal_output +1571,2429018,"TERMINAL",0,0,"[?25l*[?25h",,terminal_output +1572,2429445,"TERMINAL",0,0,"[?25l9[?25h",,terminal_output +1573,2429499,"TERMINAL",0,0,"[?25l0[?25h",,terminal_output +1574,2429814,"TERMINAL",0,0,"\r\n14400\r\n(jdb) ",,terminal_output +1575,2432978,"TERMINAL",0,0,"14400",,terminal_output +1576,2433514,"TERMINAL",0,0,"[?25l/[?25h",,terminal_output +1577,2433764,"TERMINAL",0,0,"[?25l1[?25h",,terminal_output +1578,2433891,"TERMINAL",0,0,"[?25l6[?25h",,terminal_output +1579,2434183,"TERMINAL",0,0,"[?25l+[?25h",,terminal_output +1580,2434760,"TERMINAL",0,0,"[?25l+\r[?25h",,terminal_output +1581,2434949,"TERMINAL",0,0,"\r\n900.0\r\n(jdb) ",,terminal_output +1582,2450893,"train_dynamics.py",0,0,"",python,tab +1583,2452693,"genie.py",0,0,"",python,tab +1584,2453392,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"",shellscript,tab +1585,2464729,"scripts_horeka/train_dynamics.sh",0,0,"",shellscript,tab +1586,2466801,"genie.py",0,0,"",python,tab +1587,2467408,"models/dynamics.py",0,0,"",python,tab +1588,2469388,"models/dynamics.py",1330,0,"",python,selection_mouse +1589,2470142,"models/dynamics.py",1388,0,"",python,selection_mouse +1590,2470149,"models/dynamics.py",1387,0,"",python,selection_command +1591,2481954,"models/dynamics.py",1595,0,"",python,selection_mouse +1592,2482106,"models/dynamics.py",1592,9,"vid_embed",python,selection_mouse +1593,2483422,"models/dynamics.py",1486,0,"",python,selection_mouse +1594,2483436,"models/dynamics.py",1485,0,"",python,selection_command +1595,2489921,"models/dynamics.py",1589,0,"",python,selection_mouse +1596,2490922,"models/dynamics.py",1588,0,"",python,selection_mouse +1597,2492909,"models/dynamics.py",370,0,"",python,selection_mouse +1598,2493455,"utils/nn.py",0,0,"",python,tab +1599,2499230,"utils/nn.py",2345,0,"",python,selection_mouse +1600,2499910,"utils/nn.py",2250,0,"",python,selection_mouse +1601,2499911,"utils/nn.py",2249,0,"",python,selection_command +1602,2500074,"utils/nn.py",2250,0,"",python,selection_mouse +1603,2500077,"utils/nn.py",2249,0,"",python,selection_command +1604,2500808,"utils/nn.py",2274,0,"",python,selection_mouse +1605,2500988,"utils/nn.py",2270,9,"LayerNorm",python,selection_mouse +1606,2501718,"utils/nn.py",2348,0,"",python,selection_mouse +1607,2501875,"utils/nn.py",2344,9,"LayerNorm",python,selection_mouse +1608,2502453,"utils/nn.py",2305,0,"",python,selection_mouse +1609,2502617,"utils/nn.py",2302,5,"Dense",python,selection_mouse +1610,2503274,"utils/nn.py",2347,0,"",python,selection_mouse +1611,2503456,"utils/nn.py",2344,9,"LayerNorm",python,selection_mouse +1612,2504298,"utils/nn.py",2406,0,"",python,selection_mouse +1613,2504980,"utils/nn.py",2446,0,"",python,selection_mouse +1614,2508638,"utils/nn.py",2601,0,"",python,selection_mouse +1615,2511315,"utils/nn.py",2444,0,"",python,selection_mouse +1616,2514870,"utils/nn.py",1296,0,"",python,selection_mouse +1617,2516441,"utils/nn.py",1463,0,"",python,selection_mouse +1618,2517047,"utils/nn.py",1534,0,"",python,selection_mouse 
+1619,2517689,"utils/nn.py",1691,0,"",python,selection_mouse +1620,2518554,"utils/nn.py",1608,0,"",python,selection_mouse +1621,2518705,"utils/nn.py",1600,11,"causal_mask",python,selection_mouse +1622,2519351,"utils/nn.py",1620,0,"",python,selection_mouse +1623,2519510,"utils/nn.py",1618,3,"tri",python,selection_mouse +1624,2520129,"utils/nn.py",1616,0,"",python,selection_mouse +1625,2520289,"utils/nn.py",1614,3,"jnp",python,selection_mouse +1626,2520872,"utils/nn.py",1620,0,"",python,selection_mouse +1627,2521045,"utils/nn.py",1618,3,"tri",python,selection_mouse +1628,2522534,"utils/nn.py",1460,0,"",python,selection_mouse +1629,2522679,"utils/nn.py",1452,8," ",python,selection_mouse +1630,2522862,"utils/nn.py",1452,47," # --- Temporal attention ---\n x ",python,selection_mouse +1631,2522906,"utils/nn.py",1452,48," # --- Temporal attention ---\n x =",python,selection_mouse +1632,2522907,"utils/nn.py",1452,50," # --- Temporal attention ---\n x = x",python,selection_mouse +1633,2522917,"utils/nn.py",1452,96," # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding",python,selection_mouse +1634,2522951,"utils/nn.py",1452,134," # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm",python,selection_mouse +1635,2522994,"utils/nn.py",1452,135," # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(",python,selection_mouse +1636,2522995,"utils/nn.py",1452,166," # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm()(z)\n causal_mask = jnp.",python,selection_mouse +1637,2523038,"utils/nn.py",1452,169," # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm()(z)\n causal_mask = jnp.tri",python,selection_mouse +1638,2523039,"utils/nn.py",1452,170," # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm()(z)\n causal_mask = jnp.tri(",python,selection_mouse +1639,2523042,"utils/nn.py",1452,171," # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm()(z)\n causal_mask = jnp.tri(z",python,selection_mouse +1640,2523083,"utils/nn.py",1452,177," # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm()(z)\n causal_mask = jnp.tri(z.shape",python,selection_mouse +1641,2523095,"utils/nn.py",1452,217," # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm()(z)\n causal_mask = jnp.tri(z.shape[-2])\n z = nn.MultiHeadAttention(",python,selection_mouse +1642,2533008,"models/dynamics.py",0,0,"",python,tab +1643,2630538,"models/dynamics.py",0,0,"",python,tab +1644,2630539,"models/dynamics.py",776,0,"",python,selection_mouse +1645,2632735,"models/dynamics.py",1004,0,"",python,selection_mouse +1646,2632738,"models/dynamics.py",1003,0,"",python,selection_command +1647,2653392,"models/dynamics.py",983,0,"",python,selection_mouse +1648,2653398,"models/dynamics.py",982,0,"",python,selection_command +1649,2657438,"models/dynamics.py",983,0,"\n ",python,content +1650,2657818,"models/dynamics.py",992,0,"j",python,content +1651,2657819,"models/dynamics.py",993,0,"",python,selection_keyboard +1652,2657926,"models/dynamics.py",993,0,"a",python,content +1653,2657927,"models/dynamics.py",994,0,"",python,selection_keyboard +1654,2658154,"models/dynamics.py",994,0,"x",python,content 
+1655,2658155,"models/dynamics.py",995,0,"",python,selection_keyboard +1656,2658289,"models/dynamics.py",995,0,".",python,content +1657,2658290,"models/dynamics.py",996,0,"",python,selection_keyboard +1658,2658488,"models/dynamics.py",996,0,"d",python,content +1659,2658489,"models/dynamics.py",997,0,"",python,selection_keyboard +1660,2658625,"models/dynamics.py",997,0,"e",python,content +1661,2658626,"models/dynamics.py",998,0,"",python,selection_keyboard +1662,2658710,"models/dynamics.py",998,0,"b",python,content +1663,2658711,"models/dynamics.py",999,0,"",python,selection_keyboard +1664,2658831,"models/dynamics.py",999,0,"u",python,content +1665,2658832,"models/dynamics.py",1000,0,"",python,selection_keyboard +1666,2658923,"models/dynamics.py",1000,0,"g",python,content +1667,2658924,"models/dynamics.py",1001,0,"",python,selection_keyboard +1668,2659271,"models/dynamics.py",996,5,"debug",python,content +1669,2659896,"models/dynamics.py",1001,0,".",python,content +1670,2659898,"models/dynamics.py",1002,0,"",python,selection_keyboard +1671,2660325,"models/dynamics.py",1002,0,"b",python,content +1672,2660326,"models/dynamics.py",1003,0,"",python,selection_keyboard +1673,2660369,"models/dynamics.py",1003,0,"r",python,content +1674,2660370,"models/dynamics.py",1004,0,"",python,selection_keyboard +1675,2660546,"models/dynamics.py",1004,0,"e",python,content +1676,2660548,"models/dynamics.py",1005,0,"",python,selection_keyboard +1677,2660681,"models/dynamics.py",1005,0,"a",python,content +1678,2660683,"models/dynamics.py",1006,0,"",python,selection_keyboard +1679,2660769,"models/dynamics.py",1006,0,"k",python,content +1680,2660771,"models/dynamics.py",1007,0,"",python,selection_keyboard +1681,2661129,"models/dynamics.py",1002,5,"breakpoint",python,content +1682,2661940,"models/dynamics.py",1012,0,"()",python,content +1683,2661940,"models/dynamics.py",1013,0,"",python,selection_keyboard +1684,2662031,"models/dynamics.py",1013,1,")",python,content +1685,2662031,"models/dynamics.py",1014,0,"",python,selection_keyboard +1686,2662130,"models/dynamics.py",1013,0,"",python,selection_command +1687,2665098,"models/dynamics.py",1007,0,"",python,selection_mouse +1688,2665632,"models/dynamics.py",984,31,"",python,content +1689,2665634,"models/dynamics.py",992,0,"",python,selection_command +1690,2665713,"models/dynamics.py",1013,0,"",python,selection_command +1691,2665891,"models/dynamics.py",1074,0,"",python,selection_command +1692,2666026,"models/dynamics.py",1147,0,"",python,selection_command +1693,2666204,"models/dynamics.py",1226,0,"",python,selection_command +1694,2666352,"models/dynamics.py",1270,0,"",python,selection_command +1695,2666517,"models/dynamics.py",1359,0,"",python,selection_command +1696,2666667,"models/dynamics.py",1373,0,"",python,selection_command +1697,2666827,"models/dynamics.py",1389,0,"",python,selection_command +1698,2667591,"models/dynamics.py",1373,0,"",python,selection_command +1699,2667787,"models/dynamics.py",1388,0,"\n jax.debug.breakpoint()",python,content +1700,2667800,"models/dynamics.py",1397,0,"",python,selection_command +1701,2671822,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"",shellscript,tab +1702,2680369,"TERMINAL",0,0,"^DERROR:2025-06-30 23:28:27,652:jax._src.debugging:96: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 94, in debug_callback_impl\r\n callback(*args)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 334, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py"", line 36, in exit\r\n self._orig_exit(orig_code) # type: ignore\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 94, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 334, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py"", line 36, in exit\r\n self._orig_exit(orig_code) # type: ignore\r\nSystemExit: 0\r\n",,terminal_output +1703,2681667,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run dynamics-tiny-overfit-big-lr-0000 
at: https://wandb.ai/instant-uv/jafar/runs/ts3e7iky\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250630_232107-ts3e7iky/logs\r\n",,terminal_output +1704,2681937,"sample.py",0,0,"",python,tab +1705,2683034,"TERMINAL",0,0,"]0;tum_cte0515@hkn0732:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0732 jafar]$ ",,terminal_output +1706,2683140,"genie.py",0,0,"",python,tab +1707,2685552,"genie.py",3083,0,"",python,selection_mouse +1708,2688948,"TERMINAL",0,0,"sh scripts_horeka/train_dynamics.sh ",,terminal_output +1709,2689786,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +1710,2689980,"TERMINAL",0,0,"SLURM_STEP_NUM_TASKS=1\r\nSLURM_JOB_USER=tum_cte0515\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_JOB_UID=999226\r\nSLURM_TASK_PID=2303099\r\nSLURM_JOB_GPUS=2\r\nSLURM_LOCALID=0\r\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar\r\nSLURMD_NODENAME=hkn0732\r\nSLURM_JOB_START_TIME=1751316385\r\nSLURM_STEP_NODELIST=hkn0732\r\nSLURM_CLUSTER_NAME=hk\r\nSLURM_JOB_END_TIME=1751319985\r\nSLURM_PMI2_SRUN_PORT=35705\r\nSLURM_CPUS_ON_NODE=6\r\nSLURM_JOB_CPUS_PER_NODE=6\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_GTIDS=0\r\nSLURM_JOB_PARTITION=accelerated\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_STEPID=4294967290\r\nSLURM_JOBID=3307524\r\nSLURM_PTY_PORT=32803\r\nSLURM_JOB_QOS=normal\r\nSLURM_LAUNCH_NODE_IPADDR=10.0.7.198\r\nSLURM_PTY_WIN_ROW=33\r\nSLURM_PMI2_PROC_MAPPING=(vector,(0,1,1))\r\nSLURMD_DEBUG=2\r\nSLURM_PROCID=0\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e9.hkn0732\r\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\r\nSLURM_SRUN_COMM_HOST=10.0.7.198\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_MEM_PER_NODE=51200\r\nSLURM_PTY_WIN_COL=143\r\nSLURM_NODELIST=hkn0732\r\nSLURM_SRUN_COMM_PORT=34929\r\nSLURM_STEP_ID=4294967290\r\nSLURM_JOB_ACCOUNT=hk-project-p0023960\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_NPROCS=1\r\nSLURM_NNODES=1\r\nSLURM_SUBMIT_HOST=hkn1990.localdomain\r\nSLURM_JOB_ID=3307524\r\nSLURM_NODEID=0\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_MPI_TYPE=pmi2\r\nSLURM_PMI2_STEP_NODES=hkn0732\r\nSLURM_CONF=/etc/slurm/slurm.conf\r\nSLURM_JOB_NAME=interactive\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_STEP_LAUNCHER_PORT=34929\r\nSLURM_JOB_GID=502226\r\nSLURM_JOB_NODELIST=hkn0732\r\n",,terminal_output +1711,2691843,"TERMINAL",0,0,"2025-06-30 23:28:39.154581: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751318919.167768 2318665 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751318919.172191 2318665 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1751318919.184319 2318665 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751318919.184337 2318665 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751318919.184339 2318665 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751318919.184341 2318665 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output +1712,2694020,"TERMINAL",0,0,"W0000 00:00:1751318921.294984 2318665 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output +1713,2694335,"TERMINAL",0,0,"Running on 1 devices.\r\n",,terminal_output +1714,2695061,"TERMINAL",0,0,"wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n",,terminal_output +1715,2695583,"TERMINAL",0,0,"wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250630_232842-bjhkh5c8\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-tiny-overfit-big-lr-0000\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/bjhkh5c8\r\n",,terminal_output +1716,2697015,"TERMINAL",0,0,"2025-06-30 23:28:44.306370: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1717,2709954,"TERMINAL",0,0,"2025-06-30 23:28:57.231617: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +1718,2722202,"TERMINAL",0,0,"Entering jdb:\r\n(jdb) ",,terminal_output +1719,2723612,"TERMINAL",0,0,"l",,terminal_output +1720,2723897,"TERMINAL",0,0,"\r\n> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/dynamics.py(47)\r\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\r\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\r\n mask = mask.at[:, 0].set(False)\r\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\r\n else:\r\n mask = None\r\n-> jax.debug.breakpoint()\r\n \r\n # --- Predict transition ---\r\n act_embed = self.action_up(batch[""latent_actions""])\r\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\r\n logits = self.dynamics(vid_embed)\r\n(jdb) ",,terminal_output +1721,2728384,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +1722,2728478,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +1723,2728610,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +1724,2728913,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +1725,2729227,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1726,2729323,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +1727,2729602,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +1728,2729689,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1729,2729819,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +1730,2729952,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +1731,2730193,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1732,2730246,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +1733,2730510,"TERMINAL",0,0,"[?25la[?25h[?25lp[?25h",,terminal_output +1734,2730655,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1735,2730709,"TERMINAL",0,0,"\r\n(1, 16, 920, 128)\r\n(jdb) ",,terminal_output +1736,2743541,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +1737,2743593,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +1738,2743793,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +1739,2744223,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +1740,2744495,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1741,2744650,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +1742,2744966,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +1743,2745103,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1744,2745155,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +1745,2745992,"TERMINAL",0,0,"[?25l[[?25h",,terminal_output +1746,2746548,"TERMINAL",0,0,"[?25l0[?25h",,terminal_output +1747,2747238,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +1748,2747996,"TERMINAL",0,0,"[?25l.\r[?25h",,terminal_output +1749,2748260,"TERMINAL",0,0,"[?25l,[?25h",,terminal_output +1750,2748571,"TERMINAL",0,0,"[?25l0[?25h",,terminal_output +1751,2748844,"TERMINAL",0,0,"[?25l][?25h",,terminal_output +1752,2750494,"TERMINAL",0,0,"\r\n",,terminal_output +1753,2750953,"TERMINAL",0,0,"Array([[ 0.05725611, 0.09551146, -0.00489834, ..., -0.09083664,\r\n -0.0697903 , -0.00391799],\r\n [ 0.05725611, 0.09551146, -0.00489834, ..., -0.09083664,\r\n -0.0697903 , -0.00391799],\r\n [ 0.05725611, 0.09551146, -0.00489834, ..., -0.09083664,\r\n -0.0697903 , -0.00391799],\r\n ...,\r\n [ 0.05725611, 0.09551146, -0.00489834, ..., -0.09083664,\r\n -0.0697903 , -0.00391799],\r\n [ 0.05725611, 0.09551146, -0.00489834, ..., -0.09083664,\r\n -0.0697903 , -0.00391799],\r\n [ 0.05725611, 0.09551146, -0.00489834, ..., -0.09083664,\r\n -0.0697903 , -0.00391799]], dtype=float32)\r\n(jdb) ",,terminal_output 
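Annotation: the jdb listing above shows the masking branch of models/dynamics.py being stepped through. A self-contained restatement of that logic with toy inputs (mask_token here is a stand-in for the model's learned parameter): sample a mask rate of at least mask_limit, Bernoulli-mask the patch embeddings, always leave the first frame unmasked, and splat the mask token in with jnp.where.

```python
import jax
import jax.numpy as jnp

rng1, rng2 = jax.random.split(jax.random.PRNGKey(0))
vid_embed = jax.random.normal(rng2, (1, 16, 920, 128))  # (batch, time, patches, dim)
mask_token = jnp.zeros((128,))                          # stand-in for the learned token
mask_prob = jax.random.uniform(rng1, minval=0.5)        # mask_limit = 0.5 in this run
mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])
mask = mask.at[:, 0].set(False)                         # first frame is never masked
vid_embed = jnp.where(jnp.expand_dims(mask, -1), mask_token, vid_embed)
print(vid_embed.shape)
```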
+1754,2752263,"TERMINAL",0,0,"\rvid_embed[0,0]",,terminal_output +1755,2753019,"TERMINAL",0,0,"[?25l-[?25h",,terminal_output +1756,2753640,"TERMINAL",0,0,"[?25l-\r[?25h",,terminal_output +1757,2753785,"TERMINAL",0,0,".",,terminal_output +1758,2754064,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +1759,2754357,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +1760,2754497,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +1761,2754597,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +1762,2754692,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +1763,2754790,"TERMINAL",0,0,"\r\n(920, 128)\r\n(jdb) ",,terminal_output +1764,2756099,"TERMINAL",0,0,"\rvid_embed[0,0].shape",,terminal_output +1765,2757493,"TERMINAL",0,0,"[?25le\r[?25h",,terminal_output +1766,2757769,"TERMINAL",0,0,"[?25lap\r[?25h",,terminal_output +1767,2757914,"TERMINAL",0,0,"[?25la\r[?25h",,terminal_output +1768,2758015,"TERMINAL",0,0,"\r",,terminal_output +1769,2758151,"TERMINAL",0,0,"[?25ls\r[?25h",,terminal_output +1770,2758312,"TERMINAL",0,0,"[?25l.\r[?25h",,terminal_output +1771,2758735,"TERMINAL",0,0,"[?25l]\r[?25h",,terminal_output +1772,2759387,"TERMINAL",0,0,"[?25l,[?25h",,terminal_output +1773,2759854,"TERMINAL",0,0,"[?25l0[?25h",,terminal_output +1774,2760264,"TERMINAL",0,0,"[?25l][?25h",,terminal_output +1775,2760623,"TERMINAL",0,0,"\r\nArray([ 0.05725611, 0.09551146, -0.00489834, 0.01260828, 0.05435381,\r\n -0.07198133, -0.07775287, 0.05409481, 0.02997333, 0.04513329,\r\n 0.02724726, 0.07016625, 0.0266482 , 0.00043704, 0.05048018,\r\n 0.23477305, -0.21280609, 0.09817033, 0.07657167, -0.10951705,\r\n -0.04667508, -0.08143116, 0.24134587, 0.14582449, 0.02667357,\r\n 0.02862681, -0.07047819, -0.02924404, -0.0280011 , -0.04653165,\r\n 0.04913179, 0.11870538, 0.02786516, -0.02981153, 0.01190054,\r\n 0.11647964, 0.04842164, 0.20591389, -0.16787218, 0.00283359,\r\n 0.00505792, -0.09712408, -0.0347484 , 0.03225971, -0.09866993,\r\n 0.11096629, -0.09046505, -0.0140517 , 0.03503699, 0.11512648,\r\n -0.02379899, -0.04026883, 0.03255694, -0.09811404, 0.00511647,\r\n -0.01332523, -0.04000858, 0.02270413, 0.07081898, 0.05539022,\r\n -0.0611615 , -0.08200137, 0.15767165, 0.0129967 , 0.07423519,\r\n -0.01111614, -0.06456082, 0.04739142, -0.04031334, 0.01418943,\r\n 0.01895791, 0.08658207, 0.11832447, -0.03598715, 0.2079043 ,\r\n 0.07268324, -0.03344419, -0.0061969 , 0.00807034, 0.15595683,\r\n -0.0384468 , 0.03934486, -0.00418254, -0.12929118, 0.07121508,\r\n -0.05045069, -0.05637243, 0.06838017, -0.0856661 , -0.22436744,\r\n -0.03157925, -0.11812835, 0.13658975, 0.15003507, 0.07303645,\r\n -0.19493134, -0.07597115, -0.10575026, 0.04326085, -0.05067817,\r\n 0.10927688, -0.0358252 , -0.16355576, 0.01243466, -0.01344708,\r\n 0.04452743, 0.02736073, -0.06833841, -0.11444332, 0.09598876,\r\n -0.1212263 , -0.06760522, 0.00453666, 0.01041534, -0.15467572,\r\n -0.02522295, -0.04485739, -0.03417278, -0.05451573, -0.12252605,\r\n -0.1292875 , 0.04643333, 0.01454904, -0.05047289, 0.00826681,\r\n -0.09083664, -0.0697903 , -0.00391799], dtype=float32)\r\n(jdb) ",,terminal_output +1776,2772664,"Untitled-1",0,0,"",plaintext,tab +1777,2774678,"Untitled-1",0,0,"\n",plaintext,content +1778,2775177,"Untitled-1",0,1,"",plaintext,content +1779,2775427,"Untitled-1",0,0,"Array([ 0.05725611, 0.09551146, -0.00489834, 0.01260828, 0.05435381,\n -0.07198133, -0.07775287, 0.05409481, 0.02997333, 0.04513329,\n 0.02724726, 0.07016625, 0.0266482 , 0.00043704, 0.05048018,\n 0.23477305, -0.21280609, 0.09817033, 0.07657167, -0.10951705,\n -0.04667508, -0.08143116, 0.24134587, 
0.14582449, 0.02667357,\n 0.02862681, -0.07047819, -0.02924404, -0.0280011 , -0.04653165,\n 0.04913179, 0.11870538, 0.02786516, -0.02981153, 0.01190054,\n 0.11647964, 0.04842164, 0.20591389, -0.16787218, 0.00283359,\n 0.00505792, -0.09712408, -0.0347484 , 0.03225971, -0.09866993,\n 0.11096629, -0.09046505, -0.0140517 , 0.03503699, 0.11512648,\n -0.02379899, -0.04026883, 0.03255694, -0.09811404, 0.00511647,\n -0.01332523, -0.04000858, 0.02270413, 0.07081898, 0.05539022,\n -0.0611615 , -0.08200137, 0.15767165, 0.0129967 , 0.07423519,\n -0.01111614, -0.06456082, 0.04739142, -0.04031334, 0.01418943,\n 0.01895791, 0.08658207, 0.11832447, -0.03598715, 0.2079043 ,\n 0.07268324, -0.03344419, -0.0061969 , 0.00807034, 0.15595683,\n -0.0384468 , 0.03934486, -0.00418254, -0.12929118, 0.07121508,\n -0.05045069, -0.05637243, 0.06838017, -0.0856661 , -0.22436744,\n -0.03157925, -0.11812835, 0.13658975, 0.15003507, 0.07303645,\n -0.19493134, -0.07597115, -0.10575026, 0.04326085, -0.05067817,\n 0.10927688, -0.0358252 , -0.16355576, 0.01243466, -0.01344708,\n 0.04452743, 0.02736073, -0.06833841, -0.11444332, 0.09598876,\n -0.1212263 , -0.06760522, 0.00453666, 0.01041534, -0.15467572,\n -0.02522295, -0.04485739, -0.03417278, -0.05451573, -0.12252605,\n -0.1292875 , 0.04643333, 0.01454904, -0.05047289, 0.00826681,\n -0.09083664, -0.0697903 , -0.00391799], dtype=float32)",plaintext,content +1780,2777032,"Untitled-1",1861,0,"\n ",r,content +1781,2777188,"Untitled-1",1869,0,"\n ",r,content +1782,2777189,"Untitled-1",1862,7,"",r,content +1783,2777330,"Untitled-1",1870,0,"\n ",r,content +1784,2777331,"Untitled-1",1863,7,"",r,content +1785,2777841,"Untitled-1",1868,3,"",r,content +1786,2779464,"Untitled-1",1864,4,"",r,content +1787,2779618,"Untitled-1",1863,1,"",r,content +1788,2785562,"TERMINAL",0,0,"^DERROR:2025-06-30 23:30:12,847:jax._src.debugging:96: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 94, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 334, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py"", line 36, in exit\r\n self._orig_exit(orig_code) # type: ignore\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 94, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 334, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py"", line 36, in exit\r\n self._orig_exit(orig_code) # type: ignore\r\nSystemExit: 0\r\n",,terminal_output +1789,2786578,"TERMINAL",0,0,"wandb: \r\nwandb: 🚀 View run dynamics-tiny-overfit-big-lr-0000 at: https://wandb.ai/instant-uv/jafar/runs/bjhkh5c8\r\nwandb: Find logs at: ../../../../../hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/wandb/run-20250630_232842-bjhkh5c8/logs\r\n",,terminal_output +1790,2787817,"TERMINAL",0,0,"]0;tum_cte0515@hkn0732:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0732 jafar]$ ",,terminal_output +1791,2794743,"TERMINAL",0,0,"sh scripts_horeka/train_dynamics.sh ",,terminal_output +1792,2797417,"genie.py",0,0,"",python,tab +1793,2801463,"sample.py",0,0,"",python,tab +1794,2806354,"sample.py",3706,0,"",python,selection_mouse +1795,2808781,"sample.py",4219,0,"",python,selection_mouse +1796,2809568,"sample.py",4200,0,"",python,selection_command +1797,2809931,"sample.py",4200,0,"#",python,content +1798,2809933,"sample.py",4201,0,"",python,selection_keyboard +1799,2810023,"sample.py",4201,0," ",python,content +1800,2810024,"sample.py",4202,0,"",python,selection_keyboard +1801,2810659,"sample.py",4201,0,"",python,selection_command +1802,2811298,"sample.py",4255,0,"\n",python,content +1803,2812602,"sample.py",4256,0,"v",python,content +1804,2812602,"sample.py",4257,0,"",python,selection_keyboard +1805,2812645,"sample.py",4257,0,"i",python,content +1806,2812646,"sample.py",4258,0,"",python,selection_keyboard +1807,2812774,"sample.py",4258,0,"d",python,content 
+1808,2812775,"sample.py",4259,0,"",python,selection_keyboard +1809,2813296,"sample.py",4259,0," ",python,content +1810,2813297,"sample.py",4260,0,"",python,selection_keyboard +1811,2813512,"sample.py",4260,0,"=",python,content +1812,2813513,"sample.py",4261,0,"",python,selection_keyboard +1813,2813620,"sample.py",4261,0," ",python,content +1814,2813621,"sample.py",4262,0,"",python,selection_keyboard +1815,2814826,"sample.py",4262,0,"_",python,content +1816,2814826,"sample.py",4263,0,"",python,selection_keyboard +1817,2815268,"sample.py",4263,0,"o",python,content +1818,2815269,"sample.py",4264,0,"",python,selection_keyboard +1819,2815483,"sample.py",4264,0,"n",python,content +1820,2815484,"sample.py",4265,0,"",python,selection_keyboard +1821,2815592,"sample.py",4265,0,"e",python,content +1822,2815593,"sample.py",4266,0,"",python,selection_keyboard +1823,2816074,"sample.py",4262,4,"_oneshot_sample",python,content +1824,2817508,"sample.py",4277,0,"()",python,content +1825,2817509,"sample.py",4278,0,"",python,selection_keyboard +1826,2817703,"sample.py",4278,0,"r",python,content +1827,2817704,"sample.py",4279,0,"",python,selection_keyboard +1828,2817734,"sample.py",4279,0,"n",python,content +1829,2817734,"sample.py",4280,0,"",python,selection_keyboard +1830,2817960,"sample.py",4280,0,"g",python,content +1831,2817962,"sample.py",4281,0,"",python,selection_keyboard +1832,2818169,"sample.py",4281,0,",",python,content +1833,2818170,"sample.py",4282,0,"",python,selection_keyboard +1834,2818237,"sample.py",4282,0," ",python,content +1835,2818238,"sample.py",4283,0,"",python,selection_keyboard +1836,2818953,"sample.py",4283,0,"v",python,content +1837,2818953,"sample.py",4284,0,"",python,selection_keyboard +1838,2818955,"sample.py",4284,0,"i",python,content +1839,2818956,"sample.py",4285,0,"",python,selection_keyboard +1840,2819275,"sample.py",4285,0,"d",python,content +1841,2819276,"sample.py",4286,0,"",python,selection_keyboard +1842,2819510,"sample.py",4286,0,"e",python,content +1843,2819511,"sample.py",4287,0,"",python,selection_keyboard +1844,2819612,"sample.py",4287,0,"o",python,content +1845,2819613,"sample.py",4288,0,"",python,selection_keyboard +1846,2820654,"sample.py",4283,5,"video_batch",python,content +1847,2820966,"sample.py",4294,0,",",python,content +1848,2820967,"sample.py",4295,0,"",python,selection_keyboard +1849,2821095,"sample.py",4295,0," ",python,content +1850,2821096,"sample.py",4296,0,"",python,selection_keyboard +1851,2821991,"sample.py",4228,0,"",python,selection_mouse +1852,2823408,"sample.py",4296,0,"",python,selection_mouse +1853,2823431,"sample.py",4296,0,"a",python,content +1854,2823432,"sample.py",4297,0,"",python,selection_keyboard +1855,2823535,"sample.py",4297,0,"c",python,content +1856,2823536,"sample.py",4298,0,"",python,selection_keyboard +1857,2824915,"sample.py",4296,2,"action_batch",python,content +1858,2825685,"sample.py",4365,0,"",python,selection_mouse +1859,2826015,"sample.py",4309,0,"",python,selection_mouse +1860,2828879,"sample.py",3273,0,"",python,selection_mouse +1861,2829048,"sample.py",3272,11,"video_batch",python,selection_mouse +1862,2829758,"sample.py",3280,0,"",python,selection_mouse +1863,2829759,"sample.py",3272,11,"video_batch",python,selection_mouse +1864,2830281,"sample.py",3358,0,"",python,selection_mouse +1865,2845856,"genie.py",0,0,"",python,tab +1866,2856217,"genie.py",4341,0,"",python,selection_mouse +1867,2856745,"genie.py",4308,0,"",python,selection_mouse +1868,2856909,"genie.py",4303,8,"dynamics",python,selection_mouse 
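Annotation: the genie.py region inspected above decodes greedy token choices back to pixels through the tokenizer. A minimal sketch of that path, reusing the names and the tokenizer.decode signature visible in the recorded snippet (not runnable on its own; it needs the project's tokenizer object):

```python
import jax.numpy as jnp

def greedy_recon(token_logits, tokenizer, video_hw):
    """Pick the most likely token per patch, then decode back to pixel space."""
    mle_indices = jnp.argmax(token_logits, axis=-1)
    return tokenizer.decode(mle_indices, video_hw)  # video_hw = batch["videos"].shape[2:4]
```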
+1869,2857844,"genie.py",4343,0,"",python,selection_mouse +1870,2858007,"genie.py",4340,9,"tokenizer",python,selection_mouse +1871,2858790,"genie.py",4267,0,"",python,selection_mouse +1872,2859818,"genie.py",4271,0,"",python,selection_mouse +1873,2862062,"genie.py",4038,0,"",python,selection_mouse +1874,2867300,"genie.py",5614,0,"",python,selection_mouse +1875,2867408,"genie.py",5612,8,"dynamics",python,selection_mouse +1876,2867986,"genie.py",5607,0,"",python,selection_mouse +1877,2868117,"genie.py",5607,4,"self",python,selection_mouse +1878,2868376,"genie.py",5607,5,"self.",python,selection_mouse +1879,2868418,"genie.py",5607,13,"self.dynamics",python,selection_mouse +1880,2868449,"genie.py",5607,24,"self.dynamics.mask_token",python,selection_mouse +1881,2868828,"genie.py",5628,0,"",python,selection_mouse +1882,2868828,"genie.py",5621,10,"mask_token",python,selection_mouse +1883,2869644,"genie.py",5618,0,"",python,selection_mouse +1884,2869863,"genie.py",5612,8,"dynamics",python,selection_mouse +1885,2870673,"genie.py",5665,0,"",python,selection_mouse +1886,2870673,"genie.py",5664,0,"",python,selection_command +1887,2870737,"genie.py",5665,0,"",python,selection_mouse +1888,2870745,"genie.py",5664,0,"",python,selection_command +1889,2871529,"genie.py",5630,0,"",python,selection_mouse +1890,2871695,"genie.py",5621,10,"mask_token",python,selection_mouse +1891,2872604,"genie.py",5675,0,"",python,selection_mouse +1892,2872606,"genie.py",5674,0,"",python,selection_command +1893,2873146,"genie.py",5649,0,"",python,selection_mouse +1894,2873304,"genie.py",5648,9,"vid_embed",python,selection_mouse +1895,2873546,"genie.py",5648,12,"vid_embed[:,",python,selection_mouse +1896,2873578,"genie.py",5648,14,"vid_embed[:, -",python,selection_mouse +1897,2873579,"genie.py",5648,16,"vid_embed[:, -1]",python,selection_mouse +1898,2873619,"genie.py",5648,17,"vid_embed[:, -1],",python,selection_mouse +1899,2873631,"genie.py",5648,27,"vid_embed[:, -1],\n )",python,selection_mouse +1900,2873882,"genie.py",5675,0,"",python,selection_mouse +1901,2873894,"genie.py",5674,0,"",python,selection_command +1902,2874530,"genie.py",5675,0,"",python,selection_mouse +1903,2874533,"genie.py",5674,0,"",python,selection_command +1904,2874682,"genie.py",5674,1,")",python,selection_mouse +1905,2874685,"genie.py",5675,0,"",python,selection_command +1906,2874744,"genie.py",5652,23,"embed[:, -1],\n )",python,selection_mouse +1907,2874780,"genie.py",5607,68,"self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1908,2874780,"genie.py",5606,69," self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1909,2874822,"genie.py",5604,71," self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1910,2874823,"genie.py",5603,72," self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1911,2874861,"genie.py",5602,73," self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1912,2874893,"genie.py",5563,112," jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1913,2874893,"genie.py",5562,113," jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1914,2874985,"genie.py",5561,114," jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1915,2874985,"genie.py",5522,153," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, 
-1],\n )",python,selection_mouse +1916,2875115,"genie.py",5521,154," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1917,2875192,"genie.py",5520,155," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1918,2875246,"genie.py",5519,156," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1919,2875503,"genie.py",5518,157," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1920,2875672,"genie.py",5517,158," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1921,2876348,"genie.py",5517,0,"",python,selection_mouse +1922,2876504,"genie.py",5517,8," ",python,selection_mouse +1923,2876748,"genie.py",5517,41," curr_masked_frame = jnp.where(\n ",python,selection_mouse +1924,2876753,"genie.py",5517,43," curr_masked_frame = jnp.where(\n ",python,selection_mouse +1925,2876766,"genie.py",5517,84," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n ",python,selection_mouse +1926,2876779,"genie.py",5517,86," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n ",python,selection_mouse +1927,2876789,"genie.py",5517,127," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n ",python,selection_mouse +1928,2876799,"genie.py",5517,129," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n ",python,selection_mouse +1929,2876803,"genie.py",5517,130," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n ",python,selection_mouse +1930,2876804,"genie.py",5517,131," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n ",python,selection_mouse +1931,2876806,"genie.py",5517,158," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1932,2877285,"genie.py",5675,0,"",python,selection_mouse +1933,2877289,"genie.py",5674,0,"",python,selection_command +1934,2877438,"genie.py",5675,0,"",python,selection_mouse +1935,2877444,"genie.py",5674,0,"",python,selection_command +1936,2877633,"genie.py",5674,1,")",python,selection_mouse +1937,2877633,"genie.py",5647,27," vid_embed[:, -1],\n ",python,selection_mouse +1938,2877664,"genie.py",5675,0,"",python,selection_command +1939,2877664,"genie.py",5605,70," self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1940,2877665,"genie.py",5603,72," self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1941,2877711,"genie.py",5602,73," self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1942,2877724,"genie.py",5563,112," jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1943,2877761,"genie.py",5562,113," jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1944,2877806,"genie.py",5522,153," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1945,2877826,"genie.py",5521,154," curr_masked_frame = jnp.where(\n 
jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1946,2877853,"genie.py",5520,155," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1947,2877898,"genie.py",5519,156," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1948,2877946,"genie.py",5457,218," vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1949,2877947,"genie.py",5456,219," vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1950,2878251,"genie.py",5518,157," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1951,2878280,"genie.py",5517,158," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1952,2878915,"genie.py",5517,0,"",python,selection_mouse +1953,2878916,"genie.py",5517,8," ",python,selection_mouse +1954,2879125,"genie.py",5517,43," curr_masked_frame = jnp.where(\n ",python,selection_mouse +1955,2879162,"genie.py",5517,85," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n ",python,selection_mouse +1956,2879162,"genie.py",5517,89," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n ",python,selection_mouse +1957,2879162,"genie.py",5517,90," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n ",python,selection_mouse +1958,2879178,"genie.py",5517,94," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self",python,selection_mouse +1959,2879210,"genie.py",5517,140," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed",python,selection_mouse +1960,2879305,"genie.py",5517,158," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1961,2879506,"genie.py",5675,0,"",python,selection_mouse +1962,2879522,"genie.py",5674,0,"",python,selection_command +1963,2879678,"genie.py",5675,0,"",python,selection_mouse +1964,2879684,"genie.py",5674,0,"",python,selection_command +1965,2879868,"genie.py",5674,1,")",python,selection_mouse +1966,2879869,"genie.py",5675,0,"",python,selection_command +1967,2879898,"genie.py",5648,27,"vid_embed[:, -1],\n )",python,selection_mouse +1968,2879921,"genie.py",5606,69," self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1969,2879941,"genie.py",5605,70," self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1970,2879991,"genie.py",5604,71," self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1971,2880042,"genie.py",5565,110," jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1972,2880087,"genie.py",5564,111," jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1973,2880087,"genie.py",5563,112," jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1974,2880103,"genie.py",5562,113," 
jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1975,2880103,"genie.py",5522,153," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1976,2880144,"genie.py",5521,154," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1977,2880210,"genie.py",5520,155," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1978,2880239,"genie.py",5519,156," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1979,2880252,"genie.py",5518,157," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1980,2880381,"genie.py",5517,158," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +1981,2881175,"genie.py",5517,38," curr_masked_frame = jnp.where(",python,selection_command +1982,2884239,"TERMINAL",0,0,"bash",,terminal_focus +1983,2886942,"TERMINAL",0,0,"ls gifs",,terminal_command +1984,2886979,"TERMINAL",0,0,"]633;E;2025-06-30 23:31:54 ls gifs;9a6fb2c6-f596-403e-97b8-ced5ab6848ce]633;C",,terminal_output +1985,2887051,"TERMINAL",0,0,"generation_1750863858.4915645.gif generation_1751035879.4153903.gif generation_1751303075.601022.gif generation_overfit-sample-big-lr.gif generation_overfit-sample-mid-lr.gif\r\ngeneration_1751035086.6566052.gif generation_1751302525.3066723.gif generation_1751307347.4951653.gif generation_overfit-sample-low-lr.gif sample-maskgit-steps-1.gif\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1986,2892738,"genie.py",5220,0,"",python,selection_mouse +1987,2893922,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0732 jafar]$ sh scripts_horeka/train_dynamics.sh ",,terminal_output +1988,2925517,"genie.py",0,0,"",python,tab +1989,2939703,"genie.py",6155,0,"",python,selection_mouse +1990,2940229,"genie.py",5866,0,"",python,selection_mouse +1991,2940741,"genie.py",6121,0,"",python,selection_mouse +1992,2941287,"genie.py",6080,0,"",python,selection_mouse +1993,2945046,"sample.py",0,0,"",python,tab +1994,2951889,"sample.py",3051,0,"",python,selection_mouse +1995,2952033,"genie.py",0,0,"",python,tab +1996,2955385,"genie.py",3773,0,"",python,selection_mouse +1997,2955511,"genie.py",3771,9,"init_mask",python,selection_mouse +1998,2955717,"genie.py",3771,74,"init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry",python,selection_mouse +1999,2955760,"genie.py",3771,96,"init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch",python,selection_mouse +2000,2955804,"genie.py",3771,131,"init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs",python,selection_mouse +2001,2955845,"genie.py",3771,154,"init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask",python,selection_mouse +2002,2955845,"genie.py",3771,155,"init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,",python,selection_mouse +2003,2955846,"genie.py",3771,179,"init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = 
(\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,",python,selection_mouse +2004,2955904,"genie.py",3771,206,"init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,",python,selection_mouse +2005,2956214,"genie.py",3977,0,"",python,selection_mouse +2006,2956248,"genie.py",3976,0,"",python,selection_command +2007,2956388,"genie.py",3977,0,"",python,selection_mouse +2008,2956431,"genie.py",3976,0,"",python,selection_command +2009,2956568,"genie.py",3976,1,",",python,selection_mouse +2010,2956601,"genie.py",3977,0,"",python,selection_command +2011,2956601,"genie.py",3963,14,"action_tokens,",python,selection_mouse +2012,2956627,"genie.py",3939,38,"token_idxs,\n action_tokens,",python,selection_mouse +2013,2956628,"genie.py",3916,61,"init_mask,\n token_idxs,\n action_tokens,",python,selection_mouse +2014,2956670,"genie.py",3888,89,"new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,",python,selection_mouse +2015,2956714,"genie.py",3862,115,"batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,",python,selection_mouse +2016,2956715,"genie.py",3861,116," batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,",python,selection_mouse +2017,2956760,"genie.py",3835,142,"init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,",python,selection_mouse +2018,2956844,"genie.py",3770,207," init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,",python,selection_mouse +2019,2956844,"genie.py",3769,208," init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,",python,selection_mouse +2020,2956884,"genie.py",3768,209," init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,",python,selection_mouse +2021,2956930,"genie.py",3767,210," init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,",python,selection_mouse +2022,2957354,"genie.py",3767,0,"",python,selection_mouse +2023,2957355,"genie.py",3763,8," ",python,selection_mouse +2024,2957611,"genie.py",3763,70," init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n ",python,selection_mouse +2025,2957612,"genie.py",3763,94," init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n ",python,selection_mouse +2026,2957612,"genie.py",3763,96," init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n ",python,selection_mouse +2027,2957654,"genie.py",3763,124," init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n ",python,selection_mouse +2028,2957654,"genie.py",3763,139," init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs",python,selection_mouse +2029,2957657,"genie.py",3763,162," init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask",python,selection_mouse +2030,2957701,"genie.py",3763,186," init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs",python,selection_mouse +2031,2957713,"genie.py",3763,213," 
init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens",python,selection_mouse +2032,2957745,"genie.py",3763,224," init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )",python,selection_mouse +2033,2957838,"genie.py",3763,254," init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan",python,selection_mouse +2034,2958247,"genie.py",3763,224," init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )",python,selection_mouse +2035,2958600,"genie.py",3987,0,"",python,selection_mouse +2036,2958606,"genie.py",3986,0,"",python,selection_command +2037,2961791,"genie.py",3839,0,"",python,selection_mouse +2038,2961955,"genie.py",3835,10,"init_carry",python,selection_mouse +2039,2964949,"genie.py",4035,0,"",python,selection_mouse +2040,2968770,"genie.py",5347,0,"",python,selection_mouse +2041,2968918,"genie.py",5340,14,"vid_token_idxs",python,selection_mouse +2042,2969477,"genie.py",5368,0,"",python,selection_mouse +2043,2969630,"genie.py",5361,11,"concatenate",python,selection_mouse +2044,2971941,"genie.py",5483,0,"",python,selection_mouse +2045,2972700,"genie.py",5539,0,"",python,selection_mouse +2046,2972836,"genie.py",5525,17,"curr_masked_frame",python,selection_mouse +2047,2973658,"genie.py",5554,0,"",python,selection_mouse +2048,2973785,"genie.py",5549,5,"where",python,selection_mouse +2049,2974416,"genie.py",5577,0,"",python,selection_mouse +2050,2974571,"genie.py",5572,11,"expand_dims",python,selection_mouse +2051,2974724,"genie.py",5556,39," jnp.expand_dims(mask, -1),\n",python,selection_mouse +2052,2975469,"genie.py",5615,0,"",python,selection_mouse +2053,2976219,"genie.py",5655,0,"",python,selection_mouse +2054,2976385,"genie.py",5648,9,"vid_embed",python,selection_mouse +2055,2976993,"genie.py",5665,0,"",python,selection_mouse +2056,2977019,"genie.py",5664,0,"",python,selection_command +2057,2977157,"genie.py",5665,0,"",python,selection_mouse +2058,2977161,"genie.py",5664,0,"",python,selection_command +2059,2977372,"genie.py",5664,1,",",python,selection_mouse +2060,2977372,"genie.py",5664,0,"",python,selection_mouse +2061,2977373,"genie.py",5661,3,"-1]",python,selection_mouse +2062,2977373,"genie.py",5659,5,", -1]",python,selection_mouse +2063,2977373,"genie.py",5657,7,"[:, -1]",python,selection_mouse +2064,2977375,"genie.py",5665,0,"",python,selection_command +2065,2977396,"genie.py",5612,53,"dynamics.mask_token[0],\n vid_embed[:, -1],",python,selection_mouse +2066,2977423,"genie.py",5607,58,"self.dynamics.mask_token[0],\n vid_embed[:, -1],",python,selection_mouse +2067,2977532,"genie.py",5606,59," self.dynamics.mask_token[0],\n vid_embed[:, -1],",python,selection_mouse +2068,2977550,"genie.py",5566,99," jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],",python,selection_mouse +2069,2977550,"genie.py",5565,100," jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],",python,selection_mouse +2070,2977580,"genie.py",5564,101," jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],",python,selection_mouse +2071,2977688,"genie.py",5563,102," jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n 
vid_embed[:, -1],",python,selection_mouse +2072,2977697,"genie.py",5524,141," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],",python,selection_mouse +2073,2978121,"genie.py",5524,0,"",python,selection_mouse +2074,2984500,"genie.py",6060,0,"",python,selection_mouse +2075,2985529,"genie.py",6071,0,"",python,selection_mouse +2076,2986082,"genie.py",6082,0,"",python,selection_mouse +2077,2986678,"genie.py",6075,0,"",python,selection_mouse +2078,2986810,"genie.py",6073,8,"dynamics",python,selection_mouse +2079,2987397,"genie.py",6085,0,"",python,selection_mouse +2080,2987543,"genie.py",6082,8,"dynamics",python,selection_mouse +2081,2988062,"genie.py",6097,0,"",python,selection_mouse +2082,2988240,"genie.py",6091,9,"vid_embed",python,selection_mouse +2083,2988933,"genie.py",6103,0,"",python,selection_mouse +2084,2989078,"genie.py",6100,4,")[:,",python,selection_mouse +2085,2990124,"genie.py",6102,0,"",python,selection_mouse +2086,2991111,"genie.py",6107,0,"",python,selection_mouse +2087,2991293,"genie.py",6106,1,"1",python,selection_mouse +2088,2991485,"genie.py",6105,2,"-1",python,selection_mouse +2089,2991486,"genie.py",6103,4,", -1",python,selection_mouse +2090,2991520,"genie.py",6102,5,":, -1",python,selection_mouse +2091,2991567,"genie.py",6101,6,"[:, -1",python,selection_mouse +2092,2992045,"genie.py",6101,0,"",python,selection_mouse +2093,2993464,"genie.py",6101,1,"[",python,selection_mouse +2094,2993938,"genie.py",6102,0,"",python,selection_mouse +2095,3006459,"genie.py",6197,0,"",python,selection_mouse +2096,3006571,"genie.py",6190,13,"sample_argmax",python,selection_mouse +2097,3027427,"genie.py",4423,0,"",python,selection_mouse +2098,3027576,"genie.py",4414,13,"sample_argmax",python,selection_mouse +2099,3029612,"genie.py",3405,0,"",python,selection_mouse +2100,3029792,"genie.py",3403,5,"False",python,selection_mouse +2101,3030456,"genie.py",3423,0,"",python,selection_mouse +2102,3030464,"genie.py",3422,0,"",python,selection_command +2103,3031004,"genie.py",3390,0,"",python,selection_mouse +2104,3031158,"genie.py",3381,13,"sample_argmax",python,selection_mouse +2105,3032056,"sample.py",0,0,"",python,tab +2106,3033895,"sample.py",3014,0,"",python,selection_mouse +2107,3033990,"sample.py",3008,13,"sample_argmax",python,selection_mouse +2108,3037810,"genie.py",0,0,"",python,tab +2109,3068408,"genie.py",4478,0,"",python,selection_mouse +2110,3068528,"genie.py",4472,11,"final_carry",python,selection_mouse +2111,3070331,"genie.py",4487,0,"",python,selection_mouse +2112,3070853,"genie.py",4494,0,"",python,selection_mouse +2113,3071013,"genie.py",4489,7,"loop_fn",python,selection_mouse +2114,3071953,"genie.py",4504,0,"",python,selection_mouse +2115,3072086,"genie.py",4497,10,"init_carry",python,selection_mouse +2116,3072909,"genie.py",4545,0,"",python,selection_mouse +2117,3073057,"genie.py",4536,14,"new_frame_idxs",python,selection_mouse +2118,3073923,"genie.py",4600,0,"",python,selection_mouse +2119,3074506,"genie.py",4561,0,"",python,selection_mouse +2120,3074674,"genie.py",4553,11,"final_carry",python,selection_mouse +2121,3076797,"genie.py",4269,0,"",python,selection_mouse +2122,3078962,"genie.py",4687,0,"",python,selection_mouse +2123,3079554,"genie.py",4679,0,"",python,selection_mouse +2124,3079686,"genie.py",4678,8,"video_hw",python,selection_mouse +2125,3079905,"genie.py",4678,14,"video_hw=batch",python,selection_mouse +2126,3079932,"genie.py",4678,15,"video_hw=batch[",python,selection_mouse 
+2127,3079948,"genie.py",4678,22,"video_hw=batch[""videos",python,selection_mouse +2128,3080034,"genie.py",4678,25,"video_hw=batch[""videos""].",python,selection_mouse +2129,3080035,"genie.py",4678,30,"video_hw=batch[""videos""].shape",python,selection_mouse +2130,3080073,"genie.py",4678,31,"video_hw=batch[""videos""].shape[",python,selection_mouse +2131,3080081,"genie.py",4678,32,"video_hw=batch[""videos""].shape[2",python,selection_mouse +2132,3080098,"genie.py",4678,33,"video_hw=batch[""videos""].shape[2:",python,selection_mouse +2133,3080124,"genie.py",4678,34,"video_hw=batch[""videos""].shape[2:4",python,selection_mouse +2134,3080130,"genie.py",4678,35,"video_hw=batch[""videos""].shape[2:4]",python,selection_mouse +2135,3080171,"genie.py",4678,36,"video_hw=batch[""videos""].shape[2:4],",python,selection_mouse +2136,3080219,"genie.py",4678,46,"video_hw=batch[""videos""].shape[2:4],\n )",python,selection_mouse +2137,3081392,"genie.py",4700,0,"",python,selection_mouse +2138,3081866,"genie.py",4724,0,"",python,selection_mouse +2139,3081873,"genie.py",4723,0,"",python,selection_command +2140,3082059,"genie.py",4723,1,")",python,selection_mouse +2141,3082065,"genie.py",4724,0,"",python,selection_command +2142,3082113,"genie.py",4677,47," video_hw=batch[""videos""].shape[2:4],\n )",python,selection_mouse +2143,3082114,"genie.py",4673,51," video_hw=batch[""videos""].shape[2:4],\n )",python,selection_mouse +2144,3082132,"genie.py",4669,55," video_hw=batch[""videos""].shape[2:4],\n )",python,selection_mouse +2145,3082162,"genie.py",4619,105," jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )",python,selection_mouse +2146,3082179,"genie.py",4618,106," jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )",python,selection_mouse +2147,3082466,"genie.py",4568,156," new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )",python,selection_mouse +2148,3083212,"genie.py",4724,0,"",python,selection_mouse +2149,3083232,"genie.py",4723,0,"",python,selection_command +2150,3083828,"genie.py",4724,0,"",python,selection_mouse +2151,3083841,"genie.py",4723,0,"",python,selection_command +2152,3084018,"genie.py",4723,1,")",python,selection_mouse +2153,3084048,"genie.py",4724,0,"",python,selection_command +2154,3084049,"genie.py",4676,48," video_hw=batch[""videos""].shape[2:4],\n )",python,selection_mouse +2155,3084090,"genie.py",4624,100," jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )",python,selection_mouse +2156,3084091,"genie.py",4622,102," jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )",python,selection_mouse +2157,3084091,"genie.py",4621,103," jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )",python,selection_mouse +2158,3084157,"genie.py",4620,104," jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )",python,selection_mouse +2159,3084191,"genie.py",4619,105," jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )",python,selection_mouse +2160,3084199,"genie.py",4569,155," new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )",python,selection_mouse +2161,3084223,"genie.py",4568,156," new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )",python,selection_mouse 
+2162,3084362,"genie.py",4528,196," new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )",python,selection_mouse +2163,3084856,"genie.py",4568,156," new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )",python,selection_mouse +2164,3086030,"genie.py",4679,0,"",python,selection_mouse +2165,3100250,"genie.py",6253,0,"",python,selection_mouse +2166,3100417,"genie.py",6249,12,"final_logits",python,selection_mouse +2167,3148405,"genie.py",6788,0,"",python,selection_mouse +2168,3148542,"genie.py",6787,5,"where",python,selection_mouse +2169,3149090,"genie.py",6837,0,"",python,selection_mouse +2170,3150545,"genie.py",6801,0,"",python,selection_mouse +2171,3151030,"genie.py",6795,0,"",python,selection_mouse +2172,3151201,"genie.py",6793,4,"mask",python,selection_mouse +2173,3152146,"genie.py",6771,0,"",python,selection_mouse +2174,3152295,"genie.py",6766,14,"new_token_idxs",python,selection_mouse +2175,3162618,"genie.py",6794,0,"",python,selection_mouse +2176,3162965,"genie.py",6793,4,"mask",python,selection_mouse +2177,3164796,"genie.py",6707,0,"",python,selection_mouse +2178,3164956,"genie.py",6695,17,"final_token_probs",python,selection_mouse +2179,3171757,"genie.py",6721,0,"",python,selection_mouse +2180,3171774,"genie.py",6720,0,"",python,selection_command +2181,3171921,"genie.py",6720,1,"k",python,selection_mouse +2182,3171922,"genie.py",6720,0,"",python,selection_mouse +2183,3171922,"genie.py",6710,10,"bs += ~mas",python,selection_mouse +2184,3171924,"genie.py",6618,102,"_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mas",python,selection_mouse +2185,3171964,"genie.py",6721,0,"",python,selection_command +2186,3171977,"genie.py",6614,107,"oken_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask",python,selection_mouse +2187,3171978,"genie.py",6610,111,"al_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask",python,selection_mouse +2188,3171978,"genie.py",6608,113,"inal_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask",python,selection_mouse +2189,3172008,"genie.py",6607,114,"final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask",python,selection_mouse +2190,3172023,"genie.py",6604,117," final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask",python,selection_mouse +2191,3172051,"genie.py",6603,118," final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask",python,selection_mouse +2192,3172052,"genie.py",6602,119," final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask",python,selection_mouse +2193,3172097,"genie.py",6601,120," final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask",python,selection_mouse +2194,3172275,"genie.py",6600,121," final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask",python,selection_mouse +2195,3172634,"genie.py",6599,122," final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask",python,selection_mouse 
+2196,3173721,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0732 jafar]$ sh scripts_horeka/train_dynamics.sh ",,terminal_output +2197,3243600,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0732 jafar]$ s\r\n\rh scripts_horeka/train_dynamics.sh ",,terminal_output +2198,3243934,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0732 jafar]$ s\r\n\rh scripts_horeka/train_dynamics.sh ",,terminal_output +2199,3244983,"genie.py",6707,0,"",python,selection_mouse +2200,3245102,"genie.py",6695,17,"final_token_probs",python,selection_mouse +2201,3263164,"genie.py",7074,0,"",python,selection_mouse +2202,3263334,"genie.py",7063,17,"final_token_probs",python,selection_mouse +2203,3268994,"genie.py",7218,0,"",python,selection_mouse +2204,3269159,"genie.py",7205,14,"mask_update_fn",python,selection_mouse +2205,3276263,"genie.py",7239,0,"",python,selection_mouse +2206,3276488,"genie.py",7232,7,"_idxs)\n",python,selection_mouse +2207,3276489,"genie.py",7224,15,", sorted_idxs)\n",python,selection_mouse +2208,3276489,"genie.py",7141,98,"(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2209,3276490,"genie.py",7134,105,"ax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2210,3276490,"genie.py",7054,185,".argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2211,3276490,"genie.py",7051,188,"jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2212,3276494,"genie.py",7232,7,"_idxs)\n",python,selection_command +2213,3276519,"genie.py",7049,190,"= jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2214,3276561,"genie.py",6965,274,"= jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2215,3276562,"genie.py",6964,275," = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2216,3276610,"genie.py",6963,276,"k = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2217,3276651,"genie.py",6962,277,"sk = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2218,3276652,"genie.py",6961,278,"ask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = 
jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2219,3276738,"genie.py",6960,279,"mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2220,3276739,"genie.py",6958,281,"x_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2221,3276739,"genie.py",6957,282,"dx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2222,3276746,"genie.py",6956,283,"idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2223,3276747,"genie.py",6953,286," idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2224,3276762,"genie.py",6952,287," idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2225,3276775,"genie.py",6950,289," idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2226,3276806,"genie.py",6869,370," num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2227,3276817,"genie.py",6868,371," num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n",python,selection_mouse +2228,3357168,"genie.py",7349,0,"",python,selection_mouse +2229,3357188,"genie.py",7348,0,"",python,selection_command +2230,3357350,"genie.py",7348,1,"e",python,selection_mouse +2231,3357350,"genie.py",7053,295,"p.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n 
new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, Non",python,selection_mouse +2232,3357351,"genie.py",6731,617," Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, Non",python,selection_mouse +2233,3357394,"genie.py",7349,0,"",python,selection_command +2234,3357414,"genie.py",6376,973," step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2235,3357481,"genie.py",6286,1063," rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2236,3357482,"genie.py",6272,1077," else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask 
= mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2237,3357694,"genie.py",6205,1144," sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2238,3357695,"genie.py",6174,1175," if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2239,3357695,"genie.py",6122,1227," # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2240,3358283,"genie.py",6045,1304," final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if 
self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2241,3358284,"genie.py",5983,1366," step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2242,3358285,"genie.py",5777,1572," act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, 
sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2243,3358285,"genie.py",5666,1683," )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2244,3358285,"genie.py",5517,1832," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2245,3358286,"genie.py",5332,2017," vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n 
self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2246,3358286,"genie.py",5249,2100," B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2247,3358316,"genie.py",5095,2254," steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = 
carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2248,3358352,"genie.py",4968,2381,"\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - 
unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2249,3358384,"genie.py",4757,2592,"\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2250,3358410,"genie.py",4618,2731," jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = 
self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2251,3358442,"genie.py",4454,2895," )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = 
gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2252,3358466,"genie.py",4388,2961," sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2253,3358489,"genie.py",4351,2998," temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n 
jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2254,3358534,"genie.py",4528,2821," new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = 
self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2255,3358577,"genie.py",4818,2531," # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, 
new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2256,3358578,"genie.py",5048,2301," temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2257,3358633,"genie.py",5127,2222," def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = 
jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2258,3358635,"genie.py",5250,2099," B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2259,3358635,"genie.py",5288,2061,"\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = 
gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2260,3358651,"genie.py",5293,2056," # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2261,3358678,"genie.py",5337,2012," vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n 
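# ---- editor's sketch (not part of the recording) ---------------------------
# Why: the next two lines gather, for every (batch, token) position, the
# softmax probability of the id that was just sampled; the nested vmap maps
# first over the batch axis and then over the token axis. The toy shapes
# below (B=2, N=3, V=4) are assumptions for illustration only.
import jax
import jax.numpy as jnp

probs = jax.nn.softmax(jnp.arange(24, dtype=jnp.float32).reshape(2, 3, 4))
ids = jnp.array([[0, 2, 1], [3, 3, 0]])              # sampled ids, shape (B, N)
gather = jax.vmap(jax.vmap(lambda x, y: x[y]))       # (B, N, V), (B, N) -> (B, N)
picked = gather(probs, ids)
# jnp.take_along_axis is an equivalent alternative:
assert jnp.allclose(picked, jnp.take_along_axis(probs, ids[..., None], -1)[..., 0])
# The later `final_token_probs += ~mask` then lifts already-frozen tokens above
# any probability, so the confidence ranking can never re-mask them.
# -----------------------------------------------------------------------------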
gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2262,3358683,"genie.py",5380,1969," (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2263,3358693,"genie.py",5381,1968," (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: 
x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2264,3358791,"genie.py",5452,1897," )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2265,3358824,"genie.py",5451,1898," )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = 
jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2266,3358841,"genie.py",5378,1971," (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2267,3358869,"genie.py",5377,1972," (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, 
final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2268,3358890,"genie.py",5376,1973," (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2269,3358916,"genie.py",5375,1974," (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask 
---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2270,3358917,"genie.py",5374,1975," (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2271,3359092,"genie.py",5332,2017," vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n 
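# ---- editor's sketch (not part of the recording) ---------------------------
# Why: the statements below implement MaskGIT's confidence-based re-masking.
# Rank positions by confidence (descending), keep the top ranks unmasked, and
# scatter the rank mask back into token order. Standalone toy run, assuming a
# recent JAX where jnp.argsort accepts descending=True, as the surrounding
# code does:
import jax
import jax.numpy as jnp

conf = jnp.array([[0.9, 0.1, 0.6, 0.3]])                      # (B=1, N=4)
num_unmasked_tokens = 1
idx_mask = jnp.arange(conf.shape[-1]) > num_unmasked_tokens   # rank > 1 => masked
order = jnp.argsort(conf, axis=-1, descending=True)           # [[0, 2, 3, 1]]
remask = jax.vmap(lambda m, ids: m.at[ids].set(idx_mask))
new_mask = remask(jnp.ones_like(conf, dtype=bool), order)
# new_mask == [[False, True, False, True]]: tokens 0 and 2 (confidences 0.9 and
# 0.6) stay fixed; the rest are re-masked and re-sampled on the next scan step.
# -----------------------------------------------------------------------------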
num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2272,3359093,"genie.py",5289,2060," # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2273,3359093,"genie.py",5249,2100," B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += 
~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2274,3359095,"genie.py",5232,2117," step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2275,3359112,"genie.py",5161,2188," rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == 
self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2276,3359175,"genie.py",5127,2222," def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2277,3359175,"genie.py",5111,2238," @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n 
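# ---- editor's sketch (not part of the recording) ---------------------------
# Why: the cosine schedule computed below drives the whole decoding loop.
# unmasked_ratio decays from ~1 toward 0 across steps, so step_temp anneals
# *up* toward the configured temperature while N * (1 - unmasked_ratio), the
# number of tokens frozen after the step, grows toward N. Trace with assumed
# values steps=4, temperature=1.0, N=16:
import jax.numpy as jnp

steps, temperature, N = 4, 1.0, 16
for step in range(steps):
    unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (steps * 2))
    step_temp = temperature * (1.0 - unmasked_ratio)
    kept = int(jnp.round(N * (1.0 - unmasked_ratio)))
    print(step, round(float(step_temp), 3), kept)   # (0, 0.076, 1) ... (3, 1.0, 16)
# At the final step unmasked_ratio hits 0: full temperature, and every token is
# committed (the jnp.where in the sampling branch also forces argmax there).
# -----------------------------------------------------------------------------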
unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2278,3359201,"genie.py",5071,2278," sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2279,3359227,"genie.py",5048,2301," temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- 
Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2280,3359228,"genie.py",5023,2326," tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = 
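The `jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))` line in the captured code prepends a zero slice along the time axis, so the first frame receives no action embedding and each later frame is conditioned on the action of the transition leading into it. A shape-only sketch (all dimensions are assumptions for illustration):

```python
import jax.numpy as jnp

B, T, N, D = 2, 3, 5, 8  # assumed batch/time/tokens/embedding sizes
act_embed = jnp.ones((B, T, N, D))
padded = jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))
print(padded.shape)        # (2, 4, 5, 8): one zero frame prepended in time
print(padded[0, 0].sum())  # 0.0 -> the first frame carries no action signal
```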
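The nested `jax.vmap(jax.vmap(lambda x, y: x[y]))` in the capture is a per-batch, per-token gather: given softmax output of shape (B, N, V) and sampled indices of shape (B, N), it returns each sampled token's own probability with shape (B, N). A minimal sketch with dummy shapes:

```python
import jax
import jax.numpy as jnp

gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))

logits = jnp.zeros((2, 5, 7))              # (B, N, V) dummy logits
probs = jax.nn.softmax(logits)             # uniform: every entry is 1/7
idxs = jnp.zeros((2, 5), dtype=jnp.int32)  # (B, N) sampled token ids
print(gather_fn(probs, idxs))              # (2, 5) array filled with 1/7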
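The mask update in the capture keeps the most confident predictions: adding `~mask` lifts already-committed positions above any probability so they always survive the descending sort, and everything past the schedule's quota is re-masked for the next step. A toy run with hand-picked numbers (note the `>` comparison leaves `num_unmasked_tokens + 1` positions unmasked):

```python
import jax
import jax.numpy as jnp

probs = jnp.array([[0.1, 0.9, 0.3, 0.8, 0.2, 0.6]])        # (B=1, N=6)
mask = jnp.array([[True, True, False, True, True, True]])  # False = committed
probs = probs + ~mask           # committed tokens sort above everything else
num_unmasked_tokens = 3         # assumed schedule output for this step
idx_mask = jnp.arange(probs.shape[-1]) > num_unmasked_tokens
sorted_idxs = jnp.argsort(probs, axis=-1, descending=True)
new_mask = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))(mask, sorted_idxs)
print(new_mask)  # [[ True False False False  True False]]
```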
self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2312,3360674,"genie.py",4429,2920," steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = 
jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2313,3360675,"genie.py",4454,2895," )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2314,3360677,"genie.py",4464,2885," final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = 
token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2315,3360677,"genie.py",4568,2781," new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = 
jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2316,3360678,"genie.py",4569,2780," new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2317,3360678,"genie.py",4619,2730," jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return 
lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2318,3360698,"genie.py",4620,2729," jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs 
= jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2319,3360712,"genie.py",4668,2681," video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2320,3360761,"genie.py",4669,2680," video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- 
Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2321,3360783,"genie.py",4718,2631," )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if 
self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2322,3360836,"genie.py",4729,2620," return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2323,3360864,"genie.py",4757,2592,"\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], 
training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2324,3361031,"genie.py",4729,2620," return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, 
_rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2325,3361032,"genie.py",4670,2679," video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2326,3361032,"genie.py",4467,2882," final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n 
video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2327,3361033,"genie.py",4432,2917," steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed 
= self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2328,3361080,"genie.py",4351,2998," temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask 
---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2329,3361136,"genie.py",4277,3072," dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2330,3361137,"genie.py",4246,3103," loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n 
video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2331,3361137,"genie.py",4211,3138," # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n 
jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2332,3361187,"genie.py",4210,3139,"\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n 
)\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2333,3361187,"genie.py",4200,3149," )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2334,3361214,"genie.py",4150,3199," 
out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2335,3361231,"genie.py",4127,3222," in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: 
float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2336,3361257,"genie.py",4085,3264," split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n 
unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2337,3361279,"genie.py",4044,3305," variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update 
masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2338,3361301,"genie.py",4019,3330," MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2339,3361323,"genie.py",3988,3361," MaskGITLoop = nn.scan(\n MaskGITStep,\n 
variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2340,3361347,"genie.py",3978,3371," )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output 
= self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2341,3361368,"genie.py",4127,3222," in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, 
-1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2342,3361393,"genie.py",4210,3139,"\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = 
gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2343,3361420,"genie.py",4246,3103," loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2344,3361440,"genie.py",4314,3035," tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n 
final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2345,3361468,"genie.py",4352,2997," temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = 
self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2346,3361528,"genie.py",4389,2960," sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n 
final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2347,3361573,"genie.py",4352,2997," temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2348,3361573,"genie.py",4314,3035," tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n 
new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2349,3361596,"genie.py",4246,3103," loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = 
self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2350,3361616,"genie.py",4150,3199," out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = 
jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2351,3361643,"genie.py",4127,3222," in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, 
sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2352,3361661,"genie.py",4044,3305," variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2353,3361975,"genie.py",4085,3264," split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n 
def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2354,3362043,"genie.py",4044,3305," variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n 
curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2355,3362154,"genie.py",3978,3371," )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, 
axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2356,3362155,"genie.py",3904,3445," init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = 
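The sampling branch recorded here draws categorically at intermediate steps but forces argmax on the last step, so no position is committed at random once the mask must be fully resolved. A self-contained sketch with dummy logits:

import jax
import jax.numpy as jnp

rng = jax.random.PRNGKey(0)
logits = jnp.array([[2.0, 0.5, -1.0]])
step, steps = 24, 25

rng, _rng = jax.random.split(rng)
# Greedy on the final step, stochastic otherwise.
sampled = jnp.where(
    step == steps - 1,
    jnp.argmax(logits, axis=-1),
    jax.random.categorical(_rng, logits),
)
print(sampled)  # [0] on the final step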
jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2357,3362155,"genie.py",3827,3522," init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2358,3362205,"genie.py",3725,3624,"\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n 
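The jax.vmap(jax.vmap(lambda x, y: x[y])) in the payload maps plain indexing over both the batch and token axes, extracting the softmax probability of each sampled token; it behaves like jnp.take_along_axis with a trailing singleton axis. A sketch of the equivalence, using dummy arrays:

import jax
import jax.numpy as jnp

probs = jnp.arange(24.0).reshape(2, 3, 4) / 24.0  # (batch, tokens, vocab)
idxs = jnp.array([[0, 1, 2], [3, 2, 1]])          # sampled ids per token

gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))
a = gather_fn(probs, idxs)
b = jnp.take_along_axis(probs, idxs[..., None], axis=-1)[..., 0]
print(jnp.allclose(a, b))  # True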
new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2359,3362216,"genie.py",3424,3925," # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n 
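The init_carry assembled in this selection threads five things through every scan iteration: the RNG, the evolving final-frame token ids, the boolean mask, the frozen context tokens, and the frozen action codes. A sketch with dummy shapes; all sizes here are assumptions:

import jax
import jax.numpy as jnp

B, T, N = 2, 3, 16                           # batch, context frames, tokens
token_idxs = jnp.zeros((B, T, N), jnp.int32)
action_tokens = jnp.zeros((B, T, 1, 8))      # latent action codes (dims assumed)

init_carry = (
    jax.random.PRNGKey(0),                   # rng, split inside each step
    jnp.zeros_like(token_idxs)[:, 0],        # final-frame tokens, start empty
    jnp.ones_like(token_idxs, bool)[:, 0],   # every position starts masked
    token_idxs,                              # frozen context frames
    action_tokens,                           # frozen action conditioning
)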
in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2360,3362252,"genie.py",3318,4031," steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = 
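The nn.scan configuration captured here is the standard Flax way to run a sub-module as a loop body: variable_broadcast="params" shares one set of weights across all iterations, and split_rngs={"params": False} keeps the init RNG from being re-split per step (the sampling RNG travels in the carry instead). A toy scan under the same settings:

import flax.linen as nn
import jax
import jax.numpy as jnp

class Step(nn.Module):
    @nn.compact
    def __call__(self, carry, x):
        # The same Dense parameters are reused every iteration because
        # the "params" collection is broadcast, not scanned over.
        return carry + nn.Dense(4)(x), None

Loop = nn.scan(
    Step,
    variable_broadcast="params",
    split_rngs={"params": False},
    in_axes=0,
    out_axes=0,
    length=5,
)
xs = jnp.zeros((5, 3))                      # one slice per iteration
params = Loop().init(jax.random.PRNGKey(0), jnp.zeros(4), xs)
carry, _ = Loop().apply(params, jnp.zeros(4), xs)
print(carry.shape)  # (4,)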
MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2361,3362294,"genie.py",3241,4108," @nn.compact\n def sample(\n self,\n batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n 
dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2362,3362334,"genie.py",3207,4142," )\n return outputs\n\n @nn.compact\n def sample(\n self,\n batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n 
dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2363,3362381,"genie.py",2992,4357," mle_indices, batch[""videos""].shape[2:4]\n )\n jax.debug.breakpoint()\n outputs[""gt_debug""] = self.tokenizer.decode(\n tokenizer_outputs[""indices""], batch[""videos""].shape[2:4]\n )\n return outputs\n\n @nn.compact\n def sample(\n self,\n batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n 
MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2364,3362424,"genie.py",3240,4109,"\n @nn.compact\n def sample(\n self,\n batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n 
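The mask update visible in these payloads sorts per-token confidences in descending order and re-masks everything ranked past the schedule's budget. A small sketch with dummy values; note the `>` comparison keeps num_keep + 1 positions, matching the recorded code exactly:

import jax
import jax.numpy as jnp

probs = jnp.array([[0.9, 0.2, 0.6, 0.4]])   # per-token confidence
num_keep = 1                                 # budget from the cosine schedule

idx_mask = jnp.arange(probs.shape[-1]) > num_keep  # rank > budget stays masked
sorted_idxs = jnp.argsort(probs, axis=-1, descending=True)
mask = jnp.ones_like(probs, bool)
new_mask = jax.vmap(lambda m, ids: m.at[ids].set(idx_mask))(mask, sorted_idxs)
print(new_mask)  # [[False  True False  True]] -> two best tokens committed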
variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2365,3362425,"genie.py",3273,4076," self,\n batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": 
False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2366,3362465,"genie.py",3287,4062," batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run 
MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2367,3362465,"genie.py",3343,4006," temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n 
temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2368,3362473,"genie.py",3410,3939," ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n 
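One subtle line in the step body is final_token_probs += ~mask: positions that were already committed (mask False) get +1 added to their confidence, so they always out-rank every still-masked probability (at most 1) in the descending sort and can never be re-masked. In isolation:

import jax.numpy as jnp

probs = jnp.array([0.9, 0.2, 0.6])
mask = jnp.array([False, True, True])  # position 0 committed earlier

# Booleans promote to 0/1 under addition, so committed entries gain +1
# and are guaranteed to survive the confidence sort.
probs = probs + ~mask
print(probs)  # [1.9 0.2 0.6]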
new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2369,3362483,"genie.py",3424,3925," # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, 
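After the scan finishes, final_carry[1] holds the committed indices; they are given a singleton time axis before tokenizer.decode because the tokenizer decodes videos rather than single frames. A sketch of just the reshaping; the decode call in the comment is quoted from the selection above, while the height/width values are assumptions:

import jax.numpy as jnp

new_frame_idxs = jnp.zeros((2, 16), jnp.int32)       # (batch, tokens) from carry
frame_as_video = jnp.expand_dims(new_frame_idxs, 1)  # (batch, 1, tokens)
print(frame_as_video.shape)
# tokenizer.decode(frame_as_video, video_hw=(64, 64)) would then map the
# single-frame "video" back to pixels.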
training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2370,3362528,"genie.py",3469,3880," tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: 
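Action conditioning in the step body is a shift-and-add: the projected action embeddings are padded with one leading frame of zeros so that frame t receives the action taken into it while frame 0 gets none. A sketch with assumed shapes; treating the per-frame action embedding as broadcasting over the token axis is an assumption:

import jax.numpy as jnp

B, T, N, D = 2, 3, 16, 8
vid_embed = jnp.zeros((B, T + 1, N, D))  # context frames plus candidate frame
act_embed = jnp.zeros((B, T, 1, D))      # one embedded action per transition

# Pad one zero frame at the front of the time axis, then broadcast-add.
vid_embed = vid_embed + jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))
print(vid_embed.shape)  # (2, 4, 16, 8)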
nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2371,3362564,"genie.py",3551,3798," token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n 
vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2372,3362673,"genie.py",3550,3799," token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = 
vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2373,3362719,"genie.py",3596,3753," new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = 
self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2374,3362727,"genie.py",3654,3695," action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = 
jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2375,3362741,"genie.py",3725,3624,"\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = 
jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2376,3362767,"genie.py",3726,3623," # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2377,3362826,"genie.py",3763,3586," init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n 
token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2378,3362915,"genie.py",3726,3623," # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = 
final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2379,3362930,"genie.py",3725,3624,"\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: 
float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2380,3362947,"genie.py",3596,3753," new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n 
vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2381,3362965,"genie.py",3468,3881," tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, 
-1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2382,3362996,"genie.py",3410,3939," ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 
0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2383,3363048,"genie.py",3373,3976," sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n 
final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2384,3363088,"genie.py",3343,4006," temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame 
---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2385,3363089,"genie.py",3318,4031," steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n 
rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2386,3363108,"genie.py",3287,4062," batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n 
step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2387,3363146,"genie.py",3273,4076," self,\n batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n 
jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2388,3363321,"genie.py",3257,4092," def sample(\n self,\n batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n 
)\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2389,3363384,"genie.py",3241,4108," @nn.compact\n def sample(\n self,\n batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = 
jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2390,3363461,"genie.py",3257,4092," def sample(\n self,\n batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n 
final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_mouse +2391,3541206,"scripts_horeka/train_dynamics.sh",0,0,"",shellscript,tab +2392,3541918,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"",shellscript,tab +2393,3543339,"scripts_horeka/overfit_sample_tiny/sample.sh",1444,0,"",shellscript,selection_mouse +2394,3544922,"scripts_horeka/overfit_sample_tiny/sample.sh",1444,0,"0",shellscript,content +2395,3544923,"scripts_horeka/overfit_sample_tiny/sample.sh",1445,0,"",shellscript,selection_keyboard +2396,3545016,"scripts_horeka/overfit_sample_tiny/sample.sh",1445,0,"0",shellscript,content +2397,3545017,"scripts_horeka/overfit_sample_tiny/sample.sh",1446,0,"",shellscript,selection_keyboard +2398,3546038,"scripts_horeka/overfit_sample_tiny/sample.sh",1301,0,"",shellscript,selection_mouse +2399,3546427,"scripts_horeka/overfit_sample_tiny/sample.sh",1475,0,"",shellscript,selection_mouse +2400,3546865,"scripts_horeka/overfit_sample_tiny/sample.sh",1516,0,"",shellscript,selection_mouse +2401,3547822,"scripts_horeka/overfit_sample_tiny/sample.sh",1492,0,"",shellscript,selection_mouse +2402,3548860,"scripts_horeka/overfit_sample_tiny/sample.sh",1492,1,"",shellscript,content +2403,3548982,"scripts_horeka/overfit_sample_tiny/sample.sh",1492,0,"1",shellscript,content +2404,3548984,"scripts_horeka/overfit_sample_tiny/sample.sh",1493,0,"",shellscript,selection_keyboard +2405,3549410,"scripts_horeka/overfit_sample_tiny/sample.sh",1493,0,"6",shellscript,content +2406,3549410,"scripts_horeka/overfit_sample_tiny/sample.sh",1494,0,"",shellscript,selection_keyboard +2407,3550316,"scripts_horeka/overfit_sample_tiny/sample.sh",1517,0,"",shellscript,selection_mouse +2408,3557128,"TERMINAL",0,0,"srun",,terminal_focus +2409,3558577,"TERMINAL",0,0,"\r\n\r\r\n\r",,terminal_output +2410,3559079,"TERMINAL",0,0,"s\r\n\rh scripts_horeka/train_dynamics.sh ",,terminal_output +2411,3561375,"TERMINAL",0,0,"",,terminal_output +2412,3562165,"TERMINAL",0,0,"\r\r\n\r",,terminal_output +2413,3562311,"TERMINAL",0,0,"",,terminal_output +2414,3562369,"TERMINAL",0,0,"",,terminal_output +2415,3563317,"TERMINAL",0,0,"o",,terminal_output +2416,3563426,"TERMINAL",0,0,"verfit_",,terminal_output +2417,3564020,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +2418,3564167,"TERMINAL",0,0,"ample",,terminal_output +2419,3565534,"TERMINAL",0,0,"_",,terminal_output +2420,3565696,"TERMINAL",0,0,"tiny/",,terminal_output +2421,3566790,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +2422,3567019,"TERMINAL",0,0,"ample.sh ",,terminal_output +2423,3567477,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +2424,3567624,"TERMINAL",0,0,"Sampling from checkpoint: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/0000/genie_1751301068_2000/\r\n",,terminal_output +2425,3568366,"TERMINAL",0,0,"bash",,terminal_focus +2426,3569569,"TERMINAL",0,0,"queue",,terminal_command 
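Annotation on the genie.py region selected in events 2372-2390 above: the recorded MaskGITStep is a MaskGIT-style sampler. Each scan step re-encodes the partially decoded final frame, samples token indices for the still-masked positions, and then re-masks all but the most confident tokens according to a cosine schedule. Below is a minimal, self-contained sketch of that confidence-based remasking rule, offered for readability rather than as the recorded implementation: the standalone function name remask_step, its signature, and the toy shapes are assumptions; only the cosine schedule, the probs += ~mask pinning of already-committed tokens, and the keep-top-k-by-confidence rule mirror the captured code.

    import jax
    import jax.numpy as jnp

    def remask_step(rng, logits, mask, step, steps):
        """One MaskGIT remasking step (hypothetical standalone form).

        logits: (B, N, V) per-token categorical logits for the final frame
        mask:   (B, N) bool, True where a token is still masked
        """
        _, N, _ = logits.shape
        # Cosine schedule: fraction of tokens left masked after this step.
        unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (steps * 2))
        # Sample a candidate token per position and look up its probability.
        sampled = jax.random.categorical(rng, logits)              # (B, N)
        probs = jnp.take_along_axis(
            jax.nn.softmax(logits), sampled[..., None], axis=-1
        )[..., 0]                                                  # (B, N)
        # Pin already-unmasked tokens: +1 lifts them above any softmax prob.
        probs += ~mask
        # Keep the most confident tokens unmasked; re-mask the rest.
        num_unmasked = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)
        stays_masked = jnp.arange(N) > num_unmasked                # by confidence rank
        order = jnp.argsort(probs, axis=-1, descending=True)       # descending= needs a recent jax
        new_mask = jax.vmap(lambda m, ids: m.at[ids].set(stays_masked))(mask, order)
        return sampled, new_mask

As in the recorded step, the sampled indices would then be written into the masked positions only (jnp.where(mask, sampled, previous)), so tokens committed at earlier steps are never overwritten; on the last step the captured code switches to argmax, so no still-masked position is left to chance.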
+2427,3569678,"TERMINAL",0,0,"]633;E;2025-06-30 23:43:16 queue;9a6fb2c6-f596-403e-97b8-ced5ab6848ce]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Mon Jun 30 23:43:16 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3307524 accelerat interact tum_cte0 R56:51\t 1 hkn0732",,terminal_output +2428,3570372,"TERMINAL",0,0,"2025-06-30 23:43:17.708897: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2429,3570715,"TERMINAL",0,0,"73[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +2430,3574555,"TERMINAL",0,0,"2025-06-30 23:43:21.574138: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2431,3574826,"TERMINAL",0,0,"srun",,terminal_focus +2432,3576047,"TERMINAL",0,0,"^CTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/compiler.py"", line 335, in backend_compile\r\n return backend.compile(\r\njaxlib._jax.XlaRuntimeError: INTERNAL: ptxas exited with non-zero error code 2, output: \r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 84, in \r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 75, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/tokenizer.py"", line 57, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 87, in __call__\r\n x = STBlock(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 41, in __call__\r\n z = nn.MultiHeadAttention(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/attention.py"", line 674, in __call__\r\n x = self.attention_fn(*attn_args, **attn_kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/attention.py"", line 266, in dot_product_attention\r\n attn_weights = dot_product_attention_weights(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/attention.py"", line 146, in dot_product_attention_weights\r\n attn_weights = jax.nn.softmax(attn_weights).astype(dtype)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/nn/functions.py"", line 597, in softmax\r\n return _softmax_deprecated(x, axis, where)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/nn/functions.py"", line 628, in _softmax_deprecated\r\n result = unnormalized / 
jnp.sum(unnormalized, axis, where=where, keepdims=True)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/reductions.py"", line 307, in sum\r\n return _reduce_sum(a, axis=_ensure_optional_axes(axis), dtype=dtype, out=out,\r\njax._src.source_info_util.JaxStackTraceBeforeTransformation: KeyboardInterrupt\r\n\r\nThe preceding stack trace is the source of the JAX operation that, once transformed by JAX, triggered the following exception.\r\n\r\n--------------------\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 84, in \r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/traceback_util.py"", line 182, in reraise_with_filtered_traceback\r\n return fun(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 2452, in init\r\n _, v_out = self.init_with_output(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/traceback_util.py"", line 182, in reraise_with_filtered_traceback\r\n return fun(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 2304, in init_with_output\r\n return init_with_output(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/scope.py"", line 1115, in wrapper\r\n return apply(fn, mutable=mutable, flags=init_flags)(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/scope.py"", line 1079, in wrapper\r\n y = fn(root, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 3093, in scope_fn\r\n return fn(module.clone(parent=scope, _deep_clone=True), *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 699, in wrapped_module_method\r\n return self._call_wrapped_method(fun, args, kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 1216, in _call_wrapped_method\r\n y = run_fun(self, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 75, in __call__\r\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 699, in wrapped_module_method\r\n return self._call_wrapped_method(fun, args, kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 1216, in _call_wrapped_method\r\n y = run_fun(self, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/tokenizer.py"", line 57, in vq_encode\r\n x = self.encoder(x) # (B, T, N, E)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 699, in wrapped_module_method\r\n return self._call_wrapped_method(fun, args, kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 1216, in _call_wrapped_method\r\n y = run_fun(self, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 87, in __call__\r\n x = STBlock(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/transforms.py"", line 433, in wrapped_fn\r\n return trafo_fn(module_scopes, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/lift.py"", line 319, in wrapper\r\n y, out_variable_groups_xs_t = fn(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/lift.py"", line 1474, in inner\r\n return rematted(variable_groups, rng_groups, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/traceback_util.py"", line 182, in reraise_with_filtered_traceback\r\n return fun(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/ad_checkpoint.py"", line 333, in fun_remat\r\n out_flat = remat_p.bind(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 531, in bind\r\n return self._true_bind(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 551, in _true_bind\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 556, in bind_with_trace\r\n return trace.process_primitive(self, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1060, in process_primitive\r\n return primitive.impl(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/ad_checkpoint.py"", line 514, in remat_impl\r\n return core.eval_jaxpr(jaxpr, (), *args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 624, in eval_jaxpr\r\n ans = eqn.primitive.bind(*subfuns, *map(read, eqn.invars), **bind_params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 531, in bind\r\n return self._true_bind(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 551, in _true_bind\r\n return self.bind_with_trace(prev_trace, args, params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 556, in bind_with_trace\r\n return trace.process_primitive(self, args, params)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/core.py"", line 1060, in process_primitive\r\n return primitive.impl(*args, **params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/dispatch.py"", line 88, in apply_primitive\r\n outs = fun(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/traceback_util.py"", line 182, in reraise_with_filtered_traceback\r\n return fun(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 334, in cache_miss\r\n executable, pgle_profiler) = _python_pjit_helper(fun, jit_info, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 195, in _python_pjit_helper\r\n out_flat, compiled, profiler = _pjit_call_impl_python(*args_flat, **p.params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 1862, in _pjit_call_impl_python\r\n ).compile()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/interpreters/pxla.py"", line 2467, in compile\r\n executable = UnloadedMeshExecutable.from_hlo(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/interpreters/pxla.py"", line 3009, in from_hlo\r\n xla_executable = _cached_compilation(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/interpreters/pxla.py"", line 2800, in _cached_compilation\r\n xla_executable = compiler.compile_or_get_cached(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/compiler.py"", line 447, in compile_or_get_cached\r\n return _compile_and_write_cache(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/compiler.py"", line 719, in _compile_and_write_cache\r\n executable = backend_compile(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/profiler.py"", line 354, in wrapper\r\n return func(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/compiler.py"", line 335, in backend_compile\r\n return backend.compile(\r\nKeyboardInterrupt\r\n",,terminal_output +2433,3576559,"TERMINAL",0,0,"\r\n]0;tum_cte0515@hkn0732:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0732 jafar]$ ",,terminal_output +2434,3576755,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0732:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0732 jafar]$ ",,terminal_output +2435,3577502,"TERMINAL",0,0,"[?2004l\r\r\nexit\r\nsrun: error: hkn0732: task 0: Exited with exit code 130\r\nsalloc: Relinquishing job allocation 3307524\r\nsalloc: Job allocation 3307524 has been revoked.\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;130",,terminal_output +2436,3578422,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5 --mem=50G",,terminal_command 
+2437,3578486,"TERMINAL",0,0,"]633;E;2025-06-30 23:43:25 salloc --time=01:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5 --mem=50G;4d11dbdc-690b-4257-b927-bbd493ebfa56]633;Csalloc: Pending job allocation 3307600\r\nsalloc: job 3307600 queued and waiting for resources\r\n",,terminal_output +2438,3582317,"Untitled-2",0,0,"",plaintext,tab +2439,3642850,"TERMINAL",0,0,"salloc: job 3307600 has been allocated resources\r\nsalloc: Granted job allocation 3307600\r\n",,terminal_output +2440,3643032,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output +2441,3670079,"TERMINAL",0,0,"salloc: Nodes hkn0518 are ready for job\r\n",,terminal_output +2442,3670869,"TERMINAL",0,0,"]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h[tum_cte0515@hkn0518 jafar]$ ",,terminal_output +2443,3687635,"TERMINAL",0,0,"s",,terminal_output +2444,3687773,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +2445,3687910,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +2446,3687985,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +2447,3688248,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +2448,3688327,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +2449,3688402,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +2450,3688622,"TERMINAL",0,0,"[?25l.[?25h[?25lv[?25h",,terminal_output +2451,3688779,"TERMINAL",0,0,"env/",,terminal_output +2452,3689153,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +2453,3689313,"TERMINAL",0,0,"in/",,terminal_output +2454,3689589,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +2455,3689688,"TERMINAL",0,0,"[?25lx[?25h",,terminal_output +2456,3689889,"TERMINAL",0,0,"",,terminal_output +2457,3690602,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +2458,3690764,"TERMINAL",0,0,"tivate",,terminal_output +2459,3691182,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +2460,3741531,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_output +2461,3741837,"TERMINAL",0,0,"h scripts_horeka/overfit_sample_tiny/sample.sh ",,terminal_output +2462,3743556,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +2463,3743713,"TERMINAL",0,0,"Sampling from checkpoint: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/0000/genie_1751301068_2000/\r\n",,terminal_output +2464,3763879,"TERMINAL",0,0,"2025-06-30 23:46:31.216279: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2465,3768028,"TERMINAL",0,0,"2025-06-30 23:46:35.366922: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2466,3776378,"TERMINAL",0,0,"2025-06-30 23:46:43.715646: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2467,3783451,"TERMINAL",0,0,"2025-06-30 23:46:50.784302: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2468,3787361,"TERMINAL",0,0,"Entering jdb:\r\n(jdb) ",,terminal_output +2469,3790525,"TERMINAL",0,0,"l",,terminal_output +2470,3791071,"TERMINAL",0,0,"\r\n> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/dynamics.py(47)\r\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\r\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\r\n mask = mask.at[:, 0].set(False)\r\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\r\n else:\r\n mask = None\r\n-> jax.debug.breakpoint()\r\n \r\n # --- Predict transition ---\r\n act_embed = self.action_up(batch[""latent_actions""])\r\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\r\n logits = self.dynamics(vid_embed)\r\n(jdb) ",,terminal_output +2471,3794614,"TERMINAL",0,0,"[?25lvi[?25h[?25li[?25h",,terminal_output +2472,3794876,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +2473,3794987,"TERMINAL",0,0,"[?25le[?25h[?25lo[?25h",,terminal_output +2474,3795848,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +2475,3795983,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +2476,3796123,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +2477,3796348,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +2478,3796402,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +2479,3796527,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +2480,3797503,"TERMINAL",0,0,"[",,terminal_output +2481,3797667,"TERMINAL",0,0,"0",,terminal_output +2482,3798949,"TERMINAL",0,0,"[?25l,[?25h",,terminal_output +2483,3799047,"TERMINAL",0,0,"[?25l0[?25h",,terminal_output +2484,3799739,"TERMINAL",0,0,"[?25l,[?25h",,terminal_output +2485,3801338,"TERMINAL",0,0,"0",,terminal_output +2486,3801761,"TERMINAL",0,0,"]",,terminal_output +2487,3801895,"TERMINAL",0,0,"\r\n*** NameError: name 'video_embed' is not defined\r\n(jdb) ",,terminal_output +2488,3803951,"TERMINAL",0,0,"\rvideo_embed[0,0,0]",,terminal_output +2489,3806760,"TERMINAL",0,0,"[?25lo\r_[?25h",,terminal_output +2490,3806943,"TERMINAL",0,0,"[?25le\r_[?25h",,terminal_output +2491,3807181,"TERMINAL",0,0,"\r\n",,terminal_output +2492,3807324,"TERMINAL",0,0,"Array([ 3.1430408e-02, 9.2464060e-02, 4.6741180e-02, 4.4946712e-02,\r\n 4.0849060e-02, 1.5903854e-01, 8.5261263e-02, -5.0046258e-02,\r\n 1.3122630e-01, -3.2819204e-02, -1.1929660e-01, -2.1288337e-01,\r\n 1.2003889e-01, 4.1469868e-02, -6.0602043e-02, -7.0957288e-02,\r\n -2.8018629e-02, 3.0199461e-02, -5.4291103e-02, 1.8455264e-01,\r\n 1.1611148e-01, 8.8016681e-02, -1.1226088e-01, 1.6462137e-01,\r\n 2.9738750e-02, 5.4291315e-02, -5.8947198e-02, 1.9123649e-02,\r\n 3.9460551e-02, 3.0961015e-05, -3.0220527e-02, 7.6583706e-02,\r\n 4.7222655e-02, 2.7840761e-02, -7.1577899e-02, 9.2682414e-02,\r\n -7.6887041e-02, -2.5045413e-01, 2.1420253e-02, -7.1284950e-02,\r\n 5.6634662e-03, 7.3354915e-02, -1.7348000e-01, 3.4615997e-02,\r\n -3.2564558e-02, -5.9136495e-02, -4.7633491e-02, -8.1838751e-03,\r\n 1.2534963e-01, -1.0248238e-01, -1.5041859e-01, 9.8264880e-02,\r\n -5.3846285e-02, 2.0879345e-02, -8.1899434e-02, 
-8.9715518e-02,\r\n -7.0511013e-02, 9.6462881e-03, -1.0328349e-01, 1.8247880e-01,\r\n -5.3078894e-02, -6.8787292e-02, 6.0688850e-02, 3.0767094e-02,\r\n -2.8026905e-02, 2.3132475e-01, 2.9357679e-02, -2.8319707e-02,\r\n 2.7568148e-02, 9.5987052e-02, -6.2912218e-02, 3.7598938e-02,\r\n 1.4148790e-01, -6.3332453e-02, 2.0451102e-02, -2.0065141e-01,\r\n -1.2242822e-01, -7.2603845e-03, 4.2571858e-02, -9.3802109e-02,\r\n 2.6962103e-02, -2.4043778e-02, -1.2504991e-02, -1.2540063e-03,\r\n -1.3763572e-02, 2.8071860e-02, -1.4627062e-01, -5.1848087e-02,\r\n -4.5841732e-03, 9.5388949e-02, 1.2645793e-01, -5.1203363e-02,\r\n 3.5534672e-02, 1.9342002e-02, 1.0948886e-01, 4.4822499e-02,\r\n -5.0669578e-03, -6.1139286e-02, -4.5224130e-02, 1.6399375e-01,\r\n -1.7257795e-02, 1.5652233e-01, -1.1799398e-01, 2.1358494e-02,\r\n -9.7792402e-02, 6.2721498e-02, -7.7454969e-02, -4.0084090e-02,\r\n 7.2253183e-02, -1.1347165e-01, -1.9745750e-02, -5.1988144e-03,\r\n 1.6690373e-01, 1.8015216e-01, -2.1447660e-02, -4.9860902e-02,\r\n 1.8019162e-01, -1.0997839e-01, -6.2858775e-02, -2.3409504e-02,\r\n -1.2383691e-01, 7.6143488e-02, -2.4857154e-02, 1.3635203e-01,\r\n -8.1784641e-03, -1.4921306e-01, 1.5984055e-01, 7.4276313e-02], dtype=float32)\r\n(jdb) ",,terminal_output +2493,3819968,"Untitled-1",0,0,"",r,tab +2494,3832604,"TERMINAL",0,0,"c",,terminal_output +2495,3833074,"TERMINAL",0,0,"\r\n",,terminal_output +2496,3835999,"TERMINAL",0,0,"2025-06-30 23:47:43.305742: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2497,3841479,"TERMINAL",0,0,"2025-06-30 23:47:48.814320: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2498,3844293,"TERMINAL",0,0,"Entering jdb:\r\n(jdb) ",,terminal_output +2499,3855723,"TERMINAL",0,0,"l",,terminal_output +2500,3855927,"TERMINAL",0,0,"\r\n> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py(93)\r\n dyna_outputs = self.dynamics(outputs, training)\r\n outputs.update(dyna_outputs)\r\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\r\n outputs[""recon""] = self.tokenizer.decode(\r\n mle_indices, batch[""videos""].shape[2:4]\r\n )\r\n-> jax.debug.breakpoint()\r\n outputs[""gt_debug""] = self.tokenizer.decode(\r\n tokenizer_outputs[""indices""], batch[""videos""].shape[2:4]\r\n )\r\n return outputs\r\n \r\n(jdb) ",,terminal_output +2501,3858387,"TERMINAL",0,0,"n",,terminal_output +2502,3865177,"TERMINAL",0,0,"[?25ln\r[?25h",,terminal_output +2503,3865404,"TERMINAL",0,0,"^DERROR:2025-06-30 23:48:12,664:jax._src.debugging:96: jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 94, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 334, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\nERROR:jax._src.debugging:jax.debug.callback failed\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 94, in debug_callback_impl\r\n callback(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugging.py"", line 334, in _flat_callback\r\n callback(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/core.py"", line 220, in _breakpoint_callback\r\n debugger(frames, thread_id, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 167, in run_debugger\r\n 
CliDebugger(frames, thread_id, **kwargs).run()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 160, in run\r\n self.cmdloop()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 138, in cmdloop\r\n stop = self.onecmd(line)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/cmd.py"", line 217, in onecmd\r\n return func(arg)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/debugger/cli_debugger.py"", line 146, in do_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n",,terminal_output +2504,3866597,"TERMINAL",0,0,"]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +2505,3869221,"models/dynamics.py",0,0,"",python,tab +2506,3870667,"models/dynamics.py",1389,31,"",python,content +2507,3873128,"train_dynamics.py",0,0,"",python,tab +2508,3875353,"train_dynamics.py",8749,0,"",python,selection_mouse +2509,3889548,"genie.py",0,0,"",python,tab +2510,3890243,"genie.py",3257,4092," def sample(\n self,\n batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * 
(1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_command +2511,3892176,"genie.py",3072,4277,"breakpoint()\n outputs[""gt_debug""] = self.tokenizer.decode(\n tokenizer_outputs[""indices""], batch[""videos""].shape[2:4]\n )\n return outputs\n\n @nn.compact\n def sample(\n self,\n batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), 
(0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,selection_command +2512,3892418,"genie.py",3072,4277,"",python,content +2513,3892455,"genie.py",3071,0,"",python,selection_command +2514,3894345,"genie.py",3072,0,"breakpoint()\n outputs[""gt_debug""] = self.tokenizer.decode(\n tokenizer_outputs[""indices""], batch[""videos""].shape[2:4]\n )\n return outputs\n\n @nn.compact\n def sample(\n self,\n batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""]\n new_frame_idxs = jnp.zeros_like(token_idxs)[:, 0]\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.ones_like(token_idxs, dtype=bool)[:, 0]\n init_carry = (\n batch[""rng""],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n final_carry, _ = loop_fn(init_carry, jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.expand_dims(new_frame_idxs, 1),\n video_hw=batch[""videos""].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, final_token_idxs, mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.expand_dims(final_token_idxs, 1)), axis=1\n )\n vid_embed = self.dynamics.patch_embed(vid_token_idxs)\n curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n 
self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].set(curr_masked_frame)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed)[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jnp.where(\n step == self.steps - 1,\n jnp.argmax(final_logits, axis=-1),\n jax.random.categorical(_rng, final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None",python,content +2515,3894418,"genie.py",3072,0,"",python,selection_command +2516,3895582,"genie.py",3054,31,"",python,content +2517,3895586,"genie.py",3062,0,"",python,selection_command +2518,3897122,"TERMINAL",0,0,"sh scripts_horeka/overfit_sample_tiny/sample.sh ",,terminal_output +2519,3897899,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +2520,3898058,"TERMINAL",0,0,"Sampling from checkpoint: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/0000/genie_1751301068_2000/\r\n",,terminal_output +2521,3900909,"TERMINAL",0,0,"2025-06-30 23:48:48.202262: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2522,3904905,"TERMINAL",0,0,"2025-06-30 23:48:52.154452: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2523,3905939,"TERMINAL",0,0,"bash",,terminal_focus +2524,3913118,"TERMINAL",0,0,"2025-06-30 23:49:00.403568: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2525,3920553,"TERMINAL",0,0,"2025-06-30 23:49:07.831119: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2526,3927080,"TERMINAL",0,0,"2025-06-30 23:49:14.402792: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2527,3928736,"TERMINAL",0,0,"srun",,terminal_focus +2528,3932746,"TERMINAL",0,0,"2025-06-30 23:49:19.995919: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2529,3936490,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +2530,3947858,"TERMINAL",0,0,"2025-06-30 23:49:35.189389: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2531,3959138,"TERMINAL",0,0,"2025-06-30 23:49:46.473993: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2532,3963486,"TERMINAL",0,0,"2025-06-30 23:49:50.818023: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2533,3969972,"TERMINAL",0,0,"2025-06-30 23:49:57.299773: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2534,3976901,"TERMINAL",0,0,"SSIM: 0.15632537007331848\r\n",,terminal_output +2535,3979756,"TERMINAL",0,0,"]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +2536,4016889,"sample.py",0,0,"",python,tab +2537,4065107,"train_dynamics.py",0,0,"",python,tab +2538,4068899,"scripts_horeka/train_dynamics.sh",0,0,"",shellscript,tab +2539,4075762,"genie.py",0,0,"",python,tab +2540,4081073,"sample.py",0,0,"",python,tab +2541,4085723,"sample.py",4207,0,"",python,selection_mouse +2542,4086582,"sample.py",4206,0,"",python,selection_command +2543,4087053,"sample.py",4200,0,"",python,selection_command +2544,4087506,"sample.py",4200,1,"",python,content +2545,4087726,"sample.py",4200,1,"",python,content +2546,4088361,"sample.py",4254,0,"",python,selection_command +2547,4089408,"sample.py",4254,0,"#",python,content +2548,4089409,"sample.py",4255,0,"",python,selection_keyboard +2549,4089489,"sample.py",4255,0," ",python,content +2550,4089490,"sample.py",4256,0,"",python,selection_keyboard +2551,4089860,"sample.py",4255,0,"",python,selection_command +2552,4092004,"TERMINAL",0,0,"sh scripts_horeka/overfit_sample_tiny/sample.sh ",,terminal_output +2553,4092490,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +2554,4092592,"TERMINAL",0,0,"Sampling from checkpoint: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/0000/genie_1751301068_2000/\r\n",,terminal_output +2555,4095404,"TERMINAL",0,0,"2025-06-30 23:52:02.714622: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2556,4099616,"TERMINAL",0,0,"2025-06-30 23:52:06.950025: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2557,4107756,"TERMINAL",0,0,"2025-06-30 23:52:15.080392: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2558,4114807,"TERMINAL",0,0,"2025-06-30 23:52:22.144944: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2559,4121225,"TERMINAL",0,0,"2025-06-30 23:52:28.561515: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2560,4126824,"TERMINAL",0,0,"2025-06-30 23:52:34.140738: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2561,4130785,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +2562,4141622,"TERMINAL",0,0,"====================================================================================================\r\nFrame 1\r\n====================================================================================================\r\n",,terminal_output +2563,4142188,"TERMINAL",0,0,"2025-06-30 23:52:49.515535: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2564,4145207,"TERMINAL",0,0,"2025-06-30 23:52:52.529360: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2565,4153598,"TERMINAL",0,0,"2025-06-30 23:53:00.876545: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2566,4156341,"sample.py",0,0,"",python,tab +2567,4156342,"sample.py",4610,0,"",python,selection_mouse +2568,4156372,"sample.py",4609,0,"",python,selection_command +2569,4156908,"TERMINAL",0,0,"2025-06-30 23:53:04.230522: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2570,4161789,"TERMINAL",0,0,"2025-06-30 23:53:09.127407: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2571,4164073,"TERMINAL",0,0,"====================================================================================================\r\nFrame 2\r\n====================================================================================================\r\n",,terminal_output +2572,4164592,"TERMINAL",0,0,"2025-06-30 23:53:11.922984: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2573,4167522,"TERMINAL",0,0,"2025-06-30 23:53:14.807494: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2574,4175324,"TERMINAL",0,0,"2025-06-30 23:53:22.650335: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2575,4178094,"TERMINAL",0,0,"2025-06-30 23:53:25.426785: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2576,4183753,"TERMINAL",0,0,"====================================================================================================\r\nFrame 3\r\n====================================================================================================\r\n",,terminal_output +2577,4184300,"TERMINAL",0,0,"2025-06-30 23:53:31.609306: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2578,4187202,"TERMINAL",0,0,"2025-06-30 23:53:34.533456: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2579,4195845,"TERMINAL",0,0,"2025-06-30 23:53:43.175204: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2580,4198760,"TERMINAL",0,0,"2025-06-30 23:53:46.060671: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2581,4204665,"TERMINAL",0,0,"====================================================================================================\r\nFrame 4\r\n====================================================================================================\r\n",,terminal_output +2582,4205290,"TERMINAL",0,0,"2025-06-30 23:53:52.573913: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2583,4208336,"TERMINAL",0,0,"2025-06-30 23:53:55.671868: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2584,4216967,"TERMINAL",0,0,"2025-06-30 23:54:04.298904: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2585,4219891,"TERMINAL",0,0,"2025-06-30 23:54:07.163252: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2586,4225728,"TERMINAL",0,0,"====================================================================================================\r\nFrame 5\r\n====================================================================================================\r\n",,terminal_output +2587,4226412,"TERMINAL",0,0,"2025-06-30 23:54:13.645479: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2588,4229247,"TERMINAL",0,0,"2025-06-30 23:54:16.503656: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2589,4237970,"TERMINAL",0,0,"2025-06-30 23:54:25.308242: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2590,4240738,"TERMINAL",0,0,"2025-06-30 23:54:28.068542: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2591,4247397,"TERMINAL",0,0,"====================================================================================================\r\nFrame 6\r\n====================================================================================================\r\n",,terminal_output +2592,4248081,"TERMINAL",0,0,"2025-06-30 23:54:35.376180: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2593,4251077,"TERMINAL",0,0,"2025-06-30 23:54:38.411373: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2594,4259672,"TERMINAL",0,0,"2025-06-30 23:54:47.007000: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2595,4262453,"TERMINAL",0,0,"2025-06-30 23:54:49.762739: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2596,4268364,"TERMINAL",0,0,"====================================================================================================\r\nFrame 7\r\n====================================================================================================\r\n",,terminal_output +2597,4268989,"TERMINAL",0,0,"2025-06-30 23:54:56.322410: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2598,4272111,"TERMINAL",0,0,"2025-06-30 23:54:59.420094: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2599,4280824,"TERMINAL",0,0,"2025-06-30 23:55:08.156543: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2600,4283802,"TERMINAL",0,0,"2025-06-30 23:55:11.123155: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2601,4290749,"TERMINAL",0,0,"====================================================================================================\r\nFrame 8\r\n====================================================================================================\r\n",,terminal_output +2602,4291371,"TERMINAL",0,0,"2025-06-30 23:55:18.702989: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2603,4294291,"TERMINAL",0,0,"2025-06-30 23:55:21.626037: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2604,4303959,"TERMINAL",0,0,"2025-06-30 23:55:31.286452: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2605,4306772,"TERMINAL",0,0,"2025-06-30 23:55:34.109894: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2606,4313126,"TERMINAL",0,0,"====================================================================================================\r\nFrame 9\r\n====================================================================================================\r\n",,terminal_output +2607,4313788,"TERMINAL",0,0,"2025-06-30 23:55:41.123416: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2608,4326343,"TERMINAL",0,0,"2025-06-30 23:55:53.674203: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2609,4329549,"TERMINAL",0,0,"2025-06-30 23:55:56.880736: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2610,4336134,"TERMINAL",0,0,"====================================================================================================\r\nFrame 10\r\n====================================================================================================\r\n",,terminal_output +2611,4336820,"TERMINAL",0,0,"2025-06-30 23:56:04.125036: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2612,4348970,"TERMINAL",0,0,"2025-06-30 23:56:16.308863: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2613,4352044,"TERMINAL",0,0,"2025-06-30 23:56:19.382194: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2614,4359415,"TERMINAL",0,0,"====================================================================================================\r\nFrame 11\r\n====================================================================================================\r\n",,terminal_output +2615,4360166,"TERMINAL",0,0,"2025-06-30 23:56:27.503096: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2616,4372613,"TERMINAL",0,0,"2025-06-30 23:56:39.950380: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2617,4375745,"TERMINAL",0,0,"2025-06-30 23:56:43.083343: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2618,4382426,"TERMINAL",0,0,"====================================================================================================\r\nFrame 12\r\n====================================================================================================\r\n",,terminal_output +2619,4383139,"TERMINAL",0,0,"2025-06-30 23:56:50.456010: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2620,4395050,"TERMINAL",0,0,"2025-06-30 23:57:02.387311: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2621,4398389,"TERMINAL",0,0,"2025-06-30 23:57:05.670966: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2622,4405034,"TERMINAL",0,0,"====================================================================================================\r\nFrame 13\r\n====================================================================================================\r\n",,terminal_output +2623,4405790,"TERMINAL",0,0,"2025-06-30 23:57:13.117305: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2624,4418085,"TERMINAL",0,0,"2025-06-30 23:57:25.421336: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2625,4421170,"TERMINAL",0,0,"2025-06-30 23:57:28.509911: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2626,4427890,"TERMINAL",0,0,"====================================================================================================\r\nFrame 14\r\n====================================================================================================\r\n",,terminal_output +2627,4428664,"TERMINAL",0,0,"2025-06-30 23:57:35.915758: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2628,4440480,"TERMINAL",0,0,"2025-06-30 23:57:47.791226: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2629,4443390,"TERMINAL",0,0,"2025-06-30 23:57:50.728189: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2630,4450672,"TERMINAL",0,0,"====================================================================================================\r\nFrame 15\r\n====================================================================================================\r\n",,terminal_output +2631,4451381,"TERMINAL",0,0,"2025-06-30 23:57:58.719978: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2632,4464123,"TERMINAL",0,0,"2025-06-30 23:58:11.459018: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2633,4467453,"TERMINAL",0,0,"2025-06-30 23:58:14.781182: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2634,4476858,"TERMINAL",0,0,"SSIM: 0.17794367671012878\r\n",,terminal_output +2635,4481345,"TERMINAL",0,0,"]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +2636,4500122,"sample.py",0,0,"",python,tab +2637,4516005,"sample.py",0,0,"",python,tab +2638,4524333,"sample.py",4337,0,"",python,selection_mouse +2639,4524929,"sample.py",4215,0,"",python,selection_mouse +2640,4525925,"sample.py",4253,0,"",python,selection_mouse +2641,4525928,"sample.py",4252,0,"",python,selection_command +2642,4527578,"sample.py",4253,0,"\n",python,content +2643,4527951,"sample.py",4254,0,"v",python,content +2644,4527952,"sample.py",4255,0,"",python,selection_keyboard +2645,4527998,"sample.py",4255,0,"i",python,content +2646,4528000,"sample.py",4256,0,"",python,selection_keyboard +2647,4528171,"sample.py",4256,0,"d",python,content +2648,4528172,"sample.py",4257,0,"",python,selection_keyboard +2649,4528900,"sample.py",4257,0," ",python,content +2650,4528901,"sample.py",4258,0,"",python,selection_keyboard +2651,4529128,"sample.py",4258,0,"=",python,content +2652,4529128,"sample.py",4259,0,"",python,selection_keyboard +2653,4529305,"sample.py",4259,0," ",python,content +2654,4529306,"sample.py",4260,0,"",python,selection_keyboard +2655,4530567,"sample.py",4254,0,"",python,selection_command +2656,4547559,"sample.py",4254,0,"# Generate 16 frames of the pad token from DynamicsMaskGIT\n",python,content +2657,4547780,"sample.py",4313,0,"from models.dynamics import DynamicsMaskGIT\n",python,content +2658,4547783,"sample.py",4357,0,"\n",python,content +2659,4547948,"sample.py",4358,0,"# Instantiate a dummy DynamicsMaskGIT to access the pad/mask token\n",python,content +2660,4548019,"sample.py",4425,0,"dummy_dynamics = DynamicsMaskGIT(\n",python,content +2661,4548065,"sample.py",4459,0," model_dim=args.dyna_dim,\n",python,content +2662,4548134,"sample.py",4488,0," num_latents=args.num_patch_latents,\n",python,content +2663,4548182,"sample.py",4528,0," 
num_blocks=args.dyna_num_blocks,\n",python,content +2664,4548223,"sample.py",4565,0," num_heads=args.dyna_num_heads,\n",python,content +2665,4548258,"sample.py",4600,0," dropout=0.0,\n",python,content +2666,4548406,"sample.py",4617,0," mask_limit=0.0,\n",python,content +2667,4548462,"sample.py",4637,0,")\n",python,content +2668,4548465,"sample.py",4639,0,"\n",python,content +2669,4548521,"sample.py",4640,0,"# Initialize parameters to get the mask token\n",python,content +2670,4548689,"sample.py",4686,0,"dummy_vars = dummy_dynamics.init(\n",python,content +2671,4548732,"sample.py",4720,0," jax.random.PRNGKey(0),\n",python,content +2672,4548754,"sample.py",4747,0," {\n",python,content +2673,4549212,"sample.py",4753,0," ""video_tokens"": jnp.zeros((1, 1, args.num_patch_latents), dtype=jnp.int32),\n",python,content +2674,4549361,"sample.py",4837,0," ""latent_actions"": jnp.zeros((1, 1, args.num_latent_actions, args.latent_action_dim), dtype=jnp.float32),\n",python,content +2675,4549614,"sample.py",4950,0," ""mask_rng"": jax.random.PRNGKey(1),\n",python,content +2676,4549657,"sample.py",4993,0," },\n",python,content +2677,4549729,"sample.py",5000,0," training=True,\n",python,content +2678,4549731,"sample.py",5019,0,")\n",python,content +2679,4549734,"sample.py",5021,0,"\n",python,content +2680,4550140,"sample.py",5022,0,"pad_token = dummy_vars[""params""][""mask_token""] # shape (1, 1, 1, model_dim)\n",python,content +2681,4550143,"sample.py",5099,0,"\n",python,content +2682,4550551,"sample.py",5100,0,"# Repeat to shape (batch, frames, patches, model_dim)\n",python,content +2683,4550595,"sample.py",5154,0,"vid = jnp.tile(\n",python,content +2684,4550645,"sample.py",5170,0," pad_token,\n",python,content +2685,4550799,"sample.py",5185,0," (video_batch.shape[0], args.seq_len, args.num_patch_latents, 1)\n",python,content +2686,4550802,"sample.py",5253,0,")\n",python,content +2687,4550804,"sample.py",5255,7,"",python,content +2688,4571942,"sample.py",4254,0,"",python,selection_command +2689,4572839,"sample.py",4254,1000,"vid = ",python,content +2690,4577212,"sample.py",4254,0,"# Generate 16 frames of the mask token from DynamicsMaskGIT\n",python,content +2691,4577451,"sample.py",4314,0,"from models.dynamics import DynamicsMaskGIT\n",python,content +2692,4577454,"sample.py",4358,0,"\n",python,content +2693,4577562,"sample.py",4359,0,"# Instantiate a dummy DynamicsMaskGIT to access the mask token\n",python,content +2694,4577640,"sample.py",4422,0,"dummy_dynamics = DynamicsMaskGIT(\n",python,content +2695,4577686,"sample.py",4456,0," model_dim=args.dyna_dim,\n",python,content +2696,4577689,"sample.py",4485,0," num_latents=args.num_patch_latents,\n",python,content +2697,4577735,"sample.py",4525,0," num_blocks=args.dyna_num_blocks,\n",python,content +2698,4577739,"sample.py",4562,0," num_heads=args.dyna_num_heads,\n",python,content +2699,4577775,"sample.py",4597,0," dropout=0.0,\n",python,content +2700,4577869,"sample.py",4614,0," mask_limit=0.0,\n",python,content +2701,4577872,"sample.py",4634,0,")\n",python,content +2702,4577874,"sample.py",4636,0,"\n",python,content +2703,4577938,"sample.py",4637,0,"# Initialize parameters to get the mask token\n",python,content +2704,4577942,"sample.py",4683,0,"dummy_vars = dummy_dynamics.init(\n",python,content +2705,4577945,"sample.py",4717,0," jax.random.PRNGKey(0),\n",python,content +2706,4577987,"sample.py",4744,0," {\n",python,content +2707,4579645,"sample.py",4750,0," ""video_tokens"": jnp.zeros((1, 1, args.num_patch_latents), 
dtype=jnp.int32),\n",python,content +2708,4580914,"sample.py",4834,0," ""latent_actions"": jnp.zeros((1, 1, args.num_latent_actions, args.latent_action_dim), dtype=jnp.float32),\n",python,content +2709,4580964,"sample.py",4947,0," ""mask_rng"": jax.random.PRNGKey(1),\n",python,content +2710,4580966,"sample.py",4990,0," },\n",python,content +2711,4581027,"sample.py",4997,0," training=True,\n",python,content +2712,4581030,"sample.py",5016,0,")\n",python,content +2713,4581032,"sample.py",5018,0,"\n",python,content +2714,4581178,"sample.py",5019,0,"mask_token = dummy_vars[""params""][""mask_token""] # shape (1, 1, 1, model_dim)\n",python,content +2715,4581181,"sample.py",5097,0,"\n",python,content +2716,4581220,"sample.py",5098,0,"# Repeat to shape (batch, frames, patches, model_dim)\n",python,content +2717,4581266,"sample.py",5152,0,"vid = jnp.tile(\n",python,content +2718,4581310,"sample.py",5168,0," mask_token,\n",python,content +2719,4581410,"sample.py",5184,0," (video_batch.shape[0], args.seq_len, args.num_patch_latents, 1)\n",python,content +2720,4581536,"sample.py",5252,0,")\n",python,content +2721,4581539,"sample.py",5254,7,"",python,content +2722,4585973,"sample.py",4254,999,"vid = ",python,content +2723,4589385,"sample.py",4260,0,"",python,selection_mouse +2724,4589476,"sample.py",4259,0,"",python,selection_command +2725,4590101,"sample.py",4254,7,"",python,content +2726,4590149,"sample.py",4200,0,"",python,selection_command +2727,4591084,"sample.py",4253,0,"\n",python,content +2728,4592299,"sample.py",4254,0,"v",python,content +2729,4592300,"sample.py",4255,0,"",python,selection_keyboard +2730,4592431,"sample.py",4255,0,"i",python,content +2731,4592431,"sample.py",4256,0,"",python,selection_keyboard +2732,4592806,"sample.py",4256,0,"d",python,content +2733,4592807,"sample.py",4257,0,"",python,selection_keyboard +2734,4593127,"sample.py",4257,0," ",python,content +2735,4593128,"sample.py",4258,0,"",python,selection_keyboard +2736,4593365,"sample.py",4258,0,"=",python,content +2737,4593366,"sample.py",4259,0,"",python,selection_keyboard +2738,4593453,"sample.py",4259,0," ",python,content +2739,4593453,"sample.py",4260,0,"",python,selection_keyboard +2740,4595587,"sample.py",4258,2,"",python,content +2741,4595771,"sample.py",4254,4,"",python,content +2742,4598484,"sample.py",4200,0,"",python,selection_command +2743,4600239,"sample.py",4200,0,"#",python,content +2744,4600240,"sample.py",4201,0,"",python,selection_keyboard +2745,4600345,"sample.py",4201,0," ",python,content +2746,4600346,"sample.py",4202,0,"",python,selection_keyboard +2747,4601000,"sample.py",4201,0,"",python,selection_command +2748,4601212,"sample.py",4256,0,"",python,selection_command +2749,4601483,"sample.py",4258,0,"",python,selection_command +2750,4601667,"sample.py",4314,0,"",python,selection_command +2751,4601832,"sample.py",4370,0,"",python,selection_command +2752,4602007,"sample.py",4453,0,"",python,selection_command +2753,4602348,"sample.py",4370,0,"",python,selection_command +2754,4602556,"sample.py",4314,0,"",python,selection_command +2755,4602700,"sample.py",4258,0,"",python,selection_command +2756,4602877,"sample.py",4256,0,"",python,selection_command +2757,4603026,"sample.py",4201,0,"",python,selection_command +2758,4603495,"sample.py",4256,0,"",python,selection_command +2759,4604422,"sample.py",4256,0,"v",python,content +2760,4604423,"sample.py",4257,0,"",python,selection_keyboard +2761,4604776,"sample.py",4257,0,"i",python,content +2762,4604777,"sample.py",4258,0,"",python,selection_keyboard 
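The keystroke records above reconstruct, fragment by fragment, a snippet typed into sample.py: a throwaway DynamicsMaskGIT is instantiated and initialized purely to read out its learned mask_token parameter, which is then tiled to video shape. Below is a consolidated sketch of that snippet, assuming the jafar repo's models.dynamics.DynamicsMaskGIT (a Flax module with a "mask_token" parameter) and the args/video_batch names already in scope in sample.py; it is a reading of the recording, not a verified excerpt.

import jax
import jax.numpy as jnp
from models.dynamics import DynamicsMaskGIT

# Instantiate a dummy DynamicsMaskGIT only to access its learned mask token.
dummy_dynamics = DynamicsMaskGIT(
    model_dim=args.dyna_dim,
    num_latents=args.num_patch_latents,
    num_blocks=args.dyna_num_blocks,
    num_heads=args.dyna_num_heads,
    dropout=0.0,
    mask_limit=0.0,
)

# init() materializes the parameter tree, including "mask_token".
dummy_vars = dummy_dynamics.init(
    jax.random.PRNGKey(0),
    {
        "video_tokens": jnp.zeros((1, 1, args.num_patch_latents), dtype=jnp.int32),
        "latent_actions": jnp.zeros(
            (1, 1, args.num_latent_actions, args.latent_action_dim), dtype=jnp.float32
        ),
        "mask_rng": jax.random.PRNGKey(1),
    },
    training=True,
)
mask_token = dummy_vars["params"]["mask_token"]  # shape (1, 1, 1, model_dim)

# Repeat to shape (batch, frames, patches, model_dim).
vid = jnp.tile(
    mask_token,
    (video_batch.shape[0], args.seq_len, args.num_patch_latents, 1),
)

The session types this block twice (first naming the token pad_token, then mask_token), undoes both versions, and settles on "vid = None" plus a breakpoint() before rerunning the script, which is what triggers the AttributeError seen a few records later.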
+2763,4604932,"sample.py",4258,0,"d",python,content +2764,4604933,"sample.py",4259,0,"",python,selection_keyboard +2765,4605260,"sample.py",4259,0," ",python,content +2766,4605260,"sample.py",4260,0,"",python,selection_keyboard +2767,4605457,"sample.py",4260,0,"=",python,content +2768,4605458,"sample.py",4261,0,"",python,selection_keyboard +2769,4605544,"sample.py",4261,0," ",python,content +2770,4605545,"sample.py",4262,0,"",python,selection_keyboard +2771,4605869,"sample.py",4262,0,"N",python,content +2772,4605869,"sample.py",4263,0,"",python,selection_keyboard +2773,4606471,"sample.py",4262,1,"",python,content +2774,4606874,"sample.py",4262,0,"N",python,content +2775,4606875,"sample.py",4263,0,"",python,selection_keyboard +2776,4607045,"sample.py",4263,0,"o",python,content +2777,4607045,"sample.py",4264,0,"",python,selection_keyboard +2778,4607161,"sample.py",4264,0,"n",python,content +2779,4607162,"sample.py",4265,0,"",python,selection_keyboard +2780,4607240,"sample.py",4265,0,"e",python,content +2781,4607241,"sample.py",4266,0,"",python,selection_keyboard +2782,4607702,"sample.py",4265,0,"",python,selection_command +2783,4607860,"sample.py",4276,0,"",python,selection_command +2784,4608146,"sample.py",4332,0,"",python,selection_command +2785,4608388,"sample.py",4388,0,"",python,selection_command +2786,4609341,"sample.py",4461,0,"\n",python,content +2787,4624720,"sample.py",4462,0,"b",python,content +2788,4624721,"sample.py",4463,0,"",python,selection_keyboard +2789,4624838,"sample.py",4463,0,"r",python,content +2790,4624839,"sample.py",4464,0,"",python,selection_keyboard +2791,4625026,"sample.py",4464,0,"e",python,content +2792,4625027,"sample.py",4465,0,"",python,selection_keyboard +2793,4625158,"sample.py",4465,0,"a",python,content +2794,4625159,"sample.py",4466,0,"",python,selection_keyboard +2795,4625214,"sample.py",4466,0,"k",python,content +2796,4625216,"sample.py",4467,0,"",python,selection_keyboard +2797,4625507,"sample.py",4467,0,"p",python,content +2798,4625508,"sample.py",4468,0,"",python,selection_keyboard +2799,4625894,"sample.py",4462,6,"breakpoint",python,content +2800,4626849,"sample.py",4472,0,"()",python,content +2801,4626850,"sample.py",4473,0,"",python,selection_keyboard +2802,4626972,"sample.py",4473,1,")",python,content +2803,4626973,"sample.py",4474,0,"",python,selection_keyboard +2804,4628378,"TERMINAL",0,0,"sh scripts_horeka/overfit_sample_tiny/sample.sh ",,terminal_output +2805,4628560,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +2806,4628701,"TERMINAL",0,0,"Sampling from checkpoint: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/0000/genie_1751301068_2000/\r\n",,terminal_output +2807,4631581,"TERMINAL",0,0,"2025-07-01 00:00:58.899008: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2808,4635645,"TERMINAL",0,0,"2025-07-01 00:01:02.960287: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2809,4643938,"TERMINAL",0,0,"2025-07-01 00:01:11.258933: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2810,4650937,"TERMINAL",0,0,"2025-07-01 00:01:18.270891: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2811,4657182,"TERMINAL",0,0,"2025-07-01 00:01:24.514669: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2812,4662623,"TERMINAL",0,0,"2025-07-01 00:01:29.947038: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2813,4666366,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +2814,4677478,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 138, in \r\n gt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\r\nAttributeError: 'NoneType' object has no attribute 'shape'\r\n",,terminal_output +2815,4678662,"TERMINAL",0,0,"]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +2816,4684316,"sample.py",0,0,"",python,tab +2817,4684317,"sample.py",5054,0,"",python,selection_mouse +2818,4685184,"sample.py",4384,0,"",python,selection_mouse +2819,4687063,"sample.py",4383,0,"",python,selection_command +2820,4687153,"sample.py",4327,0,"",python,selection_command +2821,4687334,"sample.py",4271,0,"",python,selection_command +2822,4687497,"sample.py",4260,0,"",python,selection_command +2823,4687938,"sample.py",4256,11,"",python,content +2824,4688015,"sample.py",4200,0,"",python,selection_command +2825,4688271,"sample.py",4166,0,"",python,selection_command +2826,4688574,"sample.py",4200,0,"",python,selection_command +2827,4692415,"sample.py",4256,0,"",python,selection_command +2828,4692665,"sample.py",4312,0,"",python,selection_command +2829,4692882,"sample.py",4256,0,"",python,selection_command +2830,4694087,"sample.py",4311,0,"\n",python,content +2831,4705158,"sample.py",4312,0,"vid = jnp.zeros_like(video_batch)\n\n",python,content +2832,4705162,"sample.py",4346,1,"",python,content +2833,4708589,"sample.py",4346,1,"",python,content +2834,4709768,"sample.py",4283,0,"",python,selection_mouse +2835,4709948,"sample.py",4280,3,"rng",python,selection_mouse +2836,4710484,"sample.py",4338,0,"",python,selection_mouse +2837,4710618,"sample.py",4333,11,"video_batch",python,selection_mouse +2838,4714232,"TERMINAL",0,0,"sh scripts_horeka/overfit_sample_tiny/sample.sh ",,terminal_output +2839,4715646,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +2840,4715736,"TERMINAL",0,0,"Sampling from checkpoint: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/0000/genie_1751301068_2000/\r\n",,terminal_output +2841,4718567,"TERMINAL",0,0,"2025-07-01 00:02:25.873841: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2842,4722738,"TERMINAL",0,0,"2025-07-01 00:02:29.945652: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2843,4731057,"TERMINAL",0,0,"2025-07-01 00:02:38.360090: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2844,4738249,"TERMINAL",0,0,"2025-07-01 00:02:45.541056: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2845,4744715,"TERMINAL",0,0,"2025-07-01 00:02:52.016652: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2846,4745146,"sample.py",0,0,"",python,tab +2847,4745146,"sample.py",3433,0,"",python,selection_mouse +2848,4745183,"sample.py",3432,0,"",python,selection_command +2849,4745612,"sample.py",3474,0,"",python,selection_mouse +2850,4746216,"sample.py",3476,0,"",python,selection_mouse +2851,4749176,"sample.py",3475,0,"",python,selection_command +2852,4749575,"sample.py",3476,0,"",python,selection_command +2853,4749761,"sample.py",3475,0,"",python,selection_command +2854,4749913,"sample.py",3476,0,"",python,selection_command +2855,4749973,"sample.py",3475,0,"",python,selection_command +2856,4750119,"sample.py",3476,0,"",python,selection_command +2857,4750209,"sample.py",3475,0,"",python,selection_command +2858,4750366,"sample.py",3476,0,"",python,selection_command +2859,4750393,"TERMINAL",0,0,"2025-07-01 00:02:57.664556: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +2860,4750408,"sample.py",3475,0,"",python,selection_command +2861,4750532,"sample.py",3476,0,"",python,selection_command +2862,4750639,"sample.py",3475,0,"",python,selection_command +2863,4750765,"sample.py",3476,0,"",python,selection_command +2864,4750863,"sample.py",3475,0,"",python,selection_command +2865,4750971,"sample.py",3476,0,"",python,selection_command +2866,4751060,"sample.py",3475,0,"",python,selection_command +2867,4751192,"sample.py",3476,0,"",python,selection_command +2868,4751256,"sample.py",3475,0,"",python,selection_command +2869,4751362,"sample.py",3476,0,"",python,selection_command +2870,4751465,"sample.py",3475,0,"",python,selection_command +2871,4751550,"sample.py",3476,0,"",python,selection_command +2872,4751650,"sample.py",3475,0,"",python,selection_command +2873,4751748,"sample.py",3476,0,"",python,selection_command +2874,4751885,"sample.py",3475,0,"",python,selection_command +2875,4751967,"sample.py",3476,0,"",python,selection_command +2876,4752509,"sample.py",3475,0,"",python,selection_command +2877,4753084,"sample.py",3475,4,"",python,content +2878,4753846,"sample.py",3475,0,"F",python,content +2879,4753847,"sample.py",3476,0,"",python,selection_keyboard +2880,4754085,"sample.py",3476,0,"a",python,content +2881,4754086,"sample.py",3477,0,"",python,selection_keyboard +2882,4754213,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +2883,4754269,"sample.py",3477,0,"l",python,content +2884,4754270,"sample.py",3478,0,"",python,selection_keyboard +2885,4754388,"sample.py",3478,0,"s",python,content +2886,4754389,"sample.py",3479,0,"",python,selection_keyboard +2887,4754553,"sample.py",3479,0,"e",python,content +2888,4754554,"sample.py",3480,0,"",python,selection_keyboard +2889,4754821,"sample.py",3479,0,"",python,selection_command +2890,4755617,"sample.py",3463,0,"",python,selection_mouse +2891,4755759,"sample.py",3462,1,"r",python,selection_mouse +2892,4755759,"sample.py",3459,4,"(par",python,selection_mouse +2893,4755760,"sample.py",3458,5,"y(par",python,selection_mouse +2894,4755760,"sample.py",3457,6,"ly(par",python,selection_mouse +2895,4755892,"sample.py",3456,7,"ply(par",python,selection_mouse +2896,4755892,"sample.py",3455,8,"pply(par",python,selection_mouse +2897,4756172,"sample.py",3456,7,"ply(par",python,selection_mouse +2898,4756220,"sample.py",3457,6,"ly(par",python,selection_mouse +2899,4756220,"sample.py",3458,5,"y(par",python,selection_mouse +2900,4758984,"sample.py",3456,0,"",python,selection_mouse +2901,4765391,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py(140)()\r\n-> recon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\r\n(Pdb) ",,terminal_output +2902,4769862,"TERMINAL",0,0,"l",,terminal_output +2903,4770003,"TERMINAL",0,0,"\r\n135 \t# vid = _oneshot_sample(rng, video_batch, action_batch)\r\n136 \tvid = jnp.zeros_like(video_batch)\r\n137 \t# vid = _oneshot_sample(rng, video_batch, action_batch)\r\n138 \tgt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\r\n139 \tbreakpoint()\r\n140 ->\trecon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\r\n141 \tssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\r\n142 \tprint(f""SSIM: {ssim}"")\r\n143 \t\r\n144 \t# --- Construct video ---\r\n145 \tfirst_true = (video_batch[0:1] * 255).astype(np.uint8)\r\n(Pdb) ",,terminal_output +2904,4771167,"TERMINAL",0,0,"[?25lvi[?25h",,terminal_output +2905,4771264,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +2906,4771760,"TERMINAL",0,0,"[?25ld[?25h[?25l.[?25h",,terminal_output +2907,4771949,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +2908,4772079,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +2909,4772154,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +2910,4772416,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +2911,4772437,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +2912,4772549,"TERMINAL",0,0,"\r\n(1, 16, 90, 160, 3)\r\n(Pdb) ",,terminal_output +2913,4774492,"TERMINAL",0,0,"c",,terminal_output +2914,4774588,"TERMINAL",0,0,"\r\n",,terminal_output +2915,4776166,"TERMINAL",0,0,"SSIM: 0.0017220183508470654\r\n",,terminal_output +2916,4777987,"TERMINAL",0,0,"]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +2917,4785766,"sample.py",0,0,"",python,tab +2918,4793204,"sample.py",3453,0,"",python,selection_mouse +2919,4796347,"genie.py",0,0,"",python,tab +2920,4808719,"genie.py",3185,0,"",python,selection_mouse +2921,4808769,"genie.py",3184,0,"",python,selection_command +2922,4809477,"genie.py",2902,0,"",python,selection_mouse +2923,4810078,"genie.py",2907,0,"",python,selection_mouse +2924,4810839,"genie.py",2905,0,"",python,selection_mouse +2925,4810977,"genie.py",2901,6,"argmax",python,selection_mouse 
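The pdb session above lists the evaluation block of sample.py (lines 135-142 in the listing) and confirms vid.shape == (1, 16, 90, 160, 3); with the all-zeros stand-in the SSIM drops to ~0.0017, versus ~0.178 for the sampled video earlier. A minimal sketch of that check, reproduced from the listing, assuming pix refers to dm_pix and video_batch is a float array in [0, 1] of shape (batch, frames, height, width, channels):

import jax.numpy as jnp
import dm_pix as pix  # assumption: the `pix` name in the pdb listing is dm_pix

# All-zeros prediction as a sanity baseline; this replaced `vid = None`,
# which had raised AttributeError on `vid.shape` in the earlier traceback.
vid = jnp.zeros_like(video_batch)

gt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])
recon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])
ssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()
print(f"SSIM: {ssim}")

Note the frame slice is applied after the reshape has flattened batch and time, exactly as listed; the recorded run prints SSIM: 0.0017220183508470654.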
+2926,4811533,"genie.py",2911,0,"",python,selection_mouse +2927,4811696,"genie.py",2908,7,"outputs",python,selection_mouse +2928,4812435,"genie.py",2930,0,"",python,selection_mouse +2929,4812571,"genie.py",2929,3,"""],",python,selection_mouse +2930,4812750,"genie.py",2929,3,"""],",python,selection_mouse +2931,4812751,"genie.py",2917,15,"token_logits""],",python,selection_mouse +2932,4813249,"genie.py",2919,0,"",python,selection_mouse +2933,4813250,"genie.py",2917,12,"token_logits",python,selection_mouse +2934,4814114,"genie.py",2925,0,"",python,selection_mouse +2935,4815894,"genie.py",2917,12,"token_logits",python,selection_mouse +2936,4817041,"genie.py",2919,0,"",python,selection_mouse +2937,4817042,"genie.py",2917,12,"token_logits",python,selection_mouse +2938,4817732,"genie.py",2919,0,"",python,selection_mouse +2939,4818446,"genie.py",2917,12,"token_logits",python,selection_mouse +2940,4819467,"genie.py",2919,0,"",python,selection_mouse +2941,4819978,"genie.py",2917,12,"token_logits",python,selection_mouse +2942,4821522,"genie.py",3016,0,"",python,selection_mouse +2943,4821611,"genie.py",3016,1," ",python,selection_mouse +2944,4821802,"genie.py",3016,6," batch",python,selection_mouse +2945,4821874,"genie.py",3016,8," batch[""",python,selection_mouse +2946,4821874,"genie.py",3016,14," batch[""videos",python,selection_mouse +2947,4821970,"genie.py",3016,15," batch[""videos""",python,selection_mouse +2948,4822022,"genie.py",3016,16," batch[""videos""]",python,selection_mouse +2949,4822068,"genie.py",3016,17," batch[""videos""].",python,selection_mouse +2950,4822093,"genie.py",3016,22," batch[""videos""].shape",python,selection_mouse +2951,4822229,"genie.py",3016,24," batch[""videos""].shape[2",python,selection_mouse +2952,4822230,"genie.py",3016,25," batch[""videos""].shape[2:",python,selection_mouse +2953,4822230,"genie.py",3016,26," batch[""videos""].shape[2:4",python,selection_mouse +2954,4822230,"genie.py",3016,27," batch[""videos""].shape[2:4]",python,selection_mouse +2955,4823081,"genie.py",3043,0,"",python,selection_mouse +2956,4823102,"genie.py",3042,0,"",python,selection_command +2957,4823227,"genie.py",3042,1,"]",python,selection_mouse +2958,4823269,"genie.py",3043,0,"",python,selection_command +2959,4823269,"genie.py",3043,10,"\n )",python,selection_mouse +2960,4823318,"genie.py",3028,15,"os""].shape[2:4]",python,selection_mouse +2961,4823352,"genie.py",3027,16,"eos""].shape[2:4]",python,selection_mouse +2962,4823352,"genie.py",3025,18,"ideos""].shape[2:4]",python,selection_mouse +2963,4823361,"genie.py",3023,20,"""videos""].shape[2:4]",python,selection_mouse +2964,4823394,"genie.py",3022,21,"[""videos""].shape[2:4]",python,selection_mouse +2965,4823395,"genie.py",3020,23,"ch[""videos""].shape[2:4]",python,selection_mouse +2966,4823442,"genie.py",3019,24,"tch[""videos""].shape[2:4]",python,selection_mouse +2967,4823443,"genie.py",3017,26,"batch[""videos""].shape[2:4]",python,selection_mouse +2968,4823464,"genie.py",3015,28,", batch[""videos""].shape[2:4]",python,selection_mouse +2969,4823570,"genie.py",3013,30,"es, batch[""videos""].shape[2:4]",python,selection_mouse +2970,4823570,"genie.py",3012,31,"ces, batch[""videos""].shape[2:4]",python,selection_mouse +2971,4823571,"genie.py",3010,33,"dices, batch[""videos""].shape[2:4]",python,selection_mouse +2972,4823595,"genie.py",3008,35,"indices, batch[""videos""].shape[2:4]",python,selection_mouse +2973,4823596,"genie.py",2957,86,"[""recon""] = self.tokenizer.decode(\n mle_indices, 
batch[""videos""].shape[2:4]",python,selection_mouse +2974,4823636,"genie.py",2956,87,"s[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]",python,selection_mouse +2975,4823658,"genie.py",2955,88,"ts[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]",python,selection_mouse +2976,4823694,"genie.py",2954,89,"uts[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]",python,selection_mouse +2977,4823697,"genie.py",2953,90,"puts[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]",python,selection_mouse +2978,4823739,"genie.py",2952,91,"tputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]",python,selection_mouse +2979,4823845,"genie.py",2951,92,"utputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]",python,selection_mouse +2980,4823884,"genie.py",2950,93,"outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]",python,selection_mouse +2981,4824345,"genie.py",2950,0,"",python,selection_mouse +2982,4824346,"genie.py",2950,7,"outputs",python,selection_mouse +2983,4824612,"genie.py",2950,53,"outputs[""recon""] = self.tokenizer.decode(\n ",python,selection_mouse +2984,4824613,"genie.py",2950,65,"outputs[""recon""] = self.tokenizer.decode(\n mle_indices",python,selection_mouse +2985,4824613,"genie.py",2950,103,"outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )",python,selection_mouse +2986,4825203,"genie.py",3053,0,"",python,selection_mouse +2987,4825211,"genie.py",3052,0,"",python,selection_command +2988,4825395,"genie.py",3053,0,"",python,selection_mouse +2989,4825417,"genie.py",3052,0,"",python,selection_command +2990,4825617,"genie.py",3052,1,")",python,selection_mouse +2991,4825617,"genie.py",3053,0,"",python,selection_command +2992,4825661,"genie.py",2999,54," mle_indices, batch[""videos""].shape[2:4]\n )",python,selection_mouse +2993,4825662,"genie.py",2998,55," mle_indices, batch[""videos""].shape[2:4]\n )",python,selection_mouse +2994,4825662,"genie.py",2997,56," mle_indices, batch[""videos""].shape[2:4]\n )",python,selection_mouse +2995,4825712,"genie.py",2995,58," mle_indices, batch[""videos""].shape[2:4]\n )",python,selection_mouse +2996,4825765,"genie.py",2994,59," mle_indices, batch[""videos""].shape[2:4]\n )",python,selection_mouse +2997,4825927,"genie.py",2943,110," outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )",python,selection_mouse +2998,4826643,"genie.py",2942,111," outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )",python,selection_mouse +2999,4826698,"genie.py",2992,61," mle_indices, batch[""videos""].shape[2:4]\n )",python,selection_mouse +3000,4827085,"genie.py",2942,111," outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )",python,selection_mouse +3001,4852833,"Untitled-1",0,0,"",r,tab +3002,4854419,"Untitled-1",0,1863,"",r,content +3003,4854427,"genie.py",0,0,"",python,tab +3004,4856413,"train_dynamics.py",0,0,"",python,tab +3005,4860103,"genie.py",0,0,"",python,tab +3006,4862650,"TERMINAL",0,0,"bash",,terminal_focus +3007,4862651,"TERMINAL",0,0,"srun",,terminal_focus +3008,4862987,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +3009,4864164,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"",shellscript,tab +3010,4864811,"genie.py",0,0,"",python,tab 
+3011,4866245,"genie.py",2604,0,"",python,selection_mouse +3012,4868941,"sample.py",0,0,"",python,tab +3013,4873925,"sample.py",3153,0,"",python,selection_mouse +3014,4876204,"genie.py",0,0,"",python,tab +3015,4878182,"genie.py",2184,0,"",python,selection_mouse +3016,4878344,"genie.py",2178,17,"tokenizer_outputs",python,selection_mouse +3017,4880112,"genie.py",2216,0,"",python,selection_mouse +3018,4880271,"genie.py",2213,9,"vq_encode",python,selection_mouse +3019,4886863,"genie.py",2814,0,"",python,selection_mouse +3020,4889875,"genie.py",1813,0,"",python,selection_mouse +3021,4890170,"models/dynamics.py",0,0,"",python,tab +3022,4892456,"models/dynamics.py",615,0,"",python,selection_mouse +3023,4892579,"models/dynamics.py",610,10,"mask_token",python,selection_mouse +3024,4898611,"models/dynamics.py",1587,0,"",python,selection_mouse +3025,4899137,"models/dynamics.py",1591,0,"",python,selection_mouse +3026,4902240,"models/dynamics.py",1588,0,"",python,selection_mouse +3027,4903231,"models/dynamics.py",1593,0,"",python,selection_mouse +3028,4903394,"models/dynamics.py",1592,9,"vid_embed",python,selection_mouse +3029,4903557,"models/dynamics.py",1561,42," logits = self.dynamics(vid_embed)\n",python,selection_mouse +3030,4904523,"models/dynamics.py",1593,0,"",python,selection_mouse +3031,4904524,"models/dynamics.py",1592,9,"vid_embed",python,selection_mouse +3032,4904691,"models/dynamics.py",1561,42," logits = self.dynamics(vid_embed)\n",python,selection_mouse +3033,4905519,"models/dynamics.py",1585,0,"",python,selection_mouse +3034,4905519,"models/dynamics.py",1583,8,"dynamics",python,selection_mouse +3035,4905657,"models/dynamics.py",1561,42," logits = self.dynamics(vid_embed)\n",python,selection_mouse +3036,4906454,"models/dynamics.py",1585,0,"",python,selection_mouse +3037,4906549,"models/dynamics.py",1583,8,"dynamics",python,selection_mouse +3038,4906737,"models/dynamics.py",1561,42," logits = self.dynamics(vid_embed)\n",python,selection_mouse +3039,4907487,"models/dynamics.py",1585,0,"",python,selection_mouse +3040,4907487,"models/dynamics.py",1583,8,"dynamics",python,selection_mouse +3041,4907689,"models/dynamics.py",1561,42," logits = self.dynamics(vid_embed)\n",python,selection_mouse +3042,4908415,"models/dynamics.py",1585,0,"",python,selection_mouse +3043,4918477,"genie.py",0,0,"",python,tab +3044,4919754,"genie.py",3019,0,"",python,selection_mouse +3045,4920319,"genie.py",2896,0,"",python,selection_mouse +3046,4920975,"genie.py",2907,0,"",python,selection_mouse +3047,4922735,"genie.py",2912,0,"",python,selection_mouse +3048,4922913,"genie.py",2908,7,"outputs",python,selection_mouse +3049,4927779,"genie.py",3209,0,"",python,selection_mouse +3050,4928548,"genie.py",2278,0,"",python,selection_mouse +3051,4928574,"genie.py",2277,0,"",python,selection_command +3052,4929330,"genie.py",2311,0,"",python,selection_mouse +3053,4929945,"genie.py",2304,0,"",python,selection_mouse +3054,4930510,"genie.py",2200,0,"",python,selection_mouse +3055,4931036,"genie.py",2115,0,"",python,selection_mouse +3056,4931887,"genie.py",2169,0,"\n ",python,content +3057,4932267,"genie.py",2178,0,"r",python,content +3058,4932268,"genie.py",2179,0,"",python,selection_keyboard +3059,4932426,"genie.py",2179,0,"e",python,content +3060,4932427,"genie.py",2180,0,"",python,selection_keyboard +3061,4932538,"genie.py",2180,0,"t",python,content +3062,4932539,"genie.py",2181,0,"",python,selection_keyboard +3063,4932704,"genie.py",2181,0,"u",python,content +3064,4932704,"genie.py",2182,0,"",python,selection_keyboard 
+3065,4932802,"genie.py",2182,0,"r",python,content +3066,4932802,"genie.py",2183,0,"",python,selection_keyboard +3067,4932902,"genie.py",2183,0,"n",python,content +3068,4932903,"genie.py",2184,0,"",python,selection_keyboard +3069,4933009,"genie.py",2184,0," ",python,content +3070,4933010,"genie.py",2185,0,"",python,selection_keyboard +3071,4934982,"genie.py",2185,0,"d",python,content +3072,4934983,"genie.py",2186,0,"",python,selection_keyboard +3073,4935249,"genie.py",2186,0,"y",python,content +3074,4935250,"genie.py",2187,0,"",python,selection_keyboard +3075,4935360,"genie.py",2187,0,"n",python,content +3076,4935361,"genie.py",2188,0,"",python,selection_keyboard +3077,4935489,"genie.py",2188,0,"a",python,content +3078,4935490,"genie.py",2189,0,"",python,selection_keyboard +3079,4935637,"genie.py",2189,0,"m",python,content +3080,4935639,"genie.py",2190,0,"",python,selection_keyboard +3081,4935712,"genie.py",2190,0,"i",python,content +3082,4935713,"genie.py",2191,0,"",python,selection_keyboard +3083,4935847,"genie.py",2191,0,"c",python,content +3084,4935848,"genie.py",2192,0,"",python,selection_keyboard +3085,4936157,"genie.py",2192,0,"s",python,content +3086,4936158,"genie.py",2193,0,"",python,selection_keyboard +3087,4938562,"genie.py",2193,0,".",python,content +3088,4938563,"genie.py",2194,0,"",python,selection_keyboard +3089,4938870,"genie.py",2194,0,"m",python,content +3090,4938871,"genie.py",2195,0,"",python,selection_keyboard +3091,4939111,"genie.py",2195,0,"a",python,content +3092,4939112,"genie.py",2196,0,"",python,selection_keyboard +3093,4940111,"genie.py",2196,0,"s",python,content +3094,4940111,"genie.py",2197,0,"",python,selection_keyboard +3095,4940184,"genie.py",2197,0,"k",python,content +3096,4940185,"genie.py",2198,0,"",python,selection_keyboard +3097,4941080,"genie.py",2198,0,"-",python,content +3098,4941081,"genie.py",2199,0,"",python,selection_keyboard +3099,4941834,"genie.py",2198,1,"",python,content +3100,4942098,"genie.py",2198,0,"_",python,content +3101,4942098,"genie.py",2199,0,"",python,selection_keyboard +3102,4942311,"genie.py",2199,0,"g",python,content +3103,4942312,"genie.py",2200,0,"",python,selection_keyboard +3104,4942408,"genie.py",2200,0,"o",python,content +3105,4942409,"genie.py",2201,0,"",python,selection_keyboard +3106,4942509,"genie.py",2201,0,"k",python,content +3107,4942510,"genie.py",2202,0,"",python,selection_keyboard +3108,4943094,"genie.py",2201,1,"",python,content +3109,4943220,"genie.py",2200,1,"",python,content +3110,4943370,"genie.py",2199,1,"",python,content +3111,4943844,"genie.py",2199,0,"t",python,content +3112,4943844,"genie.py",2200,0,"",python,selection_keyboard +3113,4943964,"genie.py",2200,0,"o",python,content +3114,4943965,"genie.py",2201,0,"",python,selection_keyboard +3115,4943965,"genie.py",2201,0,"k",python,content +3116,4943966,"genie.py",2202,0,"",python,selection_keyboard +3117,4944098,"genie.py",2202,0,"e",python,content +3118,4944099,"genie.py",2203,0,"",python,selection_keyboard +3119,4944192,"genie.py",2203,0,"n",python,content +3120,4944193,"genie.py",2204,0,"",python,selection_keyboard +3121,4945338,"genie.py",2185,0,"",python,selection_mouse +3122,4945480,"genie.py",2185,0,"s",python,content +3123,4945481,"genie.py",2186,0,"",python,selection_keyboard +3124,4945735,"genie.py",2186,0,"e",python,content +3125,4945736,"genie.py",2187,0,"",python,selection_keyboard +3126,4946117,"genie.py",2187,0,"f",python,content +3127,4946118,"genie.py",2188,0,"",python,selection_keyboard 
+3128,4946605,"genie.py",2187,1,"",python,content +3129,4946806,"genie.py",2187,0,"l",python,content +3130,4946807,"genie.py",2188,0,"",python,selection_keyboard +3131,4946827,"genie.py",2188,0,"f",python,content +3132,4946828,"genie.py",2189,0,"",python,selection_keyboard +3133,4947024,"genie.py",2189,0,"-",python,content +3134,4947025,"genie.py",2190,0,"",python,selection_keyboard +3135,4947725,"genie.py",2189,1,"",python,content +3136,4948812,"genie.py",2189,0,".",python,content +3137,4948813,"genie.py",2190,0,"",python,selection_keyboard +3138,4949700,"genie.py",2209,0,"",python,selection_mouse +3139,4951633,"genie.py",3060,0,"",python,selection_mouse +3140,4951815,"genie.py",3057,5,"batch",python,selection_mouse +3141,4952024,"genie.py",3057,36,"batch[""videos""].shape[2:4]\n )",python,selection_mouse +3142,4952051,"genie.py",3044,18,"mle_indices, batch",python,selection_mouse +3143,4952372,"genie.py",3043,19," mle_indices, batch",python,selection_mouse +3144,4952475,"genie.py",2990,72,"outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch",python,selection_mouse +3145,4952909,"genie.py",2993,0,"",python,selection_mouse +3146,4953344,"genie.py",3093,0,"",python,selection_mouse +3147,4954068,"genie.py",3052,0,"",python,selection_mouse +3148,4954245,"genie.py",3044,11,"mle_indices",python,selection_mouse +3149,4955103,"genie.py",3052,0,"",python,selection_mouse +3150,4955104,"genie.py",3044,11,"mle_indices",python,selection_mouse +3151,4955792,"genie.py",3052,0,"",python,selection_mouse +3152,4956323,"genie.py",3053,0,"",python,selection_mouse +3153,4956501,"genie.py",3044,11,"mle_indices",python,selection_mouse +3154,4958045,"genie.py",3053,0,"",python,selection_mouse +3155,4958706,"genie.py",3044,11,"mle_indices",python,selection_mouse +3156,4959814,"genie.py",3053,0,"",python,selection_mouse +3157,4960233,"genie.py",3044,11,"mle_indices",python,selection_mouse +3158,4961215,"genie.py",3053,0,"",python,selection_mouse +3159,4961835,"genie.py",3044,11,"mle_indices",python,selection_mouse +3160,4963437,"genie.py",2954,0,"",python,selection_mouse +3161,4963579,"genie.py",2948,7,"outputs",python,selection_mouse +3162,4963821,"genie.py",2947,8,"(outputs",python,selection_mouse +3163,4963821,"genie.py",2941,14,"argmax(outputs",python,selection_mouse +3164,4963901,"genie.py",2940,15,".argmax(outputs",python,selection_mouse +3165,4963914,"genie.py",2937,18,"jnp.argmax(outputs",python,selection_mouse +3166,4964462,"genie.py",2938,0,"",python,selection_mouse +3167,4964463,"genie.py",2937,3,"jnp",python,selection_mouse +3168,4964736,"genie.py",2937,10,"jnp.argmax",python,selection_mouse +3169,4964825,"genie.py",2937,11,"jnp.argmax(",python,selection_mouse +3170,4964826,"genie.py",2937,18,"jnp.argmax(outputs",python,selection_mouse +3171,4964952,"genie.py",2937,19,"jnp.argmax(outputs[",python,selection_mouse +3172,4965013,"genie.py",2937,20,"jnp.argmax(outputs[""",python,selection_mouse +3173,4965014,"genie.py",2937,32,"jnp.argmax(outputs[""token_logits",python,selection_mouse +3174,4965268,"genie.py",2937,33,"jnp.argmax(outputs[""token_logits""",python,selection_mouse +3175,4965321,"genie.py",2937,34,"jnp.argmax(outputs[""token_logits""]",python,selection_mouse +3176,4965322,"genie.py",2937,35,"jnp.argmax(outputs[""token_logits""],",python,selection_mouse +3177,4965334,"genie.py",2937,36,"jnp.argmax(outputs[""token_logits""], ",python,selection_mouse +3178,4965378,"genie.py",2937,40,"jnp.argmax(outputs[""token_logits""], axis",python,selection_mouse 
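The keystrokes above insert a one-line accessor into genie.py, "return self.dynamics.mask_token", which would give sample.py a way to read the mask token without re-initializing a dummy DynamicsMaskGIT; the records near the end of this section appear to delete it again. A sketch of the idea, with a hypothetical method name and class skeleton (only the return line is from the recording):

import flax.linen as nn

class Genie(nn.Module):
    # ... tokenizer / lam / dynamics submodule fields elided ...

    def get_mask_token(self):
        # Hypothetical accessor: surfaces the dynamics model's learned
        # mask embedding to callers such as sample.py.
        return self.dynamics.mask_token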
+3179,4965493,"genie.py",2937,41,"jnp.argmax(outputs[""token_logits""], axis=",python,selection_mouse +3180,4965531,"genie.py",2937,42,"jnp.argmax(outputs[""token_logits""], axis=-",python,selection_mouse +3181,4965570,"genie.py",2937,43,"jnp.argmax(outputs[""token_logits""], axis=-1",python,selection_mouse +3182,4965627,"genie.py",2937,44,"jnp.argmax(outputs[""token_logits""], axis=-1)",python,selection_mouse +3183,4965911,"genie.py",2981,0,"",python,selection_mouse +3184,4966437,"genie.py",2979,2,"1)",python,selection_mouse +3185,4966437,"genie.py",2981,50,"\n outputs[""recon""] = self.tokenizer.decode(",python,selection_mouse +3186,4966556,"genie.py",2981,49,"\n outputs[""recon""] = self.tokenizer.decode",python,selection_mouse +3187,4966618,"genie.py",2981,43,"\n outputs[""recon""] = self.tokenizer.",python,selection_mouse +3188,4966618,"genie.py",2981,42,"\n outputs[""recon""] = self.tokenizer",python,selection_mouse +3189,4966690,"genie.py",2948,33,"outputs[""token_logits""], axis=-1)",python,selection_mouse +3190,4966772,"genie.py",2947,34,"(outputs[""token_logits""], axis=-1)",python,selection_mouse +3191,4966773,"genie.py",2941,40,"argmax(outputs[""token_logits""], axis=-1)",python,selection_mouse +3192,4966939,"genie.py",2940,41,".argmax(outputs[""token_logits""], axis=-1)",python,selection_mouse +3193,4967010,"genie.py",2937,44,"jnp.argmax(outputs[""token_logits""], axis=-1)",python,selection_mouse +3194,4967514,"genie.py",2937,0,"",python,selection_mouse +3195,4967515,"genie.py",2937,3,"jnp",python,selection_mouse +3196,4967732,"genie.py",2937,4,"jnp.",python,selection_mouse +3197,4967776,"genie.py",2937,76,"jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self",python,selection_mouse +3198,4967777,"genie.py",2937,86,"jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer",python,selection_mouse +3199,4967838,"genie.py",2937,93,"jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode",python,selection_mouse +3200,4967881,"genie.py",2937,94,"jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode(",python,selection_mouse +3201,4967901,"genie.py",2937,32,"jnp.argmax(outputs[""token_logits",python,selection_mouse +3202,4967939,"genie.py",2937,33,"jnp.argmax(outputs[""token_logits""",python,selection_mouse +3203,4967940,"genie.py",2937,35,"jnp.argmax(outputs[""token_logits""],",python,selection_mouse +3204,4967960,"genie.py",2937,36,"jnp.argmax(outputs[""token_logits""], ",python,selection_mouse +3205,4968014,"genie.py",2937,40,"jnp.argmax(outputs[""token_logits""], axis",python,selection_mouse +3206,4968016,"genie.py",2937,41,"jnp.argmax(outputs[""token_logits""], axis=",python,selection_mouse +3207,4968028,"genie.py",2937,42,"jnp.argmax(outputs[""token_logits""], axis=-",python,selection_mouse +3208,4968067,"genie.py",2937,43,"jnp.argmax(outputs[""token_logits""], axis=-1",python,selection_mouse +3209,4968068,"genie.py",2937,44,"jnp.argmax(outputs[""token_logits""], axis=-1)",python,selection_mouse +3210,4968400,"genie.py",2981,0,"",python,selection_mouse +3211,4968806,"genie.py",2980,1,")",python,selection_mouse +3212,4968806,"genie.py",2973,8,"axis=-1)",python,selection_mouse +3213,4968807,"genie.py",2969,12,"""], axis=-1)",python,selection_mouse +3214,4968807,"genie.py",2957,24,"token_logits""], axis=-1)",python,selection_mouse +3215,4968900,"genie.py",2948,33,"outputs[""token_logits""], axis=-1)",python,selection_mouse +3216,4969001,"genie.py",2981,42,"\n 
outputs[""recon""] = self.tokenizer",python,selection_mouse +3217,4969001,"genie.py",2981,32,"\n outputs[""recon""] = self",python,selection_mouse +3218,4969072,"genie.py",2981,28,"\n outputs[""recon""] = ",python,selection_mouse +3219,4969091,"genie.py",2981,27,"\n outputs[""recon""] =",python,selection_mouse +3220,4969108,"genie.py",2981,26,"\n outputs[""recon""] ",python,selection_mouse +3221,4969162,"genie.py",2981,25,"\n outputs[""recon""]",python,selection_mouse +3222,4969281,"genie.py",2937,44,"jnp.argmax(outputs[""token_logits""], axis=-1)",python,selection_mouse +3223,4969926,"genie.py",2939,0,"",python,selection_mouse +3224,4969927,"genie.py",2937,3,"jnp",python,selection_mouse +3225,4970148,"genie.py",2937,4,"jnp.",python,selection_mouse +3226,4970149,"genie.py",2937,76,"jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self",python,selection_mouse +3227,4970149,"genie.py",2937,86,"jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer",python,selection_mouse +3228,4970181,"genie.py",2937,87,"jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.",python,selection_mouse +3229,4970202,"genie.py",2937,93,"jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode",python,selection_mouse +3230,4970237,"genie.py",2937,94,"jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode(",python,selection_mouse +3231,4970338,"genie.py",2937,40,"jnp.argmax(outputs[""token_logits""], axis",python,selection_mouse +3232,4970339,"genie.py",2937,43,"jnp.argmax(outputs[""token_logits""], axis=-1",python,selection_mouse +3233,4970368,"genie.py",2937,44,"jnp.argmax(outputs[""token_logits""], axis=-1)",python,selection_mouse +3234,4970621,"genie.py",2981,0,"",python,selection_mouse +3235,4970936,"genie.py",2981,50,"\n outputs[""recon""] = self.tokenizer.decode(",python,selection_mouse +3236,4971025,"genie.py",2981,49,"\n outputs[""recon""] = self.tokenizer.decode",python,selection_mouse +3237,4971046,"genie.py",2981,43,"\n outputs[""recon""] = self.tokenizer.",python,selection_mouse +3238,4971072,"genie.py",2981,42,"\n outputs[""recon""] = self.tokenizer",python,selection_mouse +3239,4971111,"genie.py",2948,33,"outputs[""token_logits""], axis=-1)",python,selection_mouse +3240,4971127,"genie.py",2941,40,"argmax(outputs[""token_logits""], axis=-1)",python,selection_mouse +3241,4971287,"genie.py",2940,41,".argmax(outputs[""token_logits""], axis=-1)",python,selection_mouse +3242,4971325,"genie.py",2937,44,"jnp.argmax(outputs[""token_logits""], axis=-1)",python,selection_mouse +3243,4971817,"genie.py",2937,0,"",python,selection_mouse +3244,4971818,"genie.py",2937,3,"jnp",python,selection_mouse +3245,4972072,"genie.py",2937,10,"jnp.argmax",python,selection_mouse +3246,4972072,"genie.py",2937,18,"jnp.argmax(outputs",python,selection_mouse +3247,4972072,"genie.py",2937,32,"jnp.argmax(outputs[""token_logits",python,selection_mouse +3248,4972073,"genie.py",2937,94,"jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode(",python,selection_mouse +3249,4972410,"genie.py",3031,0,"",python,selection_mouse +3250,4973149,"genie.py",2981,50,"\n outputs[""recon""] = self.tokenizer.decode(",python,selection_mouse +3251,4973303,"genie.py",2981,0,"",python,selection_mouse +3252,4973859,"genie.py",2915,67," mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\n",python,selection_mouse 
+3253,4974391,"genie.py",2981,0,"",python,selection_mouse +3254,4974903,"genie.py",2981,50,"\n outputs[""recon""] = self.tokenizer.decode(",python,selection_mouse +3255,4974904,"genie.py",2980,1,")",python,selection_mouse +3256,4974904,"genie.py",2973,8,"axis=-1)",python,selection_mouse +3257,4974904,"genie.py",2972,9," axis=-1)",python,selection_mouse +3258,4974904,"genie.py",2981,50,"\n outputs[""recon""] = self.tokenizer.decode(",python,selection_mouse +3259,4974919,"genie.py",2981,49,"\n outputs[""recon""] = self.tokenizer.decode",python,selection_mouse +3260,4974961,"genie.py",2981,42,"\n outputs[""recon""] = self.tokenizer",python,selection_mouse +3261,4975034,"genie.py",2981,32,"\n outputs[""recon""] = self",python,selection_mouse +3262,4975083,"genie.py",2981,28,"\n outputs[""recon""] = ",python,selection_mouse +3263,4975102,"genie.py",2981,27,"\n outputs[""recon""] =",python,selection_mouse +3264,4975173,"genie.py",2940,41,".argmax(outputs[""token_logits""], axis=-1)",python,selection_mouse +3265,4975199,"genie.py",2937,44,"jnp.argmax(outputs[""token_logits""], axis=-1)",python,selection_mouse +3266,4975308,"genie.py",2894,87,"update(dyna_outputs)\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)",python,selection_mouse +3267,4975718,"genie.py",2899,0,"",python,selection_mouse +3268,4982052,"genie.py",2250,0,"",python,selection_mouse +3269,4982755,"genie.py",2201,0,"",python,selection_mouse +3270,4983876,"genie.py",2189,1,"",python,content +3271,4984217,"genie.py",2189,0,"-",python,content +3272,4984421,"genie.py",2187,3,"",python,content +3273,4984601,"genie.py",2187,0,"f",python,content +3274,4984769,"genie.py",2185,3,"",python,content +3275,4984932,"genie.py",2199,5,"",python,content +3276,4985130,"genie.py",2199,0,"gok",python,content +3277,4985350,"genie.py",2198,4,"",python,content +3278,4985599,"genie.py",2198,0,"-",python,content +3279,4985725,"genie.py",2193,6,"",python,content +3280,4985922,"genie.py",2185,8,"",python,content +3281,4986072,"genie.py",2184,1,"",python,content +3282,4986441,"genie.py",2178,6,"",python,content +3283,4986720,"genie.py",2169,9,"",python,content +3284,4988510,"models/dynamics.py",0,0,"",python,tab +3285,4993852,"models/dynamics.py",1507,0,"",python,selection_mouse +3286,4994458,"models/dynamics.py",1594,0,"",python,selection_mouse +3287,4995165,"models/dynamics.py",1601,0,"",python,selection_mouse +3288,4995931,"models/dynamics.py",1595,0,"",python,selection_mouse +3289,4997721,"models/dynamics.py",616,0,"",python,selection_mouse +3290,4998921,"models/dynamics.py",654,0,"",python,selection_mouse +3291,4999072,"models/dynamics.py",648,10,"mask_token",python,selection_mouse +3292,5001842,"models/dynamics.py",1426,0,"",python,selection_mouse +3293,5001882,"models/dynamics.py",1425,0,"",python,selection_command +3294,5002581,"models/dynamics.py",1364,0,"",python,selection_mouse +3295,5002583,"models/dynamics.py",1363,0,"",python,selection_command +3296,5003221,"models/dynamics.py",1388,0,"",python,selection_mouse +3297,5003225,"models/dynamics.py",1387,0,"",python,selection_command +3298,5004686,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"",shellscript,tab +3299,5006087,"sample.py",0,0,"",python,tab +3300,5008345,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"",shellscript,tab +3301,5009374,"scripts_horeka/train_dynamics.sh",0,0,"",shellscript,tab +3302,5012559,"scripts_horeka/train_tokenizer.sh",0,0,"#!/usr/bin/env bash\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource 
.venv/bin/activate\n\ntf_records_dir=$ws_dir/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""0000""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=16 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=100 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,tab +3303,5014129,"models/dynamics.py",0,0,"",python,tab +3304,5014909,"genie.py",0,0,"",python,tab +3305,5035641,"genie.py",7109,0,"",python,selection_mouse +3306,5035811,"genie.py",7106,4,"vmap",python,selection_mouse +3307,5036520,"genie.py",7187,0,"",python,selection_mouse +3308,5036669,"genie.py",7174,14,"mask_update_fn",python,selection_mouse +3309,5037226,"genie.py",7192,0,"",python,selection_mouse +3310,5037373,"genie.py",7189,4,"mask",python,selection_mouse +3311,5037896,"genie.py",7204,0,"",python,selection_mouse +3312,5038103,"genie.py",7195,11,"sorted_idxs",python,selection_mouse +3313,5038692,"genie.py",7208,0,"",python,selection_mouse +3314,5039332,"genie.py",7190,0,"",python,selection_mouse +3315,5039488,"genie.py",7189,4,"mask",python,selection_mouse +3316,5040076,"genie.py",7186,0,"",python,selection_mouse +3317,5040235,"genie.py",7174,14,"mask_update_fn",python,selection_mouse +3318,5041156,"genie.py",7235,0,"",python,selection_mouse +3319,5041289,"genie.py",7235,14,"new_token_idxs",python,selection_mouse +3320,5041612,"genie.py",7235,24,"new_token_idxs, new_mask",python,selection_mouse +3321,5041648,"genie.py",7235,25,"new_token_idxs, new_mask,",python,selection_mouse +3322,5041669,"genie.py",7235,36,"new_token_idxs, new_mask, token_idxs",python,selection_mouse +3323,5041789,"genie.py",7235,38,"new_token_idxs, new_mask, token_idxs, ",python,selection_mouse +3324,5041805,"genie.py",7235,51,"new_token_idxs, new_mask, token_idxs, action_tokens",python,selection_mouse +3325,5042316,"genie.py",7279,0,"",python,selection_mouse +3326,5042317,"genie.py",7273,13,"action_tokens",python,selection_mouse +3327,5042508,"genie.py",7261,25,"token_idxs, action_tokens",python,selection_mouse +3328,5042553,"genie.py",7259,27,", token_idxs, action_tokens",python,selection_mouse +3329,5042553,"genie.py",7251,35,"new_mask, token_idxs, action_tokens",python,selection_mouse +3330,5042576,"genie.py",7249,37,", new_mask, token_idxs, action_tokens",python,selection_mouse +3331,5042577,"genie.py",7235,51,"new_token_idxs, new_mask, token_idxs, action_tokens",python,selection_mouse +3332,5042922,"genie.py",7234,52," new_token_idxs, new_mask, token_idxs, action_tokens",python,selection_mouse +3333,5042951,"genie.py",7233,53,", new_token_idxs, new_mask, token_idxs, action_tokens",python,selection_mouse +3334,5042987,"genie.py",7230,56,"rng, new_token_idxs, new_mask, token_idxs, action_tokens",python,selection_mouse +3335,5043418,"genie.py",7231,0,"",python,selection_mouse +3336,5043419,"genie.py",7230,3,"rng",python,selection_mouse +3337,5043616,"genie.py",7230,19,"rng, new_token_idxs",python,selection_mouse +3338,5043701,"genie.py",7230,29,"rng, new_token_idxs, new_mask",python,selection_mouse +3339,5043775,"genie.py",7230,30,"rng, new_token_idxs, new_mask,",python,selection_mouse +3340,5043775,"genie.py",7230,41,"rng, new_token_idxs, new_mask, token_idxs",python,selection_mouse 
+3341,5044199,"genie.py",7266,0,"",python,selection_mouse +3342,5046887,"genie.py",5507,0,"",python,selection_mouse +3343,5047053,"genie.py",5494,17,"curr_masked_frame",python,selection_mouse +3344,5049176,"genie.py",5596,0,"",python,selection_mouse +3345,5049356,"genie.py",5590,10,"mask_token",python,selection_mouse +3346,5050748,"genie.py",5577,0,"",python,selection_mouse +3347,5050909,"genie.py",5576,4,"self",python,selection_mouse +3348,5051227,"genie.py",5576,13,"self.dynamics",python,selection_mouse +3349,5051331,"genie.py",5576,14,"self.dynamics.",python,selection_mouse +3350,5051332,"genie.py",5576,24,"self.dynamics.mask_token",python,selection_mouse +3351,5051503,"genie.py",5576,25,"self.dynamics.mask_token[",python,selection_mouse +3352,5051503,"genie.py",5576,26,"self.dynamics.mask_token[0",python,selection_mouse +3353,5051538,"genie.py",5576,27,"self.dynamics.mask_token[0]",python,selection_mouse +3354,5051554,"genie.py",5576,28,"self.dynamics.mask_token[0],",python,selection_mouse +3355,5051924,"genie.py",5604,0,"",python,selection_mouse +3356,5052486,"genie.py",5604,30,"\n vid_embed[:, -1],",python,selection_mouse +3357,5052533,"genie.py",5604,27,"\n vid_embed[:, -",python,selection_mouse +3358,5052533,"genie.py",5604,24,"\n vid_embed[:",python,selection_mouse +3359,5052550,"genie.py",5604,22,"\n vid_embed",python,selection_mouse +3360,5052671,"genie.py",5576,28,"self.dynamics.mask_token[0],",python,selection_mouse +3361,5052754,"genie.py",5575,29," self.dynamics.mask_token[0],",python,selection_mouse +3362,5052819,"genie.py",5536,68," jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],",python,selection_mouse +3363,5052868,"genie.py",5537,67,"jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],",python,selection_mouse +3364,5053279,"genie.py",5537,0,"",python,selection_mouse +3365,5053280,"genie.py",5537,3,"jnp",python,selection_mouse +3366,5053586,"genie.py",5537,43,"jnp.expand_dims(mask, -1),\n self",python,selection_mouse +3367,5053587,"genie.py",5537,52,"jnp.expand_dims(mask, -1),\n self.dynamics",python,selection_mouse +3368,5053587,"genie.py",5537,90,"jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[",python,selection_mouse +3369,5053587,"genie.py",5537,92,"jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:,",python,selection_mouse +3370,5053588,"genie.py",5537,93,"jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, ",python,selection_mouse +3371,5053619,"genie.py",5537,107,"jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +3372,5053858,"genie.py",5644,0,"",python,selection_mouse +3373,5054401,"genie.py",5628,16,", -1],\n )",python,selection_mouse +3374,5054401,"genie.py",5626,18,"[:, -1],\n )",python,selection_mouse +3375,5054402,"genie.py",5581,63,"dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +3376,5054402,"genie.py",5537,107,"jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +3377,5054444,"genie.py",5536,108," jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +3378,5054485,"genie.py",5494,150,"curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +3379,5054619,"genie.py",5493,151," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse 
+3380,5054658,"genie.py",5492,152," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +3381,5054701,"genie.py",5491,153," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +3382,5055075,"genie.py",5491,0,"",python,selection_mouse +3383,5055076,"genie.py",5486,8," ",python,selection_mouse +3384,5055437,"genie.py",5486,39," curr_masked_frame = jnp.where(\n",python,selection_mouse +3385,5055636,"genie.py",5486,78," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n",python,selection_mouse +3386,5055691,"genie.py",5486,119," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n",python,selection_mouse +3387,5055756,"genie.py",5486,149," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n",python,selection_mouse +3388,5056235,"genie.py",5621,0,"",python,selection_mouse +3389,5056680,"genie.py",5644,0,"",python,selection_mouse +3390,5057156,"genie.py",5614,30," vid_embed[:, -1],\n )",python,selection_mouse +3391,5057157,"genie.py",5611,33," vid_embed[:, -1],\n )",python,selection_mouse +3392,5057157,"genie.py",5609,35," vid_embed[:, -1],\n )",python,selection_mouse +3393,5057158,"genie.py",5566,78," self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +3394,5057177,"genie.py",5565,79," self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +3395,5057194,"genie.py",5564,80," self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +3396,5057286,"genie.py",5525,119," jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +3397,5057397,"genie.py",5486,158," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )",python,selection_mouse +3398,5058908,"genie.py",5487,0,"",python,selection_mouse +3399,5058909,"genie.py",5486,8," ",python,selection_mouse +3400,5059182,"genie.py",5486,43," curr_masked_frame = jnp.where(\n ",python,selection_mouse +3401,5059182,"genie.py",5486,85," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n ",python,selection_mouse +3402,5059183,"genie.py",5486,88," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n ",python,selection_mouse +3403,5059183,"genie.py",5486,90," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n ",python,selection_mouse +3404,5059183,"genie.py",5486,140," curr_masked_frame = jnp.where(\n jnp.expand_dims(mask, -1),\n self.dynamics.mask_token[0],\n vid_embed",python,selection_mouse +3405,5059539,"genie.py",5619,0,"",python,selection_mouse +3406,5061071,"genie.py",5777,0,"",python,selection_mouse +3407,5061458,"genie.py",5771,8,"dynamics",python,selection_mouse +3408,5063140,"genie.py",6090,0,"",python,selection_mouse +3409,5063910,"genie.py",6049,0,"",python,selection_mouse +3410,5064066,"genie.py",6042,8,"dynamics",python,selection_mouse +3411,5065336,"genie.py",6047,0,"",python,selection_mouse +3412,5065744,"genie.py",6038,0,"",python,selection_mouse +3413,5065922,"genie.py",6037,4,"self",python,selection_mouse +3414,5066149,"genie.py",6037,13,"self.dynamics",python,selection_mouse +3415,5066211,"genie.py",6037,22,"self.dynamics.dynamics",python,selection_mouse +3416,5066310,"genie.py",6037,23,"self.dynamics.dynamics(",python,selection_mouse 
+3417,5066310,"genie.py",6037,32,"self.dynamics.dynamics(vid_embed",python,selection_mouse +3418,5066432,"genie.py",6037,33,"self.dynamics.dynamics(vid_embed)",python,selection_mouse +3419,5066433,"genie.py",6037,34,"self.dynamics.dynamics(vid_embed)[",python,selection_mouse +3420,5066465,"genie.py",6037,35,"self.dynamics.dynamics(vid_embed)[:",python,selection_mouse +3421,5066465,"genie.py",6037,36,"self.dynamics.dynamics(vid_embed)[:,",python,selection_mouse +3422,5066498,"genie.py",6037,38,"self.dynamics.dynamics(vid_embed)[:, -",python,selection_mouse +3423,5066499,"genie.py",6037,39,"self.dynamics.dynamics(vid_embed)[:, -1",python,selection_mouse +3424,5066510,"genie.py",6037,40,"self.dynamics.dynamics(vid_embed)[:, -1]",python,selection_mouse +3425,5066556,"genie.py",6037,41,"self.dynamics.dynamics(vid_embed)[:, -1] ",python,selection_mouse +3426,5066557,"genie.py",6037,42,"self.dynamics.dynamics(vid_embed)[:, -1] /",python,selection_mouse +3427,5066570,"genie.py",6037,43,"self.dynamics.dynamics(vid_embed)[:, -1] / ",python,selection_mouse +3428,5066604,"genie.py",6037,52,"self.dynamics.dynamics(vid_embed)[:, -1] / step_temp",python,selection_mouse +3429,5066963,"genie.py",6089,0,"",python,selection_mouse +3430,5068263,"genie.py",6032,0,"",python,selection_mouse +3431,5068888,"genie.py",6022,12,"final_logits",python,selection_mouse +3432,5079241,"genie.py",7243,0,"",python,selection_mouse +3433,5079409,"genie.py",7235,14,"new_token_idxs",python,selection_mouse +3434,5085449,"genie.py",6748,0,"",python,selection_mouse +3435,5086331,"genie.py",6755,0,"",python,selection_mouse +3436,5086940,"genie.py",6743,0,"",python,selection_mouse +3437,5087096,"genie.py",6735,14,"new_token_idxs",python,selection_mouse +3438,5091141,"genie.py",6806,0,"",python,selection_mouse +3439,5091876,"genie.py",6747,0,"",python,selection_mouse +3440,5092044,"genie.py",6735,14,"new_token_idxs",python,selection_mouse +3441,5096145,"genie.py",6806,0,"",python,selection_mouse +3442,5096708,"genie.py",6751,0,"",python,selection_mouse +3443,5109912,"genie.py",6806,0,"",python,selection_mouse +3444,5110660,"genie.py",6776,0,"",python,selection_mouse +3445,5110794,"genie.py",6768,18,"sampled_token_idxs",python,selection_mouse +3446,5112565,"genie.py",6802,0,"",python,selection_mouse +3447,5112730,"genie.py",6788,16,"final_token_idxs",python,selection_mouse +3448,5115769,"genie.py",6765,0,"",python,selection_mouse +3449,5116556,"genie.py",6764,0,"",python,selection_command +3450,5117117,"genie.py",6735,0,"",python,selection_command +3451,5118241,"genie.py",6735,0,"#",python,content +3452,5118242,"genie.py",6736,0,"",python,selection_keyboard +3453,5118292,"genie.py",6736,0," ",python,content +3454,5118293,"genie.py",6737,0,"",python,selection_keyboard +3455,5119114,"genie.py",6736,0,"",python,selection_command +3456,5120134,"genie.py",6807,0,"\n # new_token_idxs = jnp.where(mask, sampled_token_idxs, final_token_idxs)",python,content +3457,5120153,"genie.py",6816,0,"",python,selection_command +3458,5120857,"genie.py",6816,1,"",python,content +3459,5121096,"genie.py",6816,1,"",python,content +3460,5121877,"genie.py",6886,0,"",python,selection_command +3461,5122224,"genie.py",6885,1,"",python,content +3462,5122379,"genie.py",6869,16,"",python,content +3463,5122534,"genie.py",6867,2,"",python,content +3464,5122688,"genie.py",6849,18,"",python,content +3465,5122876,"genie.py",6847,2,"",python,content +3466,5123205,"genie.py",6843,4,"",python,content +3467,5123517,"genie.py",6842,1,"",python,content 
+3468,5123887,"genie.py",6837,5,"",python,content +3469,5124178,"genie.py",6836,1,"",python,content +3470,5124801,"genie.py",6833,3,"",python,content +3471,5125011,"genie.py",6833,0,"f",python,content +3472,5125012,"genie.py",6834,0,"",python,selection_keyboard +3473,5125134,"genie.py",6834,0,"i",python,content +3474,5125136,"genie.py",6835,0,"",python,selection_keyboard +3475,5125847,"genie.py",6835,0,"n",python,content +3476,5125848,"genie.py",6836,0,"",python,selection_keyboard +3477,5126005,"genie.py",6836,0,"a",python,content +3478,5126006,"genie.py",6837,0,"",python,selection_keyboard +3479,5126140,"genie.py",6837,0,"l",python,content +3480,5126141,"genie.py",6838,0,"",python,selection_keyboard +3481,5127743,"genie.py",6833,5,"final_token_idxs",python,content +3482,5129157,"genie.py",6850,0,"",python,selection_mouse +3483,5130399,"genie.py",6842,0,"",python,selection_mouse +3484,5130560,"genie.py",6833,16,"final_token_idxs",python,selection_mouse +3485,5143250,"genie.py",7288,0,"",python,selection_mouse +3486,5143464,"genie.py",7279,14,"new_token_idxs",python,selection_mouse +3487,5156742,"genie.py",5658,0,"",python,selection_mouse +3488,5156837,"genie.py",5653,9,"vid_embed",python,selection_mouse +3489,5191577,"TERMINAL",0,0,"sh scripts_horeka/overfit_sample_tiny/sample.sh ",,terminal_output +3490,5194424,"models/dynamics.py",0,0,"",python,tab +3491,5195687,"genie.py",0,0,"",python,tab +3492,5196388,"sample.py",0,0,"",python,tab +3493,5198775,"sample.py",3702,0,"",python,selection_mouse +3494,5200250,"sample.py",3764,0,"",python,selection_command +3495,5200781,"sample.py",3807,0,"",python,selection_command +3496,5200782,"sample.py",3852,0,"",python,selection_command +3497,5200822,"sample.py",3885,0,"",python,selection_command +3498,5200823,"sample.py",3958,0,"",python,selection_command +3499,5200823,"sample.py",4018,0,"",python,selection_command +3500,5200887,"sample.py",4062,0,"",python,selection_command +3501,5200892,"sample.py",4108,0,"",python,selection_command +3502,5200911,"sample.py",4166,0,"",python,selection_command +3503,5200956,"sample.py",4179,0,"",python,selection_command +3504,5200981,"sample.py",4213,0,"",python,selection_command +3505,5201021,"sample.py",4269,0,"",python,selection_command +3506,5201058,"sample.py",4325,0,"",python,selection_command +3507,5201385,"sample.py",4269,0,"",python,selection_command +3508,5201634,"sample.py",4325,0,"",python,selection_command +3509,5201833,"sample.py",4313,0,"",python,selection_command +3510,5202509,"sample.py",4313,0,"#",python,content +3511,5202510,"sample.py",4314,0,"",python,selection_keyboard +3512,5202567,"sample.py",4314,0," ",python,content +3513,5202568,"sample.py",4315,0,"",python,selection_keyboard +3514,5202919,"sample.py",4314,0,"",python,selection_command +3515,5203030,"sample.py",4258,0,"",python,selection_command +3516,5203485,"sample.py",4257,0,"",python,selection_command +3517,5204875,"sample.py",4257,1,"",python,content +3518,5205049,"sample.py",4257,1,"",python,content +3519,5208233,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +3520,5208403,"TERMINAL",0,0,"Sampling from checkpoint: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/0000/genie_1751301068_2000/\r\n",,terminal_output +3521,5211658,"TERMINAL",0,0,"2025-07-01 00:10:38.519867: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3522,5215002,"TERMINAL",0,0,"2025-07-01 00:10:42.326133: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3523,5222914,"TERMINAL",0,0,"2025-07-01 00:10:50.247250: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3524,5229883,"TERMINAL",0,0,"2025-07-01 00:10:57.090320: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3525,5236455,"TERMINAL",0,0,"2025-07-01 00:11:03.783750: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3526,5242085,"TERMINAL",0,0,"2025-07-01 00:11:09.418435: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3527,5245861,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +3528,5257678,"TERMINAL",0,0,"2025-07-01 00:11:24.691292: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3529,5268364,"TERMINAL",0,0,"2025-07-01 00:11:35.692696: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3530,5272854,"TERMINAL",0,0,"2025-07-01 00:11:40.101335: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3531,5279600,"TERMINAL",0,0,"2025-07-01 00:11:46.927984: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +3532,5282987,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py(140)()\r\n-> recon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\r\n(Pdb) ",,terminal_output +3533,5286146,"TERMINAL",0,0,"c",,terminal_output +3534,5286332,"TERMINAL",0,0,"\r\n",,terminal_output +3535,5288518,"TERMINAL",0,0,"SSIM: 0.15577787160873413\r\n",,terminal_output +3536,5288631,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",0,0,"from dataclasses import dataclass\nimport time\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\n#from utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_resolution: int = 64\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n\n\nargs = tyro.cli(Args)\nrng = jax.random.PRNGKey(args.seed)\n\n# --- Load Genie checkpoint ---\ngenie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_resolution, args.image_resolution, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\nckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n for frame_idx in range(args.start_frame + 1, 
args.seq_len):\n # --- Sample next frame ---\n print(""=""*100)\n print(""Frame"", frame_idx)\n print(""=""*100)\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=vid, latent_actions=action_batch[:, :frame_idx], rng=_rng)\n new_frame = genie.apply(\n params,\n batch,\n args.maskgit_steps,\n args.temperature,\n args.sample_argmax,\n method=Genie.sample,\n )\n vid = jnp.concatenate([vid, new_frame], axis=1)\n return vid\n\ndef _oneshot_sample(rng, video_batch, action_batch):\n # Pass the full video batch, as in training\n batch = dict(\n videos=video_batch, # full batch, not just first frame\n latent_actions=action_batch, # shape should match what was used in training\n mask_rng=rng,\n )\n outputs = genie.apply(params, batch, False) # training=False for eval\n return outputs[""recon""]\n\n# --- Get video + latent actions ---\n# dataloader = get_dataloader(args.data_dir, args.seq_len, args.batch_size)\n# video_batch = next(iter(dataloader))\nvideo_batch = np.load(""overfit_dir/single_sample_corner.npy"")\n# Get latent actions from first video only\nfirst_video = video_batch[:1, :args.seq_len]\nbatch = dict(videos=first_video)\naction_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\naction_batch = action_batch.reshape(1, args.seq_len - 1, 1)\naction_batch = jnp.zeros_like(action_batch)\n# Use actions from first video for all videos\naction_batch = jnp.repeat(action_batch, video_batch.shape[0], axis=0)\n\n# --- Sample + evaluate video ---\n# vid = _autoreg_sample(rng, video_batch, action_batch)\nvid = _oneshot_sample(rng, video_batch, action_batch)\n# vid = jnp.zeros_like(video_batch)\n# vid = _oneshot_sample(rng, video_batch, action_batch)\ngt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\nbreakpoint()\nrecon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\nssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\nprint(f""SSIM: {ssim}"")\n\n# --- Construct video ---\nfirst_true = (video_batch[0:1] * 255).astype(np.uint8)\nfirst_pred = (vid[0:1] * 255).astype(np.uint8)\nfirst_video_comparison = np.zeros((2, *vid.shape[1:5]), dtype=np.uint8)\nfirst_video_comparison[0] = first_true[:, : vid.shape[1]]\nfirst_video_comparison[1] = first_pred\n# For other videos, only show generated video\nother_preds = (vid[1:] * 255).astype(np.uint8)\nall_frames = np.concatenate([first_video_comparison, other_preds], axis=0)\nflat_vid = einops.rearrange(all_frames, ""n t h w c -> t h (n w) c"")\n\n# --- Save video ---\nimgs = [Image.fromarray(img) for img in flat_vid]\n# Write actions on each frame\nfor img, action in zip(imgs[1:], action_batch[0, :, 0]):\n d = ImageDraw.Draw(img)\n d.text((2, 2), f""{action}"", fill=255)\nimgs[0].save(\n f""generation_{time.time()}.gif"",\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n)\n",python,tab +3537,5290925,"TERMINAL",0,0,"]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +3538,5291115,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4498,0,"",python,selection_mouse +3539,5291135,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4497,0,"",python,selection_command +3540,5292143,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4486,13,"",python,content +3541,5307772,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"",shellscript,tab +3542,5309553,"genie.py",0,0,"",python,tab 
+3543,5315040,"genie.py",6849,0,"",python,selection_mouse +3544,5315602,"genie.py",6759,0,"",python,selection_mouse +3545,5315733,"genie.py",6758,5,"where",python,selection_mouse +3546,5316327,"genie.py",6749,0,"",python,selection_mouse +3547,5316990,"genie.py",6762,0,"",python,selection_mouse +3548,5317523,"genie.py",6778,0,"",python,selection_mouse +3549,5317698,"genie.py",6770,18,"sampled_token_idxs",python,selection_mouse +3550,5318911,"genie.py",6798,0,"",python,selection_mouse +3551,5319055,"genie.py",6790,16,"final_token_idxs",python,selection_mouse +3552,5320851,"genie.py",6849,0,"",python,selection_mouse +3553,5320992,"genie.py",6833,16,"final_token_idxs",python,selection_mouse +3554,5322063,"genie.py",6781,0,"",python,selection_mouse +3555,5322222,"genie.py",6770,18,"sampled_token_idxs",python,selection_mouse +3556,5325122,"genie.py",6676,0,"",python,selection_mouse +3557,5325282,"genie.py",6664,17,"final_token_probs",python,selection_mouse +3558,5328518,"genie.py",7213,0,"",python,selection_mouse +3559,5328706,"genie.py",7207,8,"new_mask",python,selection_mouse +3560,5329721,"genie.py",7224,0,"",python,selection_mouse +3561,5330311,"genie.py",7218,14,"mask_update_fn",python,selection_mouse +3562,5331135,"genie.py",6891,0,"",python,selection_mouse +3563,5331305,"genie.py",6889,19,"num_unmasked_tokens",python,selection_mouse +3564,5331529,"genie.py",6889,88,"num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask",python,selection_mouse +3565,5331616,"genie.py",6889,89,"num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask ",python,selection_mouse +3566,5331617,"genie.py",6889,172,"num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs",python,selection_mouse +3567,5331617,"genie.py",6889,175,"num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = ",python,selection_mouse +3568,5331617,"genie.py",6889,179,"num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.",python,selection_mouse +3569,5331617,"genie.py",6889,186,"num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort",python,selection_mouse +3570,5331659,"genie.py",6889,265,"num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap",python,selection_mouse +3571,5331685,"genie.py",6889,266,"num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(",python,selection_mouse +3572,5331685,"genie.py",6889,272,"num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda",python,selection_mouse +3573,5331777,"genie.py",6889,273,"num_unmasked_tokens = jnp.round(N * (1.0 - 
unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda ",python,selection_mouse +3574,5332140,"genie.py",7162,0,"",python,selection_mouse +3575,5332815,"genie.py",6748,0,"",python,selection_mouse +3576,5333610,"genie.py",6833,16,"final",python,content +3577,5333775,"genie.py",6833,5,"",python,content +3578,5334155,"genie.py",6833,0,"jnp",python,content +3579,5334349,"genie.py",6836,0,".",python,content +3580,5334496,"genie.py",6837,0,"where",python,content +3581,5334662,"genie.py",6842,0,"(",python,content +3582,5334849,"genie.py",6843,0,"mask",python,content +3583,5335004,"genie.py",6847,0,", ",python,content +3584,5335161,"genie.py",6849,0,"sampled_token_idxs",python,content +3585,5335310,"genie.py",6867,0,", ",python,content +3586,5335540,"genie.py",6869,0,"final_token_idxs",python,content +3587,5335910,"genie.py",6885,0,")",python,content +3588,5336180,"genie.py",6816,0," ",python,content +3589,5336372,"genie.py",6816,0,"#",python,content +3590,5336773,"genie.py",6807,81,"",python,content +3591,5337280,"genie.py",6736,1,"",python,content +3592,5337665,"genie.py",6735,1,"",python,content +3593,5346004,"TERMINAL",0,0,"[?25lqu[?25h",,terminal_output +3594,5346061,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +3595,5346157,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3596,5346228,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +3597,5346385,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3598,5346514,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0518.localdomain: Tue Jul 1 00:12:53 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3307600 accelerat interact tum_cte0 R28:23\t 1 hkn0518",,terminal_output +3599,5347472,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +3600,5347964,"TERMINAL",0,0,"s",,terminal_output +3601,5348175,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +3602,5348838,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +3603,5349013,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +3604,5349121,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +3605,5349219,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +3606,5349313,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +3607,5349412,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +3608,5354001,"TERMINAL",0,0,"\r(jafar) [tum_cte0515@hkn0518 jafar]$ sbatch ",,terminal_output +3609,5359205,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=05:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=logs/logs_training/%x_%j.log\n#SBATCH --error=logs/logs_training/%x_%j.log\n#SBATCH --mail-user=mihir.mahajan2002@gmail.com\n#SBATCH --job-name=train_dynamics_minecraft_overfit_sample_tiny\n#SBATCH --mem=50G\n#SBATCH --mail-type=ALL\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=$ws_dir/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\necho Running dynamics model overfit run. 
Slurm id: $slurm_job_id\n\n# Use checkpoints from tokenizer/lam overfit sample runs\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3299272/tokenizer_1751037678_153500/\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3299259/lam_1751036759_200000/\n\nsrun python train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log \\n --log_checkpoint_interval=500 \\n --name=dynamics-tiny-overfit-big-lr-$slurm_job_id \\n --tags dynamics overfit tiny \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --lam_checkpoint=$lam_ckpt_dir \\n --data_dir $tf_records_dir \\n --tokenizer_dim=384 \\n --latent_patch_dim=32 \\n --num_patch_latents=1024 \\n --patch_size=4 \\n --tokenizer_num_blocks=8 \\n --tokenizer_num_heads=8 \\n --lam_dim=384 \\n --latent_action_dim=32 \\n --num_latent_actions=6 \\n --lam_patch_size=16 \\n --lam_num_blocks=8 \\n --lam_num_heads=8 \\n --dyna_dim=128 \\n --dyna_num_blocks=2 \\n --dyna_num_heads=4\n",shellscript,tab +3610,5361914,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2021,0,"",shellscript,selection_mouse +3611,5362602,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1997,0,"",shellscript,selection_mouse +3612,5362609,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1996,0,"",shellscript,selection_command +3613,5363205,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2021,0,"",shellscript,selection_mouse +3614,5363964,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2020,0,"",shellscript,selection_mouse +3615,5363968,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2019,0,"",shellscript,selection_command +3616,5364917,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2020,0,"",shellscript,selection_command +3617,5365234,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2020,0," ",shellscript,content +3618,5365235,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2021,0,"",shellscript,selection_keyboard +3619,5365429,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2021,0,"\",shellscript,content +3620,5365430,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2022,0,"",shellscript,selection_keyboard +3621,5365508,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2022,0," ",shellscript,content +3622,5365508,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2023,0,"",shellscript,selection_keyboard +3623,5365625,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2023,0,"\n ",shellscript,content +3624,5368916,"scripts_horeka/train_dynamics.sh",0,0,"",shellscript,tab +3625,5370337,"scripts_horeka/train_dynamics.sh",1630,0,"",shellscript,selection_mouse +3626,5370486,"scripts_horeka/train_dynamics.sh",1629,2,"--",shellscript,selection_mouse +3627,5370654,"scripts_horeka/train_dynamics.sh",1625,21," --mask_limit=0.0\n",shellscript,selection_mouse +3628,5376499,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",0,0,"",shellscript,tab 
+3629,5378300,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2028,0," --mask_limit=0.0\n",shellscript,content +3630,5379454,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2031,0,"",shellscript,selection_mouse +3631,5380285,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2050,0,"",shellscript,selection_keyboard +3632,5380850,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2049,0,"",shellscript,selection_command +3633,5381184,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2024,0,"",shellscript,selection_command +3634,5381335,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1998,0,"",shellscript,selection_command +3635,5381718,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2024,0,"",shellscript,selection_command +3636,5383769,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2028,4,"",shellscript,content +3637,5385451,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2044,0,"",shellscript,selection_mouse +3638,5386049,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1997,0,"",shellscript,selection_mouse +3639,5387137,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2023,0,"",shellscript,selection_mouse +3640,5387642,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2044,0,"",shellscript,selection_mouse +3641,5388068,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2043,1,"0",shellscript,selection_mouse +3642,5388069,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2037,7,"mit=0.0",shellscript,selection_mouse +3643,5388069,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2033,11,"k_limit=0.0",shellscript,selection_mouse +3644,5388070,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2031,13,"ask_limit=0.0",shellscript,selection_mouse +3645,5388070,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2030,14,"mask_limit=0.0",shellscript,selection_mouse +3646,5388096,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2028,16,"--mask_limit=0.0",shellscript,selection_mouse +3647,5388097,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2027,17," --mask_limit=0.0",shellscript,selection_mouse +3648,5388108,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2025,19," --mask_limit=0.0",shellscript,selection_mouse +3649,5388159,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2024,20," --mask_limit=0.0",shellscript,selection_mouse +3650,5388159,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2044,1,"\n",shellscript,selection_mouse +3651,5388834,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2044,1,"",shellscript,content +3652,5389183,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2043,1,"",shellscript,content +3653,5389645,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2042,1,"",shellscript,content 
+3654,5389782,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2041,1,"",shellscript,content +3655,5390136,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2040,1,"",shellscript,content +3656,5390372,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2030,10,"",shellscript,content +3657,5390609,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2028,2,"",shellscript,content +3658,5391036,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2024,4,"",shellscript,content +3659,5391222,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2023,1,"",shellscript,content +3660,5391593,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2023,0,"\n ",shellscript,content +3661,5392072,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2024,4,"",shellscript,content +3662,5392476,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2023,1,"",shellscript,content +3663,5392997,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2022,1,"",shellscript,content +3664,5393220,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2022,0,"\n ",shellscript,content +3665,5394085,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2027,0,"-",shellscript,content +3666,5394087,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2028,0,"",shellscript,selection_keyboard +3667,5394245,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2028,0,"-",shellscript,content +3668,5394246,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2029,0,"",shellscript,selection_keyboard +3669,5394421,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2029,0,"m",shellscript,content +3670,5394422,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2030,0,"",shellscript,selection_keyboard +3671,5394500,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2030,0,"a",shellscript,content +3672,5394501,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2031,0,"",shellscript,selection_keyboard +3673,5394585,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2031,0,"s",shellscript,content +3674,5394585,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2032,0,"",shellscript,selection_keyboard +3675,5394783,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2032,0,"k",shellscript,content +3676,5394783,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2033,0,"",shellscript,selection_keyboard +3677,5395350,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2029,4,"",shellscript,content +3678,5395636,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2027,2,"",shellscript,content +3679,5396107,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2023,4,"",shellscript,content +3680,5396643,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2023,0," --mask_limit=0.0\n",shellscript,content 
+3681,5397786,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2043,1,"",shellscript,content +3682,5401073,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1460,0,"",shellscript,selection_mouse +3683,5402061,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1461,0,"",shellscript,selection_command +3684,5402420,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1461,0," ",shellscript,content +3685,5402421,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1462,0,"",shellscript,selection_keyboard +3686,5402555,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1462,0,"m",shellscript,content +3687,5402556,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1463,0,"",shellscript,selection_keyboard +3688,5402645,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1463,0,"a",shellscript,content +3689,5402646,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1464,0,"",shellscript,selection_keyboard +3690,5402829,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1464,0,"s",shellscript,content +3691,5402829,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1465,0,"",shellscript,selection_keyboard +3692,5402983,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1465,0,"k",shellscript,content +3693,5402984,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1466,0,"",shellscript,selection_keyboard +3694,5403366,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1466,0,"l",shellscript,content +3695,5403366,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1467,0,"",shellscript,selection_keyboard +3696,5403595,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1467,0,"i",shellscript,content +3697,5403596,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1468,0,"",shellscript,selection_keyboard +3698,5403674,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1468,0,"m",shellscript,content +3699,5403674,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1469,0,"",shellscript,selection_keyboard +3700,5403795,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1469,0,"i",shellscript,content +3701,5403796,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1470,0,"",shellscript,selection_keyboard +3702,5403934,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1470,0,"t",shellscript,content +3703,5403934,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1471,0,"",shellscript,selection_keyboard +3704,5404093,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1471,0,"-",shellscript,content +3705,5404094,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1472,0,"",shellscript,selection_keyboard +3706,5404704,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1472,0,"p",shellscript,content +3707,5404704,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1473,0,"",shellscript,selection_keyboard 
+3708,5405368,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1472,1,"",shellscript,content +3709,5405679,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1472,0,"0",shellscript,content +3710,5405680,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1473,0,"",shellscript,selection_keyboard +3711,5411345,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1384,0,"",shellscript,selection_mouse +3712,5412323,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1384,0,"m",shellscript,content +3713,5412324,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1385,0,"",shellscript,selection_keyboard +3714,5412493,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1385,0,"a",shellscript,content +3715,5412494,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1386,0,"",shellscript,selection_keyboard +3716,5412585,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1386,0,"s",shellscript,content +3717,5412586,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1387,0,"",shellscript,selection_keyboard +3718,5412700,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1387,0,"k",shellscript,content +3719,5412701,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1388,0,"",shellscript,selection_keyboard +3720,5413300,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1388,0,"l",shellscript,content +3721,5413301,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1389,0,"",shellscript,selection_keyboard +3722,5413561,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1389,0,"i",shellscript,content +3723,5413562,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1390,0,"",shellscript,selection_keyboard +3724,5413721,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1390,0,"m",shellscript,content +3725,5413722,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1391,0,"",shellscript,selection_keyboard +3726,5414625,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1391,0,"-",shellscript,content +3727,5414626,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1392,0,"",shellscript,selection_keyboard +3728,5414903,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1392,0,"0",shellscript,content +3729,5414903,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1393,0,"",shellscript,selection_keyboard +3730,5416460,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1393,0,"-",shellscript,content +3731,5416461,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1394,0,"",shellscript,selection_keyboard +3732,5421612,"TERMINAL",0,0,"s",,terminal_output +3733,5421737,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +3734,5422388,"TERMINAL",0,0,"ripts_",,terminal_output +3735,5424799,"TERMINAL",0,0,"[?25lh[?25h",,terminal_output +3736,5424880,"TERMINAL",0,0,"oreka/",,terminal_output +3737,5425216,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +3738,5425275,"TERMINAL",0,0,"rain_",,terminal_output 
+3739,5428373,"TERMINAL",0,0,"[?25lo[?25hverfit_",,terminal_output +3740,5428853,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +3741,5429053,"TERMINAL",0,0,"ample",,terminal_output +3742,5430758,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +3743,5431133,"TERMINAL",0,0,"tiny/",,terminal_output +3744,5432154,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +3745,5432214,"TERMINAL",0,0,"",,terminal_output +3746,5432558,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3747,5432636,"TERMINAL",0,0,"",,terminal_output +3748,5433617,"TERMINAL",0,0,"",,terminal_output +3749,5434139,"TERMINAL",0,0,"\r\ntester.sh train_dynamics_overfit_sample_mid_lr.sbatch train_lam_overfit_sample.sbatch\r\ntrain_dynamics_overfit_sample_big_lr.sbatch train_dynamics_overfit_sample_smol_lr.sbatch train_tokenizer_overfit_sample.sbatch\r\n(jafar) [tum_cte0515@hkn0518 jafar]$ sbatch scripts_horeka/overfit_sample_tiny/t",,terminal_output +3750,5434427,"TERMINAL",0,0,"r",,terminal_output +3751,5434663,"TERMINAL",0,0,"ain_",,terminal_output +3752,5435375,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3753,5435532,"TERMINAL",0,0,"ynamics_overfit_sample_",,terminal_output +3754,5436133,"TERMINAL",0,0,"[?25lb[?25h",,terminal_output +3755,5436246,"TERMINAL",0,0,"ig_lr.sbatch",,terminal_output +3756,5438447,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",0,0,"",shellscript,tab +3757,5439731,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2065,0,"",shellscript,selection_mouse +3758,5442542,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1416,0,"",shellscript,selection_mouse +3759,5442735,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1416,1,"b",shellscript,selection_mouse +3760,5442735,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1416,2,"bi",shellscript,selection_mouse +3761,5442755,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1416,3,"big",shellscript,selection_mouse +3762,5443133,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1419,0,"",shellscript,selection_mouse +3763,5444956,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +3764,5445938,"TERMINAL",0,0,"Submitted batch job 3307614\r\n]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +3765,5447102,"TERMINAL",0,0,"[?25lque[?25h",,terminal_output +3766,5447233,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +3767,5447351,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3768,5447389,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +3769,5447577,"TERMINAL",0,0,"[?25le[?25h\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0518.localdomain: Tue Jul 1 00:14:34 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3307614 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3307600 accelerat interact tum_cte0 R30:04\t 1 hkn0518",,terminal_output +3770,5448529,"TERMINAL",0,0,"55",,terminal_output +3771,5449521,"TERMINAL",0,0,"66",,terminal_output +3772,5450558,"TERMINAL",0,0,"77",,terminal_output +3773,5451590,"TERMINAL",0,0,"88",,terminal_output +3774,5452631,"TERMINAL",0,0,"99",,terminal_output +3775,5453627,"TERMINAL",0,0,"4010",,terminal_output +3776,5454648,"TERMINAL",0,0,"11",,terminal_output +3777,5455620,"TERMINAL",0,0,"22",,terminal_output +3778,5456325,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output 
+3779,5456955,"TERMINAL",0,0,"i",,terminal_output +3780,5457188,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +3781,5457330,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +3782,5457552,"TERMINAL",0,0,"[?25li[?25h[?25ln[?25h",,terminal_output +3783,5457631,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +3784,5457909,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn0518.localdomain: Tue Jul 1 00:14:45 2025Partition dev_cpuonly: 12 nodes idle\rPartition cpuonly: 28 nodes idle\rPartition dev_accelerated:\t 2 nodes idle\rPartition accelerated:\t 6 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 8 nodes idle",,terminal_output +3785,5458944,"TERMINAL",0,0,"6",,terminal_output +3786,5459936,"TERMINAL",0,0,"7",,terminal_output +3787,5460896,"TERMINAL",0,0,"8",,terminal_output +3788,5461939,"TERMINAL",0,0,"9",,terminal_output +3789,5462964,"TERMINAL",0,0,"50",,terminal_output +3790,5463995,"TERMINAL",0,0,"1",,terminal_output +3791,5464087,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +3792,5473349,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",0,0,"",shellscript,tab +3793,5474431,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1758,0,"",shellscript,selection_mouse +3794,5475024,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",1993,0,"",shellscript,selection_mouse +3795,5475700,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",2066,0,"",shellscript,selection_mouse +3796,5476514,"TERMINAL",0,0,"bash",,terminal_focus +3797,5478144,"TERMINAL",0,0,"srun",,terminal_focus +3798,5479154,"TERMINAL",0,0,"idling",,terminal_output +3799,5479381,"TERMINAL",0,0,"queue",,terminal_output +3800,5479869,"TERMINAL",0,0,"sbatch scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",,terminal_output +3801,5483287,"TERMINAL",0,0,"[?25lch scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch\r[?25h",,terminal_output +3802,5483367,"TERMINAL",0,0,"[?25lc scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch\r[?25h",,terminal_output +3803,5483577,"TERMINAL",0,0,"[?25lt scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch\r[?25h",,terminal_output +3804,5483660,"TERMINAL",0,0,"[?25la scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch\r[?25h",,terminal_output +3805,5483828,"TERMINAL",0,0,"[?25lb scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch\r[?25h",,terminal_output +3806,5483971,"TERMINAL",0,0,"[?25ls scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch\r[?25h",,terminal_output +3807,5484164,"TERMINAL",0,0,"",,terminal_output +3808,5484424,"TERMINAL",0,0,"c scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch\rp scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch\r",,terminal_output +3809,5487382,"TERMINAL",0,0,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch",,terminal_output +3810,5489766,"TERMINAL",0,0,"[?25l \rscripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch 
scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch[?25h",,terminal_output +3811,5491271,"TERMINAL",0,0,"[?25l[?25h",,terminal_output +3812,5491457,"TERMINAL",0,0,"",,terminal_output +3813,5494335,"TERMINAL",0,0,"[?25l scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch[?25h",,terminal_output +3814,5495627,"TERMINAL",0,0,"[?25l[?25h",,terminal_output +3815,5495881,"TERMINAL",0,0,"[?25l[?25h",,terminal_output +3816,5495926,"TERMINAL",0,0,"",,terminal_output +3817,5496213,"TERMINAL",0,0,"\r\n\r",,terminal_output +3818,5497069,"TERMINAL",0,0,"",,terminal_output +3819,5497205,"TERMINAL",0,0,"",,terminal_output +3820,5497789,"TERMINAL",0,0,"",,terminal_output +3821,5497987,"TERMINAL",0,0,"",,terminal_output +3822,5499074,"TERMINAL",0,0,"-.sbatch",,terminal_output +3823,5499195,"TERMINAL",0,0,"2.sbatch",,terminal_output +3824,5499566,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +3825,5511368,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr-2.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=05:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=logs/logs_training/%x_%j.log\n#SBATCH --error=logs/logs_training/%x_%j.log\n#SBATCH --mail-user=mihir.mahajan2002@gmail.com\n#SBATCH --job-name=train_dynamics_minecraft_overfit_sample_tiny\n#SBATCH --mem=50G\n#SBATCH --mail-type=ALL\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\ntf_records_dir=$ws_dir/knoms_tfrecords_500_shards\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/${job_name}_${slurm_job_id}\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\necho Running dynamics model overfit run. 
Slurm id: $slurm_job_id\n\n# Use checkpoints from tokenizer/lam overfit sample runs\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3299272/tokenizer_1751037678_153500/\nlam_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3299259/lam_1751036759_200000/\n\nsrun python train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --min_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=500 \\n --log \\n --log_checkpoint_interval=500 \\n --name=masklim-0-dynamics-tiny-overfit-big-lr-$slurm_job_id \\n --tags dynamics overfit tiny masklimit-0 \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --lam_checkpoint=$lam_ckpt_dir \\n --data_dir $tf_records_dir \\n --tokenizer_dim=384 \\n --latent_patch_dim=32 \\n --num_patch_latents=1024 \\n --patch_size=4 \\n --tokenizer_num_blocks=8 \\n --tokenizer_num_heads=8 \\n --lam_dim=384 \\n --latent_action_dim=32 \\n --num_latent_actions=6 \\n --lam_patch_size=16 \\n --lam_num_blocks=8 \\n --lam_num_heads=8 \\n --dyna_dim=128 \\n --dyna_num_blocks=2 \\n --dyna_num_heads=4 \\n --mask_limit=0.0\n",shellscript,tab +3826,5513130,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr-2.sbatch",2065,0,"",shellscript,selection_mouse +3827,5513131,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr-2.sbatch",2064,0,"",shellscript,selection_command +3828,5514314,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr-2.sbatch",2063,0,"",shellscript,selection_command +3829,5514443,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr-2.sbatch",2062,0,"",shellscript,selection_command +3830,5514776,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr-2.sbatch",2062,1,"1",shellscript,content +3831,5519134,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr-2.sbatch",1392,0,"",shellscript,selection_mouse +3832,5519547,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr-2.sbatch",1392,1,"1",shellscript,content +3833,5521023,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr-2.sbatch",1482,0,"",shellscript,selection_mouse +3834,5522670,"scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr-2.sbatch",1482,1,"1",shellscript,content +3835,5527991,"TERMINAL",0,0,"cp scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr-2.sbatch",,terminal_output +3836,5528808,"TERMINAL",0,0,"",,terminal_output +3837,5529059,"TERMINAL",0,0,"",,terminal_output +3838,5529311,"TERMINAL",0,0,"",,terminal_output +3839,5529441,"TERMINAL",0,0,"",,terminal_output +3840,5529597,"TERMINAL",0,0,"",,terminal_output +3841,5529769,"TERMINAL",0,0,"",,terminal_output +3842,5529928,"TERMINAL",0,0,"",,terminal_output +3843,5530131,"TERMINAL",0,0,"",,terminal_output +3844,5530353,"TERMINAL",0,0,"",,terminal_output +3845,5530445,"TERMINAL",0,0,"",,terminal_output +3846,5530701,"TERMINAL",0,0,"",,terminal_output +3847,5530861,"TERMINAL",0,0,"",,terminal_output +3848,5531080,"TERMINAL",0,0,"",,terminal_output +3849,5531353,"TERMINAL",0,0,"",,terminal_output +3850,5532294,"TERMINAL",0,0,"\r scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr-2.sbatch\r\n\r",,terminal_output +3851,5532393,"TERMINAL",0,0,"",,terminal_output +3852,5533378,"TERMINAL",0,0,"[1@s[?25l [1@b[?25h",,terminal_output 
+3853,5533568,"TERMINAL",0,0,"[1@a",,terminal_output +3854,5533808,"TERMINAL",0,0,"[?25l [1@t[?25h",,terminal_output +3855,5533937,"TERMINAL",0,0,"[?25l [1@c[?25h",,terminal_output +3856,5534125,"TERMINAL",0,0,"[?25l [1@j[?25h",,terminal_output +3857,5534783,"TERMINAL",0,0,"[?25lj[?25h",,terminal_output +3858,5535131,"TERMINAL",0,0,"[?25l [1@h[?25h",,terminal_output +3859,5535303,"TERMINAL",0,0,"[?25l[?2004l\r[?25hSubmitted batch job 3307616\r\n]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +3860,5536179,"TERMINAL",0,0,"q",,terminal_output +3861,5536364,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3862,5536451,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +3863,5536549,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3864,5536764,"TERMINAL",0,0,"\r\n[?2004l\rbash: qeue: command not found...\r\n",,terminal_output +3865,5537906,"TERMINAL",0,0,"^C\r\n]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +3866,5538327,"TERMINAL",0,0,"q",,terminal_output +3867,5538424,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +3868,5538548,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3869,5538683,"TERMINAL",0,0,"[?25lu[?25h[?25le[?25h",,terminal_output +3870,5539143,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0518.localdomain: Tue Jul 1 00:16:06 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3307616 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3307614 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3307600 accelerat interact tum_cte0 R31:36\t 1 hkn0518",,terminal_output +3871,5540124,"TERMINAL",0,0,"77",,terminal_output +3872,5540213,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +3873,5551785,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +3874,5552664,"TERMINAL",0,0,"q",,terminal_output +3875,5552904,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +3876,5552977,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3877,5553030,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +3878,5553125,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3879,5553337,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0518.localdomain: Tue Jul 1 00:16:20 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3307616 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3307614 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3307600 accelerat interact tum_cte0 R31:50\t 1 hkn0518",,terminal_output +3880,5554300,"TERMINAL",0,0,"11",,terminal_output +3881,5555308,"TERMINAL",0,0,"22",,terminal_output +3882,5556341,"TERMINAL",0,0,"33",,terminal_output +3883,5556639,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +3884,5557464,"TERMINAL",0,0,"s",,terminal_output +3885,5557666,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +3886,5557818,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +3887,5557919,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +3888,5558038,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +3889,5558182,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3890,5558242,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +3891,5558325,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +3892,5558973,"TERMINAL",0,0,"3307614",,terminal_output 
+3893,5559475,"TERMINAL",0,0,"3307614\r\n[?2004l\r]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +3894,5560220,"TERMINAL",0,0,"scancel 3307614",,terminal_output +3895,5560705,"TERMINAL",0,0,"queue",,terminal_output +3896,5561088,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0518.localdomain: Tue Jul 1 00:16:28 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3307616 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3307600 accelerat interact tum_cte0 R31:58\t 1 hkn0518",,terminal_output +3897,5562117,"TERMINAL",0,0,"99",,terminal_output +3898,5563086,"TERMINAL",0,0,"302:00",,terminal_output +3899,5563757,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +3900,5564368,"TERMINAL",0,0,"queue",,terminal_output +3901,5564564,"TERMINAL",0,0,"scancel 3307614",,terminal_output +3902,5565112,"TERMINAL",0,0,"",,terminal_output +3903,5566117,"TERMINAL",0,0,"3307616",,terminal_output +3904,5566330,"TERMINAL",0,0,"3307616\r\n[?2004l\r]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +3905,5569088,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"",shellscript,tab +3906,5580756,"genie.py",0,0,"",python,tab +3907,5580977,"genie.py",2256,0,"",python,selection_command +3908,5588381,"genie.py",2255,23,"",python,content +3909,5590274,"genie.py",2329,184,"",python,content +3910,5592459,"genie.py",2432,72," latent_actions=jax.lax.stop_gradient(lam_outputs[""z_q""]),",python,content +3911,5593852,"genie.py",2511,12,"",python,content +3912,5595105,"genie.py",2831,132,"",python,content +3913,5604417,"sample.py",0,0,"",python,tab +3914,5604418,"sample.py",2648,0,"",python,selection_command +3915,5612294,"train_dynamics.py",0,0,"",python,tab +3916,5612296,"train_dynamics.py",2850,0,"",python,selection_command +3917,5615178,"train_dynamics.py",2850,68," return ce_loss, (outputs[""recon""], metrics)",python,content +3918,5616483,"train_dynamics.py",3064,84," (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)",python,content +3919,5617589,"train_dynamics.py",3332,48," return state, loss, recon, metrics",python,content +3920,5622075,"train_dynamics.py",7561,89," train_state, loss, recon, metrics = train_step(train_state, inputs)",python,content +3921,5625407,"train_dynamics.py",8378,54,"",python,content +3922,5626570,"train_dynamics.py",8606,246,"",python,content +3923,5627837,"train_dynamics.py",9033,168,"",python,content +3924,5643925,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom genie import Genie, restore_genie_components\nfrom models.tokenizer import TokenizerVQVAE\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n # Optimization\n batch_size: int = 36\n 
min_lr: float = 3e-6\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args\n )\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n 
lam_num_heads=args.lam_num_heads,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=jnp.float32\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Restore checkpoint ---\n train_state = restore_genie_components(\n train_state, replicated_sharding, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n # dataloader = get_dataloader(\n # # NOTE: We deliberately pass the global batch size\n # # The dataloader shards the dataset across all processes\n # tfrecord_files,\n # args.seq_len,\n # args.batch_size,\n # *image_shape,\n # )\n step = 0\n while step < args.num_steps:\n # for videos in dataloader:\n npy_path = ""overfit_dir/single_sample_corner.npy""\n # npy_path = ""overfit_dir/single_batch_3_elems.npy""\n videos = np.load(npy_path)\n print(""batch shape: "", videos.shape)\n while(True):\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n start_time = time.time()\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n ""step_time_ms"": elapsed_time,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n 
orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""genie_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +3925,5643926,"train_dynamics.py",9623,0,"",python,selection_mouse +3926,5645093,"train_dynamics.py",0,0,"",python,tab +3927,5645694,"genie.py",0,0,"",python,tab +3928,5649956,"TERMINAL",0,0,"s",,terminal_output +3929,5650287,"TERMINAL",0,0,"[?25ly[?25h",,terminal_output +3930,5650551,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +3931,5650781,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +3932,5651226,"TERMINAL",0,0,"[?25l-[?25h",,terminal_output +3933,5651541,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +3934,5652269,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +3935,5652350,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +3936,5652541,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +3937,5652652,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3938,5652745,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +3939,5652859,"TERMINAL",0,0,"\r\n[?2004l\rsending incremental file list\r\n",,terminal_output +3940,5654376,"TERMINAL",0,0,"./\r\n.gitignore\r\ngeneration_1751310068.977446.gif\r\ngeneration_1751320204.666793.gif\r\ngeneration_1751320704.5755262.gif\r\ngeneration_1751321003.750392.gif\r\ngeneration_1751321516.115979.gif\r\ngenie.py\r\nrequirements.txt\r\nsample.py\r\ntrain_dynamics.py\r\ntrain_lam.py\r\ntrain_tokenizer.py\r\n",,terminal_output +3941,5654816,"TERMINAL",0,0,"gifs/\r\ngifs/generation_1751035879.4153903.gif\r\ngifs/generation_1751302525.3066723.gif\r\ngifs/generation_1751303075.601022.gif\r\ngifs/generation_1751307347.4951653.gif\r\ngifs/generation_overfit-sample-big-lr.gif\r\ngifs/generation_overfit-sample-low-lr.gif\r\ngifs/generation_overfit-sample-mid-lr.gif\r\ngifs/sample-maskgit-steps-1.gif\r\nmodels/dynamics.py\r\nscripts_horeka/train_dynamics.sh\r\nscripts_horeka/train_lam.sh\r\nscripts_horeka/train_tokenizer.sh\r\nscripts_horeka/overfit_sample_tiny/\r\nscripts_horeka/overfit_sample_tiny/sample.sh\r\nscripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr-2.sbatch\r\nscripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr.sbatch\r\nslurm/dev/alfred/\r\nslurm/dev/alfred/overfit_minecraft_single_sample/\r\nslurm/dev/alfred/overfit_minecraft_single_sample/train_dynamics_overfit_sample.sbatch\r\nslurm/dev/alfred/overfit_minecraft_single_sample/train_dynamics_overfit_sample.sh\r\nslurm/dev/alfred/overfit_sample/\r\nslurm/dev/alfred/overfit_sample/train_tokenizer_overfit_sample_size_0.6_mio.sbatch\r\nslurm/dev/alfred/overfit_sample/train_tokenizer_overfit_sample_size_0_5.sh\r\nslurm/jobs/mihir/horeka/batchsize_scaling/adjusted_lr/\r\nslurm/jobs/mihir/horeka/batchsize_scaling/adjusted_lr/train_tokenizer_16_nodes.sbatch\r\nslurm/jobs/mihir/horeka/batchsize_scaling/adjusted_lr/train_tokenizer_1_nodes.sbatch\r\nslurm/jobs/mihir/horeka/batchsize_scaling/adjusted_lr/train_tokenizer_2_nodes.sbatch\r\nslurm/jobs/mihir/horeka/batchsize_scaling/adjusted_lr/train_tokenizer_32_nodes.sbatch\r\nslurm/jobs/mihir/horeka/batchsize_scaling/adjusted_lr/train_tokenizer_4_nodes.sbatch\r\nslurm/jobs/mihir/horeka/batchsize_scaling/adjusted_lr/train_tokenizer_8_nodes.sbatch\r\nslurm/utils/\r\nslurm/utils/alfred/\r\nslurm/utils/alfred/alfred_placeholder.txt\r\nslurm/utils/franz/\r\nslurm/utils/franz/franz_placeholder.txt\r\nutils/\r\nutils/dataloader.py\r\nutils/preprocess_dataset.py\r\n",,terminal_output +3942,5655066,"TERMINAL",0,0,"\r\nsent 4,370,723 bytes received 896 bytes 1,748,647.60 
bytes/sec\r\ntotal size is 69,107,278 speedup is 15.81\r\n]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +3943,5659673,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0518:~/Projects/jafar[?2004h(jafar) [tum_cte0515@hkn0518 jafar]$ ",,terminal_output +3944,5660761,"TERMINAL",0,0,"c",,terminal_output +3945,5661562,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +3946,5662120,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +3947,5662201,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +3948,5662399,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +3949,5662575,"TERMINAL",0,0,"[?25le[?25h[?25lr[?25h",,terminal_output +3950,5662751,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0518:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0518 jafar_jobs]$ ",,terminal_output +3951,5663806,"TERMINAL",0,0,"sbatch scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr-2.sbatch",,terminal_output +3952,5666848,"TERMINAL",0,0,"\rsbatch scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr-2.sbatch\r\n[?2004l\rSubmitted batch job 3307618\r\n]0;tum_cte0515@hkn0518:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0518 jafar_jobs]$ ",,terminal_output +3953,5667926,"TERMINAL",0,0,"sbatch scripts_horeka/overfit_sample_tiny/train_dynamics_overfit_sample_big_lr-2.sbatch",,terminal_output +3954,5669738,"TERMINAL",0,0,"[?25l2.sbatch[?25h",,terminal_output +3955,5669949,"TERMINAL",0,0,"[?25l-.sbatch[?25h",,terminal_output +3956,5670693,"TERMINAL",0,0,"\r\n[?2004l\rSubmitted batch job 3307619\r\n]0;tum_cte0515@hkn0518:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0518 jafar_jobs]$ ",,terminal_output +3957,5673604,"TERMINAL",0,0,"q",,terminal_output +3958,5673729,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +3959,5673868,"TERMINAL",0,0,"[?25le[?25h[?25lu[?25h",,terminal_output +3960,5674420,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +3961,5674669,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0518.localdomain: Tue Jul 1 00:18:21 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3307619 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3307618 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3307600 accelerat interact tum_cte0 R33:51\t 1 hkn0518",,terminal_output +3962,5675598,"TERMINAL",0,0,"22",,terminal_output +3963,5676150,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0518:~/Projects/jafar_jobs[?2004h(jafar) [tum_cte0515@hkn0518 jafar_jobs]$ ",,terminal_output +3964,5677364,"TERMINAL",0,0,"[?2004l\r\r\nexit\r\nsalloc: Relinquishing job allocation 3307600\r\nsalloc: Job allocation 3307600 has been revoked.\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +3965,5680779,"TERMINAL",0,0,"idling",,terminal_command +3966,5680828,"TERMINAL",0,0,"]633;E;2025-07-01 00:18:28 idling;4d11dbdc-690b-4257-b927-bbd493ebfa56]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1990.localdomain: Tue Jul 1 00:18:28 2025Partition dev_cpuonly: 12 nodes idle\rPartition cpuonly: 28 nodes idle\rPartition dev_accelerated:\t 2 nodes idle\rPartition accelerated:\t 8 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 8 nodes idle",,terminal_output +3967,5681858,"TERMINAL",0,0,"9",,terminal_output +3968,5682752,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +3969,5683616,"TERMINAL",0,0,"queue",,terminal_command +3970,5683670,"TERMINAL",0,0,"]633;E;2025-07-01 00:18:30 
queue;4d11dbdc-690b-4257-b927-bbd493ebfa56]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Tue Jul 1 00:18:30 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3307600 accelerat interact tum_cte0 CG33:54\t 1 hkn05183307619 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)3307618 accelerat train_dy tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output +3971,5684754,"TERMINAL",0,0,"2",,terminal_output +3972,5685817,"TERMINAL",0,0,"3",,terminal_output +3973,5686824,"TERMINAL",0,0,"48 R1hkn04209 R1hkn0420",,terminal_output +3974,5687872,"TERMINAL",0,0,"522",,terminal_output +3975,5688917,"TERMINAL",0,0,"\r618train_dy R 0:0342093",,terminal_output +3976,5690005,"TERMINAL",0,0,"744",,terminal_output +3977,5691005,"TERMINAL",0,0,"855",,terminal_output +3978,5692086,"TERMINAL",0,0,"966",,terminal_output +3979,5693082,"TERMINAL",0,0,"4077",,terminal_output +3980,5694135,"TERMINAL",0,0,"188",,terminal_output +3981,5695157,"TERMINAL",0,0,"299",,terminal_output +3982,5696219,"TERMINAL",0,0,"31010",,terminal_output +3983,5697240,"TERMINAL",0,0,"411",,terminal_output +3984,5698344,"TERMINAL",0,0,"522",,terminal_output +3985,5699341,"TERMINAL",0,0,"633",,terminal_output +3986,5700384,"TERMINAL",0,0,"744",,terminal_output +3987,5701463,"TERMINAL",0,0,"855",,terminal_output +3988,5702480,"TERMINAL",0,0,"966",,terminal_output +3989,5703537,"TERMINAL",0,0,"5077",,terminal_output +3990,5704597,"TERMINAL",0,0,"188",,terminal_output +3991,5705604,"TERMINAL",0,0,"299",,terminal_output +3992,5706653,"TERMINAL",0,0,"32020",,terminal_output +3993,5707689,"TERMINAL",0,0,"422",,terminal_output +3994,5708737,"TERMINAL",0,0,"633",,terminal_output +3995,5709785,"TERMINAL",0,0,"744",,terminal_output +3996,5710824,"TERMINAL",0,0,"855",,terminal_output +3997,5711868,"TERMINAL",0,0,"966",,terminal_output +3998,5712911,"TERMINAL",0,0,"9:0077",,terminal_output +3999,5713951,"TERMINAL",0,0,"188",,terminal_output +4000,5715001,"TERMINAL",0,0,"299",,terminal_output +4001,5716154,"TERMINAL",0,0,"33030",,terminal_output +4002,5717147,"TERMINAL",0,0,"411",,terminal_output +4003,5718112,"TERMINAL",0,0,"522",,terminal_output +4004,5719167,"TERMINAL",0,0,"633",,terminal_output +4005,5720199,"TERMINAL",0,0,"744",,terminal_output +4006,5721258,"TERMINAL",0,0,"855",,terminal_output +4007,5722286,"TERMINAL",0,0,"966",,terminal_output +4008,5723329,"TERMINAL",0,0,"1077",,terminal_output +4009,5724457,"TERMINAL",0,0,"188",,terminal_output +4010,5725429,"TERMINAL",0,0,"299",,terminal_output +4011,5726451,"TERMINAL",0,0,"34040",,terminal_output +4012,5727498,"TERMINAL",0,0,"411",,terminal_output +4013,5728539,"TERMINAL",0,0,"522",,terminal_output +4014,5729582,"TERMINAL",0,0,"633",,terminal_output +4015,5730618,"TERMINAL",0,0,"744",,terminal_output +4016,5731662,"TERMINAL",0,0,"855",,terminal_output +4017,5732738,"TERMINAL",0,0,"977",,terminal_output +4018,5733755,"TERMINAL",0,0,"2188",,terminal_output +4019,5734803,"TERMINAL",0,0,"299",,terminal_output +4020,5735848,"TERMINAL",0,0,"35050",,terminal_output +4021,5736889,"TERMINAL",0,0,"411",,terminal_output +4022,5737919,"TERMINAL",0,0,"522",,terminal_output +4023,5738965,"TERMINAL",0,0,"633",,terminal_output +4024,5740002,"TERMINAL",0,0,"744",,terminal_output +4025,5741041,"TERMINAL",0,0,"855",,terminal_output +4026,5742204,"TERMINAL",0,0,"966",,terminal_output +4027,5743249,"TERMINAL",0,0,"3077",,terminal_output +4028,5744329,"TERMINAL",0,0,"188",,terminal_output +4029,5745388,"TERMINAL",0,0,"299",,terminal_output 
+4030,5746446,"TERMINAL",0,0,"31:001:00",,terminal_output +4031,5747482,"TERMINAL",0,0,"411",,terminal_output +4032,5748373,"TERMINAL",0,0,"522",,terminal_output +4033,5749421,"TERMINAL",0,0,"633",,terminal_output +4034,5750453,"TERMINAL",0,0,"744",,terminal_output +4035,5751492,"TERMINAL",0,0,"855",,terminal_output +4036,5752528,"TERMINAL",0,0,"966",,terminal_output +4037,5753574,"TERMINAL",0,0,"4077",,terminal_output +4038,5754614,"TERMINAL",0,0,"188",,terminal_output +4039,5755659,"TERMINAL",0,0,"299",,terminal_output +4040,5756699,"TERMINAL",0,0,"31111",,terminal_output +4041,5757778,"TERMINAL",0,0,"522",,terminal_output +4042,5758885,"TERMINAL",0,0,"633",,terminal_output +4043,5759828,"TERMINAL",0,0,"744",,terminal_output +4044,5760869,"TERMINAL",0,0,"855",,terminal_output +4045,5761913,"TERMINAL",0,0,"966",,terminal_output +4046,5762956,"TERMINAL",0,0,"5077",,terminal_output +4047,5763997,"TERMINAL",0,0,"188",,terminal_output +4048,5765032,"TERMINAL",0,0,"299",,terminal_output +4049,5766121,"TERMINAL",0,0,"32020",,terminal_output +4050,5767129,"TERMINAL",0,0,"411",,terminal_output +4051,5768165,"TERMINAL",0,0,"522",,terminal_output +4052,5769216,"TERMINAL",0,0,"633",,terminal_output +4053,5770252,"TERMINAL",0,0,"744",,terminal_output +4054,5771303,"TERMINAL",0,0,"855",,terminal_output +4055,5772359,"TERMINAL",0,0,"966",,terminal_output +4056,5773400,"TERMINAL",0,0,"20:0077",,terminal_output +4057,5774455,"TERMINAL",0,0,"188",,terminal_output +4058,5775478,"TERMINAL",0,0,"299",,terminal_output +4059,5776520,"TERMINAL",0,0,"33030",,terminal_output +4060,5777556,"TERMINAL",0,0,"411",,terminal_output +4061,5778594,"TERMINAL",0,0,"522",,terminal_output +4062,5779649,"TERMINAL",0,0,"633",,terminal_output +4063,5780735,"TERMINAL",0,0,"755",,terminal_output +4064,5781744,"TERMINAL",0,0,"966",,terminal_output +4065,5782895,"TERMINAL",0,0,"1077",,terminal_output +4066,5784044,"TERMINAL",0,0,"188",,terminal_output +4067,5786150,"TERMINAL",0,0,"24040",,terminal_output +4068,5787229,"TERMINAL",0,0,"411",,terminal_output +4069,5788273,"TERMINAL",0,0,"522",,terminal_output +4070,5789136,"TERMINAL",0,0,"633",,terminal_output +4071,5790180,"TERMINAL",0,0,"744",,terminal_output +4072,5791214,"TERMINAL",0,0,"855",,terminal_output +4073,5792291,"TERMINAL",0,0,"966",,terminal_output +4074,5793362,"TERMINAL",0,0,"2077",,terminal_output +4075,5794426,"TERMINAL",0,0,"188",,terminal_output +4076,5795536,"TERMINAL",0,0,"299",,terminal_output +4077,5796647,"TERMINAL",0,0,"35050",,terminal_output +4078,5797526,"TERMINAL",0,0,"411",,terminal_output +4079,5798585,"TERMINAL",0,0,"522",,terminal_output +4080,5799637,"TERMINAL",0,0,"633",,terminal_output +4081,5800689,"TERMINAL",0,0,"744",,terminal_output +4082,5801705,"TERMINAL",0,0,"866",,terminal_output +4083,5802776,"TERMINAL",0,0,"3077",,terminal_output +4084,5803852,"TERMINAL",0,0,"188",,terminal_output +4085,5804880,"TERMINAL",0,0,"299",,terminal_output +4086,5805900,"TERMINAL",0,0,"32:002:00",,terminal_output +4087,5806938,"TERMINAL",0,0,"411",,terminal_output +4088,5807991,"TERMINAL",0,0,"522",,terminal_output +4089,5809072,"TERMINAL",0,0,"633",,terminal_output +4090,5810124,"TERMINAL",0,0,"744",,terminal_output +4091,5811236,"TERMINAL",0,0,"855",,terminal_output +4092,5812194,"TERMINAL",0,0,"966",,terminal_output +4093,5813265,"TERMINAL",0,0,"4077",,terminal_output +4094,5814293,"TERMINAL",0,0,"188",,terminal_output +4095,5815361,"TERMINAL",0,0,"299",,terminal_output +4096,5816397,"TERMINAL",0,0,"31010",,terminal_output 
+4097,5817473,"TERMINAL",0,0,"411",,terminal_output +4098,5818502,"TERMINAL",0,0,"522",,terminal_output +4099,5819557,"TERMINAL",0,0,"633",,terminal_output +4100,5820597,"TERMINAL",0,0,"744",,terminal_output +4101,5821651,"TERMINAL",0,0,"855",,terminal_output +4102,5822700,"TERMINAL",0,0,"977",,terminal_output +4103,5823752,"TERMINAL",0,0,"5188",,terminal_output +4104,5824806,"TERMINAL",0,0,"299",,terminal_output +4105,5825918,"TERMINAL",0,0,"32020",,terminal_output +4106,5826936,"TERMINAL",0,0,"411",,terminal_output +4107,5827989,"TERMINAL",0,0,"522",,terminal_output +4108,5829038,"TERMINAL",0,0,"633",,terminal_output +4109,5830087,"TERMINAL",0,0,"744",,terminal_output +4110,5831141,"TERMINAL",0,0,"855",,terminal_output +4111,5832195,"TERMINAL",0,0,"966",,terminal_output +4112,5833235,"TERMINAL",0,0,"1:0077",,terminal_output +4113,5834280,"TERMINAL",0,0,"188",,terminal_output +4114,5835333,"TERMINAL",0,0,"299",,terminal_output +4115,5836382,"TERMINAL",0,0,"33030",,terminal_output +4116,5837435,"TERMINAL",0,0,"411",,terminal_output +4117,5838496,"TERMINAL",0,0,"522",,terminal_output +4118,5839532,"TERMINAL",0,0,"633",,terminal_output +4119,5840584,"TERMINAL",0,0,"744",,terminal_output +4120,5841637,"TERMINAL",0,0,"855",,terminal_output +4121,5842677,"TERMINAL",0,0,"977",,terminal_output +4122,5843728,"TERMINAL",0,0,"1188",,terminal_output +4123,5844771,"TERMINAL",0,0,"299",,terminal_output +4124,5845836,"TERMINAL",0,0,"34040",,terminal_output +4125,5846867,"TERMINAL",0,0,"411",,terminal_output +4126,5847925,"TERMINAL",0,0,"522",,terminal_output +4127,5848955,"TERMINAL",0,0,"633",,terminal_output +4128,5849999,"TERMINAL",0,0,"744",,terminal_output +4129,5851047,"TERMINAL",0,0,"855",,terminal_output +4130,5852086,"TERMINAL",0,0,"966",,terminal_output +4131,5853142,"TERMINAL",0,0,"2077",,terminal_output +4132,5854184,"TERMINAL",0,0,"188",,terminal_output +4133,5855233,"TERMINAL",0,0,"299",,terminal_output +4134,5856319,"TERMINAL",0,0,"35050",,terminal_output +4135,5857357,"TERMINAL",0,0,"411",,terminal_output +4136,5858384,"TERMINAL",0,0,"522",,terminal_output +4137,5859429,"TERMINAL",0,0,"633",,terminal_output +4138,5860488,"TERMINAL",0,0,"744",,terminal_output +4139,5861535,"TERMINAL",0,0,"855",,terminal_output +4140,5862560,"TERMINAL",0,0,"966",,terminal_output +4141,5863599,"TERMINAL",0,0,"3077",,terminal_output +4142,5864731,"TERMINAL",0,0,"188",,terminal_output +4143,5865698,"TERMINAL",0,0,"23:003:00",,terminal_output +4144,5866789,"TERMINAL",0,0,"411",,terminal_output +4145,5867775,"TERMINAL",0,0,"522",,terminal_output +4146,5868828,"TERMINAL",0,0,"633",,terminal_output +4147,5869875,"TERMINAL",0,0,"744",,terminal_output +4148,5870919,"TERMINAL",0,0,"855",,terminal_output +4149,5871953,"TERMINAL",0,0,"966",,terminal_output +4150,5873000,"TERMINAL",0,0,"4077",,terminal_output +4151,5874048,"TERMINAL",0,0,"188",,terminal_output +4152,5875093,"TERMINAL",0,0,"299",,terminal_output +4153,5876169,"TERMINAL",0,0,"31010",,terminal_output +4154,5877199,"TERMINAL",0,0,"411",,terminal_output +4155,5878280,"TERMINAL",0,0,"522",,terminal_output +4156,5879321,"TERMINAL",0,0,"633",,terminal_output +4157,5880386,"TERMINAL",0,0,"744",,terminal_output +4158,5881436,"TERMINAL",0,0,"855",,terminal_output +4159,5882456,"TERMINAL",0,0,"966",,terminal_output +4160,5883492,"TERMINAL",0,0,"5077",,terminal_output +4161,5884540,"TERMINAL",0,0,"188",,terminal_output +4162,5885576,"TERMINAL",0,0,"299",,terminal_output +4163,5886635,"TERMINAL",0,0,"32020",,terminal_output 
+4164,5887675,"TERMINAL",0,0,"422",,terminal_output +4165,5888737,"TERMINAL",0,0,"633",,terminal_output +4166,5889753,"TERMINAL",0,0,"744",,terminal_output +4167,5890802,"TERMINAL",0,0,"855",,terminal_output +4168,5891884,"TERMINAL",0,0,"966",,terminal_output +4169,5893019,"TERMINAL",0,0,"2:0077",,terminal_output +4170,5894117,"TERMINAL",0,0,"188",,terminal_output +4171,5895001,"TERMINAL",0,0,"299",,terminal_output +4172,5896130,"TERMINAL",0,0,"33030",,terminal_output +4173,5897167,"TERMINAL",0,0,"411",,terminal_output +4174,5898132,"TERMINAL",0,0,"522",,terminal_output +4175,5899188,"TERMINAL",0,0,"633",,terminal_output +4176,5900227,"TERMINAL",0,0,"744",,terminal_output +4177,5901274,"TERMINAL",0,0,"855",,terminal_output +4178,5902315,"TERMINAL",0,0,"966",,terminal_output +4179,5903352,"TERMINAL",0,0,"1077",,terminal_output +4180,5904448,"TERMINAL",0,0,"188",,terminal_output +4181,5905444,"TERMINAL",0,0,"299",,terminal_output +4182,5906496,"TERMINAL",0,0,"34040",,terminal_output +4183,5907545,"TERMINAL",0,0,"411",,terminal_output +4184,5908601,"TERMINAL",0,0,"522",,terminal_output +4185,5909642,"TERMINAL",0,0,"633",,terminal_output +4186,5910679,"TERMINAL",0,0,"755",,terminal_output +4187,5911726,"TERMINAL",0,0,"966",,terminal_output +4188,5912764,"TERMINAL",0,0,"2077",,terminal_output +4189,5913830,"TERMINAL",0,0,"188",,terminal_output +4190,5914865,"TERMINAL",0,0,"299",,terminal_output +4191,5915963,"TERMINAL",0,0,"35050",,terminal_output +4192,5916933,"TERMINAL",0,0,"411",,terminal_output +4193,5917979,"TERMINAL",0,0,"522",,terminal_output +4194,5919025,"TERMINAL",0,0,"633",,terminal_output +4195,5920180,"TERMINAL",0,0,"744",,terminal_output +4196,5921143,"TERMINAL",0,0,"855",,terminal_output +4197,5922145,"TERMINAL",0,0,"966",,terminal_output +4198,5923191,"TERMINAL",0,0,"3077",,terminal_output +4199,5924232,"TERMINAL",0,0,"188",,terminal_output +4200,5925271,"TERMINAL",0,0,"299",,terminal_output +4201,5926305,"TERMINAL",0,0,"34:004:00",,terminal_output +4202,5927346,"TERMINAL",0,0,"411",,terminal_output +4203,5928384,"TERMINAL",0,0,"522",,terminal_output +4204,5929440,"TERMINAL",0,0,"633",,terminal_output +4205,5930462,"TERMINAL",0,0,"744",,terminal_output +4206,5931536,"TERMINAL",0,0,"855",,terminal_output +4207,5932540,"TERMINAL",0,0,"966",,terminal_output +4208,5933584,"TERMINAL",0,0,"4077",,terminal_output +4209,5934984,"TERMINAL",0,0,"188",,terminal_output +4210,5935706,"TERMINAL",0,0,"21010",,terminal_output +4211,5936734,"TERMINAL",0,0,"411",,terminal_output +4212,5937751,"TERMINAL",0,0,"522",,terminal_output +4213,5938832,"TERMINAL",0,0,"633",,terminal_output +4214,5939846,"TERMINAL",0,0,"744",,terminal_output +4215,5940884,"TERMINAL",0,0,"855",,terminal_output +4216,5941938,"TERMINAL",0,0,"966",,terminal_output +4217,5942975,"TERMINAL",0,0,"5077",,terminal_output +4218,5944020,"TERMINAL",0,0,"188",,terminal_output +4219,5945071,"TERMINAL",0,0,"299",,terminal_output +4220,5946120,"TERMINAL",0,0,"32020",,terminal_output +4221,5947158,"TERMINAL",0,0,"411",,terminal_output +4222,5948225,"TERMINAL",0,0,"522",,terminal_output +4223,5949251,"TERMINAL",0,0,"633",,terminal_output +4224,5950291,"TERMINAL",0,0,"744",,terminal_output +4225,5951346,"TERMINAL",0,0,"855",,terminal_output +4226,5952394,"TERMINAL",0,0,"966",,terminal_output +4227,5953463,"TERMINAL",0,0,"3:0077",,terminal_output +4228,5954489,"TERMINAL",0,0,"188",,terminal_output +4229,5955530,"TERMINAL",0,0,"299",,terminal_output +4230,5956582,"TERMINAL",0,0,"33030",,terminal_output 
+4231,5957627,"TERMINAL",0,0,"411",,terminal_output +4232,5958664,"TERMINAL",0,0,"533",,terminal_output +4233,5959712,"TERMINAL",0,0,"744",,terminal_output +4234,5960762,"TERMINAL",0,0,"855",,terminal_output +4235,5961822,"TERMINAL",0,0,"966",,terminal_output +4236,5962852,"TERMINAL",0,0,"1077",,terminal_output +4237,5963897,"TERMINAL",0,0,"188",,terminal_output +4238,5964953,"TERMINAL",0,0,"299",,terminal_output +4239,5965996,"TERMINAL",0,0,"34040",,terminal_output +4240,5967084,"TERMINAL",0,0,"411",,terminal_output +4241,5968097,"TERMINAL",0,0,"522",,terminal_output +4242,5969129,"TERMINAL",0,0,"633",,terminal_output +4243,5970210,"TERMINAL",0,0,"744",,terminal_output +4244,5971212,"TERMINAL",0,0,"855",,terminal_output +4245,5972263,"TERMINAL",0,0,"966",,terminal_output +4246,5973318,"TERMINAL",0,0,"2077",,terminal_output +4247,5974403,"TERMINAL",0,0,"188",,terminal_output +4248,5975445,"TERMINAL",0,0,"299",,terminal_output +4249,5976449,"TERMINAL",0,0,"35050",,terminal_output +4250,5977499,"TERMINAL",0,0,"411",,terminal_output +4251,5978576,"TERMINAL",0,0,"522",,terminal_output +4252,5979587,"TERMINAL",0,0,"633",,terminal_output +4253,5980680,"TERMINAL",0,0,"744",,terminal_output +4254,5981671,"TERMINAL",0,0,"855",,terminal_output +4255,5982882,"TERMINAL",0,0,"3077",,terminal_output +4256,5983743,"TERMINAL",0,0,"188",,terminal_output +4257,5984843,"TERMINAL",0,0,"299",,terminal_output +4258,5985855,"TERMINAL",0,0,"35:005:00",,terminal_output +4259,5986877,"TERMINAL",0,0,"411",,terminal_output +4260,5987942,"TERMINAL",0,0,"522",,terminal_output +4261,5989036,"TERMINAL",0,0,"633",,terminal_output +4262,5990116,"TERMINAL",0,0,"744",,terminal_output +4263,5991071,"TERMINAL",0,0,"855",,terminal_output +4264,5992149,"TERMINAL",0,0,"966",,terminal_output +4265,5993200,"TERMINAL",0,0,"4077",,terminal_output +4266,5994184,"TERMINAL",0,0,"188",,terminal_output +4267,5995229,"TERMINAL",0,0,"299",,terminal_output +4268,5996269,"TERMINAL",0,0,"31010",,terminal_output +4269,5997310,"TERMINAL",0,0,"411",,terminal_output +4270,5998370,"TERMINAL",0,0,"522",,terminal_output +4271,5999498,"TERMINAL",0,0,"633",,terminal_output +4272,6000426,"TERMINAL",0,0,"744",,terminal_output +4273,6001467,"TERMINAL",0,0,"855",,terminal_output +4274,6002501,"TERMINAL",0,0,"966",,terminal_output +4275,6003548,"TERMINAL",0,0,"5077",,terminal_output +4276,6004614,"TERMINAL",0,0,"188",,terminal_output +4277,6005669,"TERMINAL",0,0,"299",,terminal_output +4278,6006892,"TERMINAL",0,0,"32121",,terminal_output +4279,6007782,"TERMINAL",0,0,"522",,terminal_output +4280,6008961,"TERMINAL",0,0,"633",,terminal_output +4281,6009850,"TERMINAL",0,0,"744",,terminal_output +4282,6010861,"TERMINAL",0,0,"855",,terminal_output +4283,6011886,"TERMINAL",0,0,"966",,terminal_output +4284,6012952,"TERMINAL",0,0,"4:0077",,terminal_output +4285,6013979,"TERMINAL",0,0,"188",,terminal_output +4286,6015072,"TERMINAL",0,0,"299",,terminal_output +4287,6016077,"TERMINAL",0,0,"33030",,terminal_output +4288,6017105,"TERMINAL",0,0,"411",,terminal_output +4289,6018147,"TERMINAL",0,0,"522",,terminal_output +4290,6019247,"TERMINAL",0,0,"633",,terminal_output +4291,6020248,"TERMINAL",0,0,"744",,terminal_output +4292,6021305,"TERMINAL",0,0,"855",,terminal_output +4293,6022317,"TERMINAL",0,0,"966",,terminal_output +4294,6023357,"TERMINAL",0,0,"1077",,terminal_output +4295,6024406,"TERMINAL",0,0,"188",,terminal_output +4296,6025443,"TERMINAL",0,0,"299",,terminal_output +4297,6026492,"TERMINAL",0,0,"34040",,terminal_output 
+4298,6027530,"TERMINAL",0,0,"411",,terminal_output +4299,6028654,"TERMINAL",0,0,"522",,terminal_output +4300,6029612,"TERMINAL",0,0,"633",,terminal_output +4301,6030650,"TERMINAL",0,0,"744",,terminal_output +4302,6031693,"TERMINAL",0,0,"866",,terminal_output +4303,6032744,"TERMINAL",0,0,"2077",,terminal_output +4304,6033779,"TERMINAL",0,0,"188",,terminal_output +4305,6034824,"TERMINAL",0,0,"299",,terminal_output +4306,6035866,"TERMINAL",0,0,"35050",,terminal_output +4307,6036907,"TERMINAL",0,0,"411",,terminal_output +4308,6037951,"TERMINAL",0,0,"522",,terminal_output +4309,6038997,"TERMINAL",0,0,"633",,terminal_output +4310,6040033,"TERMINAL",0,0,"744",,terminal_output +4311,6041080,"TERMINAL",0,0,"855",,terminal_output +4312,6042123,"TERMINAL",0,0,"966",,terminal_output +4313,6043162,"TERMINAL",0,0,"3077",,terminal_output +4314,6044199,"TERMINAL",0,0,"188",,terminal_output +4315,6045253,"TERMINAL",0,0,"299",,terminal_output +4316,6046297,"TERMINAL",0,0,"36:006:00",,terminal_output +4317,6047327,"TERMINAL",0,0,"411",,terminal_output +4318,6048374,"TERMINAL",0,0,"522",,terminal_output +4319,6049414,"TERMINAL",0,0,"633",,terminal_output +4320,6050474,"TERMINAL",0,0,"744",,terminal_output +4321,6051495,"TERMINAL",0,0,"855",,terminal_output +4322,6052541,"TERMINAL",0,0,"966",,terminal_output +4323,6053589,"TERMINAL",0,0,"4077",,terminal_output +4324,6054629,"TERMINAL",0,0,"188",,terminal_output +4325,6055674,"TERMINAL",0,0,"21010",,terminal_output +4326,6056706,"TERMINAL",0,0,"411",,terminal_output +4327,6057749,"TERMINAL",0,0,"522",,terminal_output +4328,6058837,"TERMINAL",0,0,"633",,terminal_output +4329,6059841,"TERMINAL",0,0,"744",,terminal_output +4330,6060873,"TERMINAL",0,0,"855",,terminal_output +4331,6061915,"TERMINAL",0,0,"966",,terminal_output +4332,6062958,"TERMINAL",0,0,"5077",,terminal_output +4333,6063995,"TERMINAL",0,0,"188",,terminal_output +4334,6065028,"TERMINAL",0,0,"299",,terminal_output +4335,6066081,"TERMINAL",0,0,"32020",,terminal_output +4336,6067120,"TERMINAL",0,0,"411",,terminal_output +4337,6068186,"TERMINAL",0,0,"522",,terminal_output +4338,6069265,"TERMINAL",0,0,"633",,terminal_output +4339,6070264,"TERMINAL",0,0,"744",,terminal_output +4340,6071305,"TERMINAL",0,0,"855",,terminal_output +4341,6072345,"TERMINAL",0,0,"966",,terminal_output +4342,6073386,"TERMINAL",0,0,"5:0077",,terminal_output +4343,6074433,"TERMINAL",0,0,"188",,terminal_output +4344,6075484,"TERMINAL",0,0,"299",,terminal_output +4345,6076513,"TERMINAL",0,0,"33030",,terminal_output +4346,6077559,"TERMINAL",0,0,"411",,terminal_output +4347,6078610,"TERMINAL",0,0,"522",,terminal_output +4348,6079652,"TERMINAL",0,0,"633",,terminal_output +4349,6080700,"TERMINAL",0,0,"755",,terminal_output +4350,6081742,"TERMINAL",0,0,"966",,terminal_output +4351,6082790,"TERMINAL",0,0,"1077",,terminal_output +4352,6083833,"TERMINAL",0,0,"188",,terminal_output +4353,6084874,"TERMINAL",0,0,"299",,terminal_output +4354,6085922,"TERMINAL",0,0,"34040",,terminal_output +4355,6086970,"TERMINAL",0,0,"411",,terminal_output +4356,6088023,"TERMINAL",0,0,"522",,terminal_output +4357,6089057,"TERMINAL",0,0,"633",,terminal_output +4358,6090105,"TERMINAL",0,0,"744",,terminal_output +4359,6091153,"TERMINAL",0,0,"855",,terminal_output +4360,6092203,"TERMINAL",0,0,"966",,terminal_output +4361,6093281,"TERMINAL",0,0,"2077",,terminal_output +4362,6094282,"TERMINAL",0,0,"188",,terminal_output +4363,6095317,"TERMINAL",0,0,"299",,terminal_output +4364,6096364,"TERMINAL",0,0,"35050",,terminal_output 
+4365,6097409,"TERMINAL",0,0,"411",,terminal_output +4366,6098446,"TERMINAL",0,0,"522",,terminal_output +4367,6099495,"TERMINAL",0,0,"633",,terminal_output +4368,6100545,"TERMINAL",0,0,"744",,terminal_output +4369,6101566,"TERMINAL",0,0,"855",,terminal_output +4370,6102605,"TERMINAL",0,0,"966",,terminal_output +4371,6103646,"TERMINAL",0,0,"3077",,terminal_output +4372,6104698,"TERMINAL",0,0,"199",,terminal_output +4373,6105748,"TERMINAL",0,0,"37:007:00",,terminal_output +4374,6106769,"TERMINAL",0,0,"411",,terminal_output +4375,6107808,"TERMINAL",0,0,"522",,terminal_output +4376,6108846,"TERMINAL",0,0,"633",,terminal_output +4377,6109896,"TERMINAL",0,0,"744",,terminal_output +4378,6110984,"TERMINAL",0,0,"855",,terminal_output +4379,6112011,"TERMINAL",0,0,"966",,terminal_output +4380,6113081,"TERMINAL",0,0,"4077",,terminal_output +4381,6114119,"TERMINAL",0,0,"188",,terminal_output +4382,6115170,"TERMINAL",0,0,"299",,terminal_output +4383,6116429,"TERMINAL",0,0,"31010",,terminal_output +4384,6117473,"TERMINAL",0,0,"411",,terminal_output +4385,6118500,"TERMINAL",0,0,"522",,terminal_output +4386,6119554,"TERMINAL",0,0,"633",,terminal_output +4387,6120602,"TERMINAL",0,0,"744",,terminal_output +4388,6121658,"TERMINAL",0,0,"855",,terminal_output +4389,6122700,"TERMINAL",0,0,"966",,terminal_output +4390,6123751,"TERMINAL",0,0,"5077",,terminal_output +4391,6124809,"TERMINAL",0,0,"199",,terminal_output +4392,6125890,"TERMINAL",0,0,"32020",,terminal_output +4393,6126894,"TERMINAL",0,0,"411",,terminal_output +4394,6127941,"TERMINAL",0,0,"522",,terminal_output +4395,6129000,"TERMINAL",0,0,"633",,terminal_output +4396,6130046,"TERMINAL",0,0,"744",,terminal_output +4397,6131025,"TERMINAL",0,0,"855",,terminal_output +4398,6132095,"TERMINAL",0,0,"966",,terminal_output +4399,6133139,"TERMINAL",0,0,"6:0077",,terminal_output +4400,6134173,"TERMINAL",0,0,"188",,terminal_output +4401,6135214,"TERMINAL",0,0,"299",,terminal_output +4402,6136261,"TERMINAL",0,0,"33030",,terminal_output +4403,6137304,"TERMINAL",0,0,"411",,terminal_output +4404,6138393,"TERMINAL",0,0,"522",,terminal_output +4405,6139397,"TERMINAL",0,0,"633",,terminal_output +4406,6140452,"TERMINAL",0,0,"744",,terminal_output +4407,6141514,"TERMINAL",0,0,"855",,terminal_output +4408,6142596,"TERMINAL",0,0,"966",,terminal_output +4409,6143653,"TERMINAL",0,0,"1077",,terminal_output +4410,6144723,"TERMINAL",0,0,"188",,terminal_output +4411,6145800,"TERMINAL",0,0,"299",,terminal_output +4412,6146823,"TERMINAL",0,0,"34141",,terminal_output +4413,6147895,"TERMINAL",0,0,"522",,terminal_output +4414,6148831,"TERMINAL",0,0,"633",,terminal_output +4415,6149839,"TERMINAL",0,0,"744",,terminal_output +4416,6150884,"TERMINAL",0,0,"855",,terminal_output +4417,6151940,"TERMINAL",0,0,"966",,terminal_output +4418,6152995,"TERMINAL",0,0,"2077",,terminal_output +4419,6154020,"TERMINAL",0,0,"188",,terminal_output +4420,6155055,"TERMINAL",0,0,"299",,terminal_output +4421,6156101,"TERMINAL",0,0,"35050",,terminal_output +4422,6157162,"TERMINAL",0,0,"411",,terminal_output +4423,6158202,"TERMINAL",0,0,"522",,terminal_output +4424,6159299,"TERMINAL",0,0,"633",,terminal_output +4425,6160302,"TERMINAL",0,0,"744",,terminal_output +4426,6161340,"TERMINAL",0,0,"855",,terminal_output +4427,6162400,"TERMINAL",0,0,"966",,terminal_output +4428,6163436,"TERMINAL",0,0,"3077",,terminal_output +4429,6164485,"TERMINAL",0,0,"188",,terminal_output +4430,6165536,"TERMINAL",0,0,"299",,terminal_output +4431,6166606,"TERMINAL",0,0,"38:008:00",,terminal_output 
+4432,6167651,"TERMINAL",0,0,"411",,terminal_output +4433,6168725,"TERMINAL",0,0,"533",,terminal_output +4434,6169746,"TERMINAL",0,0,"744",,terminal_output +4435,6170793,"TERMINAL",0,0,"855",,terminal_output +4436,6171847,"TERMINAL",0,0,"966",,terminal_output +4437,6172891,"TERMINAL",0,0,"4077",,terminal_output +4438,6173963,"TERMINAL",0,0,"188",,terminal_output +4439,6174983,"TERMINAL",0,0,"299",,terminal_output +4440,6176031,"TERMINAL",0,0,"31010",,terminal_output +4441,6177127,"TERMINAL",0,0,"411",,terminal_output +4442,6178115,"TERMINAL",0,0,"522",,terminal_output +4443,6179162,"TERMINAL",0,0,"633",,terminal_output +4444,6180218,"TERMINAL",0,0,"744",,terminal_output +4445,6181323,"TERMINAL",0,0,"855",,terminal_output +4446,6182304,"TERMINAL",0,0,"966",,terminal_output +4447,6183347,"TERMINAL",0,0,"5077",,terminal_output +4448,6184382,"TERMINAL",0,0,"188",,terminal_output +4449,6185423,"TERMINAL",0,0,"299",,terminal_output +4450,6186462,"TERMINAL",0,0,"32020",,terminal_output +4451,6187504,"TERMINAL",0,0,"411",,terminal_output +4452,6188554,"TERMINAL",0,0,"522",,terminal_output +4453,6189640,"TERMINAL",0,0,"633",,terminal_output +4454,6190655,"TERMINAL",0,0,"744",,terminal_output +4455,6191686,"TERMINAL",0,0,"866",,terminal_output +4456,6192734,"TERMINAL",0,0,"7:0077",,terminal_output +4457,6193780,"TERMINAL",0,0,"188",,terminal_output +4458,6194817,"TERMINAL",0,0,"299",,terminal_output +4459,6195864,"TERMINAL",0,0,"33030",,terminal_output +4460,6196896,"TERMINAL",0,0,"411",,terminal_output +4461,6197937,"TERMINAL",0,0,"522",,terminal_output +4462,6198988,"TERMINAL",0,0,"633",,terminal_output +4463,6200031,"TERMINAL",0,0,"744",,terminal_output +4464,6201073,"TERMINAL",0,0,"855",,terminal_output +4465,6202139,"TERMINAL",0,0,"966",,terminal_output +4466,6203190,"TERMINAL",0,0,"1077",,terminal_output +4467,6204223,"TERMINAL",0,0,"188",,terminal_output +4468,6205260,"TERMINAL",0,0,"299",,terminal_output +4469,6206314,"TERMINAL",0,0,"34040",,terminal_output +4470,6207350,"TERMINAL",0,0,"411",,terminal_output +4471,6208506,"TERMINAL",0,0,"522",,terminal_output +4472,6209566,"TERMINAL",0,0,"633",,terminal_output +4473,6210683,"TERMINAL",0,0,"744",,terminal_output +4474,6211527,"TERMINAL",0,0,"855",,terminal_output +4475,6212582,"TERMINAL",0,0,"966",,terminal_output +4476,6213642,"TERMINAL",0,0,"2077",,terminal_output +4477,6214671,"TERMINAL",0,0,"188",,terminal_output +4478,6215741,"TERMINAL",0,0,"25050",,terminal_output +4479,6216784,"TERMINAL",0,0,"411",,terminal_output +4480,6217833,"TERMINAL",0,0,"522",,terminal_output +4481,6218898,"TERMINAL",0,0,"633",,terminal_output +4482,6219996,"TERMINAL",0,0,"744",,terminal_output +4483,6221038,"TERMINAL",0,0,"855",,terminal_output +4484,6222134,"TERMINAL",0,0,"966",,terminal_output +4485,6223120,"TERMINAL",0,0,"3077",,terminal_output +4486,6224183,"TERMINAL",0,0,"188",,terminal_output +4487,6225425,"TERMINAL",0,0,"299",,terminal_output +4488,6226438,"TERMINAL",0,0,"39:009:00",,terminal_output +4489,6227480,"TERMINAL",0,0,"411",,terminal_output +4490,6228547,"TERMINAL",0,0,"522",,terminal_output +4491,6229606,"TERMINAL",0,0,"633",,terminal_output +4492,6230662,"TERMINAL",0,0,"744",,terminal_output +4493,6231675,"TERMINAL",0,0,"866",,terminal_output +4494,6232727,"TERMINAL",0,0,"4077",,terminal_output +4495,6233791,"TERMINAL",0,0,"188",,terminal_output +4496,6234975,"TERMINAL",0,0,"299",,terminal_output +4497,6235882,"TERMINAL",0,0,"31010",,terminal_output +4498,6236930,"TERMINAL",0,0,"411",,terminal_output 
+4499,6237981,"TERMINAL",0,0,"522",,terminal_output +4500,6239094,"TERMINAL",0,0,"633",,terminal_output +4501,6240163,"TERMINAL",0,0,"744",,terminal_output +4502,6241175,"TERMINAL",0,0,"855",,terminal_output +4503,6242209,"TERMINAL",0,0,"966",,terminal_output +4504,6243267,"TERMINAL",0,0,"5077",,terminal_output +4505,6244329,"TERMINAL",0,0,"188",,terminal_output +4506,6245382,"TERMINAL",0,0,"299",,terminal_output +4507,6246465,"TERMINAL",0,0,"32020",,terminal_output +4508,6247488,"TERMINAL",0,0,"411",,terminal_output +4509,6248539,"TERMINAL",0,0,"522",,terminal_output +4510,6249597,"TERMINAL",0,0,"633",,terminal_output +4511,6250652,"TERMINAL",0,0,"744",,terminal_output +4512,6251723,"TERMINAL",0,0,"866",,terminal_output +4513,6252738,"TERMINAL",0,0,"8:0077",,terminal_output +4514,6253781,"TERMINAL",0,0,"188",,terminal_output +4515,6254844,"TERMINAL",0,0,"299",,terminal_output +4516,6255893,"TERMINAL",0,0,"33030",,terminal_output +4517,6256931,"TERMINAL",0,0,"411",,terminal_output +4518,6257972,"TERMINAL",0,0,"522",,terminal_output +4519,6259059,"TERMINAL",0,0,"633",,terminal_output +4520,6260292,"TERMINAL",0,0,"744",,terminal_output +4521,6261121,"TERMINAL",0,0,"855",,terminal_output +4522,6262169,"TERMINAL",0,0,"966",,terminal_output +4523,6263220,"TERMINAL",0,0,"1077",,terminal_output +4524,6264258,"TERMINAL",0,0,"188",,terminal_output +4525,6265327,"TERMINAL",0,0,"299",,terminal_output +4526,6266362,"TERMINAL",0,0,"34040",,terminal_output +4527,6267407,"TERMINAL",0,0,"411",,terminal_output +4528,6268458,"TERMINAL",0,0,"522",,terminal_output +4529,6269503,"TERMINAL",0,0,"633",,terminal_output +4530,6270554,"TERMINAL",0,0,"744",,terminal_output +4531,6271604,"TERMINAL",0,0,"855",,terminal_output +4532,6272644,"TERMINAL",0,0,"966",,terminal_output +4533,6273683,"TERMINAL",0,0,"2088",,terminal_output +4534,6274716,"TERMINAL",0,0,"299",,terminal_output +4535,6275803,"TERMINAL",0,0,"35050",,terminal_output +4536,6276813,"TERMINAL",0,0,"411",,terminal_output +4537,6277860,"TERMINAL",0,0,"522",,terminal_output +4538,6278914,"TERMINAL",0,0,"633",,terminal_output +4539,6280071,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"",shellscript,tab +4540,6280271,"TERMINAL",0,0,"744",,terminal_output +4541,6280996,"TERMINAL",0,0,"855",,terminal_output +4542,6282042,"TERMINAL",0,0,"966",,terminal_output +4543,6283082,"TERMINAL",0,0,"3077",,terminal_output +4544,6284123,"TERMINAL",0,0,"188",,terminal_output +4545,6285287,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",0,0,"",python,tab +4546,6285330,"TERMINAL",0,0,"299",,terminal_output +4547,6286227,"TERMINAL",0,0,"310:0010:00",,terminal_output +4548,6287257,"TERMINAL",0,0,"411",,terminal_output +4549,6287431,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4273,0,"",python,selection_mouse +4550,6288304,"TERMINAL",0,0,"522",,terminal_output +4551,6289356,"TERMINAL",0,0,"633",,terminal_output +4552,6290385,"TERMINAL",0,0,"744",,terminal_output +4553,6290589,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4257,0,"",python,selection_command +4554,6291437,"TERMINAL",0,0,"855",,terminal_output +4555,6291807,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4257,0,"#",python,content +4556,6291809,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4258,0,"",python,selection_keyboard +4557,6291864,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4258,0," ",python,content 
+4558,6291865,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4259,0,"",python,selection_keyboard
+4559,6292428,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4258,0,"",python,selection_command
+4560,6292558,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4202,0,"",python,selection_command
+4562,6292783,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4258,0,"",python,selection_command
+4563,6292997,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4314,0,"",python,selection_command
+4564,6293158,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4350,0,"",python,selection_command
+4566,6294419,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4314,0,"",python,selection_command
+4567,6294562,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4258,0,"",python,selection_command
+4569,6295012,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4202,0,"",python,selection_command
+4570,6295444,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4201,0,"",python,selection_command
+4572,6296126,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4201,1,"",python,content
+4573,6296287,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4201,1,"",python,content
+4574,6296372,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4255,0,"",python,selection_command
+4576,6297347,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",4216,0,"",python,selection_mouse
+4580,6300360,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py",3052,0,"",python,selection_mouse
+4581,6300694,"genie.py",0,0,"",python,tab
+4643,6364409,"genie.py",5900,0,"",python,selection_mouse
+4652,6373107,"genie.py",5538,0,"",python,selection_mouse
+4654,6373246,"genie.py",5533,14,"unmasked_ratio",python,selection_mouse
+4660,6379322,"genie.py",5588,0,"",python,selection_mouse
+4662,6379439,"genie.py",5586,5,"steps",python,selection_mouse
+4664,6380545,"genie.py",5582,0,"",python,selection_mouse
+4665,6380709,"genie.py",5581,4,"self",python,selection_mouse
+4666,6380878,"genie.py",5581,10,"self.steps",python,selection_mouse
+4667,6380914,"genie.py",5581,78,"self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)",python,selection_mouse
+4668,6381143,"genie.py",5659,0,"",python,selection_mouse
+4670,6381701,"genie.py",5590,0,"",python,selection_mouse
+4671,6381864,"genie.py",5586,5,"steps",python,selection_mouse
+4672,6382064,"genie.py",5585,6,".steps",python,selection_mouse
+4673,6382108,"genie.py",5581,10,"self.steps",python,selection_mouse
+4675,6382597,"genie.py",5582,0,"",python,selection_mouse
+4676,6382689,"genie.py",5581,4,"self",python,selection_mouse
+4677,6382905,"genie.py",5581,5,"self.",python,selection_mouse
+4678,6382906,"genie.py",5581,10,"self.steps",python,selection_mouse
+4679,6383392,"genie.py",5589,0,"",python,selection_mouse
+4681,6384321,"genie.py",5593,0,"",python,selection_mouse
+4683,6384988,"genie.py",5591,0,"",python,selection_mouse
+4701,6402687,"genie.py",5617,0,"",python,selection_mouse
+4702,6402820,"genie.py",5617,1," ",python,selection_mouse
+4704,6403720,"genie.py",5540,0,"",python,selection_mouse
+4705,6403857,"genie.py",5533,14,"unmasked_ratio",python,selection_mouse
+4707,6405191,"genie.py",5627,0,"",python,selection_mouse
+4708,6405314,"genie.py",5623,11,"temperature",python,selection_mouse
+4710,6405626,"genie.py",5623,12,"temperature ",python,selection_mouse
+4711,6405626,"genie.py",5565,69,"* (step + 1) / (self.steps * 2))\n step_temp = self.temperature",python,selection_mouse
+4712,6405627,"genie.py",5568,66,"step + 1) / (self.steps * 2))\n step_temp = self.temperature",python,selection_mouse
+4713,6405667,"genie.py",5574,60," 1) / (self.steps * 2))\n step_temp = self.temperature",python,selection_mouse
+4714,6405728,"genie.py",5575,59,"1) / (self.steps * 2))\n step_temp = self.temperature",python,selection_mouse
+4715,6405729,"genie.py",5576,58,") / (self.steps * 2))\n step_temp = self.temperature",python,selection_mouse
+4716,6405729,"genie.py",5578,56,"/ (self.steps * 2))\n step_temp = self.temperature",python,selection_mouse
+4717,6405730,"genie.py",5579,55," (self.steps * 2))\n step_temp = self.temperature",python,selection_mouse
+4718,6405779,"genie.py",5581,53,"self.steps * 2))\n step_temp = self.temperature",python,selection_mouse
+4719,6405821,"genie.py",5586,48,"steps * 2))\n step_temp = self.temperature",python,selection_mouse
+4720,6405910,"genie.py",5591,43," * 2))\n step_temp = self.temperature",python,selection_mouse
+4721,6406048,"genie.py",5623,36,"temperature * (1.0 - unmasked_ratio)",python,selection_mouse
+4723,6406836,"genie.py",5659,0,"",python,selection_mouse
+4724,6407558,"genie.py",5659,62,"\n final_logits = self.dynamics.dynamics(vid_embed)[:, -",python,selection_mouse
+4725,6407559,"genie.py",5659,58,"\n final_logits = self.dynamics.dynamics(vid_embed)[",python,selection_mouse
+4726,6407559,"genie.py",5651,8,"d_ratio)",python,selection_mouse
+4727,6407560,"genie.py",5647,12,"asked_ratio)",python,selection_mouse
+4728,6407560,"genie.py",5645,14,"nmasked_ratio)",python,selection_mouse
+4729,6407560,"genie.py",5641,18," - unmasked_ratio)",python,selection_mouse
+4730,6407579,"genie.py",5640,19,"0 - unmasked_ratio)",python,selection_mouse
+4731,6407597,"genie.py",5638,21,"1.0 - unmasked_ratio)",python,selection_mouse
+4732,6407610,"genie.py",5659,39,"\n final_logits = self.dynamics.d",python,selection_mouse
+4733,6407641,"genie.py",5659,38,"\n final_logits = self.dynamics.",python,selection_mouse
+4734,6407647,"genie.py",5659,37,"\n final_logits = self.dynamics",python,selection_mouse
+4735,6407668,"genie.py",5659,36,"\n final_logits = self.dynamic",python,selection_mouse
+4737,6407685,"genie.py",5659,35,"\n final_logits = self.dynami",python,selection_mouse
+4738,6407699,"genie.py",5659,34,"\n final_logits = self.dynam",python,selection_mouse
+4739,6407746,"genie.py",5659,33,"\n final_logits = self.dyna",python,selection_mouse
+4740,6407753,"genie.py",5659,32,"\n final_logits = self.dyn",python,selection_mouse
+4741,6407792,"genie.py",5628,31,"rature * (1.0 - unmasked_ratio)",python,selection_mouse
+4742,6407792,"genie.py",5627,32,"erature * (1.0 - unmasked_ratio)",python,selection_mouse
+4743,6407805,"genie.py",5626,33,"perature * (1.0 - unmasked_ratio)",python,selection_mouse
+4744,6407837,"genie.py",5625,34,"mperature * (1.0 - unmasked_ratio)",python,selection_mouse
+4745,6407849,"genie.py",5624,35,"emperature * (1.0 - unmasked_ratio)",python,selection_mouse
+4746,6407895,"genie.py",5623,36,"temperature * (1.0 - unmasked_ratio)",python,selection_mouse
+4747,6407988,"genie.py",5622,37,".temperature * (1.0 - unmasked_ratio)",python,selection_mouse
+4748,6408025,"genie.py",5621,38,"f.temperature * (1.0 - unmasked_ratio)",python,selection_mouse
+4749,6408083,"genie.py",5620,39,"lf.temperature * (1.0 - unmasked_ratio)",python,selection_mouse
+4750,6408124,"genie.py",5619,40,"elf.temperature * (1.0 - unmasked_ratio)",python,selection_mouse
+4751,6408551,"genie.py",5618,41,"self.temperature * (1.0 - unmasked_ratio)",python,selection_mouse
+4753,6409208,"genie.py",5618,0,"",python,selection_mouse
+4754,6409209,"genie.py",5618,4,"self",python,selection_mouse
+4755,6409454,"genie.py",5618,5,"self.",python,selection_mouse
+4756,6409455,"genie.py",5618,16,"self.temperature",python,selection_mouse
+4757,6409526,"genie.py",5618,19,"self.temperature * ",python,selection_mouse
+4758,6409558,"genie.py",5618,21,"self.temperature * (1",python,selection_mouse
+4759,6409558,"genie.py",5618,22,"self.temperature * (1.",python,selection_mouse
+4760,6409572,"genie.py",5618,23,"self.temperature * (1.0",python,selection_mouse
+4761,6409603,"genie.py",5618,24,"self.temperature * (1.0 ",python,selection_mouse
+4762,6409615,"genie.py",5618,25,"self.temperature * (1.0 -",python,selection_mouse
+4763,6409644,"genie.py",5618,40,"self.temperature * (1.0 - unmasked_ratio",python,selection_mouse
+4765,6409942,"genie.py",5618,41,"self.temperature * (1.0 - unmasked_ratio)",python,selection_mouse
+4766,6410318,"genie.py",5659,0,"",python,selection_mouse
+4767,6410860,"genie.py",5658,1,")",python,selection_mouse
+4768,6410861,"genie.py",5644,15,"unmasked_ratio)",python,selection_mouse
+4769,6410861,"genie.py",5637,22,"(1.0 - unmasked_ratio)",python,selection_mouse
+4770,6410862,"genie.py",5634,25," * (1.0 - unmasked_ratio)",python,selection_mouse
+4771,6410862,"genie.py",5659,37,"\n final_logits = self.dynamics",python,selection_mouse
+4773,6410975,"genie.py",5659,29,"\n final_logits = self.",python,selection_mouse
+4774,6411025,"genie.py",5659,28,"\n final_logits = self",python,selection_mouse
+4775,6411164,"genie.py",5659,24,"\n final_logits = ",python,selection_mouse
+4776,6411247,"genie.py",5618,41,"self.temperature * (1.0 - unmasked_ratio)",python,selection_mouse
+4778,6411963,"genie.py",5619,0,"",python,selection_mouse
+4779,6411964,"genie.py",5618,4,"self",python,selection_mouse
+4780,6412230,"genie.py",5618,5,"self.",python,selection_mouse
+4781,6412231,"genie.py",5618,16,"self.temperature",python,selection_mouse
+4782,6412270,"genie.py",5618,17,"self.temperature ",python,selection_mouse
+4783,6412279,"genie.py",5618,19,"self.temperature * ",python,selection_mouse
+4784,6412317,"genie.py",5618,20,"self.temperature * (",python,selection_mouse
+4785,6412318,"genie.py",5618,22,"self.temperature * (1.",python,selection_mouse
+4786,6412346,"genie.py",5618,23,"self.temperature * (1.0",python,selection_mouse
+4787,6412362,"genie.py",5618,24,"self.temperature * (1.0 ",python,selection_mouse
+4788,6412500,"genie.py",5618,25,"self.temperature * (1.0 -",python,selection_mouse
+4789,6412500,"genie.py",5618,26,"self.temperature * (1.0 - ",python,selection_mouse
+4790,6412563,"genie.py",5618,40,"self.temperature * (1.0 - unmasked_ratio",python,selection_mouse
+4792,6412924,"genie.py",5645,0,"",python,selection_mouse
+4799,6420150,"genie.py",5611,0,"",python,selection_mouse
+4801,6420292,"genie.py",5606,9,"step_temp",python,selection_mouse
+4803,6421576,"genie.py",5541,0,"",python,selection_mouse
+4804,6421694,"genie.py",5533,14,"unmasked_ratio",python,selection_mouse
+4829,6447188,"genie.py",6392,0,"",python,selection_mouse
+4830,6447346,"genie.py",6381,14,"new_token_idxs",python,selection_mouse
+4833,6449077,"genie.py",6411,0,"",python,selection_mouse
+4834,6449232,"genie.py",6408,4,"mask",python,selection_mouse
+4843,6457231,"genie.py",6332,0,"",python,selection_mouse
+4844,6457379,"genie.py",6332,4,"mask",python,selection_mouse
+4869,6482836,"genie.py",6901,0,"",python,selection_mouse
+4871,6482952,"genie.py",6897,8,"new_mask",python,selection_mouse
+4942,6556422,"genie.py",5354,0,"",python,selection_mouse
+4944,6557257,"genie.py",5337,0,"",python,selection_mouse
+4946,6558547,"genie.py",5336,0,"",python,selection_command
+4950,6561771,"genie.py",6081,0,"",python,selection_mouse
+4951,6561776,"genie.py",6080,0,"",python,selection_command
+4952,6561926,"genie.py",6081,0,"",python,selection_mouse
+4953,6561931,"genie.py",6080,0,"",python,selection_command
+4955,6562648,"genie.py",5946,0,"",python,selection_mouse
+4956,6562658,"genie.py",5945,0,"",python,selection_command
+4957,6562812,"genie.py",5946,0,"",python,selection_mouse
+4958,6562855,"genie.py",5945,0,"",python,selection_command
+4959,6563333,"genie.py",6030,0,"",python,selection_mouse
+4960,6563348,"genie.py",6029,0,"",python,selection_command
+4962,6563524,"genie.py",6030,0,"",python,selection_mouse
+4963,6563561,"genie.py",6029,0,"",python,selection_command
+4964,6564313,"genie.py",5946,0,"",python,selection_mouse
+4965,6564319,"genie.py",5945,0,"",python,selection_command
+4966,6564443,"genie.py",5946,0,"",python,selection_mouse
+4967,6564449,"genie.py",5945,0,"",python,selection_command
+4969,6564637,"genie.py",5901,46," rng, _rng = jax.random.split(rng)\n",python,selection_mouse
+4970,6564685,"genie.py",5902,45," rng, _rng = jax.random.split(rng)\n",python,selection_command
+4972,6565597,"genie.py",5946,0,"",python,selection_mouse
+4973,6565640,"genie.py",5945,0,"",python,selection_command
+4974,6565770,"genie.py",5946,0,"",python,selection_mouse
+4975,6565811,"genie.py",5945,0,"",python,selection_command
+4976,6565953,"genie.py",5901,46," rng, _rng = jax.random.split(rng)\n",python,selection_mouse
+4977,6565991,"genie.py",5902,45," rng, _rng = jax.random.split(rng)\n",python,selection_command
+4978,6566403,"genie.py",5946,0,"",python,selection_mouse
+4979,6566411,"genie.py",5945,0,"",python,selection_command
+4981,6566554,"genie.py",5946,0,"",python,selection_mouse
+4982,6566591,"genie.py",5945,0,"",python,selection_command
+4983,6566735,"genie.py",5946,0,"",python,selection_mouse
+4984,6566736,"genie.py",5945,0,"",python,selection_command
+4985,6566911,"genie.py",5946,0,"",python,selection_mouse
+4986,6566956,"genie.py",5945,0,"",python,selection_command
+4987,6567093,"genie.py",5901,46," rng, _rng = jax.random.split(rng)\n",python,selection_mouse
+4988,6567147,"genie.py",5902,45," rng, _rng = jax.random.split(rng)\n",python,selection_command
+4989,6567265,"genie.py",5946,0,"",python,selection_mouse
+4990,6567270,"genie.py",5945,0,"",python,selection_command
+4991,6567440,"genie.py",5946,0,"",python,selection_mouse
+4992,6567457,"genie.py",5945,0,"",python,selection_command
+4994,6567636,"genie.py",5901,46," rng, _rng = jax.random.split(rng)\n",python,selection_mouse
+4995,6567677,"genie.py",5902,45," rng, _rng = jax.random.split(rng)\n",python,selection_command
+4996,6567816,"genie.py",5946,0,"",python,selection_mouse
+4997,6567842,"genie.py",5945,0,"",python,selection_command
+4998,6568478,"genie.py",5946,0,"",python,selection_mouse
+4999,6568507,"genie.py",5945,0,"",python,selection_command
+5001,6568659,"genie.py",5946,0,"",python,selection_mouse
+5002,6568676,"genie.py",5945,0,"",python,selection_command
+5003,6568853,"genie.py",5901,46," rng, _rng = jax.random.split(rng)\n",python,selection_mouse
+5004,6568897,"genie.py",5902,45," rng, _rng = jax.random.split(rng)\n",python,selection_command
+5013,6578014,"genie.py",5819,0,"",python,selection_mouse
+5014,6578021,"genie.py",5818,0,"",python,selection_command
+5030,6593638,"genie.py",5522,0,"",python,selection_mouse
+5040,6602976,"genie.py",5354,0,"",python,selection_mouse
+5042,6603697,"genie.py",5275,0,"",python,selection_mouse
+5044,6605402,"genie.py",4886,0,"",python,selection_mouse
+5046,6606095,"genie.py",4902,0,"",python,selection_mouse
+5047,6606099,"genie.py",4901,0,"",python,selection_command
+5049,6606648,"genie.py",4895,0,"",python,selection_mouse
+5050,6606809,"genie.py",4893,5,"shape",python,selection_mouse
+5051,6607428,"genie.py",4902,0,"",python,selection_mouse
+5052,6607459,"genie.py",4901,0,"",python,selection_command
+5057,6611397,"TERMINAL",0,0,"bash",,terminal_focus
+5064,6617965,"TERMINAL",0,0,"ls $ws_dir",,terminal_command
+5065,6618015,"TERMINAL",0,0,"]633;E;2025-07-01 00:34:05 ls $ws_dir ;9a6fb2c6-f596-403e-97b8-ced5ab6848ce]633;C",,terminal_output
+5067,6618187,"TERMINAL",0,0,"checkpoints knoms_mp4 knoms_tfrecords_2_shards_overfit open_ai_minecraft_first_try overfit_dir\r\ncoinrun knoms_mp4_clips knoms_tfrecords_500_shards open_ai_minecraft_first_try_npy procgen_env_16_episodes_20000\r\ndata_knoms knoms_npy knoms_tfrecords_500_shards_overfit_1 open_ai_minecraft_first_try_tfrecord\r\ndummy knoms_tfrecords knoms_tfrecords_500_shards_overfit_10 open_ai_minecraft_npy\r\nknoms_arrayrecords_500_shards knoms_tfrecords_200_shards open_ai_minecraft open_ai_minecraft_tfrecord\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output
+5073,6623995,"TERMINAL",0,0,"ls $ws_dir/checkpoints",,terminal_command
+5074,6624041,"TERMINAL",0,0,"]633;E;2025-07-01 00:34:11 ls $ws_dir/checkpoints;9a6fb2c6-f596-403e-97b8-ced5ab6848ce]633;C",,terminal_output
+5075,6624137,"TERMINAL",0,0,"3285360 3285642 3292119 3292156 3292207 3292306 3292310 3292314 3296719 3297546 3299271 3306965\r\n3285369 3292019 3292139 3292206 3292255 3292307 3292313 3296462 3297342 3297579 3306801 tokenizer\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output
+5087,6635308,"TERMINAL",0,0,"ls $ws_dir/../checkpoints",,terminal_command
+5088,6635401,"TERMINAL",0,0,"]633;E;2025-07-01 00:34:22 ls $ws_dir/../checkpoints;9a6fb2c6-f596-403e-97b8-ced5ab6848ce]633;C0000 3290366 3290440 3292328 3292333 3292338 3294603 3296574 3297577 3297671 3299062 3299069 3300233 3301025 3301031 dyn tokenizer\r\n3290283 3290367 3291405 3292329 3292334 3292339 3296502 3296575 3297578 3297693 3299063 3299258 3300290 3301026 3306801 dynamics_ckpt_dir tokenizer_ckpt_dir\r\n3290284 3290391 3292213 3292330 3292335 3294600 3296540 3297569 3297582 3297706 3299065 3299259 3300658 3301027 3307618 lam\r\n3290295 3290392 3292221 3292331 3292336 3294601 3296571 3297575 3297586 3297727 3299066 3299272 3300663 3301029 3307619 lam-1-action\r\n3290296 3290439 3292258 3292332 3292337 3294602 3296573 3297576 3297606 3299016 3299068 3299579 3300672 3301030 debug lam_ckpt_dir\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output
+5099,6646059,"TERMINAL",0,0,"ls $ws_dir/../checkpoints/3307618",,terminal_command
+5100,6646160,"TERMINAL",0,0,"]633;E;2025-07-01 00:34:33 ls $ws_dir/../checkpoints/3307618;9a6fb2c6-f596-403e-97b8-ced5ab6848ce]633;Cgenie_1751322003_1000 genie_1751322003_2000 genie_1751322003_3500 genie_1751322003_500 genie_1751322003_6000 genie_1751322003_7500 genie_1751322003_9000\r\ngenie_1751322003_10000 genie_1751322003_2500 genie_1751322003_4000 genie_1751322003_5000 genie_1751322003_6500 genie_1751322003_8000 genie_1751322003_9500\r\ngenie_1751322003_1500 genie_1751322003_3000 genie_1751322003_4500 genie_1751322003_5500 genie_1751322003_7000 genie_1751322003_8500\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output
+5250,6801651,"TERMINAL",0,0,"ls $ws_dir/../checkpoints/3307618",,terminal_command
+5251,6801698,"TERMINAL",0,0,"]633;E;2025-07-01 00:37:08 ls $ws_dir/../checkpoints/3307618;9a6fb2c6-f596-403e-97b8-ced5ab6848ce]633;Cgenie_1751322003_1000 genie_1751322003_11500 genie_1751322003_2500 genie_1751322003_4500 genie_1751322003_6000 genie_1751322003_8000\r\ngenie_1751322003_10000 genie_1751322003_12000 genie_1751322003_3000 genie_1751322003_500 genie_1751322003_6500 genie_1751322003_8500\r\ngenie_1751322003_10500 genie_1751322003_1500 genie_1751322003_3500 genie_1751322003_5000 genie_1751322003_7000 genie_1751322003_9000\r\ngenie_1751322003_11000 genie_1751322003_2000 genie_1751322003_4000 genie_1751322003_5500 genie_1751322003_7500 genie_1751322003_9500\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output
+5259,6808608,"TERMINAL",0,0,"echo $ws_dir/../checkpoints/3307618",,terminal_command
+5260,6808651,"TERMINAL",0,0,"]633;E;2025-07-01 00:37:15 echo $ws_dir/../checkpoints/3307618;9a6fb2c6-f596-403e-97b8-ced5ab6848ce]633;C/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/../checkpoints/3307618\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar",,terminal_output
+5266,6813415,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"",shellscript,tab
+5269,6815502,"scripts_horeka/overfit_sample_tiny/sample.sh",893,0,"",shellscript,selection_mouse
+5271,6816578,"scripts_horeka/overfit_sample_tiny/sample.sh",892,0,"",shellscript,selection_command
+5273,6818022,"scripts_horeka/overfit_sample_tiny/sample.sh",951,0,"\nCHECKPOINT_PATH=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/0000/genie_1751301068_2000/",shellscript,content
+5274,6818059,"scripts_horeka/overfit_sample_tiny/sample.sh",952,0,"",shellscript,selection_command
+5275,6818365,"scripts_horeka/overfit_sample_tiny/sample.sh",840,0,"",shellscript,selection_command
+5277,6819463,"scripts_horeka/overfit_sample_tiny/sample.sh",840,0,"#",shellscript,content
+5278,6819464,"scripts_horeka/overfit_sample_tiny/sample.sh",841,0,"",shellscript,selection_keyboard
+5279,6819544,"scripts_horeka/overfit_sample_tiny/sample.sh",841,0," ",shellscript,content
+5280,6819545,"scripts_horeka/overfit_sample_tiny/sample.sh",842,0,"",shellscript,selection_keyboard
+5282,6819795,"scripts_horeka/overfit_sample_tiny/sample.sh",841,0,"",shellscript,selection_command
+5283,6819963,"scripts_horeka/overfit_sample_tiny/sample.sh",955,0,"",shellscript,selection_command
+5284,6820350,"scripts_horeka/overfit_sample_tiny/sample.sh",956,0,"",shellscript,selection_command
+5286,6820847,"scripts_horeka/overfit_sample_tiny/sample.sh",957,0,"",shellscript,selection_command
+5287,6820954,"scripts_horeka/overfit_sample_tiny/sample.sh",958,0,"",shellscript,selection_command
+5288,6820955,"scripts_horeka/overfit_sample_tiny/sample.sh",959,0,"",shellscript,selection_command
+5289,6820955,"scripts_horeka/overfit_sample_tiny/sample.sh",960,0,"",shellscript,selection_command
+5290,6820956,"scripts_horeka/overfit_sample_tiny/sample.sh",961,0,"",shellscript,selection_command
+5291,6821002,"scripts_horeka/overfit_sample_tiny/sample.sh",962,0,"",shellscript,selection_command
+5292,6821002,"scripts_horeka/overfit_sample_tiny/sample.sh",963,0,"",shellscript,selection_command
+5293,6821036,"scripts_horeka/overfit_sample_tiny/sample.sh",964,0,"",shellscript,selection_command
+5294,6821088,"scripts_horeka/overfit_sample_tiny/sample.sh",965,0,"",shellscript,selection_command
+5295,6821130,"scripts_horeka/overfit_sample_tiny/sample.sh",966,0,"",shellscript,selection_command
+5296,6821180,"scripts_horeka/overfit_sample_tiny/sample.sh",967,0,"",shellscript,selection_command
+5297,6821180,"scripts_horeka/overfit_sample_tiny/sample.sh",968,0,"",shellscript,selection_command
+5298,6821190,"scripts_horeka/overfit_sample_tiny/sample.sh",969,0,"",shellscript,selection_command
+5299,6821243,"scripts_horeka/overfit_sample_tiny/sample.sh",970,0,"",shellscript,selection_command
+5302,6822973,"scripts_horeka/overfit_sample_tiny/sample.sh",970,95,"",shellscript,content
+5303,6823002,"scripts_horeka/overfit_sample_tiny/sample.sh",969,0,"",shellscript,selection_command
+5304,6823548,"scripts_horeka/overfit_sample_tiny/sample.sh",970,0,"",shellscript,selection_command
+5306,6824279,"scripts_horeka/overfit_sample_tiny/sample.sh",970,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/../checkpoints/3307618",shellscript,content
+5308,6825032,"scripts_horeka/overfit_sample_tiny/sample.sh",1052,0,"",shellscript,selection_command
+5316,6833046,"TERMINAL",0,0,"watch",,terminal_focus
+5318,6833275,"TERMINAL",0,0,"Every 1.0s: squeue --mehkn1990.localdomain: Tue Jul 1 00:37:40 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3307618 accelerat train_dy tum_cte0 R19:07\t 1 hkn04203307619 accelerat train_dy tum_cte0 R19:07\t 1 hkn0420",,terminal_output
+5320,6834426,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output
+5321,6837607,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5 --mem=50G",,terminal_command
+5322,6837660,"TERMINAL",0,0,"]633;E;2025-07-01 00:37:44 salloc --time=01:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5 --mem=50G;4d11dbdc-690b-4257-b927-bbd493ebfa56]633;Csalloc: Pending job allocation 3307628\r\nsalloc: job 3307628 queued and waiting for resources\r\n",,terminal_output
+5323,6839259,"TERMINAL",0,0,"bash",,terminal_focus
+5324,6840783,"TERMINAL",0,0,"queue",,terminal_command
+5325,6840829,"TERMINAL",0,0,"]633;E;2025-07-01 00:37:48 queue;9a6fb2c6-f596-403e-97b8-ced5ab6848ce]633;C",,terminal_output
+5326,6840933,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Tue Jul 1 00:37:48 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3307628 accelerat interact tum_cte0 PD\t0:00\t 1 (Priority)3307618 accelerat train_dy tum_cte0 R19:15\t 1 hkn04203307619 accelerat train_dy tum_cte0 R19:15\t 1 hkn0420",,terminal_output
+5328,6842666,"TERMINAL",0,0,"salloc",,terminal_focus
+5331,6844136,"TERMINAL",0,0,"watch",,terminal_focus
+5332,6844660,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output
+5333,6852673,"TERMINAL",0,0,"idling^C",,terminal_command
+5334,6852714,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;9a6fb2c6-f596-403e-97b8-ced5ab6848ce]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output
+5335,6856112,"TERMINAL",0,0,"",,terminal_focus
+5336,6868542,"TERMINAL",0,0,"salloc",,terminal_focus
+5337,6869391,"TERMINAL",0,0,"sh scripts_horeka/overfit_sample_tiny/sample.sh",,terminal_output
+5338,6870119,"TERMINAL",0,0,"\r\n",,terminal_output
+5339,6870710,"TERMINAL",0,0,"bash",,terminal_focus
+5340,6873096,"TERMINAL",0,0,"reka/overfit_sample_tiny/sample.^C",,terminal_command
+5341,6873132,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;5f295fc2-38bc-4f2f-8728-c40f4dd3f6f9]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output
+5342,6886912,"TERMINAL",0,0,"salloc: job 3307628 has been allocated resources\r\nsalloc: Granted job allocation 3307628\r\n",,terminal_output
+5343,6887054,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output
+5344,6894424,"TERMINAL",0,0,"salloc",,terminal_focus +5345,6914113,"TERMINAL",0,0,"salloc: Nodes hkn0420 are ready for job\r\n",,terminal_output +5346,6914294,"TERMINAL",0,0,"sh scripts_horeka/overfit_sample_tiny/sample.sh\r\n",,terminal_output +5347,6914922,"TERMINAL",0,0,"]0;tum_cte0515@hkn0420:~/Projects/jafar[?2004h[tum_cte0515@hkn0420 jafar]$ sh scripts_horeka/overfit_sample_tiny/sample.sh\r\n[?2004l\r",,terminal_output +5348,6915071,"TERMINAL",0,0,"Sampling from checkpoint: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/../checkpoints/3307618\r\n",,terminal_output +5349,6919224,"TERMINAL",0,0,"2025-07-01 00:39:06.556120: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5350,6923461,"TERMINAL",0,0,"2025-07-01 00:39:10.796318: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5351,6926223,"sample.py",0,0,"",python,tab +5352,6931026,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"",shellscript,tab +5353,6931948,"TERMINAL",0,0,"2025-07-01 00:39:19.281172: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5354,6934378,"scripts_horeka/overfit_sample_tiny/sample.sh",1618,0,"",shellscript,selection_mouse +5355,6934392,"scripts_horeka/overfit_sample_tiny/sample.sh",1617,0,"",shellscript,selection_command +5356,6935618,"scripts_horeka/overfit_sample_tiny/sample.sh",1615,0,"",shellscript,selection_mouse +5357,6936587,"scripts_horeka/overfit_sample_tiny/sample.sh",1598,0,"",shellscript,selection_mouse +5358,6936637,"scripts_horeka/overfit_sample_tiny/sample.sh",1597,0,"",shellscript,selection_command +5359,6937735,"scripts_horeka/overfit_sample_tiny/sample.sh",1547,0,"",shellscript,selection_mouse +5360,6938688,"scripts_horeka/overfit_sample_tiny/sample.sh",1548,0,"",shellscript,selection_command +5361,6939110,"scripts_horeka/overfit_sample_tiny/sample.sh",1547,1,"",shellscript,content +5362,6939226,"scripts_horeka/overfit_sample_tiny/sample.sh",1546,1,"",shellscript,content +5363,6939328,"TERMINAL",0,0,"2025-07-01 00:39:26.662319: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5364,6943994,"TERMINAL",0,0,"^CTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/compiler.py"", line 335, in backend_compile\r\n return backend.compile(\r\njaxlib._jax.XlaRuntimeError: INTERNAL: ptxas exited with non-zero error code 2, output: \r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 84, in \r\n params = genie.init(_rng, dummy_inputs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/traceback_util.py"", line 182, in reraise_with_filtered_traceback\r\n return fun(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 2452, in init\r\n _, v_out = self.init_with_output(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/traceback_util.py"", line 182, in reraise_with_filtered_traceback\r\n return fun(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 2304, in init_with_output\r\n return init_with_output(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/scope.py"", line 1115, in wrapper\r\n return apply(fn, mutable=mutable, flags=init_flags)(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/core/scope.py"", line 1079, in wrapper\r\n y = fn(root, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 3093, in scope_fn\r\n return fn(module.clone(parent=scope, _deep_clone=True), *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 699, in wrapped_module_method\r\n return self._call_wrapped_method(fun, args, kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 1216, in _call_wrapped_method\r\n y = run_fun(self, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 82, in __call__\r\n dyna_outputs = self.dynamics(outputs, training)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 699, in wrapped_module_method\r\n return self._call_wrapped_method(fun, args, kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 1216, in _call_wrapped_method\r\n y = run_fun(self, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/dynamics.py"", line 51, in __call__\r\n logits = self.dynamics(vid_embed)\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 699, in wrapped_module_method\r\n return self._call_wrapped_method(fun, args, kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 1216, in _call_wrapped_method\r\n y = run_fun(self, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 79, in __call__\r\n x = nn.Sequential(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 699, in wrapped_module_method\r\n return self._call_wrapped_method(fun, args, kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 1216, in _call_wrapped_method\r\n y = run_fun(self, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/combinators.py"", line 106, in __call__\r\n outputs = self.layers[0](*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 699, in wrapped_module_method\r\n return self._call_wrapped_method(fun, args, kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/module.py"", line 1216, in _call_wrapped_method\r\n y = run_fun(self, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/normalization.py"", line 518, in __call__\r\n return _normalize(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/flax/linen/normalization.py"", line 200, in _normalize\r\n y = x - mean\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/array_methods.py"", line 583, in deferring_binary_op\r\n return binary_op(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/numpy/ufunc_api.py"", line 180, in __call__\r\n return call(*args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/traceback_util.py"", line 182, in reraise_with_filtered_traceback\r\n return fun(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 334, in cache_miss\r\n executable, pgle_profiler) = _python_pjit_helper(fun, jit_info, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 195, in _python_pjit_helper\r\n out_flat, compiled, profiler = _pjit_call_impl_python(*args_flat, **p.params)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/pjit.py"", line 1862, in _pjit_call_impl_python\r\n ).compile()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/interpreters/pxla.py"", line 2467, in compile\r\n executable = UnloadedMeshExecutable.from_hlo(\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/interpreters/pxla.py"", line 3009, in from_hlo\r\n xla_executable = _cached_compilation(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/interpreters/pxla.py"", line 2800, in _cached_compilation\r\n xla_executable = compiler.compile_or_get_cached(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/compiler.py"", line 447, in compile_or_get_cached\r\n return _compile_and_write_cache(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/compiler.py"", line 719, in _compile_and_write_cache\r\n executable = backend_compile(\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/profiler.py"", line 354, in wrapper\r\n return func(*args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/jax/_src/compiler.py"", line 335, in backend_compile\r\n return backend.compile(\r\nKeyboardInterrupt\r\n",,terminal_output +5365,6944185,"TERMINAL",0,0,"^CException ignored in: .remove at 0x1458a5693370>\r\nTraceback (most recent call last):\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/weakref.py"", line 370, in remove\r\n def remove(k, selfref=ref(self)):\r\nKeyboardInterrupt: \r\n",,terminal_output +5366,6944386,"TERMINAL",0,0,"^CException ignored in: .remove at 0x1458a5693370>\r\nTraceback (most recent call last):\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/weakref.py"", line 370, in remove\r\n def remove(k, selfref=ref(self)):\r\nKeyboardInterrupt: \r\n",,terminal_output +5367,6944625,"TERMINAL",0,0,"^C",,terminal_output +5368,6944766,"TERMINAL",0,0,"^C",,terminal_output +5369,6944947,"TERMINAL",0,0,"\r\n]0;tum_cte0515@hkn0420:~/Projects/jafar[?2004h[tum_cte0515@hkn0420 jafar]$ ",,terminal_output +5370,6945027,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_cte0515@hkn0420:~/Projects/jafar[?2004h[tum_cte0515@hkn0420 jafar]$ ",,terminal_output +5371,6945589,"TERMINAL",0,0,"sh scripts_horeka/overfit_sample_tiny/sample.sh",,terminal_output +5372,6945727,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +5373,6945869,"TERMINAL",0,0,"Sampling from checkpoint: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/../checkpoints/3307618\r\n",,terminal_output +5374,6948561,"TERMINAL",0,0,"2025-07-01 00:39:35.887066: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5375,6952704,"TERMINAL",0,0,"2025-07-01 00:39:39.902900: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5376,6960882,"TERMINAL",0,0,"2025-07-01 00:39:48.215433: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5377,6968036,"TERMINAL",0,0,"2025-07-01 00:39:55.298042: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5378,6974408,"TERMINAL",0,0,"2025-07-01 00:40:01.740176: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5379,6979904,"TERMINAL",0,0,"2025-07-01 00:40:07.239257: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5380,6982782,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 85, in \r\n ckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 300, in restore\r\n restored = self._restore(directory, args=ckpt_args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 319, in _restore\r\n return self._handler.restore(directory, args=args)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 803, in restore\r\n structure, use_zarr3_metadata = self._get_internal_metadata(directory)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 959, in _get_internal_metadata\r\n raise FileNotFoundError(\r\nFileNotFoundError: No structure could be identified for the checkpoint at /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/../checkpoints/3307618.\r\n",,terminal_output +5381,6984857,"TERMINAL",0,0,"]0;tum_cte0515@hkn0420:~/Projects/jafar[?2004h[tum_cte0515@hkn0420 jafar]$ ",,terminal_output +5382,7043185,"TERMINAL",0,0,"bash",,terminal_focus +5383,7045175,"TERMINAL",0,0,"cd /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/../checkpoints/3307618",,terminal_command +5384,7045211,"TERMINAL",0,0,"]633;E;2025-07-01 00:41:12 cd 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/../checkpoints/3307618;5f295fc2-38bc-4f2f-8728-c40f4dd3f6f9]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3307618]633;D;0",,terminal_output +5385,7046491,"TERMINAL",0,0,"pwd",,terminal_command +5386,7046534,"TERMINAL",0,0,"]633;E;2025-07-01 00:41:13 pwd;5f295fc2-38bc-4f2f-8728-c40f4dd3f6f9]633;C/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3307618\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3307618]633;D;0",,terminal_output +5387,7050654,"TERMINAL",0,0,"ls",,terminal_command +5388,7050714,"TERMINAL",0,0,"]633;E;2025-07-01 00:41:17 ls;5f295fc2-38bc-4f2f-8728-c40f4dd3f6f9]633;C",,terminal_output +5389,7050890,"TERMINAL",0,0,"genie_1751322003_1000 genie_1751322003_12000 genie_1751322003_14500 genie_1751322003_2500 genie_1751322003_500 genie_1751322003_7000 genie_1751322003_9500\r\ngenie_1751322003_10000 genie_1751322003_12500 genie_1751322003_1500 genie_1751322003_3000 genie_1751322003_5000 genie_1751322003_7500\r\ngenie_1751322003_10500 genie_1751322003_13000 genie_1751322003_15000 genie_1751322003_3500 genie_1751322003_5500 genie_1751322003_8000\r\ngenie_1751322003_11000 genie_1751322003_13500 genie_1751322003_15500 genie_1751322003_4000 genie_1751322003_6000 genie_1751322003_8500\r\ngenie_1751322003_11500 genie_1751322003_14000 genie_1751322003_2000 genie_1751322003_4500 genie_1751322003_6500 genie_1751322003_9000\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3307618]633;D;0",,terminal_output +5390,7058946,"TERMINAL",0,0,"srun",,terminal_focus +5391,7062384,"sample.py",0,0,"",python,tab +5392,7063139,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"",shellscript,tab +5393,7064930,"scripts_horeka/overfit_sample_tiny/sample.sh",1103,0,"",shellscript,selection_mouse +5394,7065484,"scripts_horeka/overfit_sample_tiny/sample.sh",1053,0,"",shellscript,selection_mouse +5395,7066006,"scripts_horeka/overfit_sample_tiny/sample.sh",1617,0,"",shellscript,selection_keyboard +5396,7067654,"scripts_horeka/overfit_sample_tiny/sample.sh",1053,0,"",shellscript,selection_mouse +5397,7068867,"scripts_horeka/overfit_sample_tiny/sample.sh",1053,0,"/",shellscript,content +5398,7068869,"scripts_horeka/overfit_sample_tiny/sample.sh",1054,0,"",shellscript,selection_keyboard +5399,7069123,"scripts_horeka/overfit_sample_tiny/sample.sh",1054,0,"genie_1751322003_15500",shellscript,content +5400,7071117,"scripts_horeka/overfit_sample_tiny/sample.sh",1076,0,"/",shellscript,content +5401,7071118,"scripts_horeka/overfit_sample_tiny/sample.sh",1077,0,"",shellscript,selection_keyboard +5402,7074715,"TERMINAL",0,0,"sh scripts_horeka/overfit_sample_tiny/sample.sh",,terminal_output +5403,7075078,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +5404,7075267,"TERMINAL",0,0,"Sampling from checkpoint: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/../checkpoints/3307618/genie_1751322003_15500/\r\n",,terminal_output +5405,7078045,"TERMINAL",0,0,"2025-07-01 00:41:45.383878: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5406,7082288,"TERMINAL",0,0,"2025-07-01 00:41:49.613780: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5407,7090403,"TERMINAL",0,0,"2025-07-01 00:41:57.740870: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5408,7097764,"TERMINAL",0,0,"2025-07-01 00:42:05.097722: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5409,7104279,"TERMINAL",0,0,"2025-07-01 00:42:11.612393: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5410,7110200,"TERMINAL",0,0,"2025-07-01 00:42:17.535843: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5411,7113155,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +5412,7123325,"TERMINAL",0,0,"jax.errors.SimplifiedTraceback: For simplicity, JAX has removed its internal frames from the traceback of the following exception. 
Set JAX_TRACEBACK_FILTERING=off to include these.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/sample.py"", line 127, in \r\n action_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/genie.py"", line 140, in vq_encode\r\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/models/lam.py"", line 81, in vq_encode\r\n z_q, z, emb, indices = self.vq(z, training)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/utils/nn.py"", line 107, in setup\r\n self.param(\r\nflax.errors.ScopeParamShapeError: Initializer expected to generate shape (6, 32) but got shape (1, 32) instead for parameter ""codebook"" in ""/lam/vq"". (https://flax.readthedocs.io/en/latest/api_reference/flax.errors.html#flax.errors.ScopeParamShapeError)\r\n",,terminal_output +5413,7124477,"TERMINAL",0,0,"]0;tum_cte0515@hkn0420:~/Projects/jafar[?2004h[tum_cte0515@hkn0420 jafar]$ ",,terminal_output +5414,7147993,"scripts_horeka/train_tokenizer.sh",0,0,"",shellscript,tab +5415,7149866,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"",shellscript,tab +5416,7151966,"scripts_horeka/overfit_sample_tiny/sample.sh",1572,0,"",shellscript,selection_mouse +5417,7152517,"scripts_horeka/overfit_sample_tiny/sample.sh",1620,0,"",shellscript,selection_mouse +5418,7153085,"scripts_horeka/overfit_sample_tiny/sample.sh",1599,0,"",shellscript,selection_mouse +5419,7154755,"scripts_horeka/overfit_sample_tiny/sample.sh",1598,1,"",shellscript,content +5420,7155225,"scripts_horeka/overfit_sample_tiny/sample.sh",1598,0,"6",shellscript,content +5421,7155226,"scripts_horeka/overfit_sample_tiny/sample.sh",1599,0,"",shellscript,selection_keyboard +5422,7156725,"scripts_horeka/overfit_sample_tiny/sample.sh",1620,0,"",shellscript,selection_mouse +5423,7160561,"TERMINAL",0,0,"sh scripts_horeka/overfit_sample_tiny/sample.sh",,terminal_output +5424,7160850,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +5425,7161023,"TERMINAL",0,0,"Sampling from checkpoint: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/../checkpoints/3307618/genie_1751322003_15500/\r\n",,terminal_output +5426,7163825,"TERMINAL",0,0,"2025-07-01 00:43:11.158284: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5427,7167861,"TERMINAL",0,0,"2025-07-01 00:43:15.155962: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5428,7175713,"TERMINAL",0,0,"2025-07-01 00:43:23.031055: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5429,7182762,"TERMINAL",0,0,"2025-07-01 00:43:30.056938: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5430,7189091,"TERMINAL",0,0,"2025-07-01 00:43:36.421458: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5431,7194757,"TERMINAL",0,0,"2025-07-01 00:43:42.081579: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5432,7197842,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +5433,7208829,"TERMINAL",0,0,"====================================================================================================\r\nFrame 1\r\n====================================================================================================\r\n",,terminal_output +5434,7209380,"TERMINAL",0,0,"2025-07-01 00:43:56.713319: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5435,7212362,"TERMINAL",0,0,"2025-07-01 00:43:59.663674: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5436,7220284,"TERMINAL",0,0,"2025-07-01 00:44:07.613111: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5437,7223887,"TERMINAL",0,0,"2025-07-01 00:44:11.186075: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5438,7228913,"TERMINAL",0,0,"2025-07-01 00:44:16.220573: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5439,7231134,"TERMINAL",0,0,"====================================================================================================\r\nFrame 2\r\n====================================================================================================\r\n",,terminal_output +5440,7231663,"TERMINAL",0,0,"2025-07-01 00:44:18.999963: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5441,7234621,"TERMINAL",0,0,"2025-07-01 00:44:21.944605: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5442,7242629,"TERMINAL",0,0,"2025-07-01 00:44:29.964633: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5443,7245379,"TERMINAL",0,0,"2025-07-01 00:44:32.715159: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5444,7251098,"TERMINAL",0,0,"====================================================================================================\r\nFrame 3\r\n====================================================================================================\r\n",,terminal_output +5445,7251657,"TERMINAL",0,0,"2025-07-01 00:44:38.990885: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5446,7254625,"TERMINAL",0,0,"2025-07-01 00:44:41.954733: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5447,7262863,"TERMINAL",0,0,"2025-07-01 00:44:50.198381: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5448,7265683,"TERMINAL",0,0,"2025-07-01 00:44:52.969822: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5449,7271926,"TERMINAL",0,0,"====================================================================================================\r\nFrame 4\r\n====================================================================================================\r\n",,terminal_output +5450,7272531,"TERMINAL",0,0,"2025-07-01 00:44:59.848742: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5451,7275459,"TERMINAL",0,0,"2025-07-01 00:45:02.797438: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5452,7284350,"TERMINAL",0,0,"2025-07-01 00:45:11.687164: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5453,7287182,"TERMINAL",0,0,"2025-07-01 00:45:14.521813: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5454,7293242,"TERMINAL",0,0,"====================================================================================================\r\nFrame 5\r\n====================================================================================================\r\n",,terminal_output +5455,7293889,"TERMINAL",0,0,"2025-07-01 00:45:21.191647: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5456,7297008,"TERMINAL",0,0,"2025-07-01 00:45:24.337522: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5457,7305540,"TERMINAL",0,0,"2025-07-01 00:45:32.876611: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5458,7308386,"TERMINAL",0,0,"2025-07-01 00:45:35.723702: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5459,7314716,"TERMINAL",0,0,"====================================================================================================\r\nFrame 6\r\n====================================================================================================\r\n",,terminal_output +5460,7315339,"TERMINAL",0,0,"2025-07-01 00:45:42.665252: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5461,7318567,"TERMINAL",0,0,"2025-07-01 00:45:45.850005: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5462,7326879,"TERMINAL",0,0,"2025-07-01 00:45:54.215868: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5463,7329636,"TERMINAL",0,0,"2025-07-01 00:45:56.962953: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5464,7336176,"TERMINAL",0,0,"====================================================================================================\r\nFrame 7\r\n====================================================================================================\r\n",,terminal_output +5465,7336881,"TERMINAL",0,0,"2025-07-01 00:46:04.140949: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5466,7339704,"TERMINAL",0,0,"2025-07-01 00:46:07.034108: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5467,7348495,"TERMINAL",0,0,"2025-07-01 00:46:15.829494: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5468,7351359,"TERMINAL",0,0,"2025-07-01 00:46:18.692029: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5469,7358168,"TERMINAL",0,0,"====================================================================================================\r\nFrame 8\r\n====================================================================================================\r\n",,terminal_output +5470,7358812,"TERMINAL",0,0,"2025-07-01 00:46:26.147384: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5471,7362056,"TERMINAL",0,0,"2025-07-01 00:46:29.389730: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5472,7371546,"TERMINAL",0,0,"2025-07-01 00:46:38.883547: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5473,7374582,"TERMINAL",0,0,"2025-07-01 00:46:41.920123: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5474,7381296,"TERMINAL",0,0,"====================================================================================================\r\nFrame 9\r\n====================================================================================================\r\n",,terminal_output +5475,7381957,"TERMINAL",0,0,"2025-07-01 00:46:49.295011: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5476,7394092,"TERMINAL",0,0,"2025-07-01 00:47:01.430820: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5477,7397201,"TERMINAL",0,0,"2025-07-01 00:47:04.539568: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5478,7403876,"TERMINAL",0,0,"====================================================================================================\r\nFrame 10\r\n====================================================================================================\r\n",,terminal_output +5479,7404572,"TERMINAL",0,0,"2025-07-01 00:47:11.911455: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5480,7416905,"TERMINAL",0,0,"2025-07-01 00:47:24.176044: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5481,7419810,"TERMINAL",0,0,"2025-07-01 00:47:27.098030: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5482,7426881,"TERMINAL",0,0,"====================================================================================================\r\nFrame 11\r\n====================================================================================================\r\n",,terminal_output +5483,7427586,"TERMINAL",0,0,"2025-07-01 00:47:34.921298: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5484,7439523,"TERMINAL",0,0,"2025-07-01 00:47:46.860032: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5485,7442643,"TERMINAL",0,0,"2025-07-01 00:47:49.980738: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5486,7449505,"TERMINAL",0,0,"====================================================================================================\r\nFrame 12\r\n====================================================================================================\r\n",,terminal_output +5487,7450194,"TERMINAL",0,0,"2025-07-01 00:47:57.525033: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5488,7462393,"TERMINAL",0,0,"2025-07-01 00:48:09.724550: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5489,7465532,"TERMINAL",0,0,"2025-07-01 00:48:12.852309: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5490,7473003,"TERMINAL",0,0,"====================================================================================================\r\nFrame 13\r\n====================================================================================================\r\n",,terminal_output +5491,7473728,"TERMINAL",0,0,"2025-07-01 00:48:21.045538: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5492,7486213,"TERMINAL",0,0,"2025-07-01 00:48:33.547830: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5493,7489313,"TERMINAL",0,0,"2025-07-01 00:48:36.605694: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5494,7496566,"TERMINAL",0,0,"====================================================================================================\r\nFrame 14\r\n====================================================================================================\r\n",,terminal_output +5495,7497286,"TERMINAL",0,0,"2025-07-01 00:48:44.528369: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5496,7509408,"TERMINAL",0,0,"2025-07-01 00:48:56.746314: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5497,7512618,"TERMINAL",0,0,"2025-07-01 00:48:59.955181: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5498,7519991,"TERMINAL",0,0,"====================================================================================================\r\nFrame 15\r\n====================================================================================================\r\n",,terminal_output +5499,7520637,"TERMINAL",0,0,"2025-07-01 00:49:07.976159: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5500,7532676,"TERMINAL",0,0,"2025-07-01 00:49:19.986961: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5501,7535825,"TERMINAL",0,0,"2025-07-01 00:49:23.121566: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5502,7547515,"TERMINAL",0,0,"SSIM: 0.2604583501815796\r\n",,terminal_output +5503,7552125,"TERMINAL",0,0,"]0;tum_cte0515@hkn0420:~/Projects/jafar[?2004h[tum_cte0515@hkn0420 jafar]$ ",,terminal_output +5504,7633734,"TERMINAL",0,0,"bash",,terminal_focus +5505,7650728,"TERMINAL",0,0,"cd ../3307619",,terminal_command +5506,7650742,"TERMINAL",0,0,"]633;E;2025-07-01 00:51:18 cd ../3307619;5f295fc2-38bc-4f2f-8728-c40f4dd3f6f9]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3307619]633;D;0",,terminal_output +5507,7651473,"TERMINAL",0,0,"ls",,terminal_command +5508,7651557,"TERMINAL",0,0,"]633;E;2025-07-01 00:51:18 ls;5f295fc2-38bc-4f2f-8728-c40f4dd3f6f9]633;C",,terminal_output +5509,7651615,"TERMINAL",0,0,"genie_1751322003_1000 genie_1751322003_13500 genie_1751322003_17000 genie_1751322003_20500 genie_1751322003_3000 genie_1751322003_6500\r\ngenie_1751322003_10000 genie_1751322003_14000 genie_1751322003_17500 genie_1751322003_21000 genie_1751322003_3500 genie_1751322003_7000\r\ngenie_1751322003_10500 genie_1751322003_14500 genie_1751322003_18000 genie_1751322003_21500 genie_1751322003_4000 genie_1751322003_7500\r\ngenie_1751322003_11000 genie_1751322003_1500 genie_1751322003_18500 genie_1751322003_22000 genie_1751322003_4500 genie_1751322003_8000\r\ngenie_1751322003_11500 genie_1751322003_15000 genie_1751322003_19000 genie_1751322003_22500 genie_1751322003_500 genie_1751322003_8500\r\ngenie_1751322003_12000 genie_1751322003_15500 genie_1751322003_19500 genie_1751322003_23000 genie_1751322003_5000 genie_1751322003_9000\r\ngenie_1751322003_12500 genie_1751322003_16000 genie_1751322003_2000 genie_1751322003_23500 genie_1751322003_5500 genie_1751322003_9500\r\ngenie_1751322003_13000 genie_1751322003_16500 genie_1751322003_20000 genie_1751322003_2500 genie_1751322003_6000\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3307619]633;D;0",,terminal_output +5510,7666155,"TERMINAL",0,0,"cd 
genie_1751322003_23500",,terminal_command +5511,7666197,"TERMINAL",0,0,"]633;E;2025-07-01 00:51:33 cd genie_1751322003_23500;5f295fc2-38bc-4f2f-8728-c40f4dd3f6f9]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3307619/genie_1751322003_23500]633;D;0",,terminal_output +5512,7666854,"TERMINAL",0,0,"pwd",,terminal_command +5513,7666940,"TERMINAL",0,0,"]633;E;2025-07-01 00:51:34 pwd;5f295fc2-38bc-4f2f-8728-c40f4dd3f6f9]633;C/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3307619/genie_1751322003_23500\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3307619/genie_1751322003_23500]633;D;0",,terminal_output +5514,7672040,"scripts_horeka/overfit_sample_tiny/sample.sh",0,0,"",shellscript,tab +5515,7672889,"scripts_horeka/overfit_sample_tiny/sample.sh",1021,0,"",shellscript,selection_mouse +5516,7673426,"scripts_horeka/overfit_sample_tiny/sample.sh",1020,0,"",shellscript,selection_command +5517,7673890,"scripts_horeka/overfit_sample_tiny/sample.sh",954,0,"",shellscript,selection_command +5518,7674425,"scripts_horeka/overfit_sample_tiny/sample.sh",954,0,"#",shellscript,content +5519,7674426,"scripts_horeka/overfit_sample_tiny/sample.sh",955,0,"",shellscript,selection_keyboard +5520,7674509,"scripts_horeka/overfit_sample_tiny/sample.sh",955,0," ",shellscript,content +5521,7674510,"scripts_horeka/overfit_sample_tiny/sample.sh",956,0,"",shellscript,selection_keyboard +5522,7674961,"scripts_horeka/overfit_sample_tiny/sample.sh",955,0,"",shellscript,selection_command +5523,7675815,"scripts_horeka/overfit_sample_tiny/sample.sh",1079,0,"\n",shellscript,content +5524,7677776,"scripts_horeka/overfit_sample_tiny/sample.sh",954,0,"",shellscript,selection_command +5525,7678762,"scripts_horeka/overfit_sample_tiny/sample.sh",1079,0,"\n# CHECKPOINT_PATH=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/../checkpoints/3307618/genie_1751322003_15500/",shellscript,content +5526,7678796,"scripts_horeka/overfit_sample_tiny/sample.sh",1080,0,"",shellscript,selection_command +5527,7679432,"scripts_horeka/overfit_sample_tiny/sample.sh",1080,1,"",shellscript,content +5528,7680012,"scripts_horeka/overfit_sample_tiny/sample.sh",1080,1,"",shellscript,content +5529,7681191,"scripts_horeka/overfit_sample_tiny/sample.sh",1095,0,"",shellscript,selection_command +5530,7681856,"scripts_horeka/overfit_sample_tiny/sample.sh",1096,0,"",shellscript,selection_command +5531,7683219,"scripts_horeka/overfit_sample_tiny/sample.sh",1096,107,"",shellscript,content +5532,7683231,"scripts_horeka/overfit_sample_tiny/sample.sh",1095,0,"",shellscript,selection_command +5533,7683975,"scripts_horeka/overfit_sample_tiny/sample.sh",1096,0,"",shellscript,selection_command +5534,7684618,"scripts_horeka/overfit_sample_tiny/sample.sh",1096,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3307619/genie_1751322003_23500",shellscript,content +5535,7685222,"scripts_horeka/overfit_sample_tiny/sample.sh",1193,0,"",shellscript,selection_command +5536,7686735,"scripts_horeka/overfit_sample_tiny/sample.sh",1194,0,"",shellscript,selection_command +5537,7687485,"scripts_horeka/overfit_sample_tiny/sample.sh",1193,0,"",shellscript,selection_command +5538,7688487,"scripts_horeka/overfit_sample_tiny/sample.sh",1194,0,"",shellscript,selection_command +5539,7688809,"scripts_horeka/overfit_sample_tiny/sample.sh",1194,0,"/",shellscript,content 
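To recap the error trail captured above: pointing the script at the bare job directory produced orbax's FileNotFoundError; appending genie_1751322003_15500 got past restoration but tripped flax.errors.ScopeParamShapeError, because the latent-action VQ codebook stored in the checkpoint has shape (6, 32) while the sampling invocation built the module for shape (1, 32). Changing a single character of sample.sh to 6 reconciled the two, after which the run generated 15 frames and reported SSIM ≈ 0.2605; the edits above then retarget the second training job's checkpoint, 3307619/genie_1751322003_23500/. A hedged sketch of why the two shapes must agree (the parameter and scope names follow the traceback; the surrounding module is a reconstruction, not the repository's actual utils/nn.py):

    # The codebook parameter is created with shape (num_latents, latent_dim),
    # so restoring a checkpoint trained with num_latents=6 into a module
    # configured with num_latents=1 raises ScopeParamShapeError. The name
    # "codebook" in scope "/lam/vq" matches the logged traceback; the rest is
    # an assumption.
    import flax.linen as nn

    class VectorQuantizer(nn.Module):
        num_latents: int  # must equal the training value (6 here); 1 triggered the error
        latent_dim: int   # 32 in this run

        def setup(self):
            self.codebook = self.param(
                "codebook",
                nn.initializers.lecun_uniform(),
                (self.num_latents, self.latent_dim),
            )

        def __call__(self, z):
            # Nearest-codebook lookup omitted; only the parameter shape
            # matters for the error discussed here.
            return z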
+5540,7688810,"scripts_horeka/overfit_sample_tiny/sample.sh",1195,0,"",shellscript,selection_keyboard +5541,7688929,"scripts_horeka/overfit_sample_tiny/sample.sh",1194,0,"",shellscript,selection_command +5542,7696028,"TERMINAL",0,0,"srun",,terminal_focus +5543,7697631,"TERMINAL",0,0,"q",,terminal_output +5544,7698143,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +5545,7698239,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +5546,7698320,"TERMINAL",0,0,"[?25lu[?25h",,terminal_output +5547,7698418,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +5548,7698558,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0420.localdomain: Tue Jul 1 00:52:05 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3307628 accelerat interact tum_cte0 R13:31\t 1 hkn04203307618 accelerat train_dy tum_cte0 R33:32\t 1 hkn04203307619 accelerat train_dy tum_cte0 R33:32\t 1 hkn0420",,terminal_output +5549,7699555,"TERMINAL",0,0,"6233",,terminal_output +5550,7700609,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0420:~/Projects/jafar[?2004h[tum_cte0515@hkn0420 jafar]$ ",,terminal_output +5551,7700967,"TERMINAL",0,0,"queue",,terminal_output +5552,7701469,"TERMINAL",0,0,"sh scripts_horeka/overfit_sample_tiny/sample.sh",,terminal_output +5553,7702127,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +5554,7702232,"TERMINAL",0,0,"Sampling from checkpoint: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/3307619/genie_1751322003_23500/\r\n",,terminal_output +5555,7705988,"TERMINAL",0,0,"2025-07-01 00:52:13.302481: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5556,7709975,"TERMINAL",0,0,"2025-07-01 00:52:17.313586: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5557,7718661,"TERMINAL",0,0,"2025-07-01 00:52:25.997331: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5558,7725852,"TERMINAL",0,0,"2025-07-01 00:52:33.182275: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5559,7732214,"TERMINAL",0,0,"2025-07-01 00:52:39.550652: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5560,7738070,"TERMINAL",0,0,"2025-07-01 00:52:45.344936: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5561,7741081,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/.venv/lib/python3.10/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1251: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +5562,7752396,"TERMINAL",0,0,"====================================================================================================\r\nFrame 1\r\n====================================================================================================\r\n",,terminal_output +5563,7752962,"TERMINAL",0,0,"2025-07-01 00:53:00.282797: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5564,7755893,"TERMINAL",0,0,"2025-07-01 00:53:03.230850: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5565,7763829,"TERMINAL",0,0,"2025-07-01 00:53:11.154974: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5566,7767355,"TERMINAL",0,0,"2025-07-01 00:53:14.694632: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5567,7772248,"TERMINAL",0,0,"2025-07-01 00:53:19.584059: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5568,7774449,"TERMINAL",0,0,"====================================================================================================\r\nFrame 2\r\n====================================================================================================\r\n",,terminal_output +5569,7775012,"TERMINAL",0,0,"2025-07-01 00:53:22.346603: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5570,7777758,"TERMINAL",0,0,"2025-07-01 00:53:25.092268: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5571,7786096,"TERMINAL",0,0,"2025-07-01 00:53:33.416132: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5572,7788832,"TERMINAL",0,0,"2025-07-01 00:53:36.150872: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5573,7794850,"TERMINAL",0,0,"====================================================================================================\r\nFrame 3\r\n====================================================================================================\r\n",,terminal_output +5574,7795408,"TERMINAL",0,0,"2025-07-01 00:53:42.742986: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5575,7798344,"TERMINAL",0,0,"2025-07-01 00:53:45.681935: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5576,7806726,"TERMINAL",0,0,"2025-07-01 00:53:53.992800: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5577,7809363,"TERMINAL",0,0,"2025-07-01 00:53:56.690497: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5578,7815277,"TERMINAL",0,0,"====================================================================================================\r\nFrame 4\r\n====================================================================================================\r\n",,terminal_output +5579,7815865,"TERMINAL",0,0,"2025-07-01 00:54:03.195509: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5580,7819012,"TERMINAL",0,0,"2025-07-01 00:54:06.310529: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5581,7827637,"TERMINAL",0,0,"2025-07-01 00:54:14.971734: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5582,7830711,"TERMINAL",0,0,"2025-07-01 00:54:18.048885: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5583,7836950,"TERMINAL",0,0,"====================================================================================================\r\nFrame 5\r\n====================================================================================================\r\n",,terminal_output +5584,7837543,"TERMINAL",0,0,"2025-07-01 00:54:24.871524: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5585,7840383,"TERMINAL",0,0,"2025-07-01 00:54:27.720223: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5586,7849112,"TERMINAL",0,0,"2025-07-01 00:54:36.447200: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5587,7852129,"TERMINAL",0,0,"2025-07-01 00:54:39.407910: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5588,7858725,"TERMINAL",0,0,"====================================================================================================\r\nFrame 6\r\n====================================================================================================\r\n",,terminal_output +5589,7859357,"TERMINAL",0,0,"2025-07-01 00:54:46.696069: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5590,7862321,"TERMINAL",0,0,"2025-07-01 00:54:49.649810: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5591,7871130,"TERMINAL",0,0,"2025-07-01 00:54:58.446376: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5592,7874068,"TERMINAL",0,0,"2025-07-01 00:55:01.402920: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5593,7880593,"TERMINAL",0,0,"====================================================================================================\r\nFrame 7\r\n====================================================================================================\r\n",,terminal_output +5594,7881233,"TERMINAL",0,0,"2025-07-01 00:55:08.572317: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5595,7884388,"TERMINAL",0,0,"2025-07-01 00:55:11.725594: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5596,7893164,"TERMINAL",0,0,"2025-07-01 00:55:20.468160: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5597,7896065,"TERMINAL",0,0,"2025-07-01 00:55:23.398714: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5598,7902594,"TERMINAL",0,0,"====================================================================================================\r\nFrame 8\r\n====================================================================================================\r\n",,terminal_output +5599,7903243,"TERMINAL",0,0,"2025-07-01 00:55:30.577238: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5600,7906256,"TERMINAL",0,0,"2025-07-01 00:55:33.590331: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5601,7915888,"TERMINAL",0,0,"2025-07-01 00:55:43.223300: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5602,7918837,"TERMINAL",0,0,"2025-07-01 00:55:46.166830: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5603,7925439,"TERMINAL",0,0,"====================================================================================================\r\nFrame 9\r\n====================================================================================================\r\n",,terminal_output +5604,7926173,"TERMINAL",0,0,"2025-07-01 00:55:53.464037: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5605,7938127,"TERMINAL",0,0,"2025-07-01 00:56:05.446223: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5606,7941032,"TERMINAL",0,0,"2025-07-01 00:56:08.370028: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5607,7947663,"TERMINAL",0,0,"====================================================================================================\r\nFrame 10\r\n====================================================================================================\r\n",,terminal_output +5608,7948351,"TERMINAL",0,0,"2025-07-01 00:56:15.681440: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5609,7960522,"TERMINAL",0,0,"2025-07-01 00:56:27.858366: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5610,7963873,"TERMINAL",0,0,"2025-07-01 00:56:31.212087: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5611,7970842,"TERMINAL",0,0,"====================================================================================================\r\nFrame 11\r\n====================================================================================================\r\n",,terminal_output +5612,7971532,"TERMINAL",0,0,"2025-07-01 00:56:38.868473: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5613,7983799,"TERMINAL",0,0,"2025-07-01 00:56:51.135093: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5614,7987060,"TERMINAL",0,0,"2025-07-01 00:56:54.397645: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5615,7994210,"TERMINAL",0,0,"====================================================================================================\r\nFrame 12\r\n====================================================================================================\r\n",,terminal_output +5616,7994933,"TERMINAL",0,0,"2025-07-01 00:57:02.255955: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5617,8007027,"TERMINAL",0,0,"2025-07-01 00:57:14.343281: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5618,8010287,"TERMINAL",0,0,"2025-07-01 00:57:17.624703: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5619,8017422,"TERMINAL",0,0,"====================================================================================================\r\nFrame 13\r\n====================================================================================================\r\n",,terminal_output +5620,8018151,"TERMINAL",0,0,"2025-07-01 00:57:25.479281: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5621,8030270,"TERMINAL",0,0,"2025-07-01 00:57:37.595680: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5622,8033594,"TERMINAL",0,0,"2025-07-01 00:57:40.932953: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5623,8040974,"TERMINAL",0,0,"====================================================================================================\r\nFrame 14\r\n====================================================================================================\r\n",,terminal_output +5624,8041697,"TERMINAL",0,0,"2025-07-01 00:57:49.032767: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5625,8053864,"TERMINAL",0,0,"2025-07-01 00:58:01.200536: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5626,8056981,"TERMINAL",0,0,"2025-07-01 00:58:04.308266: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5627,8064297,"TERMINAL",0,0,"====================================================================================================\r\nFrame 15\r\n====================================================================================================\r\n",,terminal_output +5628,8064982,"TERMINAL",0,0,"2025-07-01 00:58:12.317206: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. 
Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5629,8077162,"TERMINAL",0,0,"2025-07-01 00:58:24.500915: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5630,8080302,"TERMINAL",0,0,"2025-07-01 00:58:27.561496: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n",,terminal_output +5631,8090272,"TERMINAL",0,0,"SSIM: 0.5124529600143433\r\n",,terminal_output +5632,8094696,"TERMINAL",0,0,"]0;tum_cte0515@hkn0420:~/Projects/jafar[?2004h[tum_cte0515@hkn0420 jafar]$ ",,terminal_output +5633,8295760,"TERMINAL",0,0,"\r[tum_cte0515@hkn0420 jafar]$ ",,terminal_output diff --git a/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-d47b23a7-dd0b-41de-b03c-909a13a5be1a1752656548428-2025_07_16-11.03.16.676/source.csv b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-d47b23a7-dd0b-41de-b03c-909a13a5be1a1752656548428-2025_07_16-11.03.16.676/source.csv new file mode 100644 index 0000000000000000000000000000000000000000..22cdef755474908ff44b7948da10f1597b136d5f --- /dev/null +++ b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-d47b23a7-dd0b-41de-b03c-909a13a5be1a1752656548428-2025_07_16-11.03.16.676/source.csv @@ -0,0 +1,2096 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +1,5,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n min_lr: float = 0.0\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n grad_clip_threshold: float = 10e5\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = 
field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n inputs[""videos""] = inputs[""videos""].astype(jnp.float32) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n metrics[""grad_norm""] = optax.global_norm(grads)\n print(metrics[""grad_norm""])\n \n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=jnp.float32\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = 
count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n # Restore full dynamics model\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n 
train_state = restore_genie_components(\n train_state, replicated_sharding, grain_iterator, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +2,214,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"11:03:16 AM [info] Activating crowd-code\n11:03:16 AM [info] Recording started\n11:03:16 AM [info] Initializing git provider using file system watchers...\n",Log,tab +3,368,"extension-output-pdoom-org.crowd-code-#1-crowd-code",153,0,"11:03:16 AM [info] Git repository found\n11:03:16 AM [info] Git provider initialized successfully\n11:03:16 AM [info] Initial git state: [object Object]\n",Log,content +4,3136,"TERMINAL",0,0,"/bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt",,terminal_command +5,3161,"TERMINAL",0,0,"]633;E;2025-07-16 11:03:19 /bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt;338fa6e6-0b90-4248-acfa-7ea5cbb7d10b]633;C]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash]633;D;0",,terminal_output +6,7349,"train_dynamics.py",0,0,"",python,tab +7,11262,"TERMINAL",0,0,"undefinedjafar[tum_cte0515@hkn1991 jafar]$ source .venv/bin/activate",,terminal_command +8,11282,"TERMINAL",0,0,"]633;E;2025-07-16 11:03:27 source 
.venv/bin/activate;f5ee9b33-fe39-4ef6-94c4-a8ea744acfe0]633;C]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +9,15176,"TERMINAL",0,0,"",,terminal_focus +10,17484,"TERMINAL",0,0,"bash",,terminal_focus +11,17485,"TERMINAL",0,0,"bash",,terminal_focus +12,17485,"TERMINAL",0,0,"bash",,terminal_focus +13,19433,"TERMINAL",0,0,"bash",,terminal_focus +14,23399,"TERMINAL",0,0,"bash",,terminal_focus +15,23402,"TERMINAL",0,0,"bash",,terminal_focus +16,744480,"train_dynamics.py",0,0,"",python,tab +17,744483,"train_dynamics.py",7162,0,"",python,selection_mouse +18,744489,"train_dynamics.py",7161,0,"",python,selection_command +19,744998,"train_dynamics.py",7043,0,"",python,selection_mouse +20,745575,"train_dynamics.py",7162,0,"",python,selection_mouse +21,745591,"train_dynamics.py",7161,0,"",python,selection_command +22,746629,"TERMINAL",0,0,"bash",,terminal_focus +23,749742,"TERMINAL",0,0,"git branch",,terminal_command +24,749826,"TERMINAL",0,0,"]633;E;2025-07-16 11:15:46 git branch;f5ee9b33-fe39-4ef6-94c4-a8ea744acfe0]633;C[?1h=\r add-wandb-name-and-tags\r\n convert-to-jax-array-in-iter\r\n correct-batched-sampling\r\n dev\r\n dont-let-tf-see-gpu\r\n feat/explicit-image-dims\r\n fix-sampling\r\n grain-dataloader\r\n logging-variants\r\n lr-schedules\r\n:",,terminal_output +25,750830,"TERMINAL",0,0,"\r* main\r\n:",,terminal_output +26,751017,"TERMINAL",0,0,"\r metrics-logging-for-dynamics-model\r\n:",,terminal_output +27,751145,"TERMINAL",0,0,"\r monkey-patch\r\n:",,terminal_output +28,751252,"TERMINAL",0,0,"\r preprocess_video\r\n:",,terminal_output +29,751444,"TERMINAL",0,0,"\r revised-dataloader\r\n:",,terminal_output +30,751594,"TERMINAL",0,0,"\r runner\r\n:",,terminal_output +31,751731,"TERMINAL",0,0,"\r runner-grain\r\n:",,terminal_output +32,753157,"TERMINAL",0,0,"\r sample-from-different-topologies\r\n:",,terminal_output +33,753318,"TERMINAL",0,0,"\r speedup-tfrecord-preprocessing\r\n:",,terminal_output +34,753634,"TERMINAL",0,0,"\r tmp\r\n:",,terminal_output +35,753868,"TERMINAL",0,0,"\r\r(END)",,terminal_output +36,754855,"TERMINAL",0,0,"\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +37,768823,"TERMINAL",0,0,"git checkout -b ""grad-norm-log-and-clip""",,terminal_command +38,768872,"TERMINAL",0,0,"]633;E;2025-07-16 11:16:05 git checkout -b ""grad-norm-log-and-clip"";f5ee9b33-fe39-4ef6-94c4-a8ea744acfe0]633;C",,terminal_output +39,769051,"TERMINAL",0,0,"Switched to a new branch 'grad-norm-log-and-clip'\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +40,770300,"train_dynamics.py",0,0,"Switched from branch 'main' to 'grad-norm-log-and-clip'",python,git_branch_checkout +41,771032,"TERMINAL",0,0,"bash",,terminal_focus +42,776178,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command +43,777550,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int 
= 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n min_lr: float = 0.0\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n grad_clip_threshold: float = 10e5\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n inputs[""videos""] = inputs[""videos""].astype(jnp.float32) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n metrics[""grad_norm""] = optax.global_norm(grads)\n print(metrics[""grad_norm""])\n \n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # 
LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=jnp.float32\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n 
num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n # Restore full dynamics model\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n train_state = restore_genie_components(\n train_state, replicated_sharding, grain_iterator, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +44,777551,"train_dynamics.py",6989,0,"",python,selection_mouse +45,780518,"train_dynamics.py",6900,0,"",python,selection_command +46,780662,"train_dynamics.py",6837,0,"",python,selection_command +47,780849,"train_dynamics.py",6788,0,"",python,selection_command +48,781176,"train_dynamics.py",6837,0,"",python,selection_command +49,781643,"train_dynamics.py",6900,0,"",python,selection_command +50,781927,"train_dynamics.py",6837,0,"",python,selection_command +51,782146,"train_dynamics.py",6900,0,"",python,selection_command +52,782293,"train_dynamics.py",6837,0,"",python,selection_command +53,782382,"train_dynamics.py",6900,0,"",python,selection_command +54,782508,"train_dynamics.py",6837,0,"",python,selection_command +55,782605,"train_dynamics.py",6900,0,"",python,selection_command +56,782720,"train_dynamics.py",6837,0,"",python,selection_command 
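The recording above creates a branch named grad-norm-log-and-clip and reopens train_dynamics.py, whose train_step already logs the gradient norm with optax.global_norm and chains optax.clip_by_global_norm ahead of AdamW (the default threshold of 10e5 makes the clip effectively inactive). A minimal, self-contained sketch of that log-and-clip pattern, assuming a toy quadratic loss in place of the Genie dynamics loss:

import jax
import jax.numpy as jnp
import optax

params = {"w": jnp.ones((4, 4)), "b": jnp.zeros(4)}

def loss_fn(params, x):
    # Stand-in for dynamics_loss_fn; any scalar loss works here.
    return jnp.sum((x @ params["w"] + params["b"]) ** 2)

tx = optax.chain(
    optax.clip_by_global_norm(10e5),  # args.grad_clip_threshold in the script
    optax.adamw(learning_rate=3e-5, b1=0.9, b2=0.9, weight_decay=1e-4),
)
opt_state = tx.init(params)

@jax.jit
def train_step(params, opt_state, x):
    loss, grads = jax.value_and_grad(loss_fn)(params, x)
    grad_norm = optax.global_norm(grads)  # logged as metrics["grad_norm"]
    updates, opt_state = tx.update(grads, opt_state, params)
    return optax.apply_updates(params, updates), opt_state, loss, grad_norm

params, opt_state, loss, gnorm = train_step(params, opt_state, jnp.ones((2, 4)))
print(loss, gnorm)

Note that the bare print(metrics["grad_norm"]) inside the recorded jitted train_step runs once at trace time and prints a tracer rather than a per-step value; returning the norm as above (or using jax.debug.print) is the usual way to surface it on every step.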
+57,782797,"train_dynamics.py",6900,0,"",python,selection_command +58,782934,"train_dynamics.py",6837,0,"",python,selection_command +59,783035,"train_dynamics.py",6900,0,"",python,selection_command +60,783151,"train_dynamics.py",6837,0,"",python,selection_command +61,783232,"train_dynamics.py",6900,0,"",python,selection_command +62,783369,"train_dynamics.py",6837,0,"",python,selection_command +63,783446,"train_dynamics.py",6900,0,"",python,selection_command +64,783587,"train_dynamics.py",6837,0,"",python,selection_command +65,783674,"train_dynamics.py",6900,0,"",python,selection_command +66,783805,"train_dynamics.py",6837,0,"",python,selection_command +67,783883,"train_dynamics.py",6900,0,"",python,selection_command +68,784040,"train_dynamics.py",6837,0,"",python,selection_command +69,784339,"train_dynamics.py",6788,0,"",python,selection_command +70,784394,"train_dynamics.py",6837,0,"",python,selection_command +71,784613,"train_dynamics.py",6788,0,"",python,selection_command +72,784691,"train_dynamics.py",6837,0,"",python,selection_command +73,784856,"train_dynamics.py",6900,0,"",python,selection_command +74,785014,"train_dynamics.py",6989,0,"",python,selection_command +75,785134,"train_dynamics.py",6900,0,"",python,selection_command +76,785233,"train_dynamics.py",6989,0,"",python,selection_command +77,785420,"train_dynamics.py",6900,0,"",python,selection_command +78,785647,"train_dynamics.py",6837,0,"",python,selection_command +79,785815,"train_dynamics.py",6900,0,"",python,selection_command +80,786006,"train_dynamics.py",6989,0,"",python,selection_command +81,786146,"train_dynamics.py",6900,0,"",python,selection_command +82,786414,"train_dynamics.py",6837,0,"",python,selection_command +83,786594,"train_dynamics.py",6900,0,"",python,selection_command +84,786773,"train_dynamics.py",6837,0,"",python,selection_command +85,786828,"train_dynamics.py",6900,0,"",python,selection_command +86,786951,"train_dynamics.py",6837,0,"",python,selection_command +87,787016,"train_dynamics.py",6900,0,"",python,selection_command +88,787167,"train_dynamics.py",6837,0,"",python,selection_command +89,787255,"train_dynamics.py",6900,0,"",python,selection_command +90,787381,"train_dynamics.py",6837,0,"",python,selection_command +91,787450,"train_dynamics.py",6900,0,"",python,selection_command +92,787586,"train_dynamics.py",6837,0,"",python,selection_command +93,787685,"train_dynamics.py",6900,0,"",python,selection_command +94,787796,"train_dynamics.py",6837,0,"",python,selection_command +95,787882,"train_dynamics.py",6900,0,"",python,selection_command +96,788020,"train_dynamics.py",6837,0,"",python,selection_command +97,788120,"train_dynamics.py",6900,0,"",python,selection_command +98,788244,"train_dynamics.py",6837,0,"",python,selection_command +99,788313,"train_dynamics.py",6900,0,"",python,selection_command +100,788447,"train_dynamics.py",6837,0,"",python,selection_command +101,788547,"train_dynamics.py",6900,0,"",python,selection_command +102,788669,"train_dynamics.py",6837,0,"",python,selection_command +103,788749,"train_dynamics.py",6900,0,"",python,selection_command +104,788899,"train_dynamics.py",6837,0,"",python,selection_command +105,789002,"train_dynamics.py",6900,0,"",python,selection_command +106,789106,"train_dynamics.py",6837,0,"",python,selection_command +107,789203,"train_dynamics.py",6900,0,"",python,selection_command +108,789338,"train_dynamics.py",6837,0,"",python,selection_command +109,789435,"train_dynamics.py",6900,0,"",python,selection_command 
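The long run of selection_command rows here is cursor movement inside train_step (offsets 6837-6900). The more consequential machinery in the surrounding script is how data reaches that step: each process's local batch is assembled into one logically global array over a 1-D "data" mesh via jax.make_array_from_process_local_data, using the same NamedSharding the script builds for videos. A minimal runnable sketch of that pattern; shapes follow the script's 16x90x160x3 videos, and the per-device batch size of one is an assumption:

import jax
import jax.numpy as jnp
from jax.sharding import Mesh, NamedSharding, PartitionSpec
from jax.experimental.mesh_utils import create_device_mesh

# 1-D mesh over all devices; the batch dimension is sharded across "data".
devices = create_device_mesh((jax.device_count(),))
mesh = Mesh(devices, axis_names=("data",))
videos_sharding = NamedSharding(mesh, PartitionSpec("data", None, None, None, None))

# Each process contributes its local shard; JAX stitches the shards into one
# global array, as the script's generator expression does per element.
local_videos = jnp.zeros((jax.local_device_count(), 16, 90, 160, 3), dtype=jnp.uint8)
global_videos = jax.make_array_from_process_local_data(videos_sharding, local_videos)
print(global_videos.shape, global_videos.sharding)

This sharding context also likely explains the Orbax UserWarning in the sampling log above: the abstract pytree built with ocp.utils.to_shape_dtype_struct carries shapes and dtypes but no shardings, so StandardRestore falls back to reading sharding info from the checkpoint's sharding file.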
+110,789567,"train_dynamics.py",6837,0,"",python,selection_command +111,789669,"train_dynamics.py",6900,0,"",python,selection_command +112,789797,"train_dynamics.py",6837,0,"",python,selection_command +113,789887,"train_dynamics.py",6900,0,"",python,selection_command +114,790015,"train_dynamics.py",6837,0,"",python,selection_command +115,790106,"train_dynamics.py",6900,0,"",python,selection_command +116,790250,"train_dynamics.py",6837,0,"",python,selection_command +117,790335,"train_dynamics.py",6900,0,"",python,selection_command +118,790460,"train_dynamics.py",6837,0,"",python,selection_command +119,790541,"train_dynamics.py",6900,0,"",python,selection_command +120,790684,"train_dynamics.py",6837,0,"",python,selection_command +121,790802,"train_dynamics.py",6900,0,"",python,selection_command +122,790937,"train_dynamics.py",6837,0,"",python,selection_command +123,791064,"train_dynamics.py",6900,0,"",python,selection_command +124,791211,"train_dynamics.py",6837,0,"",python,selection_command +125,791314,"train_dynamics.py",6900,0,"",python,selection_command +126,791463,"train_dynamics.py",6837,0,"",python,selection_command +127,791546,"train_dynamics.py",6900,0,"",python,selection_command +128,791703,"train_dynamics.py",6837,0,"",python,selection_command +129,791932,"train_dynamics.py",6788,0,"",python,selection_command +130,792011,"train_dynamics.py",6837,0,"",python,selection_command +131,792284,"train_dynamics.py",6900,0,"",python,selection_command +132,792530,"train_dynamics.py",6837,0,"",python,selection_command +133,792700,"train_dynamics.py",6900,0,"",python,selection_command +134,792869,"train_dynamics.py",6837,0,"",python,selection_command +135,792969,"train_dynamics.py",6900,0,"",python,selection_command +136,793103,"train_dynamics.py",6837,0,"",python,selection_command +137,793214,"train_dynamics.py",6900,0,"",python,selection_command +138,793364,"train_dynamics.py",6837,0,"",python,selection_command +139,793465,"train_dynamics.py",6900,0,"",python,selection_command +140,793624,"train_dynamics.py",6837,0,"",python,selection_command +141,793720,"train_dynamics.py",6900,0,"",python,selection_command +142,796147,"train_dynamics.py",6504,0,"",python,selection_mouse +143,796949,"train_dynamics.py",0,0,"",python,selection_command +144,797251,"train_dynamics.py",0,40,"from dataclasses import dataclass, field",python,selection_command +145,797804,"train_dynamics.py",0,12259,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n min_lr: float = 0.0\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n grad_clip_threshold: float = 10e5\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: 
int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n inputs[""videos""] = inputs[""videos""].astype(jnp.float32) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n metrics[""grad_norm""] = optax.global_norm(grads)\n print(metrics[""grad_norm""])\n \n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n 
mask_limit=args.mask_limit,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=jnp.float32\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n # Restore full dynamics model\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = 
checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n train_state = restore_genie_components(\n train_state, replicated_sharding, grain_iterator, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,selection_command +146,1053318,"train_dynamics.py",12107,0,"",python,selection_mouse +147,1053331,"train_dynamics.py",12106,0,"",python,selection_command +148,1065563,"train_dynamics.py",2795,0,"",python,selection_mouse +149,1066169,"train_dynamics.py",2485,0,"",python,selection_mouse +150,1067498,"train_dynamics.py",3324,0,"",python,selection_mouse +151,1068027,"train_dynamics.py",3279,0,"",python,selection_mouse +152,1068847,"train_dynamics.py",3212,0,"",python,selection_mouse +153,1069539,"train_dynamics.py",3284,0,"",python,selection_mouse +154,1071080,"train_dynamics.py",3293,0,"\n ",python,content +155,1071529,"train_dynamics.py",3298,0," g_norm_clipped = jnp.minimum(raw_grad_norm, args.grad_clip_threshold)\n metrics[""clipped_grad_norm""] = g_norm_clipped",python,content +156,1072973,"train_dynamics.py",3302,0,"",python,selection_mouse +157,1073352,"train_dynamics.py",3298,4,"",python,content +158,1073922,"train_dynamics.py",3297,0,"",python,selection_command +159,1074628,"train_dynamics.py",3407,0,"",python,selection_mouse +160,1075409,"train_dynamics.py",3334,0,"",python,selection_mouse +161,1075578,"train_dynamics.py",3327,13,"raw_grad_norm",python,selection_mouse +162,1076721,"train_dynamics.py",3331,0,"",python,selection_mouse 
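The train_dynamics.py buffer captured in full by the preceding row registers both model state and Grain dataloader state under one Orbax handler registry, so a single `CheckpointManager.save`/`restore` round-trips them together. A minimal sketch of that wiring, assuming `orbax-checkpoint` and `grain` are installed; the directory and interval below are illustrative stand-ins:

```python
# Sketch of the checkpoint wiring from the captured train_dynamics.py,
# assuming orbax-checkpoint and grain are installed; the /tmp path and
# save interval are illustrative, not values from the recording.
import orbax.checkpoint as ocp
import grain

registry = ocp.handlers.DefaultCheckpointHandlerRegistry()
# One handler pair per named item: model params via the standard handler...
registry.add("model_state", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler)
registry.add("model_state", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)
# ...and the Grain iterator via its dedicated checkpoint handler.
registry.add("dataloader_state", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler)
registry.add("dataloader_state", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler)

options = ocp.CheckpointManagerOptions(save_interval_steps=25_000, max_to_keep=3)
manager = ocp.CheckpointManager("/tmp/ckpts", options=options, handler_registry=registry)
```

Saving then passes one `ocp.args.Composite(model_state=..., dataloader_state=...)` per step, exactly as the captured script does.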
+163,1077416,"train_dynamics.py",3255,0,"",python,selection_mouse +164,1078414,"train_dynamics.py",3245,0,"",python,selection_mouse +165,1079239,"train_dynamics.py",3246,0,"",python,selection_mouse +166,1079451,"train_dynamics.py",3246,1,"m",python,selection_mouse +167,1079470,"train_dynamics.py",3246,2,"me",python,selection_mouse +168,1079488,"train_dynamics.py",3246,4,"metr",python,selection_mouse +169,1079504,"train_dynamics.py",3246,5,"metri",python,selection_mouse +170,1079519,"train_dynamics.py",3246,6,"metric",python,selection_mouse +171,1079537,"train_dynamics.py",3246,7,"metrics",python,selection_mouse +172,1079554,"train_dynamics.py",3246,9,"metrics[""",python,selection_mouse +173,1079572,"train_dynamics.py",3246,10,"metrics[""g",python,selection_mouse +174,1079628,"train_dynamics.py",3246,11,"metrics[""gr",python,selection_mouse +175,1079629,"train_dynamics.py",3246,12,"metrics[""gra",python,selection_mouse +176,1079681,"train_dynamics.py",3246,13,"metrics[""grad",python,selection_mouse +177,1079682,"train_dynamics.py",3246,14,"metrics[""grad_",python,selection_mouse +178,1079701,"train_dynamics.py",3246,15,"metrics[""grad_n",python,selection_mouse +179,1079757,"train_dynamics.py",3246,16,"metrics[""grad_no",python,selection_mouse +180,1079758,"train_dynamics.py",3246,17,"metrics[""grad_nor",python,selection_mouse +181,1079810,"train_dynamics.py",3246,18,"metrics[""grad_norm",python,selection_mouse +182,1079863,"train_dynamics.py",3246,19,"metrics[""grad_norm""",python,selection_mouse +183,1080075,"train_dynamics.py",3246,20,"metrics[""grad_norm""]",python,selection_mouse +184,1083247,"train_dynamics.py",3381,0,"",python,selection_mouse +185,1083853,"train_dynamics.py",3433,0,"",python,selection_mouse +186,1084477,"train_dynamics.py",3418,32,"",python,content +187,1084512,"train_dynamics.py",3421,0,"",python,selection_command +188,1084580,"train_dynamics.py",3372,0,"",python,selection_command +189,1084892,"train_dynamics.py",3373,0,"",python,selection_command +190,1085394,"train_dynamics.py",3374,0,"",python,selection_command +191,1085430,"train_dynamics.py",3375,0,"",python,selection_command +192,1085451,"train_dynamics.py",3376,0,"",python,selection_command +193,1085503,"train_dynamics.py",3377,0,"",python,selection_command +194,1085508,"train_dynamics.py",3378,0,"",python,selection_command +195,1085536,"train_dynamics.py",3379,0,"",python,selection_command +196,1085600,"train_dynamics.py",3380,0,"",python,selection_command +197,1085601,"train_dynamics.py",3381,0,"",python,selection_command +198,1085638,"train_dynamics.py",3382,0,"",python,selection_command +199,1086029,"train_dynamics.py",3381,0,"",python,selection_command +200,1086222,"train_dynamics.py",3381,1,"",python,content +201,1086389,"train_dynamics.py",3381,1,"",python,content +202,1086587,"train_dynamics.py",3381,1,"",python,content +203,1086743,"train_dynamics.py",3381,1,"",python,content +204,1086915,"train_dynamics.py",3381,1,"",python,content +205,1087107,"train_dynamics.py",3381,1,"",python,content +206,1087295,"train_dynamics.py",3381,1,"",python,content +207,1087476,"train_dynamics.py",3381,1,"",python,content +208,1087820,"train_dynamics.py",3307,0,"",python,selection_command +209,1088004,"train_dynamics.py",3255,0,"",python,selection_command +210,1088932,"train_dynamics.py",3254,0,"",python,selection_command +211,1089421,"train_dynamics.py",3253,0,"",python,selection_command +212,1089443,"train_dynamics.py",3252,0,"",python,selection_command 
+213,1089474,"train_dynamics.py",3251,0,"",python,selection_command +214,1089499,"train_dynamics.py",3250,0,"",python,selection_command +215,1089537,"train_dynamics.py",3249,0,"",python,selection_command +216,1089611,"train_dynamics.py",3248,0,"",python,selection_command +217,1089612,"train_dynamics.py",3247,0,"",python,selection_command +218,1089926,"train_dynamics.py",3246,0,"",python,selection_command +219,1090180,"train_dynamics.py",3246,7,"",python,content +220,1090527,"train_dynamics.py",3246,2,"",python,content +221,1090800,"train_dynamics.py",3246,9,"",python,content +222,1091097,"train_dynamics.py",3246,3,"",python,content +223,1091674,"train_dynamics.py",3246,1,"",python,content +224,1092720,"train_dynamics.py",3246,0,"r",python,content +225,1092721,"train_dynamics.py",3247,0,"",python,selection_keyboard +226,1092909,"train_dynamics.py",3247,0,"a",python,content +227,1092910,"train_dynamics.py",3248,0,"",python,selection_keyboard +228,1093642,"train_dynamics.py",3248,0,"w",python,content +229,1093643,"train_dynamics.py",3249,0,"",python,selection_keyboard +230,1094339,"train_dynamics.py",3249,0," ",python,content +231,1094340,"train_dynamics.py",3250,0,"",python,selection_keyboard +232,1094611,"train_dynamics.py",3250,0,"=",python,content +233,1094612,"train_dynamics.py",3251,0,"",python,selection_keyboard +234,1094932,"train_dynamics.py",3250,0,"",python,selection_command +235,1095136,"train_dynamics.py",3249,0,"",python,selection_command +236,1095617,"train_dynamics.py",3249,0,"_",python,content +237,1095618,"train_dynamics.py",3250,0,"",python,selection_keyboard +238,1096665,"train_dynamics.py",3250,0,"r",python,content +239,1096666,"train_dynamics.py",3251,0,"",python,selection_keyboard +240,1096969,"train_dynamics.py",3250,1,"",python,content +241,1097229,"train_dynamics.py",3250,0,"g",python,content +242,1097230,"train_dynamics.py",3251,0,"",python,selection_keyboard +243,1097267,"train_dynamics.py",3251,0,"r",python,content +244,1097268,"train_dynamics.py",3252,0,"",python,selection_keyboard +245,1097566,"train_dynamics.py",3252,0,"d",python,content +246,1097566,"train_dynamics.py",3253,0,"",python,selection_keyboard +247,1097886,"train_dynamics.py",3252,1,"",python,content +248,1097956,"train_dynamics.py",3252,0,"a",python,content +249,1097956,"train_dynamics.py",3253,0,"",python,selection_keyboard +250,1098100,"train_dynamics.py",3253,0,"d",python,content +251,1098101,"train_dynamics.py",3254,0,"",python,selection_keyboard +252,1098235,"train_dynamics.py",3254,0,"_",python,content +253,1098236,"train_dynamics.py",3255,0,"",python,selection_keyboard +254,1098503,"train_dynamics.py",3255,0,"m",python,content +255,1098504,"train_dynamics.py",3256,0,"",python,selection_keyboard +256,1098928,"train_dynamics.py",3255,1,"",python,content +257,1099184,"train_dynamics.py",3255,0,"n",python,content +258,1099185,"train_dynamics.py",3256,0,"",python,selection_keyboard +259,1099309,"train_dynamics.py",3256,0,"o",python,content +260,1099310,"train_dynamics.py",3257,0,"",python,selection_keyboard +261,1099386,"train_dynamics.py",3257,0,"r",python,content +262,1099387,"train_dynamics.py",3258,0,"",python,selection_keyboard +263,1099440,"train_dynamics.py",3258,0,"m",python,content +264,1099441,"train_dynamics.py",3259,0,"",python,selection_keyboard +265,1099714,"train_dynamics.py",3258,0,"",python,selection_command +266,1101075,"train_dynamics.py",3268,0,"",python,selection_mouse +267,1101192,"train_dynamics.py",3268,11,"global_norm",python,selection_mouse 
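The edits recorded above rename the logged quantity to `raw_grad_norm = optax.global_norm(grads)` and clip only the *logged* value with `jnp.minimum`. A minimal runnable sketch of that pattern, with a toy gradient pytree and threshold standing in for `grads` and `args.grad_clip_threshold`:

```python
# Grad-norm logging pattern being typed in the rows above: compute the global
# L2 norm of the gradient pytree, then clamp the logged value to the same
# threshold the optimizer uses. The pytree and threshold here are toy values.
import jax.numpy as jnp
import optax

grads = {"w": jnp.ones((2, 3)), "b": jnp.arange(3.0)}
grad_clip_threshold = 1.0

raw_grad_norm = optax.global_norm(grads)  # sqrt of the summed squared leaves
g_norm_clipped = jnp.minimum(raw_grad_norm, grad_clip_threshold)
metrics = {"grad_norm": g_norm_clipped}
print(metrics)
```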
+268,1101809,"train_dynamics.py",3306,0,"",python,selection_mouse +269,1102284,"train_dynamics.py",3190,0,"",python,selection_mouse +270,1103209,"train_dynamics.py",3241,0,"\n ",python,content +271,1105553,"train_dynamics.py",3246,0,"#",python,content +272,1105554,"train_dynamics.py",3247,0,"",python,selection_keyboard +273,1105710,"train_dynamics.py",3247,0," ",python,content +274,1105711,"train_dynamics.py",3248,0,"",python,selection_keyboard +275,1106056,"train_dynamics.py",3248,0,"e",python,content +276,1106057,"train_dynamics.py",3249,0,"",python,selection_keyboard +277,1106366,"train_dynamics.py",3249,0,"t",python,content +278,1106367,"train_dynamics.py",3250,0,"",python,selection_keyboard +279,1106669,"train_dynamics.py",3249,1,"",python,content +280,1106830,"train_dynamics.py",3249,0,"x",python,content +281,1106830,"train_dynamics.py",3250,0,"",python,selection_keyboard +282,1106940,"train_dynamics.py",3250,0,"t",python,content +283,1106941,"train_dynamics.py",3251,0,"",python,selection_keyboard +284,1107059,"train_dynamics.py",3251,0,"r",python,content +285,1107060,"train_dynamics.py",3252,0,"",python,selection_keyboard +286,1107249,"train_dynamics.py",3252,0,"a",python,content +287,1107250,"train_dynamics.py",3253,0,"",python,selection_keyboard +288,1107348,"train_dynamics.py",3253,0,"c",python,content +289,1107349,"train_dynamics.py",3254,0,"",python,selection_keyboard +290,1107440,"train_dynamics.py",3254,0,"t",python,content +291,1107441,"train_dynamics.py",3255,0,"",python,selection_keyboard +292,1107526,"train_dynamics.py",3255,0," ",python,content +293,1107527,"train_dynamics.py",3256,0,"",python,selection_keyboard +294,1107607,"train_dynamics.py",3256,0,"a",python,content +295,1107607,"train_dynamics.py",3257,0,"",python,selection_keyboard +296,1107666,"train_dynamics.py",3257,0,"n",python,content +297,1107667,"train_dynamics.py",3258,0,"",python,selection_keyboard +298,1107755,"train_dynamics.py",3258,0,"d",python,content +299,1107756,"train_dynamics.py",3259,0,"",python,selection_keyboard +300,1107822,"train_dynamics.py",3259,0," ",python,content +301,1107822,"train_dynamics.py",3260,0,"",python,selection_keyboard +302,1108030,"train_dynamics.py",3260,0,"n",python,content +303,1108031,"train_dynamics.py",3261,0,"",python,selection_keyboard +304,1108285,"train_dynamics.py",3260,1,"",python,content +305,1108469,"train_dynamics.py",3260,0,"m",python,content +306,1108470,"train_dynamics.py",3261,0,"",python,selection_keyboard +307,1108603,"train_dynamics.py",3261,0,"a",python,content +308,1108604,"train_dynamics.py",3262,0,"",python,selection_keyboard +309,1108972,"train_dynamics.py",3262,0,"n",python,content +310,1108973,"train_dynamics.py",3263,0,"",python,selection_keyboard +311,1109121,"train_dynamics.py",3263,0,"u",python,content +312,1109122,"train_dynamics.py",3264,0,"",python,selection_keyboard +313,1109251,"train_dynamics.py",3264,0,"a",python,content +314,1109252,"train_dynamics.py",3265,0,"",python,selection_keyboard +315,1109318,"train_dynamics.py",3265,0,"l",python,content +316,1109319,"train_dynamics.py",3266,0,"",python,selection_keyboard +317,1109438,"train_dynamics.py",3266,0,"l",python,content +318,1109439,"train_dynamics.py",3267,0,"",python,selection_keyboard +319,1109470,"train_dynamics.py",3267,0,"y",python,content +320,1109471,"train_dynamics.py",3268,0,"",python,selection_keyboard +321,1109562,"train_dynamics.py",3268,0," ",python,content +322,1109563,"train_dynamics.py",3269,0,"",python,selection_keyboard 
+323,1109699,"train_dynamics.py",3269,0,"c",python,content +324,1109700,"train_dynamics.py",3270,0,"",python,selection_keyboard +325,1109768,"train_dynamics.py",3270,0,"l",python,content +326,1109769,"train_dynamics.py",3271,0,"",python,selection_keyboard +327,1109952,"train_dynamics.py",3271,0,"i",python,content +328,1109952,"train_dynamics.py",3272,0,"",python,selection_keyboard +329,1110119,"train_dynamics.py",3272,0,"p",python,content +330,1110120,"train_dynamics.py",3273,0,"",python,selection_keyboard +331,1110232,"train_dynamics.py",3273,0," ",python,content +332,1110233,"train_dynamics.py",3274,0,"",python,selection_keyboard +333,1110544,"train_dynamics.py",3274,0,"g",python,content +334,1110545,"train_dynamics.py",3275,0,"",python,selection_keyboard +335,1110600,"train_dynamics.py",3275,0,"r",python,content +336,1110601,"train_dynamics.py",3276,0,"",python,selection_keyboard +337,1110756,"train_dynamics.py",3276,0,"a",python,content +338,1110757,"train_dynamics.py",3277,0,"",python,selection_keyboard +339,1110831,"train_dynamics.py",3277,0,"d",python,content +340,1110832,"train_dynamics.py",3278,0,"",python,selection_keyboard +341,1110893,"train_dynamics.py",3278,0," ",python,content +342,1110894,"train_dynamics.py",3279,0,"",python,selection_keyboard +343,1111001,"train_dynamics.py",3279,0,"n",python,content +344,1111002,"train_dynamics.py",3280,0,"",python,selection_keyboard +345,1111167,"train_dynamics.py",3280,0,"o",python,content +346,1111167,"train_dynamics.py",3281,0,"",python,selection_keyboard +347,1111195,"train_dynamics.py",3281,0,"r",python,content +348,1111195,"train_dynamics.py",3282,0,"",python,selection_keyboard +349,1111279,"train_dynamics.py",3282,0,"m",python,content +350,1111279,"train_dynamics.py",3283,0,"",python,selection_keyboard +351,1111364,"train_dynamics.py",3283,0," ",python,content +352,1111365,"train_dynamics.py",3284,0,"",python,selection_keyboard +353,1111531,"train_dynamics.py",3284,0,"f",python,content +354,1111532,"train_dynamics.py",3285,0,"",python,selection_keyboard +355,1111617,"train_dynamics.py",3285,0,"o",python,content +356,1111618,"train_dynamics.py",3286,0,"",python,selection_keyboard +357,1111732,"train_dynamics.py",3286,0,"r",python,content +358,1111733,"train_dynamics.py",3287,0,"",python,selection_keyboard +359,1111767,"train_dynamics.py",3287,0," ",python,content +360,1111768,"train_dynamics.py",3288,0,"",python,selection_keyboard +361,1111878,"train_dynamics.py",3288,0,"l",python,content +362,1111879,"train_dynamics.py",3289,0,"",python,selection_keyboard +363,1112074,"train_dynamics.py",3289,0,"o",python,content +364,1112075,"train_dynamics.py",3290,0,"",python,selection_keyboard +365,1112216,"train_dynamics.py",3290,0,"g",python,content +366,1112217,"train_dynamics.py",3291,0,"",python,selection_keyboard +367,1112373,"train_dynamics.py",3291,0,"g",python,content +368,1112374,"train_dynamics.py",3292,0,"",python,selection_keyboard +369,1112374,"train_dynamics.py",3292,0,"i",python,content +370,1112375,"train_dynamics.py",3293,0,"",python,selection_keyboard +371,1112388,"train_dynamics.py",3293,0,"n",python,content +372,1112389,"train_dynamics.py",3294,0,"",python,selection_keyboard +373,1112530,"train_dynamics.py",3294,0,"g",python,content +374,1112531,"train_dynamics.py",3295,0,"",python,selection_keyboard +375,1113015,"train_dynamics.py",3295,0," ",python,content +376,1113016,"train_dynamics.py",3296,0,"",python,selection_keyboard +377,1113376,"train_dynamics.py",3296,0,"()",python,content 
+378,1113377,"train_dynamics.py",3297,0,"",python,selection_keyboard +379,1114279,"train_dynamics.py",3297,0,"a",python,content +380,1114280,"train_dynamics.py",3298,0,"",python,selection_keyboard +381,1114437,"train_dynamics.py",3298,0,"c",python,content +382,1114438,"train_dynamics.py",3299,0,"",python,selection_keyboard +383,1114783,"train_dynamics.py",3299,0,"u",python,content +384,1114784,"train_dynamics.py",3300,0,"",python,selection_keyboard +385,1115051,"train_dynamics.py",3299,1,"",python,content +386,1115571,"train_dynamics.py",3299,0,"t",python,content +387,1115571,"train_dynamics.py",3300,0,"",python,selection_keyboard +388,1115650,"train_dynamics.py",3300,0,"u",python,content +389,1115651,"train_dynamics.py",3301,0,"",python,selection_keyboard +390,1115752,"train_dynamics.py",3301,0,"a",python,content +391,1115753,"train_dynamics.py",3302,0,"",python,selection_keyboard +392,1115835,"train_dynamics.py",3302,0,"l",python,content +393,1115836,"train_dynamics.py",3303,0,"",python,selection_keyboard +394,1115943,"train_dynamics.py",3303,0," ",python,content +395,1115943,"train_dynamics.py",3304,0,"",python,selection_keyboard +396,1116720,"train_dynamics.py",3304,0,"c",python,content +397,1116721,"train_dynamics.py",3305,0,"",python,selection_keyboard +398,1116786,"train_dynamics.py",3305,0,"l",python,content +399,1116787,"train_dynamics.py",3306,0,"",python,selection_keyboard +400,1116975,"train_dynamics.py",3306,0,"i",python,content +401,1116976,"train_dynamics.py",3307,0,"",python,selection_keyboard +402,1117161,"train_dynamics.py",3307,0,"p",python,content +403,1117162,"train_dynamics.py",3308,0,"",python,selection_keyboard +404,1117289,"train_dynamics.py",3308,0,"p",python,content +405,1117290,"train_dynamics.py",3309,0,"",python,selection_keyboard +406,1117478,"train_dynamics.py",3309,0,"i",python,content +407,1117479,"train_dynamics.py",3310,0,"",python,selection_keyboard +408,1117529,"train_dynamics.py",3310,0,"n",python,content +409,1117530,"train_dynamics.py",3311,0,"",python,selection_keyboard +410,1117627,"train_dynamics.py",3311,0,"g",python,content +411,1117628,"train_dynamics.py",3312,0,"",python,selection_keyboard +412,1117725,"train_dynamics.py",3312,0," ",python,content +413,1117726,"train_dynamics.py",3313,0,"",python,selection_keyboard +414,1117828,"train_dynamics.py",3313,0,"i",python,content +415,1117829,"train_dynamics.py",3314,0,"",python,selection_keyboard +416,1117945,"train_dynamics.py",3314,0,"s",python,content +417,1117946,"train_dynamics.py",3315,0,"",python,selection_keyboard +418,1117965,"train_dynamics.py",3315,0," ",python,content +419,1117966,"train_dynamics.py",3316,0,"",python,selection_keyboard +420,1118419,"train_dynamics.py",3316,0,"d",python,content +421,1118420,"train_dynamics.py",3317,0,"",python,selection_keyboard +422,1118429,"train_dynamics.py",3317,0,"o",python,content +423,1118430,"train_dynamics.py",3318,0,"",python,selection_keyboard +424,1118632,"train_dynamics.py",3318,0,"n",python,content +425,1118633,"train_dynamics.py",3319,0,"",python,selection_keyboard +426,1119799,"train_dynamics.py",3319,0,"t",python,content +427,1119800,"train_dynamics.py",3320,0,"",python,selection_keyboard +428,1119903,"train_dynamics.py",3320,0," ",python,content +429,1119904,"train_dynamics.py",3321,0,"",python,selection_keyboard +430,1120004,"train_dynamics.py",3321,0,"i",python,content +431,1120005,"train_dynamics.py",3322,0,"",python,selection_keyboard +432,1120109,"train_dynamics.py",3322,0,"n",python,content 
+433,1120110,"train_dynamics.py",3323,0,"",python,selection_keyboard +434,1120260,"train_dynamics.py",3323,0," ",python,content +435,1120261,"train_dynamics.py",3324,0,"",python,selection_keyboard +436,1120457,"train_dynamics.py",3324,0,"t",python,content +437,1120458,"train_dynamics.py",3325,0,"",python,selection_keyboard +438,1120549,"train_dynamics.py",3325,0,"h",python,content +439,1120550,"train_dynamics.py",3326,0,"",python,selection_keyboard +440,1120696,"train_dynamics.py",3326,0,"e",python,content +441,1120697,"train_dynamics.py",3327,0,"",python,selection_keyboard +442,1120763,"train_dynamics.py",3327,0," ",python,content +443,1120764,"train_dynamics.py",3328,0,"",python,selection_keyboard +444,1120969,"train_dynamics.py",3328,0,"o",python,content +445,1120970,"train_dynamics.py",3329,0,"",python,selection_keyboard +446,1121120,"train_dynamics.py",3329,0,"p",python,content +447,1121121,"train_dynamics.py",3330,0,"",python,selection_keyboard +448,1135573,"train_dynamics.py",3330,0,"t",python,content +449,1135574,"train_dynamics.py",3331,0,"",python,selection_keyboard +450,1135792,"train_dynamics.py",3331,0,"a",python,content +451,1135793,"train_dynamics.py",3332,0,"",python,selection_keyboard +452,1136142,"train_dynamics.py",3332,0,"x",python,content +453,1136143,"train_dynamics.py",3333,0,"",python,selection_keyboard +454,1136729,"train_dynamics.py",3333,0,"-",python,content +455,1136730,"train_dynamics.py",3334,0,"",python,selection_keyboard +456,1137047,"train_dynamics.py",3333,1,"",python,content +457,1137220,"train_dynamics.py",3333,0,".",python,content +458,1137221,"train_dynamics.py",3334,0,"",python,selection_keyboard +459,1137469,"train_dynamics.py",3334,0,"c",python,content +460,1137470,"train_dynamics.py",3335,0,"",python,selection_keyboard +461,1137617,"train_dynamics.py",3335,0,"h",python,content +462,1137618,"train_dynamics.py",3336,0,"",python,selection_keyboard +463,1137769,"train_dynamics.py",3336,0,"a",python,content +464,1137770,"train_dynamics.py",3337,0,"",python,selection_keyboard +465,1137849,"train_dynamics.py",3337,0,"i",python,content +466,1137850,"train_dynamics.py",3338,0,"",python,selection_keyboard +467,1137946,"train_dynamics.py",3338,0,"n",python,content +468,1137947,"train_dynamics.py",3339,0,"",python,selection_keyboard +469,1138327,"train_dynamics.py",3338,0,"",python,selection_command +470,1139980,"train_dynamics.py",3740,0,"",python,selection_mouse +471,1141992,"train_dynamics.py",3320,0,"",python,selection_mouse +472,1143262,"train_dynamics.py",3319,0,"",python,selection_command +473,1143791,"train_dynamics.py",3319,1,"e",python,content +474,1144956,"train_dynamics.py",3501,0,"",python,selection_mouse +475,1144957,"train_dynamics.py",3500,0,"",python,selection_command +476,1149505,"train_dynamics.py",3494,0,"",python,selection_mouse +477,1150048,"train_dynamics.py",3480,0,"",python,selection_mouse +478,1150177,"train_dynamics.py",3473,9,"grad_norm",python,selection_mouse +479,1150815,"train_dynamics.py",3524,0,"",python,selection_mouse +480,1152677,"train_dynamics.py",3487,0,"",python,selection_mouse +481,1153269,"train_dynamics.py",3506,0,"",python,selection_mouse +482,1153273,"train_dynamics.py",3505,0,"",python,selection_command +483,1154187,"train_dynamics.py",3533,0,"",python,selection_mouse +484,1176291,"train_dynamics.py",3506,0,"",python,selection_mouse +485,1176303,"train_dynamics.py",3505,0,"",python,selection_command +486,1177015,"train_dynamics.py",3267,0,"",python,selection_mouse 
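The comment being typed here sits inside a `train_step` built on `jax.value_and_grad(..., has_aux=True)`, which returns the loss, its auxiliary outputs, and the gradients in one call. A self-contained sketch of that pattern, with a toy scalar loss standing in for `dynamics_loss_fn`:

```python
# value_and_grad with has_aux, as used by the captured train_step; the loss
# function and parameters below are toy stand-ins for the real model.
import jax
import jax.numpy as jnp

def loss_fn(params, x):
    recon = params["w"] * x
    loss = jnp.mean((recon - 1.0) ** 2)
    metrics = {"mse": loss}
    return loss, (recon, metrics)  # aux outputs travel alongside the loss

grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(loss, (recon, metrics)), grads = grad_fn({"w": jnp.float32(2.0)}, jnp.ones(4))
print(loss, grads["w"], metrics["mse"])
```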
+487,1177215,"train_dynamics.py",3266,1,"l",python,selection_mouse +488,1303524,"train_dynamics.py",3185,0,"",python,selection_mouse +489,1304040,"train_dynamics.py",3260,0,"",python,selection_mouse +490,1304941,"train_dynamics.py",3404,0,"",python,selection_mouse +491,1305084,"train_dynamics.py",3390,14,"g_norm_clipped",python,selection_mouse +492,1305873,"train_dynamics.py",3190,0,"",python,selection_mouse +493,1306026,"train_dynamics.py",3186,7,"metrics",python,selection_mouse +494,1310766,"train_dynamics.py",3256,0,"",python,selection_mouse +495,1310915,"train_dynamics.py",3256,3,"and",python,selection_mouse +496,1311195,"train_dynamics.py",3179,80,"recon, metrics)), grads = grad_fn(state.params, state, inputs)\n # extract and",python,selection_mouse +497,1311592,"train_dynamics.py",3182,0,"",python,selection_mouse +498,1311593,"train_dynamics.py",3179,5,"recon",python,selection_mouse +499,1315149,"train_dynamics.py",3725,0,"",python,selection_mouse +500,1315268,"train_dynamics.py",3724,5,"recon",python,selection_mouse +501,1324633,"train_dynamics.py",0,0,"",python,tab +502,1324736,"train_dynamics.py",926,0,"",python,selection_command +503,1336130,"train_dynamics.py",6266,0,"",python,selection_mouse +504,1340024,"train_dynamics.py",6250,40,"",python,content +505,1340051,"train_dynamics.py",6258,0,"",python,selection_command +506,1345020,"train_lam.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n vq_beta: float = 0.25\n min_lr: float = 0.0\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n vq_reset_thresh: int = 50\n grad_clip_threshold: float = 10e5\n # LAM\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 6\n patch_size: int = 16\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.0\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_lam""\n tags: list[str] = field(default_factory=lambda: [""lam""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_checkpoint_keep_period: int = 20000\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef lam_loss_fn(params, state, inputs):\n # --- Compute loss ---\n inputs[""videos""] = inputs[""videos""].astype(jnp.float32) / 255.0\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n gt_future_frames = inputs[""videos""][:, 1:]\n mse = jnp.square(gt_future_frames - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n 
).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = gt_future_frames.clip(0, 1).reshape(-1, *gt_future_frames.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n count_fn = jax.vmap(lambda i: (outputs[""indices""] == i).sum())\n index_counts = count_fn(jnp.arange(args.num_latents))\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=(index_counts != 0).mean(),\n )\n return loss, (outputs[""recon""], index_counts, metrics)\n\n\n@jax.jit\ndef train_step(state, inputs, action_last_active):\n # --- Update model ---\n rng, inputs[""rng""] = jax.random.split(inputs[""rng""])\n grad_fn = jax.value_and_grad(lam_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, idx_counts, metrics)), grads = grad_fn(state.params, state, inputs)\n metrics[""grad_norm""] = optax.global_norm(grads)\n state = state.apply_gradients(grads=grads)\n\n # --- Reset inactive latent actions ---\n codebook = state.params[""params""][""vq""][""codebook""]\n num_codes = len(codebook)\n active_codes = idx_counts != 0.0\n action_last_active = jnp.where(active_codes, 0, action_last_active + 1)\n p_code = active_codes / active_codes.sum()\n reset_idxs = jax.random.choice(rng, num_codes, shape=(num_codes,), p=p_code)\n do_reset = action_last_active >= args.vq_reset_thresh\n new_codebook = jnp.where(\n jnp.expand_dims(do_reset, -1), codebook[reset_idxs], codebook\n )\n state.params[""params""][""vq""][""codebook""] = new_codebook\n action_last_active = jnp.where(do_reset, 0, action_last_active)\n return state, loss, recon, action_last_active, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n # Track when each action was last sampled\n action_last_active = jnp.zeros(args.num_latents)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n rng, _rng = jax.random.split(rng)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = lam.init(_rng, inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.config.update({""model_param_count"": param_counts})\n\n 
print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=lam.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n action_last_active = jax.device_put(action_last_active, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n inputs = dict(videos=videos, rng=_rng)\n train_state, loss, recon, action_last_active, metrics = train_step(\n train_state, inputs, action_last_active\n )\n print(f""Step 
{step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0][1:]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +507,1345022,"train_lam.py",1032,0,"",python,selection_command +508,2109315,"train_lam.py",0,0,"",python,tab +509,2109316,"train_lam.py",3309,0,"",python,selection_mouse +510,2109905,"train_lam.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n vq_beta: float = 0.25\n min_lr: float = 0.0\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n vq_reset_thresh: int = 50\n # LAM\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 6\n patch_size: int = 16\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.0\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_lam""\n tags: list[str] = field(default_factory=lambda: [""lam""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_checkpoint_keep_period: int = 20000\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef lam_loss_fn(params, state, inputs):\n # --- Compute loss ---\n inputs[""videos""] = inputs[""videos""].astype(jnp.float32) / 255.0\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n gt_future_frames = inputs[""videos""][:, 1:]\n mse = jnp.square(gt_future_frames - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- 
Compute validation metrics ---\n gt = gt_future_frames.clip(0, 1).reshape(-1, *gt_future_frames.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n count_fn = jax.vmap(lambda i: (outputs[""indices""] == i).sum())\n index_counts = count_fn(jnp.arange(args.num_latents))\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=(index_counts != 0).mean(),\n )\n return loss, (outputs[""recon""], index_counts, metrics)\n\n\n@jax.jit\ndef train_step(state, inputs, action_last_active):\n # --- Update model ---\n rng, inputs[""rng""] = jax.random.split(inputs[""rng""])\n grad_fn = jax.value_and_grad(lam_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, idx_counts, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n\n # --- Reset inactive latent actions ---\n codebook = state.params[""params""][""vq""][""codebook""]\n num_codes = len(codebook)\n active_codes = idx_counts != 0.0\n action_last_active = jnp.where(active_codes, 0, action_last_active + 1)\n p_code = active_codes / active_codes.sum()\n reset_idxs = jax.random.choice(rng, num_codes, shape=(num_codes,), p=p_code)\n do_reset = action_last_active >= args.vq_reset_thresh\n new_codebook = jnp.where(\n jnp.expand_dims(do_reset, -1), codebook[reset_idxs], codebook\n )\n state.params[""params""][""vq""][""codebook""] = new_codebook\n action_last_active = jnp.where(do_reset, 0, action_last_active)\n return state, loss, recon, action_last_active, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n # Track when each action was last sampled\n action_last_active = jnp.zeros(args.num_latents)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n rng, _rng = jax.random.split(rng)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = lam.init(_rng, inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = 
optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=lam.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n action_last_active = jax.device_put(action_last_active, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n inputs = dict(videos=videos, rng=_rng)\n train_state, loss, recon, action_last_active, metrics = train_step(\n train_state, inputs, action_last_active\n )\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval 
== 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0][1:]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +511,2109906,"train_lam.py",2567,0,"",python,selection_mouse +512,2111551,"train_lam.py",0,0,"",python,tab +513,2111557,"train_dynamics.py",0,0,"",python,tab +514,2113453,"train_dynamics.py",6556,0,"",python,selection_mouse +515,2113466,"train_dynamics.py",6555,0,"",python,selection_command +516,2114342,"train_dynamics.py",6535,21," tx = optax.chain(",python,selection_command +517,2114588,"train_dynamics.py",6535,86," tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),",python,selection_command +518,2114743,"train_dynamics.py",6535,172," tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)",python,selection_command +519,2114880,"train_dynamics.py",6535,182," tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )",python,selection_command +520,2115142,"train_dynamics.py",6535,0,"",python,selection_command +521,2120858,"train_dynamics.py",6535,21," tx = optax.chain(",python,selection_command +522,2121128,"train_dynamics.py",6535,86," tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),",python,selection_command +523,2121298,"train_dynamics.py",6535,172," tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)",python,selection_command +524,2121426,"train_dynamics.py",6535,182," tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )",python,selection_command +525,2121697,"train_dynamics.py",6535,0,"",python,selection_command +526,2162633,"TERMINAL",0,0,"bash",,terminal_focus +527,2162634,"TERMINAL",0,0,"bash",,terminal_focus +528,2163896,"train_dynamics.py",0,0,"",python,tab +529,2171468,"train_dynamics.py",3244,0,"",python,selection_mouse +530,2211150,"train_dynamics.py",3377,0,"",python,selection_mouse +531,2211750,"train_dynamics.py",3279,0,"",python,selection_mouse +532,2212376,"train_dynamics.py",3242,98," # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)",python,selection_command +533,2212592,"train_dynamics.py",3242,143," # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n raw_grad_norm = optax.global_norm(grads)",python,selection_command 
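The selections above walk the `tx = optax.chain(...)` block in train_dynamics.py, which is where the clipping actually happens; the `metrics["grad_norm"]` edit only mirrors it. Reproduced as a standalone sketch with the literal values from the captured script:

```python
# The optimizer chain selected above: clip_by_global_norm performs the real
# clipping before AdamW applies updates. Values match the captured script.
import optax

lr_schedule = optax.warmup_cosine_decay_schedule(
    init_value=0.0, peak_value=3e-5, warmup_steps=5_000, decay_steps=200_000
)
tx = optax.chain(
    optax.clip_by_global_norm(10e5),  # args.grad_clip_threshold
    optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4),
)
```

Note that the same "actual clipping is done in the optax.chain" comment is pasted into train_lam.py in the following rows, whose `tx` is a bare `optax.adamw` without such a chain.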
+534,2212740,"train_dynamics.py",3242,217," # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n raw_grad_norm = optax.global_norm(grads)\n g_norm_clipped = jnp.minimum(raw_grad_norm, args.grad_clip_threshold)",python,selection_command +535,2212885,"train_dynamics.py",3242,259," # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n raw_grad_norm = optax.global_norm(grads)\n g_norm_clipped = jnp.minimum(raw_grad_norm, args.grad_clip_threshold)\n metrics[""grad_norm""] = g_norm_clipped",python,selection_command +536,2213436,"train_dynamics.py",3242,0,"",python,selection_command +537,2218667,"train_lam.py",0,0,"",python,tab +538,2221570,"train_lam.py",0,0,"",python,tab +539,2221575,"train_lam.py",1032,0,"",python,selection_command +540,2224622,"train_lam.py",3299,0,"",python,selection_mouse +541,2225443,"train_lam.py",3343,0,"\n # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n raw_grad_norm = optax.global_norm(grads)\n g_norm_clipped = jnp.minimum(raw_grad_norm, args.grad_clip_threshold)\n metrics[""grad_norm""] = g_norm_clipped",python,content +542,2225454,"train_lam.py",3348,0,"",python,selection_command +543,2226100,"train_lam.py",3296,0,"",python,selection_command +544,2226539,"train_lam.py",3292,52,"",python,content +545,2226572,"train_lam.py",3296,0,"",python,selection_command +546,2226887,"train_lam.py",3395,0,"",python,selection_command +547,2227071,"train_lam.py",3440,0,"",python,selection_command +548,2227223,"train_lam.py",3514,0,"",python,selection_command +549,2228010,"train_lam.py",3551,0,"\n ",python,content +550,2228259,"train_lam.py",3552,4,"",python,content +551,2233794,"train_lam.py",0,0,"",python,tab +552,2235040,"train_dynamics.py",0,0,"",python,tab +553,2235044,"train_dynamics.py",926,0,"",python,selection_command +554,2239228,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 0.0\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n grad_clip_threshold: float = 10e5\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n 
wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n inputs[""videos""] = inputs[""videos""].astype(jnp.float32) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n metrics[""grad_norm""] = optax.global_norm(grads)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n 
wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: 
{loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +555,2239230,"train_tokenizer.py",1006,0,"",python,selection_command +556,2241006,"train_tokenizer.py",3261,0,"",python,selection_mouse +557,2241824,"train_tokenizer.py",3296,0,"\n metrics[""grad_norm""] = optax.global_norm(grads)",python,content +558,2241834,"train_tokenizer.py",3301,0,"",python,selection_command +559,2242632,"train_tokenizer.py",3301,52,"",python,content +560,2242641,"train_tokenizer.py",3261,0,"",python,selection_command +561,2245362,"train_dynamics.py",0,0,"",python,tab +562,2246488,"train_dynamics.py",3248,0,"",python,selection_mouse +563,2247118,"train_dynamics.py",3242,98," # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)",python,selection_command +564,2247333,"train_dynamics.py",3242,143," # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n raw_grad_norm = optax.global_norm(grads)",python,selection_command +565,2247472,"train_dynamics.py",3242,217," # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n raw_grad_norm = optax.global_norm(grads)\n g_norm_clipped = jnp.minimum(raw_grad_norm, args.grad_clip_threshold)",python,selection_command +566,2247623,"train_dynamics.py",3242,259," # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n raw_grad_norm = optax.global_norm(grads)\n g_norm_clipped = jnp.minimum(raw_grad_norm, args.grad_clip_threshold)\n metrics[""grad_norm""] = g_norm_clipped",python,selection_command +567,2248158,"train_dynamics.py",3242,260,"",python,content +568,2248162,"train_dynamics.py",3245,0,"",python,selection_command +569,2249942,"train_dynamics.py",3246,0,"# extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n raw_grad_norm = optax.global_norm(grads)\n g_norm_clipped = jnp.minimum(raw_grad_norm, args.grad_clip_threshold)\n metrics[""grad_norm""] = g_norm_clipped\n ",python,content +570,2249953,"train_dynamics.py",3248,0,"",python,selection_command +571,2251578,"train_tokenizer.py",0,0,"",python,tab +572,2252833,"train_tokenizer.py",3296,0,"\n # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n 
raw_grad_norm = optax.global_norm(grads)\n g_norm_clipped = jnp.minimum(raw_grad_norm, args.grad_clip_threshold)\n metrics[""grad_norm""] = g_norm_clipped",python,content +573,2252846,"train_tokenizer.py",3301,0,"",python,selection_command +574,2253302,"train_tokenizer.py",3249,0,"",python,selection_command +575,2253752,"train_tokenizer.py",3245,52,"",python,content +576,2253765,"train_tokenizer.py",3249,0,"",python,selection_command +577,2256124,"train_tokenizer.py",3502,0,"",python,selection_mouse +578,2268063,"train_dynamics.py",0,0,"",python,tab +579,2270085,"train_dynamics.py",0,0,"",python,tab +580,2281826,"train_dynamics.py",6556,0,"",python,selection_mouse +581,2281828,"train_dynamics.py",6555,0,"",python,selection_command +582,2282923,"train_dynamics.py",6535,21," tx = optax.chain(",python,selection_command +583,2283156,"train_dynamics.py",6535,86," tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),",python,selection_command +584,2283416,"train_dynamics.py",6535,172," tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)",python,selection_command +585,2283752,"train_dynamics.py",6535,182," tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )",python,selection_command +586,2283943,"train_dynamics.py",6535,0,"",python,selection_command +587,2285397,"train_tokenizer.py",0,0,"",python,tab +588,2285398,"train_tokenizer.py",6084,0,"",python,selection_mouse +589,2286354,"train_tokenizer.py",6150,0,"\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )",python,content +590,2286365,"train_tokenizer.py",6155,0,"",python,selection_command +591,2287100,"train_tokenizer.py",6072,0,"",python,selection_command +592,2287455,"train_tokenizer.py",6068,83,"",python,content +593,2287468,"train_tokenizer.py",6072,0,"",python,selection_command +594,2292366,"train_lam.py",0,0,"",python,tab +595,2297264,"train_dynamics.py",0,0,"",python,tab +596,2297265,"train_dynamics.py",6556,0,"",python,selection_mouse +597,2297277,"train_dynamics.py",6555,0,"",python,selection_command +598,2298239,"train_dynamics.py",6535,21," tx = optax.chain(",python,selection_command +599,2298445,"train_dynamics.py",6535,86," tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),",python,selection_command +600,2298594,"train_dynamics.py",6535,172," tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)",python,selection_command +601,2298744,"train_dynamics.py",6535,182," tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )",python,selection_command +602,2298970,"train_dynamics.py",6535,0,"",python,selection_command +603,2300299,"train_lam.py",0,0,"",python,tab +604,2300300,"train_lam.py",6550,0,"",python,selection_mouse +605,2300313,"train_lam.py",6549,0,"",python,selection_command +606,2300926,"train_lam.py",6555,0,"",python,selection_command +607,2302696,"train_lam.py",6633,0,"\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )",python,content +608,2302748,"train_lam.py",6638,0,"",python,selection_command 
+609,2303365,"train_lam.py",6555,0,"",python,selection_command +610,2303761,"train_lam.py",6551,83,"",python,content +611,2303780,"train_lam.py",6555,0,"",python,selection_command +612,2369627,"train_dynamics.py",0,0,"",python,tab +613,2381019,"train_lam.py",0,0,"",python,tab +614,2381021,"train_lam.py",1032,0,"",python,selection_command +615,2385367,"train_tokenizer.py",0,0,"",python,tab +616,2385368,"train_tokenizer.py",1006,0,"",python,selection_command +617,2478585,"train_lam.py",0,0,"",python,tab +618,2478587,"train_lam.py",5903,0,"",python,selection_mouse +619,2478593,"train_lam.py",5902,0,"",python,selection_command +620,2486899,"train_lam.py",1342,0,"",python,selection_mouse +621,2486910,"train_lam.py",1341,0,"",python,selection_command +622,2490410,"train_tokenizer.py",0,0,"",python,tab +623,2490411,"train_tokenizer.py",1043,0,"",python,selection_mouse +624,2490435,"train_tokenizer.py",1042,0,"",python,selection_command +625,2491020,"train_tokenizer.py",1024,0,"",python,selection_mouse +626,2491519,"train_tokenizer.py",1033,0,"",python,selection_mouse +627,2492188,"train_tokenizer.py",1017,0,"",python,selection_mouse +628,2492342,"train_tokenizer.py",1010,19,"grad_clip_threshold",python,selection_mouse +629,2492471,"train_tokenizer.py",1006,38," grad_clip_threshold: float = 10e5\n",python,selection_mouse +630,2493795,"train_tokenizer.py",1017,0,"",python,selection_mouse +631,2493796,"train_tokenizer.py",1010,19,"grad_clip_threshold",python,selection_mouse +632,2494400,"train_tokenizer.py",1017,0,"",python,selection_mouse +633,2495218,"train_tokenizer.py",1010,19,"grad_clip_threshold",python,selection_mouse +634,2495859,"train_tokenizer.py",1017,0,"",python,selection_mouse +635,2496318,"train_tokenizer.py",1010,19,"grad_clip_threshold",python,selection_mouse +636,2496480,"train_tokenizer.py",1006,38," grad_clip_threshold: float = 10e5\n",python,selection_mouse +637,2514958,"train_tokenizer.py",3489,0,"",python,selection_mouse +638,2516689,"train_tokenizer.py",3578,0,"",python,selection_mouse +639,2516691,"train_tokenizer.py",3577,0,"",python,selection_command +640,2517274,"train_tokenizer.py",3504,0,"",python,selection_mouse +641,2517275,"train_tokenizer.py",3503,0,"",python,selection_command +642,2520455,"train_tokenizer.py",3428,0,"",python,selection_mouse +643,2520601,"train_tokenizer.py",3422,13,"raw_grad_norm",python,selection_mouse +644,2521255,"train_tokenizer.py",3496,0,"",python,selection_mouse +645,2521835,"train_tokenizer.py",3533,0,"",python,selection_mouse +646,2522382,"train_tokenizer.py",3490,0,"",python,selection_mouse +647,2523436,"train_tokenizer.py",3409,0,"",python,selection_mouse +648,2523678,"train_tokenizer.py",3409,2," j",python,selection_mouse +649,2523678,"train_tokenizer.py",3409,3," jn",python,selection_mouse +650,2523710,"train_tokenizer.py",3409,5," jnp.",python,selection_mouse +651,2523711,"train_tokenizer.py",3409,7," jnp.mi",python,selection_mouse +652,2523731,"train_tokenizer.py",3409,9," jnp.mini",python,selection_mouse +653,2523784,"train_tokenizer.py",3409,12," jnp.minimum",python,selection_mouse +654,2523788,"train_tokenizer.py",3409,13," jnp.minimum(",python,selection_mouse +655,2523789,"train_tokenizer.py",3409,15," jnp.minimum(ra",python,selection_mouse +656,2523792,"train_tokenizer.py",3409,16," jnp.minimum(raw",python,selection_mouse +657,2523807,"train_tokenizer.py",3409,17," jnp.minimum(raw_",python,selection_mouse +658,2524228,"train_tokenizer.py",3426,0,"",python,selection_mouse 
+659,2524882,"train_tokenizer.py",3410,0,"",python,selection_mouse +660,2525013,"train_tokenizer.py",3410,3,"jnp",python,selection_mouse +661,2525189,"train_tokenizer.py",3410,11,"jnp.minimum",python,selection_mouse +662,2525209,"train_tokenizer.py",3410,25,"jnp.minimum(raw_grad_norm",python,selection_mouse +663,2525264,"train_tokenizer.py",3410,31,"jnp.minimum(raw_grad_norm, args",python,selection_mouse +664,2525265,"train_tokenizer.py",3410,51,"jnp.minimum(raw_grad_norm, args.grad_clip_threshold",python,selection_mouse +665,2525318,"train_tokenizer.py",3410,94,"jnp.minimum(raw_grad_norm, args.grad_clip_threshold)\n metrics[""grad_norm""] = g_norm_clipped",python,selection_mouse +666,2525673,"train_tokenizer.py",3410,51,"jnp.minimum(raw_grad_norm, args.grad_clip_threshold",python,selection_mouse +667,2526256,"train_tokenizer.py",3410,52,"jnp.minimum(raw_grad_norm, args.grad_clip_threshold)",python,selection_mouse +668,2527341,"train_tokenizer.py",3410,52,"",python,content +669,2527357,"train_tokenizer.py",3409,0,"",python,selection_command +670,2527447,"train_tokenizer.py",3432,0,"",python,selection_command +671,2528002,"train_tokenizer.py",3433,0,"",python,selection_command +672,2528509,"train_tokenizer.py",3434,0,"",python,selection_command +673,2528537,"train_tokenizer.py",3435,0,"",python,selection_command +674,2528557,"train_tokenizer.py",3436,0,"",python,selection_command +675,2528574,"train_tokenizer.py",3437,0,"",python,selection_command +676,2530097,"train_tokenizer.py",3438,0,"jnp.minimum(raw_grad_norm, args.grad_clip_threshold)",python,content +677,2530108,"train_tokenizer.py",3489,0,"",python,selection_command +678,2530712,"train_tokenizer.py",3490,0,"",python,selection_command +679,2531241,"train_tokenizer.py",3490,14,"",python,content +680,2531250,"train_tokenizer.py",3489,0,"",python,selection_command +681,2532410,"train_tokenizer.py",3490,0,"",python,selection_command +682,2532689,"train_tokenizer.py",3490,0," ",python,content +683,2532690,"train_tokenizer.py",3491,0,"",python,selection_keyboard +684,2532825,"train_tokenizer.py",3491,0,"i",python,content +685,2532826,"train_tokenizer.py",3492,0,"",python,selection_keyboard +686,2532893,"train_tokenizer.py",3492,0,"f",python,content +687,2532894,"train_tokenizer.py",3493,0,"",python,selection_keyboard +688,2532990,"train_tokenizer.py",3493,0," ",python,content +689,2532992,"train_tokenizer.py",3494,0,"",python,selection_keyboard +690,2533484,"train_tokenizer.py",3494,0,"a",python,content +691,2533485,"train_tokenizer.py",3495,0,"",python,selection_keyboard +692,2533724,"train_tokenizer.py",3495,0,"r",python,content +693,2533725,"train_tokenizer.py",3496,0,"",python,selection_keyboard +694,2533877,"train_tokenizer.py",3496,0,"g",python,content +695,2533878,"train_tokenizer.py",3497,0,"",python,selection_keyboard +696,2534285,"train_tokenizer.py",3494,3,"args",python,content +697,2534715,"train_tokenizer.py",3498,0,".",python,content +698,2534717,"train_tokenizer.py",3499,0,"",python,selection_keyboard +699,2535889,"train_tokenizer.py",3499,0,"g",python,content +700,2535891,"train_tokenizer.py",3500,0,"",python,selection_keyboard +701,2536006,"train_tokenizer.py",3500,0,"r",python,content +702,2536008,"train_tokenizer.py",3501,0,"",python,selection_keyboard +703,2536205,"train_tokenizer.py",3501,0,"a",python,content +704,2536206,"train_tokenizer.py",3502,0,"",python,selection_keyboard +705,2537331,"train_tokenizer.py",3499,3,"grad_clip_threshold",python,content +706,2537781,"train_tokenizer.py",3518,0," ",python,content 
+707,2537781,"train_tokenizer.py",3519,0,"",python,selection_keyboard +708,2538221,"train_tokenizer.py",3519,0,"e",python,content +709,2538222,"train_tokenizer.py",3520,0,"",python,selection_keyboard +710,2538320,"train_tokenizer.py",3520,0,"l",python,content +711,2538322,"train_tokenizer.py",3521,0,"",python,selection_keyboard +712,2538445,"train_tokenizer.py",3521,0,"s",python,content +713,2538446,"train_tokenizer.py",3522,0,"",python,selection_keyboard +714,2538641,"train_tokenizer.py",3522,0,"e",python,content +715,2538641,"train_tokenizer.py",3523,0,"",python,selection_keyboard +716,2538692,"train_tokenizer.py",3523,0," ",python,content +717,2538693,"train_tokenizer.py",3524,0,"",python,selection_keyboard +718,2538842,"train_tokenizer.py",3524,0,"r",python,content +719,2538843,"train_tokenizer.py",3525,0,"",python,selection_keyboard +720,2539501,"train_tokenizer.py",3525,0,"a",python,content +721,2539502,"train_tokenizer.py",3526,0,"",python,selection_keyboard +722,2539741,"train_tokenizer.py",3526,0,"w",python,content +723,2539742,"train_tokenizer.py",3527,0,"",python,selection_keyboard +724,2540354,"train_tokenizer.py",3524,3,"raw_grad_norm",python,content +725,2541989,"train_tokenizer.py",3410,0,"",python,selection_mouse +726,2543251,"train_tokenizer.py",3409,0,"",python,selection_command +727,2543798,"train_tokenizer.py",3389,22,"",python,content +728,2543835,"train_tokenizer.py",3393,0,"",python,selection_command +729,2551490,"train_tokenizer.py",3397,0,"",python,selection_mouse +730,2553156,"train_tokenizer.py",3396,0,"",python,selection_mouse +731,2555843,"train_tokenizer.py",1019,0,"",python,selection_mouse +732,2555993,"train_tokenizer.py",1010,19,"grad_clip_threshold",python,selection_mouse +733,2556130,"train_tokenizer.py",1006,38," grad_clip_threshold: float = 10e5\n",python,selection_mouse +734,2557302,"train_tokenizer.py",1010,0,"",python,selection_command +735,2568723,"train_tokenizer.py",1006,0,"",python,selection_command +736,2571920,"train_tokenizer.py",1006,0," grad_clip_threshold: float | None = None\n",python,content +737,2571925,"train_tokenizer.py",1051,38,"",python,content +738,2590463,"sample.py",0,0,"from dataclasses import dataclass\nimport time\nimport os\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n\n\nargs = tyro.cli(Args)\nrng = jax.random.PRNGKey(args.seed)\n\n# --- Load Genie checkpoint ---\ngenie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n 
latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\nckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n\n\ndef _sampling_wrapper(module, batch):\n return module.sample(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n sampling_fn = jax.jit(nn.apply(_sampling_wrapper, genie)) \n rng, _rng = jax.random.split(rng)\n batch = dict(videos=vid, latent_actions=action_batch, rng=_rng)\n generated_vid = sampling_fn(\n params,\n batch\n )\n return generated_vid\n\n# --- Get video + latent actions ---\narray_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n]\ndataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n)\nvideo_batch = next(iter(dataloader))\n# Get latent actions for all videos in the batch\nbatch = dict(videos=video_batch)\naction_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\naction_batch = action_batch.reshape(video_batch.shape[0], args.seq_len - 1, 1)\n\n# --- Sample + evaluate video ---\nvid = _autoreg_sample(rng, video_batch, action_batch)\ngt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\nrecon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\nssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\nprint(f""SSIM: {ssim}"")\n\n# --- Construct video ---\ntrue_videos = (video_batch * 255).astype(np.uint8)\npred_videos = (vid * 255).astype(np.uint8)\nvideo_comparison = np.zeros((2, *vid.shape), dtype=np.uint8)\nvideo_comparison[0] = true_videos[:, :args.seq_len]\nvideo_comparison[1] = pred_videos\nframes = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n# --- Save video --- \nimgs = [Image.fromarray(img) for img in frames]\n# Write actions on each frame, on each row (i.e., for each video in the batch, on the GT row)\nfor t, img in enumerate(imgs[1:]):\n d = ImageDraw.Draw(img)\n for row in range(action_batch.shape[0]):\n action = action_batch[row, t, 0]\n y_offset = row * video_batch.shape[2] + 2\n d.text((2, y_offset), f""{action}"", fill=255)\nimgs[0].save(\n f""generation_{time.time()}.gif"",\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n)\n",python,tab +739,2596124,"train_lam.py",0,0,"",python,tab +740,2596125,"train_lam.py",428,0,"",python,selection_mouse 
+741,2596140,"train_lam.py",427,0,"",python,selection_command +742,2600774,"train_dynamics.py",0,0,"",python,tab +743,2601174,"train_dynamics.py",10226,0,"",python,selection_mouse +744,2610916,"train_tokenizer.py",0,0,"",python,tab +745,2617431,"train_tokenizer.py",1006,0,"",python,selection_command +746,2617704,"train_tokenizer.py",1006,44," grad_clip_threshold: float = 10e5",python,content +747,2619117,"train_tokenizer.py",1006,0," grad_clip_threshold: Optional[float] = None\n",python,content +748,2619121,"train_tokenizer.py",1054,38,"",python,content +749,2623692,"train_tokenizer.py",50,0,"",python,selection_mouse +750,2623695,"train_tokenizer.py",49,0,"",python,selection_command +751,2624227,"train_tokenizer.py",50,0,"\n",python,content +752,2629381,"train_tokenizer.py",51,0,"from typing import Optional\n\n",python,content +753,2629385,"train_tokenizer.py",79,1,"",python,content +754,2631496,"train_tokenizer.py",265,0,"",python,selection_mouse +755,2632171,"train_tokenizer.py",264,0,"",python,selection_command +756,2632794,"train_tokenizer.py",80,0,"",python,selection_mouse +757,2633059,"train_tokenizer.py",79,1,"\n",python,selection_mouse +758,2633290,"train_tokenizer.py",79,0,"",python,selection_mouse +759,2633617,"train_tokenizer.py",79,1,"",python,content +760,2640550,"train_tokenizer.py",6299,0,"",python,selection_mouse +761,2640550,"train_tokenizer.py",6298,0,"",python,selection_command +762,2641772,"train_tokenizer.py",6233,0,"",python,selection_mouse +763,2642418,"train_tokenizer.py",6170,0,"",python,selection_mouse +764,2643077,"train_tokenizer.py",6131,0,"",python,selection_mouse +765,2643603,"train_tokenizer.py",6218,0,"",python,selection_mouse +766,2643764,"train_tokenizer.py",6216,5,"optax",python,selection_mouse +767,2644306,"train_tokenizer.py",6165,0,"",python,selection_mouse +768,2644465,"train_tokenizer.py",6157,19,"clip_by_global_norm",python,selection_mouse +769,2644994,"train_tokenizer.py",6299,0,"",python,selection_mouse +770,2645004,"train_tokenizer.py",6298,0,"",python,selection_command +771,2645586,"train_tokenizer.py",6188,0,"",python,selection_mouse +772,2645736,"train_tokenizer.py",6182,19,"grad_clip_threshold",python,selection_mouse +773,2646577,"train_tokenizer.py",6151,0,"",python,selection_mouse +774,2647355,"train_tokenizer.py",6299,0,"",python,selection_mouse +775,2647356,"train_tokenizer.py",6298,0,"",python,selection_command +776,2648232,"train_tokenizer.py",6212,0,"",python,selection_command +777,2648379,"train_tokenizer.py",6147,0,"",python,selection_command +778,2648513,"train_tokenizer.py",6125,0,"",python,selection_command +779,2648648,"train_tokenizer.py",6115,0,"",python,selection_command +780,2648991,"train_tokenizer.py",6116,0,"\n ",python,content +781,2649675,"train_tokenizer.py",6121,0,"i",python,content +782,2649676,"train_tokenizer.py",6122,0,"",python,selection_keyboard +783,2649762,"train_tokenizer.py",6122,0,"f",python,content +784,2649763,"train_tokenizer.py",6123,0,"",python,selection_keyboard +785,2649827,"train_tokenizer.py",6123,0," ",python,content +786,2649828,"train_tokenizer.py",6124,0,"",python,selection_keyboard +787,2651394,"train_tokenizer.py",6124,0,"a",python,content +788,2651395,"train_tokenizer.py",6125,0,"",python,selection_keyboard +789,2651948,"train_tokenizer.py",6125,0,"t",python,content +790,2651949,"train_tokenizer.py",6126,0,"",python,selection_keyboard +791,2652094,"train_tokenizer.py",6126,0,"g",python,content +792,2652095,"train_tokenizer.py",6127,0,"",python,selection_keyboard 
+793,2652971,"train_tokenizer.py",6126,1,"",python,content +794,2653089,"train_tokenizer.py",6125,1,"",python,content +795,2653209,"train_tokenizer.py",6125,0,"r",python,content +796,2653210,"train_tokenizer.py",6126,0,"",python,selection_keyboard +797,2653438,"train_tokenizer.py",6126,0,"g",python,content +798,2653439,"train_tokenizer.py",6127,0,"",python,selection_keyboard +799,2654136,"train_tokenizer.py",6124,3,"args",python,content +800,2654373,"train_tokenizer.py",6128,0,".",python,content +801,2654378,"train_tokenizer.py",6129,0,"",python,selection_keyboard +802,2660873,"train_tokenizer.py",6128,1,"",python,content +803,2661044,"train_tokenizer.py",6124,4,"",python,content +804,2661436,"train_tokenizer.py",6121,3,"",python,content +805,2661769,"train_tokenizer.py",6117,4,"",python,content +806,2662308,"train_tokenizer.py",6116,1,"",python,content +807,2663127,"train_tokenizer.py",6115,0,"",python,selection_command +808,2663258,"train_tokenizer.py",6121,0,"",python,selection_command +809,2663768,"train_tokenizer.py",6117,21," tx = optax.chain(",python,selection_command +810,2664123,"train_tokenizer.py",6117,86," tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),",python,selection_command +811,2664276,"train_tokenizer.py",6117,172," tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)",python,selection_command +812,2664440,"train_tokenizer.py",6117,182," tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )",python,selection_command +813,2664582,"train_tokenizer.py",6117,271," tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)",python,selection_command +814,2664952,"train_tokenizer.py",6117,182," tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )",python,selection_command +815,2665269,"train_tokenizer.py",6121,0,"",python,selection_command +816,2674262,"train_tokenizer.py",6117,0,"",python,selection_command +817,2676793,"train_tokenizer.py",6117,0," if args.grad_clip_threshold and args.grad_clip_threshold > 0:\n",python,content +818,2676876,"train_tokenizer.py",6183,0," tx = optax.chain(\n",python,content +819,2676900,"train_tokenizer.py",6183,47," tx = optax.chain(",python,content +820,2676937,"train_tokenizer.py",6370,0," else:\n",python,content +821,2677084,"train_tokenizer.py",6380,0," tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n",python,content +822,2683387,"train_tokenizer.py",6149,0,"",python,selection_mouse +823,2683554,"train_tokenizer.py",6149,1,"a",python,selection_mouse +824,2683555,"train_tokenizer.py",6149,3,"and",python,selection_mouse +825,2683555,"train_tokenizer.py",6149,5,"and a",python,selection_mouse +826,2683576,"train_tokenizer.py",6149,7,"and arg",python,selection_mouse +827,2683588,"train_tokenizer.py",6149,10,"and args.g",python,selection_mouse +828,2683605,"train_tokenizer.py",6149,11,"and args.gr",python,selection_mouse +829,2683659,"train_tokenizer.py",6149,14,"and args.grad_",python,selection_mouse +830,2683659,"train_tokenizer.py",6149,15,"and args.grad_c",python,selection_mouse 
+831,2683673,"train_tokenizer.py",6116,33,"\n if args.grad_clip_threshold ",python,selection_mouse +832,2684005,"train_tokenizer.py",6149,30,"and args.grad_clip_threshold >",python,selection_mouse +833,2684065,"train_tokenizer.py",6149,31,"and args.grad_clip_threshold > ",python,selection_mouse +834,2684255,"train_tokenizer.py",6149,32,"and args.grad_clip_threshold > 0",python,selection_mouse +835,2684794,"train_tokenizer.py",6149,32,"",python,content +836,2686689,"train_tokenizer.py",6148,0,"",python,selection_command +837,2687085,"train_tokenizer.py",6148,1,"",python,content +838,2689092,"train_tokenizer.py",6116,0,"",python,selection_mouse +839,2689106,"train_tokenizer.py",6115,0,"",python,selection_command +840,2692327,"train_tokenizer.py",6752,0,"",python,selection_mouse +841,2704355,"train_tokenizer.py",79,0,"",python,selection_mouse +842,2704884,"train_tokenizer.py",78,0,"",python,selection_mouse +843,2704885,"train_tokenizer.py",77,0,"",python,selection_command +844,2709042,"train_dynamics.py",0,0,"",python,tab +845,2709044,"train_dynamics.py",50,0,"",python,selection_mouse +846,2709047,"train_dynamics.py",49,0,"",python,selection_command +847,2709801,"train_dynamics.py",50,0,"\nfrom typing import Optional",python,content +848,2709807,"train_dynamics.py",51,0,"",python,selection_command +849,2715599,"train_lam.py",0,0,"",python,tab +850,2716438,"train_lam.py",50,0,"",python,selection_mouse +851,2716451,"train_lam.py",49,0,"",python,selection_command +852,2717033,"train_lam.py",50,0,"\nfrom typing import Optional",python,content +853,2717056,"train_lam.py",51,0,"",python,selection_command +854,2718903,"train_lam.py",128,0,"",python,selection_mouse +855,2719873,"train_lam.py",55,0,"",python,selection_mouse +856,2720919,"train_tokenizer.py",0,0,"",python,tab +857,2720920,"train_tokenizer.py",1071,0,"",python,selection_mouse +858,2722803,"train_lam.py",0,0,"",python,tab +859,2722803,"train_lam.py",1079,0,"",python,selection_mouse +860,2723495,"train_lam.py",1097,0,"\n grad_clip_threshold: Optional[float] = None",python,content +861,2723508,"train_lam.py",1102,0,"",python,selection_command +862,2723806,"train_lam.py",1064,0,"",python,selection_command +863,2724100,"train_lam.py",1060,38,"",python,content +864,2724131,"train_lam.py",1064,0,"",python,selection_command +865,2727292,"train_dynamics.py",0,0,"",python,tab +866,2729125,"train_dynamics.py",966,0,"",python,selection_mouse +867,2729501,"train_dynamics.py",991,0,"\n grad_clip_threshold: Optional[float] = None",python,content +868,2729521,"train_dynamics.py",996,0,"",python,selection_command +869,2729865,"train_dynamics.py",958,0,"",python,selection_command +870,2730261,"train_dynamics.py",954,38,"",python,content +871,2730280,"train_dynamics.py",958,0,"",python,selection_command +872,2736947,"train_tokenizer.py",0,0,"",python,tab +873,2736948,"train_tokenizer.py",3319,0,"",python,selection_mouse +874,2737896,"train_tokenizer.py",3283,98," # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)",python,selection_command +875,2738130,"train_tokenizer.py",3283,143," # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n raw_grad_norm = optax.global_norm(grads)",python,selection_command +876,2738254,"train_tokenizer.py",3283,270," # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n raw_grad_norm = optax.global_norm(grads)\n metrics[""grad_norm""] = jnp.minimum(raw_grad_norm, args.grad_clip_threshold) if 
args.grad_clip_threshold else raw_grad_norm",python,selection_command +877,2738563,"train_tokenizer.py",3283,0,"",python,selection_command +878,2742185,"train_dynamics.py",0,0,"",python,tab +879,2742186,"train_dynamics.py",3518,0,"",python,selection_mouse +880,2742884,"train_dynamics.py",3539,0,"\n # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n raw_grad_norm = optax.global_norm(grads)\n metrics[""grad_norm""] = jnp.minimum(raw_grad_norm, args.grad_clip_threshold) if args.grad_clip_threshold else raw_grad_norm",python,content +881,2742903,"train_dynamics.py",3544,0,"",python,selection_command +882,2743298,"train_dynamics.py",3502,0,"",python,selection_command +883,2743635,"train_dynamics.py",3498,42,"",python,content +884,2743659,"train_dynamics.py",3502,0,"",python,selection_command +885,2744250,"train_dynamics.py",3601,0,"",python,selection_command +886,2744483,"train_dynamics.py",3502,0,"",python,selection_command +887,2745655,"train_dynamics.py",3428,0,"",python,selection_command +888,2746398,"train_dynamics.py",3379,119,"",python,content +889,2746442,"train_dynamics.py",3383,0,"",python,selection_command +890,2746639,"train_dynamics.py",3284,0,"",python,selection_command +891,2746918,"train_dynamics.py",3280,99,"",python,content +892,2746945,"train_dynamics.py",3284,0,"",python,selection_command +893,2747795,"train_dynamics.py",3280,98," # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)",python,selection_command +894,2748006,"train_dynamics.py",3280,143," # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n raw_grad_norm = optax.global_norm(grads)",python,selection_command +895,2748150,"train_dynamics.py",3280,270," # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n raw_grad_norm = optax.global_norm(grads)\n metrics[""grad_norm""] = jnp.minimum(raw_grad_norm, args.grad_clip_threshold) if args.grad_clip_threshold else raw_grad_norm",python,selection_command +896,2748378,"train_dynamics.py",3280,0,"",python,selection_command +897,2751181,"train_lam.py",0,0,"",python,tab +898,2754918,"train_lam.py",3345,0,"",python,selection_mouse +899,2755569,"train_lam.py",3428,0,"\n # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n raw_grad_norm = optax.global_norm(grads)\n metrics[""grad_norm""] = jnp.minimum(raw_grad_norm, args.grad_clip_threshold) if args.grad_clip_threshold else raw_grad_norm",python,content +900,2755581,"train_lam.py",3433,0,"",python,selection_command +901,2756000,"train_lam.py",3334,0,"",python,selection_command +902,2756317,"train_lam.py",3330,99,"",python,content +903,2756332,"train_lam.py",3334,0,"",python,selection_command +904,2756413,"train_lam.py",3433,0,"",python,selection_command +905,2757055,"train_lam.py",3478,0,"",python,selection_command +906,2757770,"train_lam.py",3605,0,"",python,selection_command +907,2758558,"train_lam.py",3601,44," raw_grad_norm = optax.global_norm(grads)",python,selection_command +908,2759138,"train_lam.py",3601,118," raw_grad_norm = optax.global_norm(grads)\n g_norm_clipped = jnp.minimum(raw_grad_norm, args.grad_clip_threshold)",python,selection_command +909,2759346,"train_lam.py",3601,160," raw_grad_norm = optax.global_norm(grads)\n g_norm_clipped = jnp.minimum(raw_grad_norm, args.grad_clip_threshold)\n metrics[""grad_norm""] = g_norm_clipped",python,selection_command 
+910,2759831,"train_lam.py",3601,161,"",python,content +911,2777059,"train_tokenizer.py",0,0,"",python,tab +912,2777060,"train_tokenizer.py",6149,0,"",python,selection_mouse +913,2777070,"train_tokenizer.py",6148,0,"",python,selection_command +914,2777898,"train_tokenizer.py",6117,32," if args.grad_clip_threshold:",python,selection_command +915,2778129,"train_tokenizer.py",6117,58," if args.grad_clip_threshold:\n tx = optax.chain(",python,selection_command +916,2778287,"train_tokenizer.py",6117,123," if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),",python,selection_command +917,2778391,"train_tokenizer.py",6117,209," if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)",python,selection_command +918,2778536,"train_tokenizer.py",6117,219," if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )",python,selection_command +919,2778673,"train_tokenizer.py",6117,229," if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )\n else:",python,selection_command +920,2778816,"train_tokenizer.py",6117,316," if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )\n else:\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)",python,selection_command +921,2780082,"train_tokenizer.py",6117,0,"",python,selection_command +922,2783940,"train_lam.py",0,0,"",python,tab +923,2783940,"train_lam.py",6782,0,"",python,selection_mouse +924,2783941,"train_lam.py",6781,0,"",python,selection_command +925,2784400,"train_lam.py",6782,0,"\n if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )\n else:\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)",python,content +926,2784428,"train_lam.py",6787,0,"",python,selection_command +927,2784978,"train_lam.py",6777,0,"",python,selection_command +928,2785461,"train_lam.py",6687,96,"",python,content +929,2785474,"train_lam.py",6691,0,"",python,selection_command +930,2785662,"train_lam.py",6626,0,"",python,selection_command +931,2786030,"train_lam.py",6600,87,"",python,content +932,2786068,"train_lam.py",6604,0,"",python,selection_command +933,2797556,"train_tokenizer.py",0,0,"",python,tab +934,2804732,"train_lam.py",0,0,"",python,tab +935,2805591,"train_dynamics.py",0,0,"",python,tab +936,2809327,"train_tokenizer.py",0,0,"",python,tab +937,2810599,"train_tokenizer.py",0,0,"",python,tab +938,2813671,"train_tokenizer.py",0,0,"",python,tab +939,2832007,"train_tokenizer.py",6122,0,"",python,selection_mouse +940,2832755,"train_tokenizer.py",6117,32," if args.grad_clip_threshold:",python,selection_command +941,2832989,"train_tokenizer.py",6117,58," if args.grad_clip_threshold:\n tx = optax.chain(",python,selection_command +942,2833120,"train_tokenizer.py",6117,123," if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),",python,selection_command +943,2833270,"train_tokenizer.py",6117,209," if 
args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)",python,selection_command +944,2833401,"train_tokenizer.py",6117,219," if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )",python,selection_command +945,2833565,"train_tokenizer.py",6117,229," if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )\n else:",python,selection_command +946,2833692,"train_tokenizer.py",6117,316," if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )\n else:\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)",python,selection_command +947,2833975,"train_tokenizer.py",6117,0,"",python,selection_command +948,2835381,"train_dynamics.py",0,0,"",python,tab +949,2835382,"train_dynamics.py",6766,0,"",python,selection_mouse +950,2835389,"train_dynamics.py",6765,0,"",python,selection_command +951,2835946,"train_dynamics.py",6766,0,"\n if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )\n else:\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)",python,content +952,2835954,"train_dynamics.py",6771,0,"",python,selection_command +953,2836260,"train_dynamics.py",6761,0,"",python,selection_command +954,2836684,"train_dynamics.py",6671,96,"",python,content +955,2836697,"train_dynamics.py",6675,0,"",python,selection_command +956,2836823,"train_dynamics.py",6610,0,"",python,selection_command +957,2837186,"train_dynamics.py",6584,87,"",python,content +958,2837202,"train_dynamics.py",6588,0,"",python,selection_command +959,2866691,"train_tokenizer_bak.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import count_parameters_by_component\nfrom utils.logger import CompositeLogger\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 0.0\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log_dir: str = ""logs/"" \n loggers: list[str] = field(default_factory=lambda: [""console""]) # options: console, local, tb, wandb\n entity: str = """"\n project: str = 
""""\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if jax.process_index() == 0:\n cfg = vars(args).copy()\n cfg[""model_param_count""] = param_counts\n logger = CompositeLogger(args.loggers, cfg)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, 
args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer__\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n array_record_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in dataloader) # type: ignore\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n step += 1\n\n # --- Logging ---\n if step % args.log_interval == 0 and jax.process_index() == 0:\n logger.log_metrics(\n {\n ""loss"": loss,\n **metrics,\n },\n step\n )\n if step % args.log_image_interval == 0:\n\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=np.asarray(gt_seq[0] * 255.).astype(np.uint8),\n recon=np.asarray(recon_seq[0] * 255.).astype(np.uint8),\n true_vs_recon=np.asarray(comparison_seq.astype(np.uint8)\n ),\n )\n logger.log_images(log_images, step)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab +960,2867361,"train_lam.py",0,0,"",python,tab +961,2867363,"train_lam.py",51,0,"",python,selection_command +962,2888133,"train_dynamics.py",0,0,"",python,tab +963,2889523,"train_tokenizer.py",0,0,"",python,tab +964,2895901,"TERMINAL",0,0,"idling",,terminal_command +965,2895951,"TERMINAL",0,0,"]633;E;2025-07-16 11:51:32 
idling;97c203bb-2de3-4bf0-b19e-fa122ab0b933]633;C",,terminal_output +966,2896027,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1991.localdomain: Wed Jul 16 11:51:32 2025Partition dev_cpuonly: 11 nodes idle\rPartition cpuonly:\t 1 nodes idle\rPartition dev_accelerated:\t 1 nodes idle\rPartition accelerated:\t 0 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 1 nodes idle\rPartition large:\t 7 nodes idle",,terminal_output +967,2897088,"TERMINAL",0,0,"3",,terminal_output +968,2898095,"TERMINAL",0,0,"4",,terminal_output +969,2898136,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +970,2901703,"TERMINAL",0,0,"salloc --time=10:00:00 --partition=accelerated --nodes=2 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5",,terminal_command +971,2901762,"TERMINAL",0,0,"]633;E;2025-07-16 11:51:38 salloc --time=10:00:00 --partition=accelerated --nodes=2 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5;97c203bb-2de3-4bf0-b19e-fa122ab0b933]633;Csalloc: Pending job allocation 3349982\r\nsalloc: job 3349982 queued and waiting for resources\r\n",,terminal_output +972,3403659,"TERMINAL",0,0,"",,terminal_focus +973,3406779,"TERMINAL",0,0,"idling",,terminal_command +974,3406830,"TERMINAL",0,0,"]633;E;2025-07-16 12:00:03 idling;dea9d5fc-91fd-447c-886d-4b0240ae057d]633;C",,terminal_output +975,3406892,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1991.localdomain: Wed Jul 16 12:00:03 2025Partition dev_cpuonly: 11 nodes idle\rPartition cpuonly:\t 2 nodes idle\rPartition dev_accelerated:\t 0 nodes idle\rPartition accelerated:\t 0 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 1 nodes idle\rPartition large:\t 7 nodes idle",,terminal_output +976,3407968,"TERMINAL",0,0,"4",,terminal_output +977,3408960,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +978,3463382,"train_tokenizer.py",0,0,"",python,tab +979,3466966,"TERMINAL",0,0,"bash",,terminal_focus +980,3518697,"train_tokenizer.py",11477,0,"",python,selection_mouse +981,3519587,"train_tokenizer.py",11431,0,"",python,selection_command +982,3519776,"train_tokenizer.py",11405,0,"",python,selection_command +983,3519962,"train_tokenizer.py",11374,0,"",python,selection_command +984,3520133,"train_tokenizer.py",11298,0,"",python,selection_command +985,3520658,"train_tokenizer.py",11374,0,"",python,selection_command +986,3520912,"train_tokenizer.py",11298,0,"",python,selection_command +987,3521151,"train_tokenizer.py",11297,0,"",python,selection_command +988,3521726,"train_tokenizer.py",11242,0,"",python,selection_command +989,3522635,"train_tokenizer.py",11243,0,"",python,selection_command +990,3522833,"train_tokenizer.py",11242,0,"",python,selection_command +991,3522995,"train_tokenizer.py",11243,0,"",python,selection_command +992,3523082,"train_tokenizer.py",11242,0,"",python,selection_command +993,3523216,"train_tokenizer.py",11243,0,"",python,selection_command +994,3523300,"train_tokenizer.py",11242,0,"",python,selection_command +995,3523419,"train_tokenizer.py",11243,0,"",python,selection_command +996,3523493,"train_tokenizer.py",11242,0,"",python,selection_command +997,3523626,"train_tokenizer.py",11243,0,"",python,selection_command +998,3523698,"train_tokenizer.py",11242,0,"",python,selection_command +999,3523835,"train_tokenizer.py",11243,0,"",python,selection_command +1000,3523921,"train_tokenizer.py",11242,0,"",python,selection_command 
+1001,3524058,"train_tokenizer.py",11243,0,"",python,selection_command +1002,3524139,"train_tokenizer.py",11242,0,"",python,selection_command +1003,3524299,"train_tokenizer.py",11243,0,"",python,selection_command +1004,3524764,"train_tokenizer.py",11170,0,"",python,selection_command +1005,3525257,"train_tokenizer.py",11100,0,"",python,selection_command +1006,3525324,"train_tokenizer.py",11057,0,"",python,selection_command +1007,3525349,"train_tokenizer.py",11008,0,"",python,selection_command +1008,3525359,"train_tokenizer.py",10931,0,"",python,selection_command +1009,3525416,"train_tokenizer.py",10853,0,"",python,selection_command +1010,3525417,"train_tokenizer.py",10773,0,"",python,selection_command +1011,3525434,"train_tokenizer.py",10751,0,"",python,selection_command +1012,3525491,"train_tokenizer.py",10682,0,"",python,selection_command +1013,3525500,"train_tokenizer.py",10627,0,"",python,selection_command +1014,3525558,"train_tokenizer.py",10545,0,"",python,selection_command +1015,3525563,"train_tokenizer.py",10493,0,"",python,selection_command +1016,3525618,"train_tokenizer.py",10444,0,"",python,selection_command +1017,3525624,"train_tokenizer.py",10388,0,"",python,selection_command +1018,3525678,"train_tokenizer.py",10366,0,"",python,selection_command +1019,3525679,"train_tokenizer.py",10340,0,"",python,selection_command +1020,3525735,"train_tokenizer.py",10301,0,"",python,selection_command +1021,3525748,"train_tokenizer.py",10259,0,"",python,selection_command +1022,3525802,"train_tokenizer.py",10217,0,"",python,selection_command +1023,3525802,"train_tokenizer.py",10191,0,"",python,selection_command +1024,3526002,"train_tokenizer.py",10160,0,"",python,selection_command +1025,3526158,"train_tokenizer.py",10081,0,"",python,selection_command +1026,3526552,"train_tokenizer.py",10160,0,"",python,selection_command +1027,3527050,"train_tokenizer.py",10191,0,"",python,selection_command +1028,3527112,"train_tokenizer.py",10217,0,"",python,selection_command +1029,3527112,"train_tokenizer.py",10259,0,"",python,selection_command +1030,3527131,"train_tokenizer.py",10301,0,"",python,selection_command +1031,3527187,"train_tokenizer.py",10340,0,"",python,selection_command +1032,3527191,"train_tokenizer.py",10366,0,"",python,selection_command +1033,3527245,"train_tokenizer.py",10388,0,"",python,selection_command +1034,3527257,"train_tokenizer.py",10444,0,"",python,selection_command +1035,3527318,"train_tokenizer.py",10493,0,"",python,selection_command +1036,3527319,"train_tokenizer.py",10545,0,"",python,selection_command +1037,3527378,"train_tokenizer.py",10627,0,"",python,selection_command +1038,3527379,"train_tokenizer.py",10682,0,"",python,selection_command +1039,3527434,"train_tokenizer.py",10751,0,"",python,selection_command +1040,3527436,"train_tokenizer.py",10773,0,"",python,selection_command +1041,3527467,"train_tokenizer.py",10853,0,"",python,selection_command +1042,3527522,"train_tokenizer.py",10931,0,"",python,selection_command +1043,3527523,"train_tokenizer.py",11008,0,"",python,selection_command +1044,3527578,"train_tokenizer.py",11057,0,"",python,selection_command +1045,3527584,"train_tokenizer.py",11100,0,"",python,selection_command +1046,3527642,"train_tokenizer.py",11170,0,"",python,selection_command +1047,3527642,"train_tokenizer.py",11243,0,"",python,selection_command +1048,3527695,"train_tokenizer.py",11298,0,"",python,selection_command +1049,3527700,"train_tokenizer.py",11374,0,"",python,selection_command +1050,3527758,"train_tokenizer.py",11405,0,"",python,selection_command 
+1051,3527765,"train_tokenizer.py",11431,0,"",python,selection_command +1052,3527942,"train_tokenizer.py",11477,0,"",python,selection_command +1053,3528221,"train_tokenizer.py",11431,0,"",python,selection_command +1054,3528710,"train_tokenizer.py",11405,0,"",python,selection_command +1055,3528765,"train_tokenizer.py",11374,0,"",python,selection_command +1056,3528766,"train_tokenizer.py",11298,0,"",python,selection_command +1057,3528832,"train_tokenizer.py",11243,0,"",python,selection_command +1058,3528833,"train_tokenizer.py",11170,0,"",python,selection_command +1059,3528889,"train_tokenizer.py",11100,0,"",python,selection_command +1060,3528889,"train_tokenizer.py",11057,0,"",python,selection_command +1061,3528948,"train_tokenizer.py",11008,0,"",python,selection_command +1062,3528949,"train_tokenizer.py",10931,0,"",python,selection_command +1063,3529005,"train_tokenizer.py",10853,0,"",python,selection_command +1064,3529006,"train_tokenizer.py",10773,0,"",python,selection_command +1065,3529062,"train_tokenizer.py",10751,0,"",python,selection_command +1066,3529118,"train_tokenizer.py",10682,0,"",python,selection_command +1067,3529147,"train_tokenizer.py",10627,0,"",python,selection_command +1068,3529168,"train_tokenizer.py",10545,0,"",python,selection_command +1069,3529179,"train_tokenizer.py",10493,0,"",python,selection_command +1070,3529232,"train_tokenizer.py",10444,0,"",python,selection_command +1071,3529233,"train_tokenizer.py",10388,0,"",python,selection_command +1072,3529252,"train_tokenizer.py",10366,0,"",python,selection_command +1073,3529310,"train_tokenizer.py",10340,0,"",python,selection_command +1074,3529313,"train_tokenizer.py",10301,0,"",python,selection_command +1075,3529372,"train_tokenizer.py",10259,0,"",python,selection_command +1076,3529381,"train_tokenizer.py",10217,0,"",python,selection_command +1077,3529435,"train_tokenizer.py",10191,0,"",python,selection_command +1078,3529436,"train_tokenizer.py",10160,0,"",python,selection_command +1079,3529492,"train_tokenizer.py",10081,0,"",python,selection_command +1080,3529499,"train_tokenizer.py",10056,0,"",python,selection_command +1081,3529558,"train_tokenizer.py",10026,0,"",python,selection_command +1082,3529562,"train_tokenizer.py",10013,0,"",python,selection_command +1083,3529620,"train_tokenizer.py",10026,0,"",python,selection_command +1084,3530121,"train_tokenizer.py",10056,0,"",python,selection_command +1085,3530198,"train_tokenizer.py",10081,0,"",python,selection_command +1086,3530201,"train_tokenizer.py",10160,0,"",python,selection_command +1087,3530205,"train_tokenizer.py",10191,0,"",python,selection_command +1088,3530247,"train_tokenizer.py",10217,0,"",python,selection_command +1089,3530299,"train_tokenizer.py",10259,0,"",python,selection_command +1090,3530299,"train_tokenizer.py",10301,0,"",python,selection_command +1091,3530352,"train_tokenizer.py",10340,0,"",python,selection_command +1092,3530353,"train_tokenizer.py",10366,0,"",python,selection_command +1093,3530413,"train_tokenizer.py",10388,0,"",python,selection_command +1094,3530415,"train_tokenizer.py",10444,0,"",python,selection_command +1095,3530479,"train_tokenizer.py",10493,0,"",python,selection_command +1096,3530479,"train_tokenizer.py",10545,0,"",python,selection_command +1097,3530545,"train_tokenizer.py",10627,0,"",python,selection_command +1098,3530546,"train_tokenizer.py",10682,0,"",python,selection_command +1099,3530602,"train_tokenizer.py",10751,0,"",python,selection_command +1100,3530603,"train_tokenizer.py",10773,0,"",python,selection_command 
+1101,3530662,"train_tokenizer.py",10853,0,"",python,selection_command +1102,3530678,"train_tokenizer.py",10931,0,"",python,selection_command +1103,3530688,"train_tokenizer.py",11008,0,"",python,selection_command +1104,3530748,"train_tokenizer.py",11057,0,"",python,selection_command +1105,3530749,"train_tokenizer.py",11100,0,"",python,selection_command +1106,3530812,"train_tokenizer.py",11170,0,"",python,selection_command +1107,3530813,"train_tokenizer.py",11243,0,"",python,selection_command +1108,3530866,"train_tokenizer.py",11298,0,"",python,selection_command +1109,3530866,"train_tokenizer.py",11374,0,"",python,selection_command +1110,3530932,"train_tokenizer.py",11405,0,"",python,selection_command +1111,3530961,"train_tokenizer.py",11431,0,"",python,selection_command +1112,3531059,"train_tokenizer.py",11405,0,"",python,selection_command +1113,3531563,"train_tokenizer.py",11374,0,"",python,selection_command +1114,3531570,"train_tokenizer.py",11298,0,"",python,selection_command +1115,3531622,"train_tokenizer.py",11243,0,"",python,selection_command +1116,3531634,"train_tokenizer.py",11170,0,"",python,selection_command +1117,3531692,"train_tokenizer.py",11100,0,"",python,selection_command +1118,3531698,"train_tokenizer.py",11057,0,"",python,selection_command +1119,3531762,"train_tokenizer.py",11008,0,"",python,selection_command +1120,3531763,"train_tokenizer.py",10931,0,"",python,selection_command +1121,3531815,"train_tokenizer.py",10853,0,"",python,selection_command +1122,3531816,"train_tokenizer.py",10773,0,"",python,selection_command +1123,3531875,"train_tokenizer.py",10751,0,"",python,selection_command +1124,3531876,"train_tokenizer.py",10682,0,"",python,selection_command +1125,3531929,"train_tokenizer.py",10627,0,"",python,selection_command +1126,3531934,"train_tokenizer.py",10545,0,"",python,selection_command +1127,3532005,"train_tokenizer.py",10493,0,"",python,selection_command +1128,3532006,"train_tokenizer.py",10444,0,"",python,selection_command +1129,3532066,"train_tokenizer.py",10388,0,"",python,selection_command +1130,3532066,"train_tokenizer.py",10366,0,"",python,selection_command +1131,3532086,"train_tokenizer.py",10340,0,"",python,selection_command +1132,3532142,"train_tokenizer.py",10301,0,"",python,selection_command +1133,3532142,"train_tokenizer.py",10259,0,"",python,selection_command +1134,3532202,"train_tokenizer.py",10217,0,"",python,selection_command +1135,3532209,"train_tokenizer.py",10191,0,"",python,selection_command +1136,3532246,"train_tokenizer.py",10160,0,"",python,selection_command +1137,3532257,"train_tokenizer.py",10191,0,"",python,selection_command +1138,3532775,"train_tokenizer.py",10217,0,"",python,selection_command +1139,3532783,"train_tokenizer.py",10259,0,"",python,selection_command +1140,3532842,"train_tokenizer.py",10301,0,"",python,selection_command +1141,3532843,"train_tokenizer.py",10340,0,"",python,selection_command +1142,3532899,"train_tokenizer.py",10366,0,"",python,selection_command +1143,3532906,"train_tokenizer.py",10388,0,"",python,selection_command +1144,3532972,"train_tokenizer.py",10444,0,"",python,selection_command +1145,3532972,"train_tokenizer.py",10493,0,"",python,selection_command +1146,3533028,"train_tokenizer.py",10545,0,"",python,selection_command +1147,3533029,"train_tokenizer.py",10627,0,"",python,selection_command +1148,3533085,"train_tokenizer.py",10682,0,"",python,selection_command +1149,3533086,"train_tokenizer.py",10751,0,"",python,selection_command +1150,3533149,"train_tokenizer.py",10773,0,"",python,selection_command 
+1151,3533150,"train_tokenizer.py",10853,0,"",python,selection_command +1152,3533202,"train_tokenizer.py",10931,0,"",python,selection_command +1153,3533203,"train_tokenizer.py",11008,0,"",python,selection_command +1154,3533252,"train_tokenizer.py",11057,0,"",python,selection_command +1155,3533309,"train_tokenizer.py",11100,0,"",python,selection_command +1156,3533309,"train_tokenizer.py",11170,0,"",python,selection_command +1157,3533344,"train_tokenizer.py",11243,0,"",python,selection_command +1158,3533357,"train_tokenizer.py",11298,0,"",python,selection_command +1159,3533412,"train_tokenizer.py",11374,0,"",python,selection_command +1160,3533418,"train_tokenizer.py",11405,0,"",python,selection_command +1161,3533478,"train_tokenizer.py",11431,0,"",python,selection_command +1162,3533483,"train_tokenizer.py",11477,0,"",python,selection_command +1163,3533561,"train_tokenizer.py",11513,0,"",python,selection_command +1164,3533562,"train_tokenizer.py",11477,0,"",python,selection_command +1165,3534037,"train_tokenizer.py",11431,0,"",python,selection_command +1166,3534096,"train_tokenizer.py",11405,0,"",python,selection_command +1167,3534096,"train_tokenizer.py",11374,0,"",python,selection_command +1168,3534106,"train_tokenizer.py",11298,0,"",python,selection_command +1169,3534159,"train_tokenizer.py",11243,0,"",python,selection_command +1170,3534166,"train_tokenizer.py",11170,0,"",python,selection_command +1171,3534222,"train_tokenizer.py",11100,0,"",python,selection_command +1172,3534231,"train_tokenizer.py",11057,0,"",python,selection_command +1173,3534262,"train_tokenizer.py",11008,0,"",python,selection_command +1174,3534319,"train_tokenizer.py",10931,0,"",python,selection_command +1175,3534320,"train_tokenizer.py",10853,0,"",python,selection_command +1176,3534380,"train_tokenizer.py",10773,0,"",python,selection_command +1177,3534381,"train_tokenizer.py",10751,0,"",python,selection_command +1178,3534417,"train_tokenizer.py",10682,0,"",python,selection_command +1179,3534469,"train_tokenizer.py",10627,0,"",python,selection_command +1180,3534470,"train_tokenizer.py",10545,0,"",python,selection_command +1181,3534525,"train_tokenizer.py",10493,0,"",python,selection_command +1182,3534528,"train_tokenizer.py",10444,0,"",python,selection_command +1183,3534585,"train_tokenizer.py",10388,0,"",python,selection_command +1184,3534588,"train_tokenizer.py",10366,0,"",python,selection_command +1185,3534649,"train_tokenizer.py",10340,0,"",python,selection_command +1186,3534649,"train_tokenizer.py",10301,0,"",python,selection_command +1187,3534709,"train_tokenizer.py",10259,0,"",python,selection_command +1188,3534715,"train_tokenizer.py",10217,0,"",python,selection_command +1189,3534769,"train_tokenizer.py",10191,0,"",python,selection_command +1190,3534770,"train_tokenizer.py",10160,0,"",python,selection_command +1191,3534839,"train_tokenizer.py",10081,0,"",python,selection_command +1192,3534840,"train_tokenizer.py",10056,0,"",python,selection_command +1193,3534871,"train_tokenizer.py",10026,0,"",python,selection_command +1194,3534890,"train_tokenizer.py",10056,0,"",python,selection_command +1195,3535405,"train_tokenizer.py",10081,0,"",python,selection_command +1196,3535411,"train_tokenizer.py",10160,0,"",python,selection_command +1197,3535469,"train_tokenizer.py",10191,0,"",python,selection_command +1198,3535472,"train_tokenizer.py",10217,0,"",python,selection_command +1199,3535529,"train_tokenizer.py",10259,0,"",python,selection_command +1200,3535539,"train_tokenizer.py",10301,0,"",python,selection_command 
+1201,3535596,"train_tokenizer.py",10340,0,"",python,selection_command +1202,3535596,"train_tokenizer.py",10366,0,"",python,selection_command +1203,3535659,"train_tokenizer.py",10388,0,"",python,selection_command +1204,3535680,"train_tokenizer.py",10444,0,"",python,selection_command +1205,3535692,"train_tokenizer.py",10493,0,"",python,selection_command +1206,3535749,"train_tokenizer.py",10545,0,"",python,selection_command +1207,3535751,"train_tokenizer.py",10627,0,"",python,selection_command +1208,3535805,"train_tokenizer.py",10682,0,"",python,selection_command +1209,3535807,"train_tokenizer.py",10751,0,"",python,selection_command +1210,3535865,"train_tokenizer.py",10773,0,"",python,selection_command +1211,3535866,"train_tokenizer.py",10853,0,"",python,selection_command +1212,3535919,"train_tokenizer.py",10931,0,"",python,selection_command +1213,3535923,"train_tokenizer.py",11008,0,"",python,selection_command +1214,3535976,"train_tokenizer.py",11057,0,"",python,selection_command +1215,3535982,"train_tokenizer.py",11100,0,"",python,selection_command +1216,3536042,"train_tokenizer.py",11170,0,"",python,selection_command +1217,3536050,"train_tokenizer.py",11243,0,"",python,selection_command +1218,3536106,"train_tokenizer.py",11298,0,"",python,selection_command +1219,3536123,"train_tokenizer.py",11374,0,"",python,selection_command +1220,3536134,"train_tokenizer.py",11405,0,"",python,selection_command +1221,3536186,"train_tokenizer.py",11431,0,"",python,selection_command +1222,3536190,"train_tokenizer.py",11405,0,"",python,selection_command +1223,3536708,"train_tokenizer.py",11374,0,"",python,selection_command +1224,3536720,"train_tokenizer.py",11298,0,"",python,selection_command +1225,3536775,"train_tokenizer.py",11243,0,"",python,selection_command +1226,3536776,"train_tokenizer.py",11170,0,"",python,selection_command +1227,3536829,"train_tokenizer.py",11100,0,"",python,selection_command +1228,3536836,"train_tokenizer.py",11057,0,"",python,selection_command +1229,3536892,"train_tokenizer.py",11008,0,"",python,selection_command +1230,3536902,"train_tokenizer.py",10931,0,"",python,selection_command +1231,3536955,"train_tokenizer.py",10853,0,"",python,selection_command +1232,3536957,"train_tokenizer.py",10773,0,"",python,selection_command +1233,3537009,"train_tokenizer.py",10751,0,"",python,selection_command +1234,3537018,"train_tokenizer.py",10682,0,"",python,selection_command +1235,3537069,"train_tokenizer.py",10627,0,"",python,selection_command +1236,3537083,"train_tokenizer.py",10545,0,"",python,selection_command +1237,3537156,"train_tokenizer.py",10493,0,"",python,selection_command +1238,3537156,"train_tokenizer.py",10444,0,"",python,selection_command +1239,3537166,"train_tokenizer.py",10388,0,"",python,selection_command +1240,3537229,"train_tokenizer.py",10366,0,"",python,selection_command +1241,3537230,"train_tokenizer.py",10340,0,"",python,selection_command +1242,3537296,"train_tokenizer.py",10301,0,"",python,selection_command +1243,3537296,"train_tokenizer.py",10259,0,"",python,selection_command +1244,3537353,"train_tokenizer.py",10217,0,"",python,selection_command +1245,3537353,"train_tokenizer.py",10191,0,"",python,selection_command +1246,3537409,"train_tokenizer.py",10160,0,"",python,selection_command +1247,3537425,"train_tokenizer.py",10081,0,"",python,selection_command +1248,3537483,"train_tokenizer.py",10056,0,"",python,selection_command +1249,3537484,"train_tokenizer.py",10081,0,"",python,selection_command +1250,3538050,"train_tokenizer.py",10160,0,"",python,selection_command 
+1251,3538050,"train_tokenizer.py",10191,0,"",python,selection_command +1252,3538051,"train_tokenizer.py",10217,0,"",python,selection_command +1253,3538053,"train_tokenizer.py",10259,0,"",python,selection_command +1254,3538122,"train_tokenizer.py",10301,0,"",python,selection_command +1255,3538137,"train_tokenizer.py",10340,0,"",python,selection_command +1256,3538207,"train_tokenizer.py",10366,0,"",python,selection_command +1257,3538207,"train_tokenizer.py",10388,0,"",python,selection_command +1258,3538207,"train_tokenizer.py",10444,0,"",python,selection_command +1259,3538247,"train_tokenizer.py",10493,0,"",python,selection_command +1260,3538300,"train_tokenizer.py",10545,0,"",python,selection_command +1261,3538303,"train_tokenizer.py",10627,0,"",python,selection_command +1262,3538387,"train_tokenizer.py",10682,0,"",python,selection_command +1263,3538389,"train_tokenizer.py",10751,0,"",python,selection_command +1264,3538391,"train_tokenizer.py",10773,0,"",python,selection_command +1265,3538476,"train_tokenizer.py",10853,0,"",python,selection_command +1266,3538476,"train_tokenizer.py",10931,0,"",python,selection_command +1267,3538485,"train_tokenizer.py",11008,0,"",python,selection_command +1268,3538539,"train_tokenizer.py",11057,0,"",python,selection_command +1269,3538539,"train_tokenizer.py",11100,0,"",python,selection_command +1270,3538595,"train_tokenizer.py",11170,0,"",python,selection_command +1271,3538597,"train_tokenizer.py",11243,0,"",python,selection_command +1272,3538662,"train_tokenizer.py",11298,0,"",python,selection_command +1273,3538663,"train_tokenizer.py",11374,0,"",python,selection_command +1274,3538719,"train_tokenizer.py",11405,0,"",python,selection_command +1275,3538723,"train_tokenizer.py",11431,0,"",python,selection_command +1276,3538779,"train_tokenizer.py",11477,0,"",python,selection_command +1277,3538782,"train_tokenizer.py",11513,0,"",python,selection_command +1278,3538856,"train_tokenizer.py",11589,0,"",python,selection_command +1279,3538863,"train_tokenizer.py",11513,0,"",python,selection_command +1280,3539323,"train_tokenizer.py",11477,0,"",python,selection_command +1281,3539375,"train_tokenizer.py",11431,0,"",python,selection_command +1282,3539376,"train_tokenizer.py",11405,0,"",python,selection_command +1283,3539429,"train_tokenizer.py",11374,0,"",python,selection_command +1284,3539436,"train_tokenizer.py",11298,0,"",python,selection_command +1285,3539495,"train_tokenizer.py",11243,0,"",python,selection_command +1286,3539500,"train_tokenizer.py",11170,0,"",python,selection_command +1287,3539564,"train_tokenizer.py",11100,0,"",python,selection_command +1288,3539570,"train_tokenizer.py",11057,0,"",python,selection_command +1289,3539597,"train_tokenizer.py",11008,0,"",python,selection_command +1290,3539662,"train_tokenizer.py",10931,0,"",python,selection_command +1291,3539663,"train_tokenizer.py",10853,0,"",python,selection_command +1292,3539716,"train_tokenizer.py",10773,0,"",python,selection_command +1293,3539716,"train_tokenizer.py",10751,0,"",python,selection_command +1294,3539769,"train_tokenizer.py",10682,0,"",python,selection_command +1295,3539769,"train_tokenizer.py",10627,0,"",python,selection_command +1296,3539829,"train_tokenizer.py",10545,0,"",python,selection_command +1297,3539834,"train_tokenizer.py",10493,0,"",python,selection_command +1298,3539892,"train_tokenizer.py",10444,0,"",python,selection_command +1299,3539893,"train_tokenizer.py",10388,0,"",python,selection_command +1300,3539946,"train_tokenizer.py",10366,0,"",python,selection_command 
+1301,3539946,"train_tokenizer.py",10340,0,"",python,selection_command +1302,3539999,"train_tokenizer.py",10301,0,"",python,selection_command +1303,3540006,"train_tokenizer.py",10259,0,"",python,selection_command +1304,3540059,"train_tokenizer.py",10217,0,"",python,selection_command +1305,3540066,"train_tokenizer.py",10191,0,"",python,selection_command +1306,3540119,"train_tokenizer.py",10160,0,"",python,selection_command +1307,3540134,"train_tokenizer.py",10081,0,"",python,selection_command +1308,3540189,"train_tokenizer.py",10056,0,"",python,selection_command +1309,3540197,"train_tokenizer.py",10026,0,"",python,selection_command +1310,3540237,"train_tokenizer.py",10013,0,"",python,selection_command +1311,3540249,"train_tokenizer.py",10026,0,"",python,selection_command +1312,3540748,"train_tokenizer.py",10056,0,"",python,selection_command +1313,3540768,"train_tokenizer.py",10081,0,"",python,selection_command +1314,3540826,"train_tokenizer.py",10160,0,"",python,selection_command +1315,3540826,"train_tokenizer.py",10191,0,"",python,selection_command +1316,3540882,"train_tokenizer.py",10217,0,"",python,selection_command +1317,3540883,"train_tokenizer.py",10259,0,"",python,selection_command +1318,3540945,"train_tokenizer.py",10301,0,"",python,selection_command +1319,3540946,"train_tokenizer.py",10340,0,"",python,selection_command +1320,3541004,"train_tokenizer.py",10366,0,"",python,selection_command +1321,3541025,"train_tokenizer.py",10388,0,"",python,selection_command +1322,3541096,"train_tokenizer.py",10444,0,"",python,selection_command +1323,3541096,"train_tokenizer.py",10493,0,"",python,selection_command +1324,3541101,"train_tokenizer.py",10545,0,"",python,selection_command +1325,3541152,"train_tokenizer.py",10627,0,"",python,selection_command +1326,3541153,"train_tokenizer.py",10682,0,"",python,selection_command +1327,3541209,"train_tokenizer.py",10751,0,"",python,selection_command +1328,3541221,"train_tokenizer.py",10773,0,"",python,selection_command +1329,3541292,"train_tokenizer.py",10853,0,"",python,selection_command +1330,3541299,"train_tokenizer.py",10931,0,"",python,selection_command +1331,3541307,"train_tokenizer.py",11008,0,"",python,selection_command +1332,3541366,"train_tokenizer.py",11057,0,"",python,selection_command +1333,3541380,"train_tokenizer.py",11100,0,"",python,selection_command +1334,3541444,"train_tokenizer.py",11170,0,"",python,selection_command +1335,3541445,"train_tokenizer.py",11243,0,"",python,selection_command +1336,3541458,"train_tokenizer.py",11298,0,"",python,selection_command +1337,3541519,"train_tokenizer.py",11374,0,"",python,selection_command +1338,3541521,"train_tokenizer.py",11405,0,"",python,selection_command +1339,3541553,"train_tokenizer.py",11431,0,"",python,selection_command +1340,3541606,"train_tokenizer.py",11477,0,"",python,selection_command +1341,3541606,"train_tokenizer.py",11513,0,"",python,selection_command +1342,3541642,"train_tokenizer.py",11589,0,"",python,selection_command +1343,3541692,"train_tokenizer.py",11630,0,"",python,selection_command +1344,3541751,"train_tokenizer.py",11589,0,"",python,selection_command +1345,3550047,"train_tokenizer.py",11432,0,"",python,selection_mouse +1346,3551455,"train_tokenizer.py",11431,0,"",python,selection_mouse +1347,3551798,"train_tokenizer.py",11405,0,"",python,selection_command +1348,3552030,"train_tokenizer.py",11374,0,"",python,selection_command +1349,3552530,"train_tokenizer.py",11298,0,"",python,selection_command +1350,3552533,"train_tokenizer.py",11243,0,"",python,selection_command 
+1351,3552586,"train_tokenizer.py",11170,0,"",python,selection_command +1352,3552601,"train_tokenizer.py",11100,0,"",python,selection_command +1353,3552656,"train_tokenizer.py",11057,0,"",python,selection_command +1354,3552670,"train_tokenizer.py",11008,0,"",python,selection_command +1355,3552723,"train_tokenizer.py",10931,0,"",python,selection_command +1356,3552724,"train_tokenizer.py",10853,0,"",python,selection_command +1357,3552776,"train_tokenizer.py",10773,0,"",python,selection_command +1358,3552777,"train_tokenizer.py",10751,0,"",python,selection_command +1359,3552813,"train_tokenizer.py",10682,0,"",python,selection_command +1360,3552880,"train_tokenizer.py",10627,0,"",python,selection_command +1361,3552881,"train_tokenizer.py",10545,0,"",python,selection_command +1362,3552901,"train_tokenizer.py",10493,0,"",python,selection_command +1363,3552960,"train_tokenizer.py",10444,0,"",python,selection_command +1364,3552960,"train_tokenizer.py",10388,0,"",python,selection_command +1365,3553017,"train_tokenizer.py",10366,0,"",python,selection_command +1366,3553018,"train_tokenizer.py",10340,0,"",python,selection_command +1367,3553052,"train_tokenizer.py",10301,0,"",python,selection_command +1368,3553106,"train_tokenizer.py",10259,0,"",python,selection_command +1369,3553117,"train_tokenizer.py",10217,0,"",python,selection_command +1370,3553173,"train_tokenizer.py",10191,0,"",python,selection_command +1371,3553335,"train_tokenizer.py",10160,0,"",python,selection_command +1372,3553502,"train_tokenizer.py",10081,0,"",python,selection_command +1373,3559258,"train_dynamics.py",0,0,"",python,tab +1374,3559259,"train_dynamics.py",51,0,"",python,selection_command +1375,3563623,"train_tokenizer.py",0,0,"",python,tab +1376,3563629,"train_tokenizer.py",51,0,"",python,selection_command +1377,3567307,"train_lam.py",0,0,"",python,tab +1378,3567308,"train_lam.py",51,0,"",python,selection_command +1379,3570787,"train_lam.py",164,0,"",python,selection_mouse +1380,3571256,"train_lam.py",93,0,"",python,selection_mouse +1381,3571266,"train_lam.py",92,0,"",python,selection_command +1382,3574880,"train_lam.py",120,0,"",python,selection_command +1383,3576254,"train_lam.py",94,38,"",python,content +1384,3578344,"train_tokenizer.py",0,0,"",python,tab +1385,3579316,"train_tokenizer.py",119,0,"",python,selection_mouse +1386,3579681,"train_tokenizer.py",94,38,"",python,content +1387,3581712,"train_dynamics.py",0,0,"",python,tab +1388,3581713,"train_dynamics.py",51,0,"",python,selection_command +1389,3583124,"train_dynamics.py",335,0,"",python,selection_mouse +1390,3583134,"train_dynamics.py",334,0,"",python,selection_command +1391,3590277,"train_tokenizer.py",0,0,"",python,tab +1392,3590281,"train_tokenizer.py",51,0,"",python,selection_command +1393,3609303,"TERMINAL",0,0,"git status",,terminal_command +1394,3609314,"TERMINAL",0,0,"]633;E;2025-07-16 12:03:25 git status;f5ee9b33-fe39-4ef6-94c4-a8ea744acfe0]633;COn branch grad-norm-log-and-clip\r\nChanges to be committed:\r\n (use ""git restore --staged ..."" to unstage)\r\n\tmodified: train_dynamics.py\r\n\tmodified: train_lam.py\r\n\tmodified: train_tokenizer.py\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be 
committed)\r\n\tlocal-logs/\r\n\tlogs/\r\n\tread_tf_record.py\r\n\trequirements-franz.txt\r\n\tsample.py_bak\r\n\tscripts_cremers/\r\n\tscripts_horeka/\r\n\tslurm-3309772.out\r\n\tslurm/\r\n\ttrain_tokenizer_bak.py\r\n\tutils/logger_bak.py\r\n\tutils/visualizer.py\r\n\tweekend-job-requeuer.sh\r\n\tweekend-job-starter.sh\r\n\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar",,terminal_output +1395,3611152,"TERMINAL",0,0,"bash",,terminal_focus +1396,3612775,"TERMINAL",0,0,"git status",,terminal_command +1397,3612803,"TERMINAL",0,0,"]633;E;2025-07-16 12:03:29 git status;dea9d5fc-91fd-447c-886d-4b0240ae057d]633;COn branch grad-norm-log-and-clip\r\nChanges to be committed:\r\n (use ""git restore --staged ..."" to unstage)\r\n\tmodified: train_dynamics.py\r\n\tmodified: train_lam.py\r\n\tmodified: train_tokenizer.py\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tlocal-logs/\r\n\tlogs/\r\n\tread_tf_record.py\r\n\trequirements-franz.txt\r\n\tsample.py_bak\r\n\tscripts_cremers/\r\n\tscripts_horeka/\r\n\tslurm-3309772.out\r\n\tslurm/\r\n\ttrain_tokenizer_bak.py\r\n\tutils/logger_bak.py\r\n\tutils/visualizer.py\r\n\tweekend-job-requeuer.sh\r\n\tweekend-job-starter.sh\r\n\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +1398,3630688,"TERMINAL",0,0,"git commit -m ""added grad norm logging and grad clipping""",,terminal_command +1399,3630724,"TERMINAL",0,0,"]633;E;2025-07-16 12:03:47 git commit -m ""added grad norm logging and grad clipping"";dea9d5fc-91fd-447c-886d-4b0240ae057d]633;C",,terminal_output +1400,3630817,"TERMINAL",0,0,"[grad-norm-log-and-clip 0053b7b] added grad norm logging and grad clipping\r\n 3 files changed, 38 insertions(+), 5 deletions(-)\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar",,terminal_output +1401,3632820,"TERMINAL",0,0,"git push",,terminal_command +1402,3632834,"TERMINAL",0,0,"]633;E;2025-07-16 12:03:49 git push;dea9d5fc-91fd-447c-886d-4b0240ae057d]633;Cfatal: The current branch grad-norm-log-and-clip has no upstream branch.\r\nTo push the current branch and set the remote as upstream, use\r\n\r\n git push --set-upstream origin grad-norm-log-and-clip\r\n\r\nTo have this happen automatically for branches without a tracking\r\nupstream, see 'push.autoSetupRemote' in 'git help config'.\r\n\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;128",,terminal_output +1403,3636959,"TERMINAL",0,0,"git push --set-upstream origin grad-norm-log-and-clip",,terminal_command +1404,3637011,"TERMINAL",0,0,"]633;E;2025-07-16 12:03:53 git push --set-upstream origin grad-norm-log-and-clip;dea9d5fc-91fd-447c-886d-4b0240ae057d]633;C",,terminal_output +1405,3638314,"TERMINAL",0,0,"Enumerating objects: 9, done.\r\nCounting objects: 11% (1/9)\rCounting objects: 22% (2/9)\rCounting objects: 33% (3/9)\rCounting objects: 44% (4/9)\rCounting objects: 55% (5/9)\rCounting objects: 66% (6/9)\rCounting objects: 77% (7/9)\rCounting objects: 88% (8/9)\rCounting objects: 100% (9/9)\rCounting objects: 100% (9/9), done.\r\nDelta compression using up to 152 threads\r\nCompressing objects: 20% (1/5)\rCompressing objects: 40% (2/5)\rCompressing objects: 60% (3/5)\rCompressing objects: 80% (4/5)\rCompressing objects: 100% (5/5)\rCompressing objects: 100% (5/5), done.\r\nWriting objects: 20% (1/5)\rWriting objects: 40% (2/5)\rWriting objects: 60% (3/5)\rWriting objects: 80% (4/5)\rWriting objects: 100% (5/5)\rWriting 
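The commit above ("added grad norm logging and grad clipping") pairs two pieces that appear in the recorded train_step functions: the optimizer clips via optax.chain, while the logged metric re-applies the same threshold manually so the reported norm matches what the update actually used. A minimal sketch, with an illustrative threshold:

    import jax.numpy as jnp
    import optax

    grad_clip_threshold = 1.0  # illustrative; the scripts default to None (no clipping)

    tx = optax.chain(
        optax.clip_by_global_norm(grad_clip_threshold),
        optax.adamw(learning_rate=3e-4, b1=0.9, b2=0.9, weight_decay=1e-4),
    )

    def grad_norm_metric(grads):
        # mirror the clip inside the chain so the logged value reflects the applied update
        raw_grad_norm = optax.global_norm(grads)
        return jnp.minimum(raw_grad_norm, grad_clip_threshold)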
objects: 100% (5/5), 1.31 KiB | 1.31 MiB/s, done.\r\nTotal 5 (delta 4), reused 0 (delta 0), pack-reused 0\r\n",,terminal_output +1406,3638422,"TERMINAL",0,0,"remote: Resolving deltas: 0% (0/4)\rremote: Resolving deltas: 25% (1/4)\rremote: Resolving deltas: 50% (2/4)\rremote: Resolving deltas: 75% (3/4)\rremote: Resolving deltas: 100% (4/4)\rremote: Resolving deltas: 100% (4/4), completed with 4 local objects.\r\n",,terminal_output +1407,3638703,"TERMINAL",0,0,"remote: \r\nremote: Create a pull request for 'grad-norm-log-and-clip' on GitHub by visiting:\r\nremote: https://github.com/p-doom/jafar/pull/new/grad-norm-log-and-clip\r\nremote: \r\nTo github.com:p-doom/jafar.git\r\n * [new branch] grad-norm-log-and-clip -> grad-norm-log-and-clip\r\nbranch 'grad-norm-log-and-clip' set up to track 'origin/grad-norm-log-and-clip'.\r\n",,terminal_output +1408,3638733,"TERMINAL",0,0,"]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +1409,3709443,"TERMINAL",0,0,"git merge main",,terminal_command +1410,3709454,"TERMINAL",0,0,"]633;E;2025-07-16 12:05:06 git merge main;dea9d5fc-91fd-447c-886d-4b0240ae057d]633;CAlready up to date.\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +1411,3713307,"TERMINAL",0,0,"git checkout main",,terminal_command +1412,3713343,"TERMINAL",0,0,"]633;E;2025-07-16 12:05:09 git checkout main;dea9d5fc-91fd-447c-886d-4b0240ae057d]633;CSwitched to branch 'main'\r\nYour branch is up to date with 'origin/main'.\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +1413,3714672,"TERMINAL",0,0,"git pull",,terminal_command +1414,3714747,"TERMINAL",0,0,"]633;E;2025-07-16 12:05:11 git pull;dea9d5fc-91fd-447c-886d-4b0240ae057d]633;C",,terminal_output +1415,3715705,"",0,0,"Switched from branch 'grad-norm-log-and-clip' to 'main'",,git_branch_checkout +1416,3716470,"TERMINAL",0,0,"remote: Enumerating objects: 74, done.\r\nremote: Counting objects: 1% (1/64)\rremote: Counting objects: 3% (2/64)\rremote: Counting objects: 4% (3/64)\rremote: Counting objects: 6% (4/64)\rremote: Counting objects: 7% (5/64)\rremote: Counting objects: 9% (6/64)\rremote: Counting objects: 10% (7/64)\rremote: Counting objects: 12% (8/64)\rremote: Counting objects: 14% (9/64)\rremote: Counting objects: 15% (10/64)\rremote: Counting objects: 17% (11/64)\rremote: Counting objects: 18% (12/64)\rremote: Counting objects: 20% (13/64)\rremote: Counting objects: 21% (14/64)\rremote: Counting objects: 23% (15/64)\rremote: Counting objects: 25% (16/64)\rremote: Counting objects: 26% (17/64)\rremote: Counting objects: 28% (18/64)\rremote: Counting objects: 29% (19/64)\rremote: Counting objects: 31% (20/64)\rremote: Counting objects: 32% (21/64)\rremote: Counting objects: 34% (22/64)\rremote: Counting objects: 35% (23/64)\rremote: Counting objects: 37% (24/64)\rremote: Counting objects: 39% (25/64)\rremote: Counting objects: 40% (26/64)\rremote: Counting objects: 42% (27/64)\rremote: Counting objects: 43% (28/64)\rremote: Counting objects: 45% (29/64)\rremote: Counting objects: 46% (30/64)\rremote: Counting objects: 48% (31/64)\rremote: Counting objects: 50% (32/64)\rremote: Counting objects: 51% (33/64)\rremote: Counting objects: 53% (34/64)\rremote: Counting objects: 54% (35/64)\rremote: Counting objects: 56% (36/64)\rremote: Counting objects: 57% (37/64)\rremote: Counting objects: 59% (38/64)\rremote: Counting objects: 60% (39/64)\rremote: Counting objects: 62% (40/64)\rremote: Counting objects: 64% (41/64)\rremote: Counting objects: 65% (42/64)\rremote: Counting objects: 67% 
(43/64)\rremote: Counting objects: 68% (44/64)\rremote: Counting objects: 70% (45/64)\rremote: Counting objects: 71% (46/64)\rremote: Counting objects: 73% (47/64)\rremote: Counting objects: 75% (48/64)\rremote: Counting objects: 76% (49/64)\rremote: Counting objects: 78% (50/64)\rremote: Counting objects: 79% (51/64)\rremote: Counting objects: 81% (52/64)\rremote: Counting objects: 82% (53/64)\rremote: Counting objects: 84% (54/64)\rremote: Counting objects: 85% (55/64)\rremote: Counting objects: 87% (56/64)\rremote: Counting objects: 89% (57/64)\rremote: Counting objects: 90% (58/64)\rremote: Counting objects: 92% (59/64)\rremote: Counting objects: 93% (60/64)\rremote: Counting objects: 95% (61/64)\rremote: Counting objects: 96% (62/64)\rremote: Counting objects: 98% (63/64)\rremote: Counting objects: 100% (64/64)\rremote: Counting objects: 100% (64/64), done.\r\nremote: Compressing objects: 3% (1/26)\rremote: Compressing objects: 7% (2/26)\rremote: Compressing objects: 11% (3/26)\rremote: Compressing objects: 15% (4/26)\rremote: Compressing objects: 19% (5/26)\rremote: Compressing objects: 23% (6/26)\rremote: Compressing objects: 26% (7/26)\rremote: Compressing objects: 30% (8/26)\rremote: Compressing objects: 34% (9/26)\rremote: Compressing objects: 38% (10/26)\rremote: Compressing objects: 42% (11/26)\rremote: Compressing objects: 46% (12/26)\rremote: Compressing objects: 50% (13/26)\rremote: Compressing objects: 53% (14/26)\rremote: Compressing objects: 57% (15/26)\rremote: Compressing objects: 61% (16/26)\rremote: Compressing objects: 65% (17/26)\rremote: Compressing objects: 69% (18/26)\rremote: Compressing objects: 73% (19/26)\rremote: Compressing objects: 76% (20/26)\rremote: Compressing objects: 80% (21/26)\rremote: Compressing objects: 84% (22/26)\rremote: Compressing objects: 88% (23/26)\rremote: Compressing objects: 92% (24/26)\rremote: Compressing objects: 96% (25/26)\rremote: Compressing objects: 100% (26/26)\rremote: Compressing objects: 100% (26/26), done.\r\nUnpacking objects: 1% (1/74)\rUnpacking objects: 2% (2/74)\r",,terminal_output +1417,3716573,"TERMINAL",0,0,"Unpacking objects: 4% (3/74)\rUnpacking objects: 5% (4/74)\rUnpacking objects: 6% (5/74)\rUnpacking objects: 8% (6/74)\rremote: Total 74 (delta 49), reused 49 (delta 38), pack-reused 10 (from 1)\r\nUnpacking objects: 9% (7/74)\rUnpacking objects: 10% (8/74)\rUnpacking objects: 12% (9/74)\r",,terminal_output +1418,3716757,"TERMINAL",0,0,"Unpacking objects: 13% (10/74)\rUnpacking objects: 14% (11/74)\rUnpacking objects: 16% (12/74)\rUnpacking objects: 17% (13/74)\rUnpacking objects: 18% (14/74)\rUnpacking objects: 20% (15/74)\rUnpacking objects: 21% (16/74)\rUnpacking objects: 22% (17/74)\rUnpacking objects: 24% (18/74)\rUnpacking objects: 25% (19/74)\rUnpacking objects: 27% (20/74)\rUnpacking objects: 28% (21/74)\r",,terminal_output +1419,3716986,"TERMINAL",0,0,"Unpacking objects: 29% (22/74)\rUnpacking objects: 31% (23/74)\rUnpacking objects: 32% (24/74)\rUnpacking objects: 33% (25/74)\rUnpacking objects: 35% (26/74)\rUnpacking objects: 36% (27/74)\rUnpacking objects: 37% (28/74)\rUnpacking objects: 39% (29/74)\rUnpacking objects: 40% (30/74)\rUnpacking objects: 41% (31/74)\rUnpacking objects: 43% (32/74)\rUnpacking objects: 44% (33/74)\rUnpacking objects: 45% (34/74)\rUnpacking objects: 47% (35/74)\rUnpacking objects: 48% (36/74)\rUnpacking objects: 50% (37/74)\rUnpacking objects: 51% (38/74)\r",,terminal_output +1420,3717048,"TERMINAL",0,0,"Unpacking objects: 52% (39/74)\rUnpacking objects: 54% (40/74), 
24.92 KiB | 48.00 KiB/s\rUnpacking objects: 55% (41/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 56% (42/74), 24.92 KiB | 48.00 KiB/s\r",,terminal_output +1421,3717289,"TERMINAL",0,0,"Unpacking objects: 58% (43/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 59% (44/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 60% (45/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 62% (46/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 63% (47/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 64% (48/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 66% (49/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 67% (50/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 68% (51/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 70% (52/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 71% (53/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 72% (54/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 74% (55/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 75% (56/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 77% (57/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 78% (58/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 79% (59/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 81% (60/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 82% (61/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 83% (62/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 85% (63/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 86% (64/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 87% (65/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 89% (66/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 90% (67/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 91% (68/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 93% (69/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 94% (70/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 95% (71/74), 24.92 KiB | 48.00 KiB/s\r",,terminal_output +1422,3717388,"TERMINAL",0,0,"Unpacking objects: 97% (72/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 98% (73/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 100% (74/74), 24.92 KiB | 48.00 KiB/s\rUnpacking objects: 100% (74/74), 35.45 KiB | 40.00 KiB/s, done.\r\n",,terminal_output +1423,3717541,"TERMINAL",0,0,"From github.com:p-doom/jafar\r\n 101989c..86041a3 main -> origin/main\r\n * [new branch] cudnn-flash-attn-mixed-precision -> origin/cudnn-flash-attn-mixed-precision\r\n * [new branch] cudnn-flash-attn-mixed-precision-2 -> origin/cudnn-flash-attn-mixed-precision-2\r\n * [new branch] cudnn-flash-attn-mixed-precision-3 -> origin/cudnn-flash-attn-mixed-precision-3\r\n * [new branch] cudnn-flash-attn-mixed-precision-4 -> origin/cudnn-flash-attn-mixed-precision-4\r\n * [new branch] hotfix/wandb-resume-run -> origin/hotfix/wandb-resume-run\r\n 7fa456d..cc47337 lr-schedules -> origin/lr-schedules\r\n ac1f4e2..87bb6a5 mixed-precision-training -> origin/mixed-precision-training\r\n",,terminal_output +1424,3717663,"TERMINAL",0,0,"Updating 101989c..86041a3\r\n",,terminal_output +1425,3717791,"TERMINAL",0,0,"Fast-forward\r\n genie.py | 13 +++++++++++++\r\n models/dynamics.py | 10 +++++++++-\r\n models/lam.py | 20 ++++++++++++++++++--\r\n models/tokenizer.py | 11 +++++++++++\r\n sample.py | 5 +++++\r\n train_dynamics.py | 50 +++++++++++++++++++++++++++++++++++++++++---------\r\n train_lam.py | 29 ++++++++++++++++++++++-------\r\n train_tokenizer.py | 32 +++++++++++++++++++++++++-------\r\n utils/lr_utils.py | 24 ++++++++++++++++++++++++\r\n utils/nn.py | 53 +++++++++++++++++++++++++++++++++++++++++++++--------\r\n utils/parameter_utils.py | 8 ++++----\r\n 11 files 
changed, 217 insertions(+), 38 deletions(-)\r\n create mode 100644 utils/lr_utils.py\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +1426,3721243,"TERMINAL",0,0,"git branch",,terminal_command +1427,3721282,"TERMINAL",0,0,"]633;E;2025-07-16 12:05:17 git branch;dea9d5fc-91fd-447c-886d-4b0240ae057d]633;C[?1h=\r add-wandb-name-and-tags\r\n convert-to-jax-array-in-iter\r\n correct-batched-sampling\r\n dev\r\n dont-let-tf-see-gpu\r\n feat/explicit-image-dims\r\n fix-sampling\r\n grad-norm-log-and-clip\r\n grain-dataloader\r\n logging-variants\r\n lr-schedules\r\n* main\r\n metrics-logging-for-dynamics-model\r\n monkey-patch\r\n preprocess_video\r\n revised-dataloader\r\n runner\r\n runner-grain\r\n sample-from-different-topologies\r\n speedup-tfrecord-preprocessing\r\n tmp\r\n\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +1428,3730800,"TERMINAL",0,0,"git checkout grad-norm-log-and-clip",,terminal_command +1429,3730888,"TERMINAL",0,0,"]633;E;2025-07-16 12:05:27 git checkout grad-norm-log-and-clip;dea9d5fc-91fd-447c-886d-4b0240ae057d]633;CSwitched to branch 'grad-norm-log-and-clip'\r\nYour branch is up to date with 'origin/grad-norm-log-and-clip'.\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +1430,3733326,"TERMINAL",0,0,"git merge main",,terminal_command +1431,3733406,"TERMINAL",0,0,"]633;E;2025-07-16 12:05:29 git merge main;dea9d5fc-91fd-447c-886d-4b0240ae057d]633;CAuto-merging train_dynamics.py\r\nCONFLICT (content): Merge conflict in train_dynamics.py\r\nAuto-merging train_lam.py\r\nCONFLICT (content): Merge conflict in train_lam.py\r\nAuto-merging train_tokenizer.py\r\nCONFLICT (content): Merge conflict in train_tokenizer.py\r\nAutomatic merge failed; fix conflicts and then commit the result.\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;1",,terminal_output +1432,3735703,"",0,0,"Switched from branch 'main' to 'grad-norm-log-and-clip'",,git_branch_checkout +1433,3736589,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import Optional\n\nimport einops\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n warmup_steps: int = 5000\n<<<<<<< HEAD\n grad_clip_threshold: Optional[float] = None\n=======\n lr_schedule : str = ""wsd"" # supported options: wsd, cos\n>>>>>>> main\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n latent_action_dim: 
int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n raw_grad_norm = optax.global_norm(grads)\n metrics[""grad_norm""] = jnp.minimum(raw_grad_norm, args.grad_clip_threshold) if args.grad_clip_threshold else raw_grad_norm\n \n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % 
num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=args.dtype\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n<<<<<<< HEAD\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )\n else:\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n=======\n lr_schedule = get_lr_schedule(args.lr_schedule, \n args.init_lr, \n args.max_lr, \n args.decay_end, \n args.num_steps, \n args.warmup_steps, \n args.wsd_decay_steps)\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4, mu_dtype=args.dtype)\n>>>>>>> main\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, 
ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n # Restore full dynamics model\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n train_state = restore_genie_components(\n train_state, replicated_sharding, grain_iterator, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n 
model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +1434,3741033,"train_dynamics.py",0,0,"",python,tab +1435,3764512,"train_dynamics.py",1146,0," lr_schedule : str = ""wsd"" # supported options: wsd, cos\n grad_clip_threshold: Optional[float] = None\n",python,content +1436,3777826,"train_dynamics.py",7778,0,"",python,selection_command +1437,3798280,"train_dynamics.py",7774,211," lr_schedule = get_lr_schedule(args.lr_schedule, \n args.init_lr, \n args.max_lr, \n args.decay_end, \n args.num_steps, \n args.warmup_steps, \n args.wsd_decay_steps)\n if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )\n else:\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4, mu_dtype=args.dtype)\n",python,content +1438,3807005,"train_dynamics.py",8453,0,"",python,selection_mouse +1439,3807210,"train_dynamics.py",8453,2,"mu",python,selection_mouse +1440,3807244,"train_dynamics.py",8453,4,"mu_d",python,selection_mouse +1441,3807264,"train_dynamics.py",8453,5,"mu_dt",python,selection_mouse +1442,3807275,"train_dynamics.py",8453,6,"mu_dty",python,selection_mouse +1443,3807289,"train_dynamics.py",8453,7,"mu_dtyp",python,selection_mouse +1444,3807318,"train_dynamics.py",8453,8,"mu_dtype",python,selection_mouse +1445,3807319,"train_dynamics.py",8453,9,"mu_dtype=",python,selection_mouse +1446,3807370,"train_dynamics.py",8453,10,"mu_dtype=a",python,selection_mouse +1447,3807371,"train_dynamics.py",8453,11,"mu_dtype=ar",python,selection_mouse +1448,3807372,"train_dynamics.py",8453,12,"mu_dtype=arg",python,selection_mouse +1449,3807403,"train_dynamics.py",8453,13,"mu_dtype=args",python,selection_mouse +1450,3807424,"train_dynamics.py",8453,14,"mu_dtype=args.",python,selection_mouse +1451,3807480,"train_dynamics.py",8453,15,"mu_dtype=args.d",python,selection_mouse +1452,3807544,"train_dynamics.py",8453,16,"mu_dtype=args.dt",python,selection_mouse +1453,3807600,"train_dynamics.py",8453,17,"mu_dtype=args.dty",python,selection_mouse +1454,3807745,"train_dynamics.py",8453,18,"mu_dtype=args.dtyp",python,selection_mouse +1455,3808101,"train_dynamics.py",8453,19,"mu_dtype=args.dtype",python,selection_mouse +1456,3809259,"train_dynamics.py",8453,0,"",python,selection_command +1457,3810604,"train_dynamics.py",8344,0,"",python,selection_mouse +1458,3811886,"train_dynamics.py",8344,0,",",python,content +1459,3811888,"train_dynamics.py",8345,0,"",python,selection_keyboard +1460,3811940,"train_dynamics.py",8345,0," ",python,content +1461,3811942,"train_dynamics.py",8346,0,"",python,selection_keyboard +1462,3812487,"train_dynamics.py",8346,0,"grad-norm-log-and-clip",python,content +1463,3814105,"train_dynamics.py",8346,22,"",python,content +1464,3814743,"train_dynamics.py",8345,0,"",python,selection_command +1465,3814839,"train_dynamics.py",8346,0,"mu_dtype=args.dtype",python,content +1466,3814848,"train_dynamics.py",8364,0,"",python,selection_command +1467,3828765,"train_dynamics.py",0,0,"",python,tab +1468,3830857,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import Optional\n\nimport einops\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom 
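    # Editorial sketch of the merge-conflict resolution recorded above
    # (sequences 1435-1465): keep main's schedule factory and mu_dtype, and
    # layer the feature branch's optional clipping on top. Factoring adamw
    # into one expression is an assumed cleanup; the recorded edit spells it
    # out in both branches. The function below is a stand-in, not the script's.
    import jax.numpy as jnp
    import optax

    def make_tx(lr_schedule, grad_clip_threshold, dtype=jnp.bfloat16):
        adamw = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9,
                            weight_decay=1e-4, mu_dtype=dtype)
        if grad_clip_threshold:
            return optax.chain(optax.clip_by_global_norm(grad_clip_threshold), adamw)
        return adamw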
jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n init_lr: float = 0.0\n max_lr: float = 3e-4\n decay_end: float = 0.0\n wsd_decay_steps: int = 20000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n lr_schedule: str = ""wsd"" # supported options: wsd, cos \n warmup_steps: int = 10000\n grad_clip_threshold: Optional[float] = None\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n # FIXME (f.srambical): Can we even do native int8 training without casting the video at all?\n # FIXME (f.srambical): If the tokenizer is the reason for the dynamics model being memory-bound,\n # should we at least train the tokenizer natively in int8?\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n # extract and manually clip grad norm for logging (actual clipping is done in the 
optax.chain)\n raw_grad_norm = optax.global_norm(grads)\n metrics[""grad_norm""] = jnp.minimum(raw_grad_norm, args.grad_clip_threshold) if args.grad_clip_threshold else raw_grad_norm\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n<<<<<<< HEAD\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )\n else:\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n=======\n lr_schedule = get_lr_schedule(args.lr_schedule, \n args.init_lr, \n args.max_lr, \n args.decay_end, \n args.num_steps, \n args.warmup_steps, \n args.wsd_decay_steps)\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4, mu_dtype=args.dtype)\n>>>>>>> main\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, 
replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n 
true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +1469,3837691,"train_dynamics.py",0,0,"",python,tab +1470,3842558,"train_lam.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import Optional\n\nimport einops\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n vq_beta: float = 0.25\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n warmup_steps: int = 5000\n lr_schedule : str = ""wsd"" # supported options: wsd, cos\n vq_reset_thresh: int = 50\n grad_clip_threshold: Optional[float] = None\n # LAM\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 6\n patch_size: int = 16\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.0\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_lam""\n tags: list[str] = field(default_factory=lambda: [""lam""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_checkpoint_keep_period: int = 20000\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef lam_loss_fn(params, state, inputs):\n # --- Compute loss ---\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n gt_future_frames = inputs[""videos""][:, 1:]\n mse = jnp.square(gt_future_frames - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = gt_future_frames.clip(0, 1).reshape(-1, *gt_future_frames.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n count_fn = jax.vmap(lambda i: (outputs[""indices""] == i).sum())\n index_counts = 
count_fn(jnp.arange(args.num_latents))\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=(index_counts != 0).mean(),\n )\n return loss, (outputs[""recon""], index_counts, metrics)\n\n\n@jax.jit\ndef train_step(state, inputs, action_last_active):\n # --- Update model ---\n rng, inputs[""rng""] = jax.random.split(inputs[""rng""])\n grad_fn = jax.value_and_grad(lam_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, idx_counts, metrics)), grads = grad_fn(state.params, state, inputs)\n # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n raw_grad_norm = optax.global_norm(grads)\n metrics[""grad_norm""] = jnp.minimum(raw_grad_norm, args.grad_clip_threshold) if args.grad_clip_threshold else raw_grad_norm\n\n state = state.apply_gradients(grads=grads)\n\n # --- Reset inactive latent actions ---\n codebook = state.params[""params""][""vq""][""codebook""]\n num_codes = len(codebook)\n active_codes = idx_counts != 0.0\n action_last_active = jnp.where(active_codes, 0, action_last_active + 1)\n p_code = active_codes / active_codes.sum()\n reset_idxs = jax.random.choice(rng, num_codes, shape=(num_codes,), p=p_code)\n do_reset = action_last_active >= args.vq_reset_thresh\n new_codebook = jnp.where(\n jnp.expand_dims(do_reset, -1), codebook[reset_idxs], codebook\n )\n state.params[""params""][""vq""][""codebook""] = new_codebook\n action_last_active = jnp.where(do_reset, 0, action_last_active)\n return state, loss, recon, action_last_active, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n )\n # Track when each action was last sampled\n action_last_active = jnp.zeros(args.num_latents)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n rng, _rng = jax.random.split(rng)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = lam.init(_rng, inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n<<<<<<< HEAD\n lr_schedule = optax.warmup_cosine_decay_schedule(\n 
args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )\n else:\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n=======\n lr_schedule = get_lr_schedule(args.lr_schedule, \n args.init_lr, \n args.max_lr, \n args.decay_end, \n args.num_steps, \n args.warmup_steps, \n args.wsd_decay_steps)\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4, mu_dtype=args.dtype)\n>>>>>>> main\n train_state = TrainState.create(apply_fn=lam.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n action_last_active = jax.device_put(action_last_active, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: 
ignore\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n inputs = dict(videos=videos, rng=_rng)\n train_state, loss, recon, action_last_active, metrics = train_step(\n train_state, inputs, action_last_active\n )\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0][1:]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +1471,3844453,"train_lam.py",0,0,"",python,tab +1472,3848773,"train_lam.py",6850,0,"",python,selection_command +1473,3858293,"train_lam.py",6846,211," lr_schedule = get_lr_schedule(args.lr_schedule, \n args.init_lr, \n args.max_lr, \n args.decay_end, \n args.num_steps, \n args.warmup_steps, \n args.wsd_decay_steps)\n if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )\n else:\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4, mu_dtype=args.dtype)\n",python,content +1474,3860270,"train_lam.py",7523,0,"",python,selection_mouse +1475,3860467,"train_lam.py",7523,2,", ",python,selection_mouse +1476,3860468,"train_lam.py",7523,4,", mu",python,selection_mouse +1477,3860510,"train_lam.py",7523,5,", mu_",python,selection_mouse +1478,3860535,"train_lam.py",7523,6,", mu_d",python,selection_mouse +1479,3860545,"train_lam.py",7523,7,", mu_dt",python,selection_mouse +1480,3860566,"train_lam.py",7523,8,", mu_dty",python,selection_mouse +1481,3860581,"train_lam.py",7523,9,", mu_dtyp",python,selection_mouse +1482,3860590,"train_lam.py",7523,10,", mu_dtype",python,selection_mouse +1483,3860646,"train_lam.py",7523,11,", mu_dtype=",python,selection_mouse +1484,3860647,"train_lam.py",7523,12,", mu_dtype=a",python,selection_mouse +1485,3860686,"train_lam.py",7523,13,", mu_dtype=ar",python,selection_mouse +1486,3860686,"train_lam.py",7523,14,", mu_dtype=arg",python,selection_mouse +1487,3860686,"train_lam.py",7523,15,", mu_dtype=args",python,selection_mouse +1488,3860748,"train_lam.py",7523,16,", mu_dtype=args.",python,selection_mouse +1489,3860805,"train_lam.py",7523,17,", mu_dtype=args.d",python,selection_mouse +1490,3860916,"train_lam.py",7523,18,", mu_dtype=args.dt",python,selection_mouse +1491,3860998,"train_lam.py",7523,19,", mu_dtype=args.dty",python,selection_mouse +1492,3861133,"train_lam.py",7523,20,", 
mu_dtype=args.dtyp",python,selection_mouse +1493,3861930,"train_lam.py",7523,21,", mu_dtype=args.dtype",python,selection_mouse +1494,3862704,"train_lam.py",7523,0,"",python,selection_command +1495,3863677,"train_lam.py",7416,0,"",python,selection_mouse +1496,3864764,"train_lam.py",7415,0,"",python,selection_command +1497,3865136,"train_lam.py",7416,0,", mu_dtype=args.dtype",python,content +1498,3865151,"train_lam.py",7436,0,"",python,selection_command +1499,3897684,"train_dynamics.py",0,0,"",python,tab +1500,3901815,"train_lam.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import Optional\n\nimport einops\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n vq_beta: float = 0.25\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n warmup_steps: int = 5000\n lr_schedule : str = ""wsd"" # supported options: wsd, cos\n vq_reset_thresh: int = 50\n grad_clip_threshold: Optional[float] = None\n # LAM\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 6\n patch_size: int = 16\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.0\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_lam""\n tags: list[str] = field(default_factory=lambda: [""lam""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_checkpoint_keep_period: int = 20000\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef lam_loss_fn(params, state, inputs):\n # --- Compute loss ---\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n gt_future_frames = inputs[""videos""][:, 1:]\n mse = jnp.square(gt_future_frames - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = gt_future_frames.clip(0, 1).reshape(-1, *gt_future_frames.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n count_fn = jax.vmap(lambda i: (outputs[""indices""] == i).sum())\n index_counts = count_fn(jnp.arange(args.num_latents))\n metrics = dict(\n loss=loss,\n mse=mse,\n 
q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=(index_counts != 0).mean(),\n )\n return loss, (outputs[""recon""], index_counts, metrics)\n\n\n@jax.jit\ndef train_step(state, inputs, action_last_active):\n # --- Update model ---\n rng, inputs[""rng""] = jax.random.split(inputs[""rng""])\n grad_fn = jax.value_and_grad(lam_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, idx_counts, metrics)), grads = grad_fn(state.params, state, inputs)\n # extract and manually clip grad norm for logging (actual clipping is done in the optax.chain)\n raw_grad_norm = optax.global_norm(grads)\n metrics[""grad_norm""] = jnp.minimum(raw_grad_norm, args.grad_clip_threshold) if args.grad_clip_threshold else raw_grad_norm\n\n state = state.apply_gradients(grads=grads)\n\n # --- Reset inactive latent actions ---\n codebook = state.params[""params""][""vq""][""codebook""]\n num_codes = len(codebook)\n active_codes = idx_counts != 0.0\n action_last_active = jnp.where(active_codes, 0, action_last_active + 1)\n p_code = active_codes / active_codes.sum()\n reset_idxs = jax.random.choice(rng, num_codes, shape=(num_codes,), p=p_code)\n do_reset = action_last_active >= args.vq_reset_thresh\n new_codebook = jnp.where(\n jnp.expand_dims(do_reset, -1), codebook[reset_idxs], codebook\n )\n state.params[""params""][""vq""][""codebook""] = new_codebook\n action_last_active = jnp.where(do_reset, 0, action_last_active)\n return state, loss, recon, action_last_active, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n )\n # Track when each action was last sampled\n action_last_active = jnp.zeros(args.num_latents)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n rng, _rng = jax.random.split(rng)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = lam.init(_rng, inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(args.lr_schedule, \n args.init_lr, \n args.max_lr, \n args.decay_end, \n args.num_steps, \n args.warmup_steps, \n 
args.wsd_decay_steps)\n if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4, mu_dtype=args.dtype)\n )\n else:\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4, mu_dtype=args.dtype)\n train_state = TrainState.create(apply_fn=lam.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n action_last_active = jax.device_put(action_last_active, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n inputs = dict(videos=videos, rng=_rng)\n train_state, loss, recon, action_last_active, metrics = train_step(\n 
train_state, inputs, action_last_active\n )\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0][1:]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +1501,3901817,"train_lam.py",505,0,"",python,selection_command +1502,3905268,"train_tokenizer.py",0,0,"",python,tab +1503,3908254,"train_tokenizer.py",0,0,"",python,tab +1504,3912933,"train_tokenizer.py",6624,211," lr_schedule = get_lr_schedule(args.lr_schedule, \n args.init_lr, \n args.max_lr, \n args.decay_end, \n args.num_steps, \n args.warmup_steps, \n args.wsd_decay_steps)\n if args.grad_clip_threshold:\n tx = optax.chain(\n optax.clip_by_global_norm(args.grad_clip_threshold),\n optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n )\n else:\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4, mu_dtype=args.dtype)\n",python,content +1505,3914634,"train_tokenizer.py",7301,0,"",python,selection_mouse +1506,3914803,"train_tokenizer.py",7301,1,",",python,selection_mouse +1507,3914824,"train_tokenizer.py",7301,3,", m",python,selection_mouse +1508,3914836,"train_tokenizer.py",7301,5,", mu_",python,selection_mouse +1509,3914886,"train_tokenizer.py",7301,6,", mu_d",python,selection_mouse +1510,3914886,"train_tokenizer.py",7301,8,", mu_dty",python,selection_mouse +1511,3914887,"train_tokenizer.py",7301,9,", mu_dtyp",python,selection_mouse +1512,3914946,"train_tokenizer.py",7301,11,", mu_dtype=",python,selection_mouse +1513,3914951,"train_tokenizer.py",7301,12,", mu_dtype=a",python,selection_mouse +1514,3915009,"train_tokenizer.py",7301,13,", mu_dtype=ar",python,selection_mouse +1515,3915016,"train_tokenizer.py",7301,14,", mu_dtype=arg",python,selection_mouse +1516,3915076,"train_tokenizer.py",7301,15,", mu_dtype=args",python,selection_mouse +1517,3915084,"train_tokenizer.py",7301,16,", mu_dtype=args.",python,selection_mouse +1518,3915142,"train_tokenizer.py",7301,17,", mu_dtype=args.d",python,selection_mouse +1519,3915149,"train_tokenizer.py",7301,18,", mu_dtype=args.dt",python,selection_mouse +1520,3915170,"train_tokenizer.py",7301,19,", mu_dtype=args.dty",python,selection_mouse +1521,3915229,"train_tokenizer.py",7301,20,", mu_dtype=args.dtyp",python,selection_mouse +1522,3915279,"train_tokenizer.py",7301,21,", mu_dtype=args.dtype",python,selection_mouse +1523,3917005,"train_tokenizer.py",7301,0,"",python,selection_command +1524,3919231,"train_tokenizer.py",7193,0,"",python,selection_mouse 
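The recorded edits above converge on one optimizer pattern across train_dynamics.py, train_lam.py, and train_tokenizer.py: wrap optax.adamw in an optax.chain with global-norm clipping when grad_clip_threshold is set, and recompute the clipped norm separately so the logged grad_norm matches what the optimizer saw. A minimal sketch of that pattern, assuming only jax and optax; the threshold and schedule values here are illustrative, not the repo's defaults:

    import jax.numpy as jnp
    import optax

    grad_clip_threshold = 1.0  # illustrative; None disables clipping
    lr_schedule = optax.warmup_cosine_decay_schedule(
        init_value=0.0, peak_value=3e-4, warmup_steps=5_000, decay_steps=200_000
    )
    if grad_clip_threshold:
        tx = optax.chain(
            optax.clip_by_global_norm(grad_clip_threshold),
            optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9,
                        weight_decay=1e-4, mu_dtype=jnp.bfloat16),
        )
    else:
        tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9,
                         weight_decay=1e-4, mu_dtype=jnp.bfloat16)

    def logged_grad_norm(grads):
        # Clipping itself happens inside the optax.chain; this mirrors it so
        # the logged metric equals the norm the optimizer actually applied.
        raw = optax.global_norm(grads)
        return jnp.minimum(raw, grad_clip_threshold) if grad_clip_threshold else raw

Mirroring the clip with jnp.minimum keeps train_step free of optax internals: the logged value equals the post-clip norm without threading state out of the chain.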
+1525,3920485,"train_tokenizer.py",7194,0,", mu_dtype=args.dtype",python,content +1526,3920501,"train_tokenizer.py",7214,0,"",python,selection_command +1527,3930144,"TERMINAL",0,0,"git status",,terminal_command +1528,3930156,"TERMINAL",0,0,"]633;E;2025-07-16 12:08:46 git status;dea9d5fc-91fd-447c-886d-4b0240ae057d]633;COn branch grad-norm-log-and-clip\r\nYour branch is up to date with 'origin/grad-norm-log-and-clip'.\r\n\r\nAll conflicts fixed but you are still merging.\r\n (use ""git commit"" to conclude merge)\r\n\r\nChanges to be committed:\r\n\tmodified: genie.py\r\n\tmodified: models/dynamics.py\r\n\tmodified: models/lam.py\r\n\tmodified: models/tokenizer.py\r\n\tmodified: sample.py\r\n\tmodified: train_dynamics.py\r\n\tmodified: train_lam.py\r\n\tmodified: train_tokenizer.py\r\n\tnew file: utils/lr_utils.py\r\n\tmodified: utils/nn.py\r\n\tmodified: utils/parameter_utils.py\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tlocal-logs/\r\n\tlogs/\r\n\tread_tf_record.py\r\n\trequirements-franz.txt\r\n\tsample.py_bak\r\n\tscripts_cremers/\r\n\tscripts_horeka/\r\n\tslurm-3309772.out\r\n\tslurm/\r\n\ttrain_tokenizer_bak.py\r\n\tutils/logger_bak.py\r\n\tutils/visualizer.py\r\n\tweekend-job-requeuer.sh\r\n\tweekend-job-starter.sh\r\n\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +1529,3942550,"TERMINAL",0,0,"git commit -am ""merge main into here""",,terminal_command +1530,3942599,"TERMINAL",0,0,"]633;E;2025-07-16 12:08:59 git commit -am ""merge main into here"";dea9d5fc-91fd-447c-886d-4b0240ae057d]633;C",,terminal_output +1531,3942619,"TERMINAL",0,0,"[grad-norm-log-and-clip f43fcbb] merge main into here\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +1532,3944760,"TERMINAL",0,0,"git push",,terminal_command +1533,3944791,"TERMINAL",0,0,"]633;E;2025-07-16 12:09:01 git push;dea9d5fc-91fd-447c-886d-4b0240ae057d]633;C",,terminal_output +1534,3946144,"TERMINAL",0,0,"Enumerating objects: 13, done.\r\nCounting objects: 7% (1/13)\rCounting objects: 15% (2/13)\rCounting objects: 23% (3/13)\rCounting objects: 30% (4/13)\rCounting objects: 38% (5/13)\rCounting objects: 46% (6/13)\rCounting objects: 53% (7/13)\rCounting objects: 61% (8/13)\rCounting objects: 69% (9/13)\rCounting objects: 76% (10/13)\rCounting objects: 84% (11/13)\rCounting objects: 92% (12/13)\rCounting objects: 100% (13/13)\rCounting objects: 100% (13/13), done.\r\nDelta compression using up to 152 threads\r\nCompressing objects: 20% (1/5)\rCompressing objects: 40% (2/5)\rCompressing objects: 60% (3/5)\rCompressing objects: 80% (4/5)\rCompressing objects: 100% (5/5)\rCompressing objects: 100% (5/5), done.\r\nWriting objects: 20% (1/5)\rWriting objects: 40% (2/5)\rWriting objects: 60% (3/5)\rWriting objects: 80% (4/5)\rWriting objects: 100% (5/5)\rWriting objects: 100% (5/5), 1.32 KiB | 677.00 KiB/s, done.\r\nTotal 5 (delta 4), reused 0 (delta 0), pack-reused 0\r\n",,terminal_output +1535,3946245,"TERMINAL",0,0,"remote: Resolving deltas: 0% (0/4)\rremote: Resolving deltas: 25% (1/4)\rremote: Resolving deltas: 50% (2/4)\rremote: Resolving deltas: 75% (3/4)\rremote: Resolving deltas: 100% (4/4)\rremote: Resolving deltas: 100% (4/4), completed with 4 local objects.\r\n",,terminal_output +1536,3946490,"TERMINAL",0,0,"To github.com:p-doom/jafar.git\r\n 0053b7b..f43fcbb grad-norm-log-and-clip -> grad-norm-log-and-clip\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +1537,3960199,"TERMINAL",0,0,"bash",,terminal_focus 
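The get_lr_schedule helper imported from utils.lr_utils is never opened in this recording, so its body is not shown. One plausible construction of the "wsd" (warmup-stable-decay) option it advertises, built from stock optax primitives — a sketch under that assumption, with argument names mirroring the Args fields rather than the repo's actual implementation:

    import optax

    def wsd_schedule(init_lr, max_lr, decay_end, num_steps, warmup_steps, wsd_decay_steps):
        # Linear warmup, long constant plateau, then a linear decay that ends
        # exactly at num_steps (hence the wsd_decay_steps note in Args).
        warmup = optax.linear_schedule(init_lr, max_lr, warmup_steps)
        stable = optax.constant_schedule(max_lr)
        decay = optax.linear_schedule(max_lr, decay_end, wsd_decay_steps)
        return optax.join_schedules(
            [warmup, stable, decay],
            boundaries=[warmup_steps, num_steps - wsd_decay_steps],
        )

    # Values taken from the train_lam.py Args defaults recorded above.
    lr_schedule = wsd_schedule(0.0, 3e-5, 0.0, 200_000, 5_000, 10_000)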
+1538,3962017,"TERMINAL",0,0,"uv pip install -U jax[cuda12]",,terminal_command +1539,3963812,"TERMINAL",0,0,"⠋ Resolving dependencies... \r⠙ Resolving dependencies... \r⠋ Resolving dependencies... \r⠙ Resolving dependencies... ",,terminal_output +1540,3963966,"TERMINAL",0,0,"\r⠙ jax==0.6.2 ",,terminal_output +1541,3964024,"TERMINAL",0,0,"\r⠹ jax==0.6.2 ",,terminal_output +1542,3964132,"TERMINAL",0,0,"\r⠹ jax==0.6.2 \r⠹ jaxlib==0.6.2 ",,terminal_output +1543,3964196,"TERMINAL",0,0,"\r⠹ ml-dtypes==0.5.1 \r⠹ jax==0.6.2 \r⠹ jaxlib==0.6.2 \r⠹ ml-dtypes==0.5.1 \r⠹ jax==0.6.2 \r⠹ jaxlib==0.6.2 \r⠹ ml-dtypes==0.5.1 \r⠹ numpy==2.2.6 \r⠹ numpy==2.2.6 \r⠹ numpy==2.2.6 \r⠹ numpy==2.2.6 \r⠹ opt-einsum==3.4.0 \r⠹ jax==0.6.2 \r⠹ numpy==2.2.6 \r⠹ jaxlib==0.6.2 \r⠹ ml-dtypes==0.5.1 \r⠹ numpy==2.2.6 \r⠹ opt-einsum==3.4.0 \r⠹ scipy==1.15.3 \r⠹ jax-cuda12-plugin==0.6.2 \r⠹ jax-cuda12-plugin==0.6.2 ",,terminal_output +1544,3964251,"TERMINAL",0,0,"\r⠸ jax-cuda12-plugin==0.6.2 ",,terminal_output +1545,3964310,"TERMINAL",0,0,"\r⠸ nvidia-cublas-cu12==12.9.1.4 \rResolved 20 packages in 492ms\r\n⠋ Preparing packages... (0/0) \r⠋ Preparing packages... (0/17) \r⠙ Preparing packages... (0/17) ",,terminal_output +1546,3964369,"TERMINAL",0,0,"\r⠙ Preparing packages... (0/17)\r\nnumpy  ------------------------------ 0 B/16.02 MiB \r\r⠙ Preparing packages... (0/17)\r\nnumpy  ------------------------------ 0 B/16.02 MiB \r\r⠙ Preparing packages... (0/17)\r\nnvidia-cuda-cupti-cu12 ------------------------------ 0 B/10.31 MiB\r\nnumpy  ------------------------------ 0 B/16.02 MiB \r\r\r⠙ Preparing packages... (0/17)\r\nnvidia-cuda-cupti-cu12 ------------------------------ 14.04 KiB/10.31 MiB\r\nnumpy  ------------------------------ 0 B/16.02 MiB \r\r\r⠙ Preparing packages... (0/17)\r\nnvidia-cuda-cupti-cu12 ------------------------------ 14.04 KiB/10.31 MiB\r\nnumpy  ------------------------------ 0 B/16.02 MiB \r\r\r⠙ Preparing packages... (0/17)\r\nnvidia-cuda-cupti-cu12 ------------------------------ 14.04 KiB/10.31 MiB\r\nnumpy  ------------------------------ 0 B/16.02 MiB \r\r\r⠙ Preparing packages... (0/17)\r\nnvidia-cuda-cupti-cu12 ------------------------------ 14.04 KiB/10.31 MiB\r\nnumpy  ------------------------------ 0 B/16.02 MiB\r\nnvidia-cufft-cu12  ------------------------------ 0 B/191.57 MiB \r\r\r\r⠙ Preparing packages... (0/17)\r\nnvidia-cuda-cupti-cu12 ------------------------------ 14.04 KiB/10.31 MiB\r\nnumpy  ------------------------------ 0 B/16.02 MiB\r\nnvidia-cufft-cu12  ------------------------------ 14.83 KiB/191.57 MiB \r\r\r\r⠙ Preparing packages... (0/17)\r\nnvidia-cuda-cupti-cu12 ------------------------------ 14.04 KiB/10.31 MiB\r\nnumpy  ------------------------------ 0 B/16.02 MiB\r\nnvidia-cufft-cu12  ------------------------------ 14.83 KiB/191.57 MiB \r\r\r\r⠙ Preparing packages... (0/17)\r\nnvidia-cuda-cupti-cu12 ------------------------------ 14.04 KiB/10.31 MiB\r\nnumpy  ------------------------------ 0 B/16.02 MiB\r\nnvidia-cufft-cu12  ------------------------------ 14.83 KiB/191.57 MiB \r\r\r\r⠙ Preparing packages... (0/17)\r\nnvidia-cuda-cupti-cu12 ------------------------------ 14.04 KiB/10.31 MiB\r\nnumpy  ------------------------------ 0 B/16.02 MiB\r\nnvidia-cufft-cu12  ------------------------------ 14.83 KiB/191.57 MiB \r\r\r\r⠙ Preparing packages... 
(0/17)\r\nnvidia-cuda-cupti-cu12 ------------------------------ 14.04 KiB/10.31 MiB\r\njax-cuda12-plugin  ------------------------------ 0 B/15.14 MiB\r\nnumpy  ------------------------------ 0 B/16.02 MiB\r\nnvidia-cufft-cu12  ------------------------------ 14.83 KiB/191.57 MiB \r\r\r\r\r⠙ Preparing packages... (0/17)\r\nnvidia-cuda-cupti-cu12 ------------------------------ 14.04 KiB/10.31 MiB\r\njax-cuda12-plugin  ------------------------------ 0 B/15.14 MiB\r\nnumpy  ------------------------------ 0 B/16.02 MiB\r\nnvidia-cufft-cu12  ------------------------------ 14.83 KiB/191.57 MiB \r\r\r\r\r⠙ Preparing packages... (0/17)\r\nnvidia-cuda-cupti-cu12 ------------------------------ 14.04 KiB/10.31 MiB\r\njax-cuda12-plugin  ------------------------------ 0 B/15.14 MiB\r\nnumpy  ------------------------------ 0 B/16.02 MiB\r\nnvidia-cufft-cu12  ------------------------------ 14.83 KiB/191.57 MiB \r\r\r\r\r⠙ Preparing packages... (0/17)\r\nnvidia-cuda-cupti-cu12 ------------------------------ 14.04 KiB/10.31 MiB\r\njax-cuda12-plugin  ------------------------------ 0 B/15.14 MiB\r\nnumpy  ------------------------------ 0 B/16.02 MiB\r\nnvidia-cufft-cu12  ------------------------------ 14.83 KiB/191.57 MiB \r\r\r\r\r⠙ Preparing packages... (0/17)\r\nnvidia-cuda-cupti-cu12 ------------------------------ 14.04 KiB/10.31 MiB\r\njax-cuda12-plugin  ------------------------------ 0 B/15.14 MiB\r\nnumpy  ------------------------------ 0 B/16.02 MiB\r\nnvidia-cufft-cu12  ------------------------------ 14.83 KiB/191.57 MiB \r\r\r\r\r⠙ Preparing packages... (0/17)\r\nnvidia-cuda-cupti-cu12 ------------------------------ 14.04 KiB/10.31 MiB\r\njax-cuda12-plugin  ------------------------------ 0 B/15.14 MiB\r\nnumpy  ------------------------------ 0 B/16.02 MiB\r\njax-cuda12-pjrt  ------------------------------ 0 B/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 14.83 KiB/191.57 MiB \r\r\r\r\r\r⠙ Preparing packages... (0/17)\r\nnvidia-cuda-runtime-cu12 ------------------------------ 0 B/3.33 MiB\r\nnvidia-cuda-cupti-cu12  ------------------------------ 14.04 KiB/10.31 MiB\r\njax-cuda12-plugin  ------------------------------ 0 B/15.14 MiB\r\nnumpy  ------------------------------ 0 B/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 0 B/85.42 MiB\r\njax-cuda12-pjrt  ------------------------------ 0 B/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 14.83 KiB/191.57 MiB ",,terminal_output +1547,3964428,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r⠙ Preparing packages... (0/17)\r\njax  ------------------------------ 14.92 KiB/2.60 MiB\r\nnvidia-cuda-runtime-cu12 ------------------------------ 14.90 KiB/3.33 MiB\r\nnvidia-cuda-cupti-cu12  ------------------------------ 62.04 KiB/10.31 MiB\r\njax-cuda12-plugin  ------------------------------ 128.00 KiB/15.14 MiB\r\nnumpy  ------------------------------ 16.00 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 14.89 KiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 112.00 KiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 16.00 KiB/85.42 MiB\r\njaxlib  ------------------------------ 16.00 KiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 14.83 KiB/118.83 MiB",,terminal_output +1548,3964486,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠙ Preparing packages... 
(0/17)\r\njax  ------------------------------ 14.92 KiB/2.60 MiB\r\nnvidia-cuda-runtime-cu12 ------------------------------ 30.90 KiB/3.33 MiB\r\nnvidia-cuda-cupti-cu12  ------------------------------ 78.04 KiB/10.31 MiB\r\njax-cuda12-plugin  ------------------------------ 353.16 KiB/15.14 MiB\r\nnumpy  ------------------------------ 16.00 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 323.73 KiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 304.00 KiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 334.72 KiB/85.42 MiB\r\njaxlib  ------------------------------ 16.00 KiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 14.83 KiB/118.83 MiB",,terminal_output +1549,3964556,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠹ Preparing packages... (0/17)\r\njax  ------------------------------ 14.92 KiB/2.60 MiB\r\nnvidia-cuda-runtime-cu12 ------------------------------ 220.17 KiB/3.33 MiB\r\nnvidia-cuda-cupti-cu12  ------------------------------ 94.04 KiB/10.31 MiB\r\njax-cuda12-plugin  ------------------------------ 573.98 KiB/15.14 MiB\r\nnumpy  ------------------------------ 16.00 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 520.56 KiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 496.00 KiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 528.00 KiB/85.42 MiB\r\njaxlib  ------------------------------ 16.00 KiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 14.83 KiB/118.83 MiB\r\r\r\r\r\r\r\r\r\r\r⠹ Preparing packages... (0/17)\r\njax  ------------------------------ 14.92 KiB/2.60 MiB\r\nnvidia-cuda-runtime-cu12 ------------------------------ 252.17 KiB/3.33 MiB\r\nnvidia-cuda-cupti-cu12  ------------------------------ 142.04 KiB/10.31 MiB\r\njax-cuda12-plugin  ------------------------------ 784.00 KiB/15.14 MiB\r\nnumpy  ------------------------------ 16.00 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 744.56 KiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 688.00 KiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 751.45 KiB/85.42 MiB\r\njaxlib  ------------------------------ 32.00 KiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 30.83 KiB/118.83 MiB",,terminal_output +1550,3964610,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠹ Preparing packages... (0/17)\r\njax  ------------------------------ 30.92 KiB/2.60 MiB\r\nnvidia-cuda-runtime-cu12 ------------------------------ 268.17 KiB/3.33 MiB\r\nnvidia-cuda-cupti-cu12  ------------------------------ 142.04 KiB/10.31 MiB\r\njax-cuda12-plugin  ------------------------------ 947.12 KiB/15.14 MiB\r\nnumpy  ------------------------------ 16.00 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 920.56 KiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 848.00 KiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 912.00 KiB/85.42 MiB\r\njaxlib  ------------------------------ 875.00 KiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 30.83 KiB/118.83 MiB",,terminal_output +1551,3964669,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠹ Preparing packages... 
+[condensed: ~58 near-identical TERMINAL terminal_output records (sequence +1552 through +1609, plus adjoining frame fragments), each an animation frame of the package installer's spinner ("⠹/⠸/⠼/⠴/⠦/⠧/⠇/⠋/⠙ Preparing packages... (0/17)" advancing to "(3/17)") while downloading the 17 wheels of a JAX CUDA 12 install — apparently via uv. Packages and sizes visible in the frames: jax (2.60 MiB), nvidia-cuda-runtime-cu12 (3.33 MiB), nvidia-cuda-cupti-cu12 (10.31 MiB), jax-cuda12-plugin (15.14 MiB), numpy (16.02 MiB), nvidia-nvjitlink-cu12 (37.91 MiB), nvidia-cuda-nvcc-cu12 (38.67 MiB), nvidia-cuda-nvrtc-cu12 (85.42 MiB), jaxlib (85.75 MiB), nvidia-nvshmem-cu12 (118.83 MiB), jax-cuda12-pjrt (119.46 MiB), nvidia-cufft-cu12 (191.57 MiB), nvidia-nccl-cu12 (307.61 MiB)]
(3/17)\r\njax  ------------------------------ 1.91 MiB/2.60 MiB\r\nnumpy  ------------------------------ 735.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 26.39 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 26.49 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 26.30 MiB/85.42 MiB\r\njaxlib  ------------------------------ 26.20 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 26.32 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 26.18 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 26.40 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 26.22 MiB/307.61 MiB",,terminal_output +1610,3968969,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠦ Preparing packages... (3/17)\r\njax  ------------------------------ 1.91 MiB/2.60 MiB\r\nnumpy  ------------------------------ 735.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 26.77 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 26.79 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 26.77 MiB/85.42 MiB\r\njaxlib  ------------------------------ 26.60 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 26.70 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 26.47 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 26.88 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 26.60 MiB/307.61 MiB",,terminal_output +1611,3969024,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠦ Preparing packages... (3/17)\r\njax  ------------------------------ 1.92 MiB/2.60 MiB\r\nnumpy  ------------------------------ 735.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 27.03 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 27.26 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 27.06 MiB/85.42 MiB\r\njaxlib  ------------------------------ 26.99 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 27.09 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 26.97 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 27.29 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 26.99 MiB/307.61 MiB",,terminal_output +1612,3969084,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠦ Preparing packages... (3/17)\r\njax  ------------------------------ 1.92 MiB/2.60 MiB\r\nnumpy  ------------------------------ 735.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 27.50 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 27.56 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 27.44 MiB/85.42 MiB\r\njaxlib  ------------------------------ 27.38 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 27.39 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 27.25 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 27.57 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 27.51 MiB/307.61 MiB",,terminal_output +1613,3969286,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠦ Preparing packages... 
(3/17)\r\njax  ------------------------------ 1.92 MiB/2.60 MiB\r\nnumpy  ------------------------------ 735.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 27.79 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 27.95 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 27.82 MiB/85.42 MiB\r\njaxlib  ------------------------------ 27.74 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 27.82 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 27.62 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 27.91 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 27.77 MiB/307.61 MiB\r\r\r\r\r\r\r\r\r\r\r⠧ Preparing packages... (3/17)\r\njax  ------------------------------ 1.92 MiB/2.60 MiB\r\nnumpy  ------------------------------ 735.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 28.15 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 28.37 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 28.23 MiB/85.42 MiB\r\njaxlib  ------------------------------ 28.11 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 28.23 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 28.04 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 28.30 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 28.15 MiB/307.61 MiB\r\r\r\r\r\r\r\r\r\r\r⠧ Preparing packages... (3/17)\r\njax  ------------------------------ 1.93 MiB/2.60 MiB\r\nnumpy  ------------------------------ 735.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 28.52 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 28.73 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 28.62 MiB/85.42 MiB\r\njaxlib  ------------------------------ 28.57 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 28.75 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 28.37 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 28.68 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 28.55 MiB/307.61 MiB\r\r\r\r\r\r\r\r\r\r\r⠧ Preparing packages... (3/17)\r\njax  ------------------------------ 1.93 MiB/2.60 MiB\r\nnumpy  ------------------------------ 735.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 28.89 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 29.08 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 29.02 MiB/85.42 MiB\r\njaxlib  ------------------------------ 28.92 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 29.01 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 28.76 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 29.05 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 28.97 MiB/307.61 MiB",,terminal_output +1614,3969364,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠧ Preparing packages... 
(3/17)\r\njax  ------------------------------ 1.95 MiB/2.60 MiB\r\nnumpy  ------------------------------ 751.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 29.29 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 29.42 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 29.43 MiB/85.42 MiB\r\njaxlib  ------------------------------ 29.23 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 29.40 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 29.15 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 29.43 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 29.34 MiB/307.61 MiB\r\r\r\r\r\r\r\r\r\r\r⠇ Preparing packages... (3/17)\r\njax  ------------------------------ 1.95 MiB/2.60 MiB\r\nnumpy  ------------------------------ 751.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 29.69 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 29.88 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 29.74 MiB/85.42 MiB\r\njaxlib  ------------------------------ 29.71 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 29.71 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 29.56 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 29.80 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 29.64 MiB/307.61 MiB",,terminal_output +1615,3969420,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠇ Preparing packages... (3/17)\r\njax  ------------------------------ 1.95 MiB/2.60 MiB\r\nnumpy  ------------------------------ 751.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 30.01 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 30.18 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 30.15 MiB/85.42 MiB\r\njaxlib  ------------------------------ 30.09 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 30.19 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 29.90 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 30.12 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 30.06 MiB/307.61 MiB",,terminal_output +1616,3969479,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠇ Preparing packages... (3/17)\r\njax  ------------------------------ 1.95 MiB/2.60 MiB\r\nnumpy  ------------------------------ 751.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 30.36 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 30.58 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 30.58 MiB/85.42 MiB\r\njaxlib  ------------------------------ 30.40 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 30.48 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 30.26 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 30.51 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 30.48 MiB/307.61 MiB",,terminal_output +1617,3969582,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠇ Preparing packages... 
(3/17)\r\njax  ------------------------------ 1.95 MiB/2.60 MiB\r\nnumpy  ------------------------------ 751.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 30.73 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 31.00 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 30.86 MiB/85.42 MiB\r\njaxlib  ------------------------------ 30.79 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 30.89 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 30.53 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 30.88 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 30.91 MiB/307.61 MiB\r\r\r\r\r\r\r\r\r\r\r⠋ Preparing packages... (3/17)\r\njax  ------------------------------ 1.95 MiB/2.60 MiB\r\nnumpy  ------------------------------ 751.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 31.01 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 31.39 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 31.32 MiB/85.42 MiB\r\njaxlib  ------------------------------ 31.09 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 31.15 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 31.06 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 31.18 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 31.17 MiB/307.61 MiB",,terminal_output +1618,3969686,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠋ Preparing packages... (3/17)\r\njax  ------------------------------ 1.97 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 31.40 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 31.88 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 31.61 MiB/85.42 MiB\r\njaxlib  ------------------------------ 31.46 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 31.68 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 31.34 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 31.58 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 31.54 MiB/307.61 MiB\r\r\r\r\r\r\r\r\r\r\r⠋ Preparing packages... (3/17)\r\njax  ------------------------------ 1.97 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 31.80 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 32.16 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 32.02 MiB/85.42 MiB\r\njaxlib  ------------------------------ 31.83 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 32.04 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 31.73 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 31.97 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 32.03 MiB/307.61 MiB",,terminal_output +1619,3969738,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠋ Preparing packages... 
(3/17)\r\njax  ------------------------------ 1.98 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 32.17 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 32.58 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 32.52 MiB/85.42 MiB\r\njaxlib  ------------------------------ 32.21 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 32.36 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 32.10 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 32.35 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 32.31 MiB/307.61 MiB",,terminal_output +1620,3969803,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠙ Preparing packages... (3/17)\r\njax  ------------------------------ 1.98 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 32.66 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 32.99 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 32.86 MiB/85.42 MiB\r\njaxlib  ------------------------------ 32.63 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 32.87 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 32.46 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 32.72 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 32.73 MiB/307.61 MiB\r\r\r\r\r\r\r\r\r\r\r⠙ Preparing packages... (3/17)\r\njax  ------------------------------ 1.98 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 32.95 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 33.39 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 33.29 MiB/85.42 MiB\r\njaxlib  ------------------------------ 33.03 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 33.12 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 32.87 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 33.13 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 33.12 MiB/307.61 MiB",,terminal_output +1621,3969861,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠙ Preparing packages... (3/17)\r\njax  ------------------------------ 2.00 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 33.37 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 33.81 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 33.69 MiB/85.42 MiB\r\njaxlib  ------------------------------ 33.41 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 33.51 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 33.28 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 33.50 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 33.55 MiB/307.61 MiB",,terminal_output +1622,3969920,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠙ Preparing packages... 
(3/17)\r\njax  ------------------------------ 2.00 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 33.75 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 34.12 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 34.07 MiB/85.42 MiB\r\njaxlib  ------------------------------ 33.78 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 33.81 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 33.69 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 33.80 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 33.95 MiB/307.61 MiB",,terminal_output +1623,3970022,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠹ Preparing packages... (3/17)\r\njax  ------------------------------ 2.00 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 34.16 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 34.53 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 34.49 MiB/85.42 MiB\r\njaxlib  ------------------------------ 34.17 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 34.16 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 33.97 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 34.18 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 34.23 MiB/307.61 MiB\r\r\r\r\r\r\r\r\r\r\r⠹ Preparing packages... (3/17)\r\njax  ------------------------------ 2.00 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 34.42 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 34.94 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 34.79 MiB/85.42 MiB\r\njaxlib  ------------------------------ 34.47 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 34.55 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 34.36 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 34.55 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 34.62 MiB/307.61 MiB",,terminal_output +1624,3970078,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠹ Preparing packages... (3/17)\r\njax  ------------------------------ 2.00 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 34.71 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 35.30 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 35.13 MiB/85.42 MiB\r\njaxlib  ------------------------------ 34.78 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 34.97 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 34.66 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 34.97 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 35.01 MiB/307.61 MiB",,terminal_output +1625,3970135,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠹ Preparing packages... 
(3/17)\r\njax  ------------------------------ 2.00 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 35.12 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 35.69 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 35.50 MiB/85.42 MiB\r\njaxlib  ------------------------------ 35.23 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 35.26 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 35.08 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 35.28 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 35.32 MiB/307.61 MiB",,terminal_output +1626,3970252,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠸ Preparing packages... (3/17)\r\njax  ------------------------------ 2.01 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 35.42 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 36.02 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 35.89 MiB/85.42 MiB\r\njaxlib  ------------------------------ 35.57 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 35.55 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 35.36 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 35.69 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 35.60 MiB/307.61 MiB\r\r\r\r\r\r\r\r\r\r\r⠸ Preparing packages... (3/17)\r\njax  ------------------------------ 2.02 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 35.78 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 36.52 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 36.15 MiB/85.42 MiB\r\njaxlib  ------------------------------ 35.85 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 36.05 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 35.89 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 35.98 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 36.00 MiB/307.61 MiB\r\r\r\r\r\r\r\r\r\r\r⠸ Preparing packages... (3/17)\r\njax  ------------------------------ 2.02 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 36.16 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 36.82 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 36.53 MiB/85.42 MiB\r\njaxlib  ------------------------------ 36.27 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 36.33 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 36.18 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 36.39 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 36.39 MiB/307.61 MiB",,terminal_output +1627,3970310,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠸ Preparing packages... 
(3/17)\r\njax  ------------------------------ 2.02 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 36.58 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 37.22 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 36.89 MiB/85.42 MiB\r\njaxlib  ------------------------------ 36.65 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 36.75 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 36.50 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 36.78 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 36.80 MiB/307.61 MiB",,terminal_output +1628,3970413,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠼ Preparing packages... (3/17)\r\njax  ------------------------------ 2.02 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 36.97 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 37.59 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 37.21 MiB/85.42 MiB\r\njaxlib  ------------------------------ 37.07 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 37.19 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 36.99 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 37.20 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 37.22 MiB/307.61 MiB\r\r\r\r\r\r\r\r\r\r\r⠼ Preparing packages... (3/17)\r\njax  ------------------------------ 2.02 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 37.32 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 37.99 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 37.68 MiB/85.42 MiB\r\njaxlib  ------------------------------ 37.46 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 37.58 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 37.39 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 37.62 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 37.61 MiB/307.61 MiB",,terminal_output +1629,3970471,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠼ Preparing packages... (3/17)\r\njax  ------------------------------ 2.02 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 37.77 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 38.39 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 38.09 MiB/85.42 MiB\r\njaxlib  ------------------------------ 37.90 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 37.98 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 37.83 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 38.03 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 38.00 MiB/307.61 MiB",,terminal_output +1630,3970529,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠼ Preparing packages... 
(3/17)\r\njax  ------------------------------ 2.02 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 37.91 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 38.65 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 38.52 MiB/85.42 MiB\r\njaxlib  ------------------------------ 38.36 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 38.43 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 38.17 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 38.51 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 38.58 MiB/307.61 MiB",,terminal_output +1631,3970662,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠴ Preparing packages... (3/17)\r\njax  ------------------------------ 2.04 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 37.91 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 38.66 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 39.00 MiB/85.42 MiB\r\njaxlib  ------------------------------ 38.98 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 38.91 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 38.65 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 39.03 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 39.05 MiB/307.61 MiB\r\r\r\r\r\r\r\r\r\r\r⠴ Preparing packages... (3/17)\r\njax  ------------------------------ 2.04 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-nvjitlink-cu12  ------------------------------ 37.91 MiB/37.91 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 38.66 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 39.62 MiB/85.42 MiB\r\njaxlib  ------------------------------ 39.44 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 39.50 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 39.16 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 39.58 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 39.56 MiB/307.61 MiB\r\r\r\r\r\r\r\r\r\r\r⠴ Preparing packages... (3/17)\r\njax  ------------------------------ 2.04 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 38.66 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 39.73 MiB/85.42 MiB\r\njaxlib  ------------------------------ 39.56 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 39.62 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 39.38 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 39.70 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 39.69 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 39.56 MiB/322.45 MiB\r\r\r\r\r\r\r\r\r\r\r⠴ Preparing packages... 
(3/17)\r\njax  ------------------------------ 2.04 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-cuda-nvcc-cu12  ------------------------------ 38.67 MiB/38.67 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 40.09 MiB/85.42 MiB\r\njaxlib  ------------------------------ 39.92 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 40.01 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 39.80 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 40.09 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 39.93 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 39.80 MiB/322.45 MiB\r\r\r\r\r\r\r\r\r\r\r⠴ Preparing packages... (3/17)\r\njax  ------------------------------ 2.04 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 40.09 MiB/85.42 MiB\r\njaxlib  ------------------------------ 39.95 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 40.01 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 39.80 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 40.09 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 40.05 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 39.92 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 40.22 MiB/349.49 MiB",,terminal_output +1632,3970723,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠴ Preparing packages... (3/17)\r\njax  ------------------------------ 2.06 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 40.59 MiB/85.42 MiB\r\njaxlib  ------------------------------ 40.32 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 40.36 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 40.16 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 40.61 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 40.46 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 40.25 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 40.61 MiB/349.49 MiB",,terminal_output +1633,3970775,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠦ Preparing packages... (5/17)\r\njax  ------------------------------ 2.06 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 40.97 MiB/85.42 MiB\r\njaxlib  ------------------------------ 40.84 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 40.86 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 40.66 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 40.98 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 40.91 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 40.75 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 41.11 MiB/349.49 MiB",,terminal_output +1634,3970835,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠦ Preparing packages... 
(5/17)\r\njax  ------------------------------ 2.06 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 41.45 MiB/85.42 MiB\r\njaxlib  ------------------------------ 41.30 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 41.36 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 41.03 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 41.49 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 41.41 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 41.23 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 41.45 MiB/349.49 MiB",,terminal_output +1635,3970902,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠦ Preparing packages... (5/17)\r\njax  ------------------------------ 2.06 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 41.96 MiB/85.42 MiB\r\njaxlib  ------------------------------ 41.64 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 41.73 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 41.55 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 42.00 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 41.74 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 41.73 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 42.08 MiB/349.49 MiB\r\r\r\r\r\r\r\r\r\r\r⠦ Preparing packages... (5/17)\r\njax  ------------------------------ 2.08 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 42.39 MiB/85.42 MiB\r\njaxlib  ------------------------------ 42.15 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 42.25 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 42.03 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 42.37 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 42.26 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 42.07 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 42.55 MiB/349.49 MiB",,terminal_output +1636,3970960,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠧ Preparing packages... (5/17)\r\njax  ------------------------------ 2.09 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 42.87 MiB/85.42 MiB\r\njaxlib  ------------------------------ 42.63 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 42.64 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 42.41 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 42.87 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 42.71 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 42.46 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 43.00 MiB/349.49 MiB",,terminal_output +1637,3971018,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠧ Preparing packages... 
(5/17)\r\njax  ------------------------------ 2.12 MiB/2.60 MiB\r\nnumpy  ------------------------------ 767.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 43.36 MiB/85.42 MiB\r\njaxlib  ------------------------------ 42.99 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 43.17 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 42.91 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 43.22 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 43.08 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 42.96 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 43.36 MiB/349.49 MiB",,terminal_output +1638,3971076,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠧ Preparing packages... (5/17)\r\njax  ------------------------------ 2.12 MiB/2.60 MiB\r\nnumpy  ------------------------------ 783.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 43.76 MiB/85.42 MiB\r\njaxlib  ------------------------------ 43.49 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 43.71 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 43.41 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 43.69 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 43.62 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 43.46 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 43.91 MiB/349.49 MiB",,terminal_output +1639,3971177,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠧ Preparing packages... (5/17)\r\njax  ------------------------------ 2.12 MiB/2.60 MiB\r\nnumpy  ------------------------------ 783.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 44.27 MiB/85.42 MiB\r\njaxlib  ------------------------------ 43.97 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 44.25 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 43.91 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 44.20 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 44.14 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 43.95 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 44.41 MiB/349.49 MiB\r\r\r\r\r\r\r\r\r\r\r⠇ Preparing packages... (5/17)\r\njax  ------------------------------ 2.14 MiB/2.60 MiB\r\nnumpy  ------------------------------ 783.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 44.76 MiB/85.42 MiB\r\njaxlib  ------------------------------ 44.47 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 44.64 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 44.38 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 44.58 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 44.52 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 44.32 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 44.80 MiB/349.49 MiB",,terminal_output +1640,3971233,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠇ Preparing packages... 
(5/17)\r\njax  ------------------------------ 2.15 MiB/2.60 MiB\r\nnumpy  ------------------------------ 783.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 45.17 MiB/85.42 MiB\r\njaxlib  ------------------------------ 44.84 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 45.16 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 44.80 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 45.06 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 45.04 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 44.71 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 45.16 MiB/349.49 MiB",,terminal_output +1641,3971304,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠇ Preparing packages... (5/17)\r\njax  ------------------------------ 2.18 MiB/2.60 MiB\r\nnumpy  ------------------------------ 783.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 45.56 MiB/85.42 MiB\r\njaxlib  ------------------------------ 45.22 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 45.59 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 45.15 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 45.44 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 45.44 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 45.24 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 45.75 MiB/349.49 MiB\r\r\r\r\r\r\r\r\r\r\r⠇ Preparing packages... (5/17)\r\njax  ------------------------------ 2.18 MiB/2.60 MiB\r\nnumpy  ------------------------------ 783.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 46.07 MiB/85.42 MiB\r\njaxlib  ------------------------------ 45.73 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 46.11 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 45.58 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 45.95 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 45.93 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 45.78 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 46.19 MiB/349.49 MiB",,terminal_output +1642,3971360,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠋ Preparing packages... (5/17)\r\njax  ------------------------------ 2.20 MiB/2.60 MiB\r\nnumpy  ------------------------------ 783.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 46.48 MiB/85.42 MiB\r\njaxlib  ------------------------------ 46.14 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 46.59 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 46.20 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 46.36 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 46.45 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 46.15 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 46.58 MiB/349.49 MiB",,terminal_output +1643,3971424,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠋ Preparing packages... 
(5/17)\r\njax  ------------------------------ 2.20 MiB/2.60 MiB\r\nnumpy  ------------------------------ 783.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 47.00 MiB/85.42 MiB\r\njaxlib  ------------------------------ 46.64 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 47.03 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 46.56 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 46.87 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 46.83 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 46.65 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 46.97 MiB/349.49 MiB",,terminal_output +1644,3971482,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠋ Preparing packages... (5/17)\r\njax  ------------------------------ 2.22 MiB/2.60 MiB\r\nnumpy  ------------------------------ 783.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 47.39 MiB/85.42 MiB\r\njaxlib  ------------------------------ 47.16 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 47.43 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 46.98 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 47.26 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 47.25 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 47.17 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 47.50 MiB/349.49 MiB",,terminal_output +1645,3971554,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠋ Preparing packages... (5/17)\r\njax  ------------------------------ 2.22 MiB/2.60 MiB\r\nnumpy  ------------------------------ 783.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 47.81 MiB/85.42 MiB\r\njaxlib  ------------------------------ 47.57 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 47.95 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 47.49 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 47.83 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 47.80 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 47.44 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 48.03 MiB/349.49 MiB\r\r\r\r\r\r\r\r\r\r\r⠙ Preparing packages... (5/17)\r\njax  ------------------------------ 2.25 MiB/2.60 MiB\r\nnumpy  ------------------------------ 783.06 KiB/16.02 MiB\r\nnvidia-cuda-nvrtc-cu12  ------------------------------ 48.28 MiB/85.42 MiB\r\njaxlib  ------------------------------ 47.93 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 48.35 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 47.89 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 48.25 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 48.20 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 47.97 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 48.41 MiB/349.49 MiB",,terminal_output +1646,3971614,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠙ Preparing packages... 
(5/17) [elided]",,terminal_output
+[... sequences 1647-1705 (~3971.7 s to 3976.2 s) elided: ~60 near-identical terminal_output redraws of the package-manager "Preparing packages" spinner while 17 packages download (jax, jaxlib, numpy, jax-cuda12-pjrt, nvidia-cuda-nvrtc-cu12, nvidia-nvshmem-cu12, nvidia-cufft-cu12, nvidia-nccl-cu12, nvidia-cusolver-cu12, nvidia-cusparse-cu12, nvidia-cudnn-cu12); per-package progress climbs from roughly 48 MiB to 90 MiB, nvidia-cuda-nvrtc-cu12 completes at 85.42 MiB, and the counter ticks from (5/17) to (6/17) ...]
+1706,3976224,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠦ Preparing packages... 
(6/17)\r\njax  ------------------------------ 2.51 MiB/2.60 MiB\r\nnumpy  ------------------------------ 1.28 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.52 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 90.88 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 90.69 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 90.57 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 90.12 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 90.40 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 90.89 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 89.79 MiB/545.17 MiB",,terminal_output +1707,3976279,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠦ Preparing packages... (6/17)\r\njax  ------------------------------ 2.51 MiB/2.60 MiB\r\nnumpy  ------------------------------ 1.28 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.52 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 91.59 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 91.12 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 90.97 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 90.75 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 90.99 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 91.58 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 90.39 MiB/545.17 MiB\r\r\r\r\r\r\r\r\r\r\r⠦ Preparing packages... (6/17)\r\njax  ------------------------------ 2.51 MiB/2.60 MiB\r\nnumpy  ------------------------------ 1.31 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.52 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 92.11 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 91.80 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 91.48 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 91.42 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 91.49 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 92.19 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 90.87 MiB/545.17 MiB",,terminal_output +1708,3976360,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠦ Preparing packages... (6/17)\r\njax  ------------------------------ 2.51 MiB/2.60 MiB\r\nnumpy  ------------------------------ 1.32 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.53 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 92.56 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 92.33 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 92.13 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 91.90 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 92.00 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 92.72 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 91.55 MiB/545.17 MiB\r\r\r\r\r\r\r\r\r\r\r⠧ Preparing packages... 
(6/17)\r\njax  ------------------------------ 2.51 MiB/2.60 MiB\r\nnumpy  ------------------------------ 1.32 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.53 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 93.30 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 92.86 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 92.56 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 92.68 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 92.57 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 93.25 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 92.11 MiB/545.17 MiB",,terminal_output +1709,3976415,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠧ Preparing packages... (6/17)\r\njax  ------------------------------ 2.51 MiB/2.60 MiB\r\nnumpy  ------------------------------ 1.32 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.53 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 93.95 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 93.33 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 93.13 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 93.07 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 93.20 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 93.77 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 92.78 MiB/545.17 MiB",,terminal_output +1710,3976472,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠧ Preparing packages... (6/17)\r\njax  ------------------------------ 2.53 MiB/2.60 MiB\r\nnumpy  ------------------------------ 1.32 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.53 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 94.47 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 93.84 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 93.76 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 93.75 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 93.74 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 94.44 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 93.32 MiB/545.17 MiB",,terminal_output +1711,3976530,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠧ Preparing packages... (6/17)\r\njax  ------------------------------ 2.53 MiB/2.60 MiB\r\nnumpy  ------------------------------ 1.32 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.53 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 94.97 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 94.53 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 94.17 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 94.45 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 94.29 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 94.98 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 93.81 MiB/545.17 MiB",,terminal_output +1712,3976589,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠇ Preparing packages... 
(6/17)\r\njax  ------------------------------ 2.53 MiB/2.60 MiB\r\nnumpy  ------------------------------ 1.32 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.55 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 95.59 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 95.19 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 94.82 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 94.77 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 94.88 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 95.52 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 94.48 MiB/545.17 MiB",,terminal_output +1713,3976760,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠇ Preparing packages... (6/17)\r\njax  ------------------------------ 2.53 MiB/2.60 MiB\r\nnumpy  ------------------------------ 1.32 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.55 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 96.20 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 95.73 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 95.34 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 95.48 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 95.33 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 96.25 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 95.04 MiB/545.17 MiB\r\r\r\r\r\r\r\r\r\r\r⠇ Preparing packages... (6/17)\r\njax  ------------------------------ 2.53 MiB/2.60 MiB\r\nnumpy  ------------------------------ 1.32 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.55 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 96.81 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 96.25 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 95.79 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 95.96 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 95.84 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 96.75 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 95.72 MiB/545.17 MiB\r\r\r\r\r\r\r\r\r\r\r⠇ Preparing packages... (6/17)\r\njax  ------------------------------ 2.53 MiB/2.60 MiB\r\nnumpy  ------------------------------ 1.32 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.55 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 97.25 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 96.94 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 96.42 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 96.63 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 96.30 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 97.27 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 96.09 MiB/545.17 MiB\r\r\r\r\r\r\r\r\r\r\r⠋ Preparing packages... 
(6/17)\r\njax  ------------------------------ 2.53 MiB/2.60 MiB\r\nnumpy  ------------------------------ 1.32 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.55 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 97.81 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 97.50 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 96.96 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 97.15 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 96.83 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 97.95 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 96.57 MiB/545.17 MiB",,terminal_output +1714,3976855,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠋ Preparing packages... (6/17)\r\njax  ------------------------------ 2.54 MiB/2.60 MiB\r\nnumpy  ------------------------------ 1.46 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.57 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 98.34 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 97.81 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 97.48 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 97.67 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 97.47 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 98.47 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 97.20 MiB/545.17 MiB\r\r\r\r\r\r\r\r\r\r\r⠋ Preparing packages... (6/17)\r\nnumpy  ------------------------------ 1.59 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.60 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 98.90 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 98.52 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 97.98 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 98.13 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 97.97 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 99.00 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 97.71 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 98.59 MiB/554.32 MiB \r\r\r\r\r\r\r\r\r\r\r⠋ Preparing packages... (6/17)\r\nnumpy  ------------------------------ 1.59 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.60 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 98.90 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 98.52 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 97.98 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 98.13 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 97.98 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 99.00 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 97.71 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 98.59 MiB/554.32 MiB ",,terminal_output +1715,3976916,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠋ Preparing packages... 
(6/17)\r\nnumpy  ------------------------------ 1.59 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.61 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 99.43 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 99.06 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 98.52 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 98.49 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 98.48 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 99.52 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 98.24 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 99.17 MiB/554.32 MiB ",,terminal_output +1716,3976977,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠙ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 1.68 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.63 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 99.98 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 99.61 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 99.14 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 99.16 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 99.08 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 100.06 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 98.68 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 99.71 MiB/554.32 MiB ",,terminal_output +1717,3977058,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠙ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 1.71 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.63 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 100.56 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 100.16 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 99.61 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 99.67 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 99.62 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 100.64 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 99.45 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 100.26 MiB/554.32 MiB \r\r\r\r\r\r\r\r\r\r\r⠙ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 1.71 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.64 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 101.06 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 100.69 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 100.33 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 100.26 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 100.12 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 101.38 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 99.83 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 100.99 MiB/554.32 MiB ",,terminal_output +1718,3977120,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠙ Preparing packages... 
(7/17)\r\nnumpy  ------------------------------ 1.71 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.64 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 101.80 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 101.44 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 100.81 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 100.90 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 100.85 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 101.77 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 100.57 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 101.46 MiB/554.32 MiB ",,terminal_output +1719,3977224,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠹ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 1.71 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.66 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 102.39 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 102.00 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 101.28 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 101.46 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 101.41 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 102.47 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 101.11 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 102.07 MiB/554.32 MiB \r\r\r\r\r\r\r\r\r\r\r⠹ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 1.94 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.66 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 103.00 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 102.55 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 101.86 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 102.05 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 101.85 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 102.98 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 101.83 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 102.61 MiB/554.32 MiB ",,terminal_output +1720,3977284,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠹ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 1.94 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.67 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 103.67 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 103.09 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 102.42 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 102.77 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 102.41 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 103.53 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 102.24 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 103.17 MiB/554.32 MiB ",,terminal_output +1721,3977342,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠹ Preparing packages... 
(7/17)\r\nnumpy  ------------------------------ 1.94 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.67 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 104.22 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 103.84 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 102.95 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 103.36 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 103.09 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 104.11 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 102.74 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 103.70 MiB/554.32 MiB ",,terminal_output +1722,3977427,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠸ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.01 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.67 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 104.77 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 104.18 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 103.62 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 103.87 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 103.67 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 104.78 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 103.44 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 104.21 MiB/554.32 MiB \r\r\r\r\r\r\r\r\r\r\r⠸ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.38 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.67 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 105.12 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 104.91 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 104.20 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 104.32 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 104.23 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 105.42 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 103.81 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 104.75 MiB/554.32 MiB ",,terminal_output +1723,3977478,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠸ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.38 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.69 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 105.87 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 105.43 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 104.79 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 104.88 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 104.78 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 105.91 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 104.52 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 105.48 MiB/554.32 MiB ",,terminal_output +1724,3977538,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠸ Preparing packages... 
(7/17)\r\nnumpy  ------------------------------ 2.39 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.69 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 106.46 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 105.96 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 105.23 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 105.43 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 105.54 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 106.50 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 105.06 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 106.06 MiB/554.32 MiB ",,terminal_output +1725,3977605,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠼ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.39 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.69 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 106.95 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 106.73 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 105.86 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 106.18 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 105.92 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 107.27 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 105.65 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 106.75 MiB/554.32 MiB \r\r\r\r\r\r\r\r\r\r\r⠼ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.39 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.69 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 107.68 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 107.28 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 106.61 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 106.76 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 106.59 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 107.80 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 106.36 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 107.31 MiB/554.32 MiB ",,terminal_output +1726,3977661,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠼ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.39 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.69 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 108.22 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 107.86 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 107.17 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 107.36 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 107.33 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 108.31 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 107.11 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 107.91 MiB/554.32 MiB ",,terminal_output +1727,3977717,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠼ Preparing packages... 
(7/17)\r\nnumpy  ------------------------------ 2.39 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.69 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 108.73 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 108.42 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 107.78 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 108.10 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 107.78 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 109.06 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 107.48 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 108.64 MiB/554.32 MiB ",,terminal_output +1728,3977768,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠴ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.39 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.69 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 109.39 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 109.12 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 108.19 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 108.63 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 108.45 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 109.45 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 108.14 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 109.22 MiB/554.32 MiB ",,terminal_output +1729,3977830,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠴ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.39 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.69 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 109.97 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 109.70 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 108.94 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 109.21 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 109.03 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 109.95 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 108.70 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 109.75 MiB/554.32 MiB ",,terminal_output +1730,3977935,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠴ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.42 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.69 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 110.48 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 110.23 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 109.52 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 109.76 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 109.61 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 110.71 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 109.31 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 110.25 MiB/554.32 MiB \r\r\r\r\r\r\r\r\r\r\r⠴ Preparing packages... 
(7/17)\r\nnumpy  ------------------------------ 2.42 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.69 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 111.03 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 110.79 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 110.06 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 110.30 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 110.14 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 111.28 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 109.87 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 110.87 MiB/554.32 MiB ",,terminal_output +1731,3977996,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠦ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.44 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.69 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 111.65 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 111.51 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 110.62 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 111.08 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 110.70 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 111.80 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 110.44 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 111.34 MiB/554.32 MiB ",,terminal_output +1732,3978059,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠦ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.45 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.69 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 112.31 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 112.05 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 111.23 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 111.62 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 111.40 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 112.54 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 111.19 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 112.11 MiB/554.32 MiB \r\r\r\r\r\r\r\r\r\r\r⠦ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.46 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.71 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 113.08 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 112.48 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 111.86 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 112.10 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 111.95 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 113.29 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 111.77 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 112.82 MiB/554.32 MiB ",,terminal_output +1733,3978119,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠦ Preparing packages... 
(7/17)\r\nnumpy  ------------------------------ 2.46 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.71 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 113.70 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 113.18 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 112.62 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 112.77 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 112.69 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 113.89 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 112.36 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 113.44 MiB/554.32 MiB ",,terminal_output +1734,3978177,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠧ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.48 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.71 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 114.42 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 113.68 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 113.27 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 113.39 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 113.30 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 114.59 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 113.11 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 113.95 MiB/554.32 MiB ",,terminal_output +1735,3978355,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠧ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.51 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.71 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 115.00 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 114.30 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 113.59 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 113.99 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 114.06 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 115.21 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 113.83 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 114.70 MiB/554.32 MiB \r\r\r\r\r\r\r\r\r\r\r⠧ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.51 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.72 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 115.37 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 114.87 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 114.48 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 114.46 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 114.43 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 115.74 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 114.36 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 115.33 MiB/554.32 MiB \r\r\r\r\r\r\r\r\r\r\r⠧ Preparing packages... 
(7/17)\r\nnumpy  ------------------------------ 2.51 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.72 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 116.12 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 115.38 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 114.80 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 115.02 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 115.01 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 116.18 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 114.72 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 115.66 MiB/554.32 MiB \r\r\r\r\r\r\r\r\r\r\r⠇ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.53 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.75 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 116.41 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 115.92 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 115.58 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 115.76 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 115.65 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 116.68 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 115.29 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 116.42 MiB/554.32 MiB ",,terminal_output +1736,3978455,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠇ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.53 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.75 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 117.14 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 116.44 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 116.06 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 116.16 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 116.31 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 117.43 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 116.06 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 116.95 MiB/554.32 MiB \r\r\r\r\r\r\r\r\r\r\r⠇ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.56 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.75 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 117.39 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 117.12 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 116.67 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 116.91 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 116.93 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 117.96 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 116.62 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 117.50 MiB/554.32 MiB ",,terminal_output +1737,3978510,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠇ Preparing packages... 
(7/17)\r\nnumpy  ------------------------------ 2.58 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.75 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 117.42 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 117.84 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 117.26 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 117.47 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 117.50 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 118.60 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 117.19 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 118.05 MiB/554.32 MiB ",,terminal_output +1738,3978567,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠋ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.61 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.75 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 117.45 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 118.20 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 117.72 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 117.95 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 118.01 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 119.07 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 117.75 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 118.59 MiB/554.32 MiB ",,terminal_output +1739,3978670,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠋ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.62 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.75 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 118.12 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 118.69 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 118.14 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 118.34 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 118.39 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 119.70 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 118.31 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 119.23 MiB/554.32 MiB \r\r\r\r\r\r\r\r\r\r\r⠋ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 2.64 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.75 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 118.80 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 119.10 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 119.06 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 119.00 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 119.22 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 120.27 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 118.85 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 119.70 MiB/554.32 MiB ",,terminal_output +1740,3978772,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠋ Preparing packages... 
(7/17)\r\nnumpy  ------------------------------ 3.53 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.75 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 118.81 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 119.40 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 119.48 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 119.64 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 119.81 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 120.93 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 119.45 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 120.39 MiB/554.32 MiB \r\r\r\r\r\r\r\r\r\r\r⠙ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 4.99 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.75 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 118.83 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 119.40 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 119.97 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 120.24 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 120.47 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 121.41 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 120.12 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 121.16 MiB/554.32 MiB ",,terminal_output +1741,3978861,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r\r\r⠙ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 5.65 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.75 MiB/85.75 MiB\r\nnvidia-nvshmem-cu12  ------------------------------ 118.83 MiB/118.83 MiB\r\njax-cuda12-pjrt  ------------------------------ 119.40 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 121.11 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 120.96 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 121.20 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 122.14 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 120.78 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 122.00 MiB/554.32 MiB \r\r\r\r\r\r\r\r\r\r\r⠙ Preparing packages... (7/17)\r\nnumpy  ------------------------------ 5.72 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.75 MiB/85.75 MiB\r\njax-cuda12-pjrt  ------------------------------ 119.40 MiB/119.46 MiB\r\nnvidia-cufft-cu12  ------------------------------ 121.36 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 121.50 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 121.49 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 122.61 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 121.30 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 122.33 MiB/554.32 MiB \r\r\r\r\r\r\r\r\r\r⠙ Preparing packages... 
(7/17)\r\nnumpy  ------------------------------ 5.72 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.75 MiB/85.75 MiB\r\nnvidia-cufft-cu12  ------------------------------ 121.62 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 121.71 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 121.77 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 122.86 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 121.51 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 122.52 MiB/554.32 MiB ",,terminal_output
+1743,3978974,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r\r⠹ Preparing packages... (9/17)\r\nnumpy  ------------------------------ 5.78 MiB/16.02 MiB\r\njaxlib  ------------------------------ 85.75 MiB/85.75 MiB\r\nnvidia-cufft-cu12  ------------------------------ 123.04 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 122.98 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 122.92 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 124.13 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 123.25 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 123.59 MiB/554.32 MiB ",,terminal_output
+1759,3980068,"TERMINAL",0,0,"\r\r\r\r\r\r\r\r⠧ Preparing packages... (10/17)\r\nnumpy  ------------------------------ 5.93 MiB/16.02 MiB\r\nnvidia-cufft-cu12  ------------------------------ 141.90 MiB/191.57 MiB\r\nnvidia-nccl-cu12  ------------------------------ 142.38 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 141.88 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 143.18 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 143.15 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 143.41 MiB/554.32 MiB ",,terminal_output
+1799,3983214,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠴ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.37 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 197.85 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 198.70 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 199.70 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 200.84 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 199.26 MiB/554.32 MiB ",,terminal_output
+1817,3984661,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠸ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.84 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 226.14 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 227.42 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 228.17 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 231.34 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 229.22 MiB/554.32 MiB ",,terminal_output
+1819,3984816,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠼ Preparing packages... 
(11/17)\r\nnumpy  ------------------------------ 7.84 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 228.85 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 230.76 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 230.98 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 234.62 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 232.55 MiB/554.32 MiB ",,terminal_output +1820,3984877,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠼ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.84 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 229.90 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 232.03 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 232.72 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 235.55 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 233.37 MiB/554.32 MiB ",,terminal_output +1821,3984933,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠼ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.84 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 231.23 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 233.70 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 233.45 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 236.62 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 234.39 MiB/554.32 MiB ",,terminal_output +1822,3985086,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠴ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.84 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 232.55 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 234.56 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 234.50 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 237.87 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 235.67 MiB/554.32 MiB \r\r\r\r\r\r\r⠴ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.85 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 233.30 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 235.97 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 235.75 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 238.99 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 236.94 MiB/554.32 MiB \r\r\r\r\r\r\r⠴ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.85 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 234.60 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 236.90 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 236.95 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 240.06 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 237.72 MiB/554.32 MiB ",,terminal_output +1823,3985204,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠴ Preparing packages... 
(11/17)\r\nnumpy  ------------------------------ 7.85 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 235.88 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 238.33 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 237.61 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 241.12 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 239.40 MiB/554.32 MiB \r\r\r\r\r\r\r⠦ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.85 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 236.91 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 239.70 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 239.22 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 242.20 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 240.17 MiB/554.32 MiB \r\r\r\r\r\r\r⠦ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.85 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 238.00 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 240.91 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 240.42 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 243.73 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 241.53 MiB/554.32 MiB ",,terminal_output +1824,3985268,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠦ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.85 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 239.07 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 242.14 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 241.69 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 244.98 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 243.15 MiB/554.32 MiB ",,terminal_output +1825,3985319,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠦ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.87 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 240.59 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 242.98 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 242.92 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 246.13 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 243.98 MiB/554.32 MiB ",,terminal_output +1826,3985420,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠧ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.87 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 241.80 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 244.49 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 244.19 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 247.02 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 245.18 MiB/554.32 MiB \r\r\r\r\r\r\r⠧ Preparing packages... 
(11/17)\r\nnumpy  ------------------------------ 7.87 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 242.59 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 245.70 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 245.00 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 248.23 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 246.32 MiB/554.32 MiB ",,terminal_output +1827,3985521,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠧ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.87 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 243.30 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 246.66 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 246.05 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 249.48 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 247.59 MiB/554.32 MiB \r\r\r\r\r\r\r⠧ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.87 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 244.59 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 247.67 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 247.03 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 250.42 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 248.39 MiB/554.32 MiB ",,terminal_output +1828,3985624,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠇ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.87 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 245.59 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 248.48 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 248.18 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 251.43 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 249.61 MiB/554.32 MiB \r\r\r\r\r\r\r⠇ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.87 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 246.48 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 249.42 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 249.00 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 252.67 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 250.69 MiB/554.32 MiB ",,terminal_output +1829,3985730,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠇ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.89 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 247.20 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 250.58 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 250.08 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 253.62 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 251.98 MiB/554.32 MiB \r\r\r\r\r\r\r⠇ Preparing packages... 
(11/17)\r\nnumpy  ------------------------------ 7.89 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 248.61 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 251.56 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 250.99 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 254.65 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 252.97 MiB/554.32 MiB ",,terminal_output +1830,3985779,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠋ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.89 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 249.31 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 252.42 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 252.03 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 255.75 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 254.01 MiB/554.32 MiB ",,terminal_output +1831,3985923,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠋ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.89 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 249.87 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 252.95 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 252.62 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 256.62 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 254.96 MiB/554.32 MiB \r\r\r\r\r\r\r⠋ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.89 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 250.57 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 253.95 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 253.73 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 257.55 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 255.54 MiB/554.32 MiB \r\r\r\r\r\r\r⠋ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.89 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 251.00 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 254.95 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 254.54 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 258.00 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 255.98 MiB/554.32 MiB ",,terminal_output +1832,3985980,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠙ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.89 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 252.10 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 255.78 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 255.48 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 258.95 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 257.25 MiB/554.32 MiB ",,terminal_output +1833,3986137,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠙ Preparing packages... 
(11/17)\r\nnumpy  ------------------------------ 7.89 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 252.92 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 256.80 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 256.51 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 260.34 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 258.06 MiB/554.32 MiB \r\r\r\r\r\r\r⠙ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.89 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 253.93 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 257.00 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 258.00 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 261.35 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 259.53 MiB/554.32 MiB \r\r\r\r\r\r\r⠙ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.89 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 255.12 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 257.00 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 259.23 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 262.67 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 260.87 MiB/554.32 MiB ",,terminal_output +1834,3986189,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠹ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.89 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 255.85 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 257.36 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 260.12 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 263.28 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 261.54 MiB/554.32 MiB ",,terminal_output +1835,3986242,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠹ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.89 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 256.78 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 258.06 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 261.16 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 264.40 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 262.51 MiB/554.32 MiB ",,terminal_output +1836,3986305,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠹ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.89 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 257.62 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 259.27 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 261.89 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 265.33 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 263.51 MiB/554.32 MiB \r\r\r\r\r\r\r⠹ Preparing packages... 
(11/17)\r\nnumpy  ------------------------------ 7.89 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 258.72 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 260.17 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 262.88 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 266.44 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 264.34 MiB/554.32 MiB ",,terminal_output +1837,3986416,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠸ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.89 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 259.84 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 261.11 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 263.97 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 267.03 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 265.33 MiB/554.32 MiB \r\r\r\r\r\r\r⠸ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.89 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 260.42 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 262.24 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 264.62 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 268.30 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 266.47 MiB/554.32 MiB ",,terminal_output +1838,3986460,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠸ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.89 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 261.69 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 263.14 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 265.62 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 269.14 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 267.52 MiB/554.32 MiB ",,terminal_output +1839,3986563,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠸ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.90 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 262.38 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 264.26 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 266.90 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 270.06 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 268.34 MiB/554.32 MiB \r\r\r\r\r\r\r⠼ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.90 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 263.33 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 265.25 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 267.86 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 271.00 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 269.26 MiB/554.32 MiB ",,terminal_output +1840,3986624,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠼ Preparing packages... 
(11/17)\r\nnumpy  ------------------------------ 7.90 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 264.32 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 266.37 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 268.81 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 272.02 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 270.12 MiB/554.32 MiB ",,terminal_output +1841,3986682,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠼ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.90 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 265.76 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 267.33 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 269.54 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 272.92 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 271.50 MiB/554.32 MiB ",,terminal_output +1842,3986762,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠼ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.90 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 266.77 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 268.60 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 270.50 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 273.77 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 272.35 MiB/554.32 MiB \r\r\r\r\r\r\r⠴ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.90 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 267.42 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 269.48 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 271.65 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 274.98 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 273.43 MiB/554.32 MiB ",,terminal_output +1843,3986812,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠴ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.90 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 268.34 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 270.38 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 272.89 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 276.00 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 274.34 MiB/554.32 MiB ",,terminal_output +1844,3986894,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠴ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.90 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 269.28 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 271.25 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 273.79 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 276.89 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 275.59 MiB/554.32 MiB ",,terminal_output +1845,3986963,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠴ Preparing packages... 
(11/17)\r\nnumpy  ------------------------------ 7.90 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 270.52 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 272.25 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 274.66 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 278.07 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 276.51 MiB/554.32 MiB \r\r\r\r\r\r\r⠦ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.90 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 271.37 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 273.41 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 275.88 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 278.94 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 277.32 MiB/554.32 MiB ",,terminal_output +1846,3987016,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠦ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.92 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 272.53 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 274.31 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 276.75 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 279.87 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 278.33 MiB/554.32 MiB ",,terminal_output +1847,3987122,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠦ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.92 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 273.09 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 275.11 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 277.97 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 280.82 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 279.64 MiB/554.32 MiB \r\r\r\r\r\r\r⠦ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.92 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 274.29 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 276.29 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 278.93 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 281.72 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 280.20 MiB/554.32 MiB ",,terminal_output +1848,3987180,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠧ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.93 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 275.23 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 277.22 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 279.76 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 282.76 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 281.41 MiB/554.32 MiB ",,terminal_output +1849,3987255,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠧ Preparing packages... 
(11/17)\r\nnumpy  ------------------------------ 7.93 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 276.10 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 278.16 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 280.58 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 283.65 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 282.63 MiB/554.32 MiB \r\r\r\r\r\r\r⠧ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.93 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 276.98 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 278.98 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 281.59 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 284.90 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 283.72 MiB/554.32 MiB ",,terminal_output +1850,3987358,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠧ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.93 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 278.04 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 279.97 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 282.72 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 285.77 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 284.58 MiB/554.32 MiB \r\r\r\r\r\r\r⠇ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.93 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 278.92 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 281.21 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 283.52 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 286.70 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 285.57 MiB/554.32 MiB ",,terminal_output +1851,3987426,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠇ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.93 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 279.86 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 282.11 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 284.39 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 287.67 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 286.73 MiB/554.32 MiB ",,terminal_output +1852,3987503,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠇ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 7.93 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 280.96 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 282.96 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 285.48 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 288.62 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 287.64 MiB/554.32 MiB \r\r\r\r\r\r\r⠇ Preparing packages... 
(11/17)\r\nnumpy  ------------------------------ 8.00 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 281.71 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 284.14 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 286.56 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 289.46 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 288.55 MiB/554.32 MiB ",,terminal_output +1853,3987564,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠋ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 8.01 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 282.56 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 285.14 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 287.78 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 290.53 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 289.31 MiB/554.32 MiB ",,terminal_output +1854,3987618,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠋ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 8.01 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 283.81 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 286.42 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 288.75 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 291.29 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 290.33 MiB/554.32 MiB ",,terminal_output +1855,3987802,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠋ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 9.42 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 284.00 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 287.25 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 289.69 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 291.81 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 291.01 MiB/554.32 MiB \r\r\r\r\r\r\r⠋ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 9.85 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 285.19 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 288.23 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 290.25 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 292.75 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 291.89 MiB/554.32 MiB \r\r\r\r\r\r\r⠙ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 11.43 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 285.52 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 288.62 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 290.90 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 293.70 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 292.55 MiB/554.32 MiB \r\r\r\r\r\r\r⠙ Preparing packages... 
(11/17)\r\nnumpy  ------------------------------ 12.37 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 286.17 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 289.52 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 291.85 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 294.47 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 293.50 MiB/554.32 MiB ",,terminal_output +1856,3987966,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠙ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 12.88 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 286.94 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 290.31 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 292.89 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 295.17 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 294.47 MiB/554.32 MiB \r\r\r\r\r\r\r⠙ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 13.90 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 287.38 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 291.02 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 293.73 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 295.62 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 295.19 MiB/554.32 MiB \r\r\r\r\r\r\r⠹ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 14.61 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 288.58 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 291.53 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 294.42 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 296.67 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 295.63 MiB/554.32 MiB ",,terminal_output +1857,3988013,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠹ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 15.24 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 289.00 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 292.58 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 295.22 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 297.20 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 296.67 MiB/554.32 MiB ",,terminal_output +1858,3988120,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠹ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 15.82 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 289.56 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 293.45 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 296.06 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 298.28 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 297.61 MiB/554.32 MiB \r\r\r\r\r\r\r⠹ Preparing packages... 
(11/17)\r\nnumpy  ------------------------------ 15.97 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 290.36 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 294.28 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 297.20 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 299.23 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 298.47 MiB/554.32 MiB ",,terminal_output +1859,3988171,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠸ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 15.98 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 291.23 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 295.36 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 297.77 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 299.98 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 299.78 MiB/554.32 MiB ",,terminal_output +1860,3988357,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠸ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 16.00 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 291.93 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 296.75 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 298.52 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 300.67 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 300.56 MiB/554.32 MiB \r\r\r\r\r\r\r⠸ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 16.00 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 292.99 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 297.61 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 299.48 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 301.48 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 301.97 MiB/554.32 MiB \r\r\r\r\r\r\r⠸ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 16.00 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 293.94 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 298.64 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 300.58 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 302.53 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 302.31 MiB/554.32 MiB \r\r\r\r\r\r\r⠼ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 16.00 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 294.56 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 299.61 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 301.33 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 303.02 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 303.76 MiB/554.32 MiB ",,terminal_output +1861,3988417,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠼ Preparing packages... 
(11/17)\r\nnumpy  ------------------------------ 16.00 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 295.24 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 300.51 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 302.76 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 303.69 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 304.96 MiB/554.32 MiB ",,terminal_output +1862,3988526,"TERMINAL",0,0,"\r\r\r\r\r\r\r⠼ Preparing packages... (11/17)\r\nnumpy  ------------------------------ 16.02 MiB/16.02 MiB\r\nnvidia-nccl-cu12  ------------------------------ 296.00 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 301.81 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 303.69 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 304.53 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 305.68 MiB/554.32 MiB \r\r\r\r\r\r\r⠼ Preparing packages... (11/17)\r\nnvidia-nccl-cu12  ------------------------------ 296.70 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 302.51 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 304.39 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 305.28 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 306.46 MiB/554.32 MiB \r\r\r\r\r\r⠴ Preparing packages... (11/17)\r\nnvidia-nccl-cu12  ------------------------------ 296.70 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 302.94 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 304.39 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 305.73 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 306.92 MiB/554.32 MiB ",,terminal_output +1863,3988579,"TERMINAL",0,0,"\r\r\r\r\r\r⠴ Preparing packages... (11/17)\r\nnvidia-nccl-cu12  ------------------------------ 297.02 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 303.16 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 304.87 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 305.94 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 307.79 MiB/554.32 MiB ",,terminal_output +1864,3988659,"TERMINAL",0,0,"\r\r\r\r\r\r⠴ Preparing packages... (11/17)\r\nnvidia-nccl-cu12  ------------------------------ 298.38 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 303.98 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 305.96 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 307.09 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 308.42 MiB/554.32 MiB \r\r\r\r\r\r⠴ Preparing packages... (11/17)\r\nnvidia-nccl-cu12  ------------------------------ 299.73 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 305.06 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 306.92 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 308.28 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 309.50 MiB/554.32 MiB ",,terminal_output +1865,3988724,"TERMINAL",0,0,"\r\r\r\r\r\r⠴ Preparing packages... 
(11/17)\r\nnvidia-nccl-cu12  ------------------------------ 300.55 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 306.44 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 308.03 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 309.12 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 310.25 MiB/554.32 MiB ",,terminal_output +1866,3988777,"TERMINAL",0,0,"\r\r\r\r\r\r⠦ Preparing packages... (12/17)\r\nnvidia-nccl-cu12  ------------------------------ 300.98 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 306.81 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 308.82 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 309.72 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 311.43 MiB/554.32 MiB ",,terminal_output +1867,3988856,"TERMINAL",0,0,"\r\r\r\r\r\r⠦ Preparing packages... (12/17)\r\nnvidia-nccl-cu12  ------------------------------ 301.23 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 308.29 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 309.52 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 310.15 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 312.12 MiB/554.32 MiB \r\r\r\r\r\r⠦ Preparing packages... (12/17)\r\nnvidia-nccl-cu12  ------------------------------ 302.00 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 309.24 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 310.00 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 311.00 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 313.03 MiB/554.32 MiB ",,terminal_output +1868,3988914,"TERMINAL",0,0,"\r\r\r\r\r\r⠦ Preparing packages... (12/17)\r\nnvidia-nccl-cu12  ------------------------------ 303.12 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 310.12 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 310.69 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 312.02 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 314.05 MiB/554.32 MiB ",,terminal_output +1869,3989019,"TERMINAL",0,0,"\r\r\r\r\r\r⠧ Preparing packages... (12/17)\r\nnvidia-nccl-cu12  ------------------------------ 304.31 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 310.54 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 311.87 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 313.17 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 314.93 MiB/554.32 MiB \r\r\r\r\r\r⠧ Preparing packages... (12/17)\r\nnvidia-nccl-cu12  ------------------------------ 304.97 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 311.79 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 313.00 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 313.59 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 315.76 MiB/554.32 MiB ",,terminal_output +1870,3989079,"TERMINAL",0,0,"\r\r\r\r\r\r⠧ Preparing packages... 
(12/17)\r\nnvidia-nccl-cu12  ------------------------------ 305.63 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 312.36 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 314.31 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 314.79 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 316.81 MiB/554.32 MiB ",,terminal_output +1871,3989137,"TERMINAL",0,0,"\r\r\r\r\r\r⠧ Preparing packages... (12/17)\r\nnvidia-nccl-cu12  ------------------------------ 306.81 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 313.36 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 315.14 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 315.41 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 317.68 MiB/554.32 MiB ",,terminal_output +1872,3989190,"TERMINAL",0,0,"\r\r\r\r\r\r⠇ Preparing packages... (12/17)\r\nnvidia-nccl-cu12  ------------------------------ 307.61 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 314.23 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 316.30 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 316.37 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 318.81 MiB/554.32 MiB ",,terminal_output +1873,3989257,"TERMINAL",0,0,"\r\r\r\r\r\r⠇ Preparing packages... (12/17)\r\nnvidia-nccl-cu12  ------------------------------ 307.61 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 315.42 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 317.51 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 317.75 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 320.05 MiB/554.32 MiB \r\r\r\r\r\r⠇ Preparing packages... (12/17)\r\nnvidia-nccl-cu12  ------------------------------ 307.61 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 316.43 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 318.66 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 319.62 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 320.95 MiB/554.32 MiB ",,terminal_output +1874,3989316,"TERMINAL",0,0,"\r\r\r\r\r\r⠇ Preparing packages... (12/17)\r\nnvidia-nccl-cu12  ------------------------------ 307.61 MiB/307.61 MiB\r\nnvidia-cusolver-cu12  ------------------------------ 317.34 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 320.60 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 320.72 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 322.78 MiB/554.32 MiB ",,terminal_output +1875,3989385,"TERMINAL",0,0,"\r\r\r\r\r\r⠇ Preparing packages... (12/17)\r\nnvidia-cusolver-cu12  ------------------------------ 318.57 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 320.99 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 321.00 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 323.12 MiB/554.32 MiB \r\r\r\r\r⠋ Preparing packages... (12/17)\r\nnvidia-cusolver-cu12  ------------------------------ 318.92 MiB/322.45 MiB\r\nnvidia-cusparse-cu12  ------------------------------ 321.86 MiB/349.49 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 321.66 MiB/545.17 MiB\r\nnvidia-cublas-cu12  ------------------------------ 324.33 MiB/554.32 MiB ",,terminal_output +1876,3989438,"TERMINAL",0,0,"\r\r\r\r\r⠋ Preparing packages... 
(12/17)\r\n… ~45 repeated uv download-progress frames (spinner "Preparing packages...", counter (12/17)→(17/17)) tracking nvidia-cusolver-cu12 to 322.45 MiB, nvidia-cusparse-cu12 to 349.49 MiB, nvidia-cudnn-cu12 to 545.17 MiB, and nvidia-cublas-cu12 to 554.32 MiB …\rPrepared 17 packages in 28.65s\r\n",,terminal_output +1926,4012867,"TERMINAL",0,0,"Uninstalled 17 packages in 19.83s\r\n… wheel-install progress bars [0/17]→[17/17] …\rInstalled 17 packages in 568ms\r\n - jax==0.6.1\r\n + jax==0.6.2\r\n - jax-cuda12-pjrt==0.6.1\r\n + jax-cuda12-pjrt==0.6.2\r\n - jax-cuda12-plugin==0.6.1\r\n + jax-cuda12-plugin==0.6.2\r\n - jaxlib==0.6.1\r\n + jaxlib==0.6.2\r\n - numpy==1.26.4\r\n + numpy==2.2.6\r\n - nvidia-cublas-cu12==12.8.4.1\r\n + nvidia-cublas-cu12==12.9.1.4\r\n - nvidia-cuda-cupti-cu12==12.9.19\r\n + nvidia-cuda-cupti-cu12==12.9.79\r\n - nvidia-cuda-nvcc-cu12==12.9.41\r\n + nvidia-cuda-nvcc-cu12==12.9.86\r\n - nvidia-cuda-nvrtc-cu12==12.6.77\r\n + nvidia-cuda-nvrtc-cu12==12.9.86\r\n - nvidia-cuda-runtime-cu12==12.9.37\r\n + nvidia-cuda-runtime-cu12==12.9.79\r\n - nvidia-cudnn-cu12==9.10.1.4\r\n + nvidia-cudnn-cu12==9.11.0.98\r\n - nvidia-cufft-cu12==11.4.0.6\r\n + nvidia-cufft-cu12==11.4.1.4\r\n - nvidia-cusolver-cu12==11.7.4.40\r\n + nvidia-cusolver-cu12==11.7.5.82\r\n - nvidia-cusparse-cu12==12.5.9.5\r\n + nvidia-cusparse-cu12==12.5.10.65\r\n - nvidia-nccl-cu12==2.26.5\r\n + nvidia-nccl-cu12==2.27.6\r\n - nvidia-nvjitlink-cu12==12.9.41\r\n + nvidia-nvjitlink-cu12==12.9.86\r\n - nvidia-nvshmem-cu12==3.2.5\r\n + 
nvidia-nvshmem-cu12==3.3.9\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +1930,4016007,"TERMINAL",0,0,"salloc",,terminal_focus +1931,4017957,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",0,0,"\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""debug-mihir""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\n\nenv | grep SLURM\n\nXLA_FLAGS=--xla_gpu_autotune_level=0 srun python train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --grad_clip_threshold=10 \\n --max_lr=1e-4 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-debug-run-$slurm_job_id \\n --tags dynamics debug \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir\n ",shellscript,tab +1932,4030145,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",604,0,"",shellscript,selection_mouse +1933,4030776,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1084,0,"",shellscript,selection_mouse +1934,4030776,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1083,0,"",shellscript,selection_command +1935,4031366,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1084,0,"",shellscript,selection_mouse +1936,4031368,"slurm/jobs/mihir/horeka/yolo-runs/tester.sh",1083,0,"",shellscript,selection_command +1937,4044212,"TERMINAL",0,0,"bash",,terminal_focus +1938,4059649,"TERMINAL",0,0,"git checkout main",,terminal_command +1939,4059660,"TERMINAL",0,0,"]633;E;2025-07-16 12:10:56 git checkout main;dea9d5fc-91fd-447c-886d-4b0240ae057d]633;CSwitched to branch 'main'\r\nYour branch is up to date with 'origin/main'.\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +1940,4060722,"",0,0,"Switched from branch 'grad-norm-log-and-clip' to 'main'",,git_branch_checkout +1941,4063264,"TERMINAL",0,0,"git pull",,terminal_command +1942,4063294,"TERMINAL",0,0,"]633;E;2025-07-16 12:10:59 git pull;dea9d5fc-91fd-447c-886d-4b0240ae057d]633;C",,terminal_output +1943,4064726,"TERMINAL",0,0,"Already up to date.\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +1944,4177953,"TERMINAL",0,0,"git checkout -b ""causal-transformer-dynamics-model""",,terminal_command +1945,4177976,"TERMINAL",0,0,"]633;E;2025-07-16 12:12:54 git checkout -b ""causal-transformer-dynamics-model"";dea9d5fc-91fd-447c-886d-4b0240ae057d]633;CSwitched to a new branch 'causal-transformer-dynamics-model'\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output +1946,4180754,"",0,0,"Switched from branch 'main' to 'causal-transformer-dynamics-model'",,git_branch_checkout +1947,4183632,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom genie import 
Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n warmup_steps: int = 5000\n lr_schedule : str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return 
ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=args.dtype\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(args.lr_schedule, \n args.init_lr, \n args.max_lr, \n args.decay_end, \n args.num_steps, \n args.warmup_steps, \n args.wsd_decay_steps)\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4, mu_dtype=args.dtype)\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- 
Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n # Restore full dynamics model\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n train_state = restore_genie_components(\n train_state, replicated_sharding, grain_iterator, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n 
recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +1948,4187130,"train_dynamics.py",5231,0,"",python,selection_mouse +1949,4187371,"genie.py",0,0,"from typing import Dict, Any\n\nimport optax\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nfrom flax.training.train_state import TrainState\nimport orbax.checkpoint as ocp\n\nfrom models.dynamics import DynamicsMaskGIT\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\nimport os\nimport grain\n\n\nclass Genie(nn.Module):\n """"""Genie model""""""\n\n # --- Tokenizer ---\n in_dim: int\n tokenizer_dim: int\n latent_patch_dim: int\n num_patch_latents: int\n patch_size: int\n tokenizer_num_blocks: int\n tokenizer_num_heads: int\n # --- LAM ---\n lam_dim: int\n latent_action_dim: int\n num_latent_actions: int\n lam_patch_size: int\n lam_num_blocks: int\n lam_num_heads: int\n lam_co_train: bool\n # --- Dynamics ---\n dyna_dim: int\n dyna_num_blocks: int\n dyna_num_heads: int\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n dropout: float = 0.0\n mask_limit: float = 0.0\n\n def setup(self):\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n num_latents=self.num_patch_latents,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\n lam_outputs = self.lam.vq_encode(batch[""videos""], training=False)\n latent_actions = jax.lax.cond(\n self.lam_co_train,\n lambda: lam_outputs[""z_q""],\n lambda: jax.lax.stop_gradient(lam_outputs[""z_q""])\n )\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(tokenizer_outputs[""indices""]),\n latent_actions=latent_actions,\n )\n outputs[""mask_rng""] = batch[""mask_rng""]\n dyna_outputs = self.dynamics(outputs, training)\n outputs.update(dyna_outputs)\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )\n outputs[""lam_indices""] = lam_outputs[""indices""]\n return outputs\n\n @nn.compact\n def sample(\n self,\n batch: Dict[str, Any],\n 
seq_len: int,\n steps: int = 25,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> Any:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by \n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size \n T: number of input (conditioning) frames \n N: patches per frame \n S: sequence length \n A: action space \n D: model latent dimension\n """"""\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""] # (B, T, N)\n B, T, N = token_idxs.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs.dtype)\n token_idxs = jnp.concatenate([token_idxs, pad], axis=1) # (B, S, N)\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n \n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n\n def generation_step_fn(carry, step_t):\n rng, current_token_idxs = carry\n rng, step_rng = jax.random.split(rng)\n\n # Mask current and future frames (i.e., t >= step_t)\n mask = jnp.arange(seq_len) >= step_t # (S,)\n mask = jnp.broadcast_to(mask[None, :, None], (B, seq_len, N)) # (B, S, N)\n mask = mask.astype(bool)\n masked_token_idxs = current_token_idxs * ~mask\n\n # --- Initialize and run MaskGIT loop ---\n init_carry_maskgit = (\n step_rng,\n masked_token_idxs,\n mask,\n action_tokens,\n )\n final_carry_maskgit, _ = loop_fn(init_carry_maskgit, jnp.arange(steps))\n updated_token_idxs = final_carry_maskgit[1]\n new_carry = (rng, updated_token_idxs)\n return new_carry, None\n\n # --- Run the autoregressive generation using scan ---\n initial_carry = (batch[""rng""], token_idxs)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry, _ = jax.lax.scan(\n generation_step_fn,\n initial_carry,\n timesteps_to_scan\n )\n final_token_idxs = final_carry[1]\n\n # --- Decode all tokens at once at the end ---\n final_frames = self.tokenizer.decode(\n final_token_idxs,\n video_hw=batch[""videos""].shape[2:4],\n )\n return final_frames\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, token_idxs, mask, action_tokens = carry\n step = x\n N = token_idxs.shape[2]\n\n # --- Construct + encode video ---\n vid_embed = self.dynamics.patch_embed(token_idxs) # (B, S, N, D)\n mask_token = self.dynamics.mask_token # (1, 1, 1, D,)\n mask_expanded = mask[..., None] # (B, S, N, 1) \n vid_embed = jnp.where(mask_expanded, mask_token, 
vid_embed)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed) / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jax.random.categorical(_rng, final_logits)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n token_idxs = jnp.where(mask, sampled_token_idxs, token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, token_idxs, new_mask, action_tokens)\n return new_carry, None\n\ndef restore_genie_components(\n train_state: TrainState,\n sharding: jax.sharding.NamedSharding,\n grain_iterator: grain.DataLoaderIterator,\n inputs: Dict[str, jax.Array],\n rng: jax.Array,\n args,\n):\n """"""Restore pre-trained Genie components""""""\n rng, _rng = jax.random.split(rng)\n\n # dummy values since we only use tx to initialize the dummy train states\n dummy_tx = optax.adamw(\n learning_rate=optax.constant_schedule(args.max_lr),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n )\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\n handler_registry.add('dataloader_state', grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler)\n \n\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n tokenizer_checkpoint_manager = ocp.CheckpointManager(\n directory=args.tokenizer_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n num_blocks=args.tokenizer_num_blocks,\n num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n )\n tokenizer_init_params = dummy_tokenizer.init(_rng, inputs)\n dummy_tokenizer_train_state = TrainState.create(\n apply_fn=dummy_tokenizer.apply, params=tokenizer_init_params, tx=dummy_tx\n )\n abstract_sharded_tokenizer_state = _create_abstract_sharded_pytree(\n dummy_tokenizer_train_state, sharding\n )\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n step=tokenizer_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_sharded_tokenizer_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )[""model_state""]\n restored_tokenizer_params = restored_tokenizer.params[""params""]\n train_state.params[""params""][""tokenizer""].update(restored_tokenizer_params)\n tokenizer_checkpoint_manager.close()\n\n if 
args.lam_checkpoint:\n lam_checkpoint_manager = ocp.CheckpointManager(\n directory=args.lam_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n )\n lam_init_params = dummy_lam.init(_rng, inputs)\n dummy_lam_train_state = TrainState.create(\n apply_fn=dummy_lam.apply, params=lam_init_params, tx=dummy_tx\n )\n abstract_sharded_lam_state = _create_abstract_sharded_pytree(\n dummy_lam_train_state, sharding\n )\n restored_lam = lam_checkpoint_manager.restore(\n step=lam_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_sharded_lam_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )[""model_state""]\n restored_lam_params = restored_lam.params[""params""]\n # Genie does not initialize all LAM modules, thus we omit those extra modules during restoration\n # (f.srambical) FIXME: Currently, this is a small HBM memory crunch since the LAM's decoder is loaded into HBM and immediately dicarded.\n # A workaround would be to restore to host memory first, and only move the weights to HBM after pruning the decoder\n restored_lam_params = {\n k: v\n for k, v in restored_lam_params.items()\n if k in train_state.params[""params""][""lam""]\n }\n train_state.params[""params""][""lam""].update(restored_lam_params)\n lam_checkpoint_manager.close()\n\n return train_state\n\ndef _create_abstract_sharded_pytree(pytree_template, sharding_spec):\n """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, ""shape"") and hasattr(leaf_template, ""dtype""):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)",python,tab +1950,4190480,"genie.py",1967,0,"",python,selection_mouse +1951,4191334,"genie.py",1952,0,"",python,selection_mouse +1952,4191470,"genie.py",1950,8,"dynamics",python,selection_mouse +1953,4192123,"genie.py",1970,0,"",python,selection_mouse +1954,4194523,"genie.py",1968,0,"",python,selection_mouse +1955,4194739,"models/dynamics.py",0,0,"from typing import Dict, Any\n\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\n\nfrom utils.nn import STTransformer\n\n\nclass DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = 
self.patch_embed(batch[""video_tokens""])\n if training:\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:\n mask = None\n\n # --- Predict transition ---\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n logits = self.dynamics(vid_embed)\n return dict(token_logits=logits, mask=mask)\n",python,tab +1956,4225105,"models/dynamics.py",1799,0,"",python,selection_mouse +1957,4225106,"models/dynamics.py",1798,0,"",python,selection_command +1958,4226007,"models/dynamics.py",1799,0,"",python,selection_mouse +1959,4226010,"models/dynamics.py",1798,0,"",python,selection_command +1960,4226792,"models/dynamics.py",1851,0,"",python,selection_mouse +1961,4226804,"models/dynamics.py",1850,0,"",python,selection_command +1962,4227508,"models/dynamics.py",1851,0,"\n ",python,content +1963,4227624,"models/dynamics.py",1860,0,"\n ",python,content +1964,4227624,"models/dynamics.py",1852,8,"",python,content +1965,4227772,"models/dynamics.py",1861,0,"\n ",python,content +1966,4227773,"models/dynamics.py",1853,8,"",python,content +1967,4228102,"models/dynamics.py",1858,4,"",python,content +1968,4228375,"models/dynamics.py",1854,4,"",python,content +1969,4228542,"models/dynamics.py",1853,1,"",python,content +1970,4435583,"utils/nn.py",0,0,"import math\nfrom typing import Dict, Tuple\n\nfrom flax import linen as nn\nimport jax\nimport jax.numpy as jnp\n\n\nclass PositionalEncoding(nn.Module):\n """"""https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/JAX/tutorial6/Transformers_and_MHAttention.html""""""\n\n d_model: int # Hidden dimensionality of the input.\n max_len: int = 5000 # Maximum length of a sequence to expect.\n\n def setup(self):\n # Create matrix of [SeqLen, HiddenDim] representing the positional encoding for max_len inputs\n self.pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n self.pe = self.pe.at[:, 0::2].set(jnp.sin(position * div_term))\n self.pe = self.pe.at[:, 1::2].set(jnp.cos(position * div_term))\n\n def __call__(self, x):\n x = x + self.pe[: x.shape[2]]\n return x\n\n\nclass STBlock(nn.Module):\n dim: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n\n @nn.remat\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n x = x + z\n\n # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n causal_mask = jnp.tri(z.shape[-2])\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z, mask=causal_mask)\n x = x + z\n x = x.swapaxes(1, 2)\n\n # --- Feedforward ---\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n 
dtype=self.dtype,\n )(x)\n # FIXME (f.srambical): Here, the attention hidden dimension is the same as the FFN's. Usually, FFN hidden dimension is 4x model_dim\n z = nn.Dense(\n self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.gelu(z)\n x = x + z\n\n return x\n\n\nclass STTransformer(nn.Module):\n model_dim: int\n out_dim: int\n num_blocks: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n x = nn.Sequential(\n [\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.Dense(self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n ]\n )(x)\n for _ in range(self.num_blocks):\n x = STBlock(\n dim=self.model_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n x = nn.Dense(\n self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n return x # (B, T, E)\n\n\ndef normalize(x):\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nn.Module):\n latent_dim: int\n num_latents: int\n dropout: float\n\n def setup(self):\n self.codebook = normalize(\n self.param(\n ""codebook"",\n nn.initializers.lecun_uniform(),\n (self.num_latents, self.latent_dim),\n )\n )\n self.drop = nn.Dropout(self.dropout, deterministic=False)\n\n def __call__(\n self, x: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x = normalize(x)\n codebook = normalize(self.codebook)\n distance = -jnp.matmul(x, codebook.T)\n if training:\n dropout_key = self.make_rng(""dropout"")\n distance = self.drop(distance, rng=dropout_key)\n\n # --- Get indices and embeddings ---\n indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]\n\n # --- Straight through estimator ---\n z_q = x + jax.lax.stop_gradient(z - x)\n return z_q, z, x, indices\n\n def get_codes(self, indices: jax.Array):\n return self.codebook[indices]\n",python,tab +1971,4544126,"utils/nn.py",1003,0,"",python,selection_mouse +1972,4545036,"utils/nn.py",1003,0,"\n",python,content +1973,4545393,"utils/nn.py",1003,0,"",python,selection_command +1974,4546055,"utils/nn.py",1003,0,"class CausalTransformerBlock(nn.Module):\n model_dim: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n # LayerNorm + Causal Self-Attention\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n seq_len = z.shape[1]\n # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z, mask=causal_mask)\n x = x + z\n\n # Feedforward\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n z = nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.gelu(z)\n x = x + z\n\n return x\n\nclass CausalTransformer(nn.Module):\n model_dim: int\n out_dim: int\n num_blocks: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n # Input projection and normalization\n x = nn.Sequential(\n [\n 
nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.Dense(self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n ]\n )(x)\n # Causal transformer blocks\n for _ in range(self.num_blocks):\n x = CausalTransformerBlock(\n model_dim=self.model_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n # Output projection\n x = nn.Dense(\n self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n return x # (B, T, E)\n",python,content +1975,4558485,"models/dynamics.py",0,0,"",python,tab +1976,4560129,"models/dynamics.py",1853,0,"class DynamicsAutoregressive(nn.Module):\n """"""Autoregressive (causal) dynamics model""""""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n\n def setup(self):\n # Use a causal transformer instead of STTransformer\n from utils.nn import CausalTransformer # Make sure this exists or implement it\n self.dynamics = CausalTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.action_up = nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # No random masking, just embed the tokens\n vid_embed = self.patch_embed(batch[""video_tokens""])\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n logits = self.dynamics(vid_embed, causal=True) # Pass causal=True if needed\n return dict(token_logits=logits, mask=None)",python,content +1977,4562864,"models/dynamics.py",1820,0,"",python,selection_mouse +1978,4563323,"models/dynamics.py",1869,0,"",python,selection_mouse +1979,4564005,"models/dynamics.py",1852,0,"",python,selection_command +1980,4564409,"models/dynamics.py",1852,0,"\n",python,content +1981,4748168,"models/dynamics.py",1852,0,"",python,selection_mouse +1982,4751274,"models/dynamics.py",2145,0,"",python,selection_mouse +1983,4751807,"models/dynamics.py",2206,0,"",python,selection_mouse +1984,4752974,"models/dynamics.py",2205,0,"",python,selection_command +1985,4753635,"models/dynamics.py",2173,88,"",python,content +1986,4753667,"models/dynamics.py",2181,0,"",python,selection_command +1987,4754656,"models/dynamics.py",3107,0,"",python,selection_command +1988,4755326,"models/dynamics.py",0,0,"",python,selection_command +1989,4756736,"models/dynamics.py",124,0,"",python,selection_mouse +1990,4756744,"models/dynamics.py",123,0,"",python,selection_command +1991,4757033,"models/dynamics.py",124,0,"",python,selection_command +1992,4757253,"models/dynamics.py",124,0,",",python,content +1993,4757255,"models/dynamics.py",125,0,"",python,selection_keyboard +1994,4757399,"models/dynamics.py",125,0," ",python,content +1995,4757400,"models/dynamics.py",126,0,"",python,selection_keyboard +1996,4757899,"models/dynamics.py",126,0,"C",python,content +1997,4757900,"models/dynamics.py",127,0,"",python,selection_keyboard +1998,4758086,"models/dynamics.py",127,0,"a",python,content +1999,4758087,"models/dynamics.py",128,0,"",python,selection_keyboard +2000,4758229,"models/dynamics.py",128,0,"u",python,content 
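The two `content` records above add a `CausalTransformer` stack to utils/nn.py and a `DynamicsAutoregressive` head to models/dynamics.py. Two details in the captured code are worth flagging: `DynamicsAutoregressive.__call__` invokes `self.dynamics(vid_embed, causal=True)`, but the captured `CausalTransformer.__call__` accepts only `x`, so that keyword would raise a `TypeError` as recorded; and the dynamics input is a 4-D `(B, S, N, D)` patch grid, where Flax attention runs over the second-to-last axis (patches), which is why the existing `STBlock` swaps axes before its temporal pass. Below is a minimal self-contained sketch of the intended block over a plain `(batch, time, dim)` input, with the mask built internally; `CausalBlock` is a hypothetical name, this assumes current Flax, and it is not the repository's code.

```python
import jax
import jax.numpy as jnp
import flax.linen as nn


class CausalBlock(nn.Module):
    """Pre-norm causal self-attention block over a (batch, time, dim) input."""
    model_dim: int
    num_heads: int

    @nn.compact
    def __call__(self, x: jax.Array) -> jax.Array:
        seq_len = x.shape[1]
        # Lower-triangular boolean mask: timestep t attends to steps <= t.
        causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))
        z = nn.LayerNorm()(x)
        z = nn.MultiHeadAttention(
            num_heads=self.num_heads, qkv_features=self.model_dim
        )(z, mask=causal_mask)
        x = x + z
        # Feedforward with hidden dim == model_dim, matching the captured block
        # (the recording itself carries a FIXME noting 4x model_dim is more usual).
        z = nn.LayerNorm()(x)
        x = x + nn.gelu(nn.Dense(self.model_dim)(z))
        return x


# Smoke test.
block = CausalBlock(model_dim=64, num_heads=4)
params = block.init(jax.random.PRNGKey(0), jnp.zeros((2, 16, 64)))
y = block.apply(params, jnp.ones((2, 16, 64)))
assert y.shape == (2, 16, 64)
```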
+2001,4758230,"models/dynamics.py",129,0,"",python,selection_keyboard +2002,4758583,"models/dynamics.py",126,3,"CausalTransformer",python,content +2003,4759188,"models/dynamics.py",142,0,"",python,selection_command +2004,4772336,"models/dynamics.py",2705,0,"",python,selection_mouse +2005,4773230,"models/dynamics.py",2794,0,"",python,selection_mouse +2006,4773231,"models/dynamics.py",2793,0,"",python,selection_command +2007,4773815,"models/dynamics.py",2704,0,"",python,selection_mouse +2008,4773992,"models/dynamics.py",2702,8,"training",python,selection_mouse +2009,4774249,"models/dynamics.py",2702,9,"training:",python,selection_mouse +2010,4774271,"models/dynamics.py",2702,10,"training: ",python,selection_mouse +2011,4774286,"models/dynamics.py",2702,14,"training: bool",python,selection_mouse +2012,4774498,"models/dynamics.py",2702,15,"training: bool ",python,selection_mouse +2013,4774561,"models/dynamics.py",2702,16,"training: bool =",python,selection_mouse +2014,4774616,"models/dynamics.py",2702,17,"training: bool = ",python,selection_mouse +2015,4774673,"models/dynamics.py",2702,21,"training: bool = True",python,selection_mouse +2016,4775115,"models/dynamics.py",2720,0,"",python,selection_mouse +2017,4775116,"models/dynamics.py",2719,4,"True",python,selection_mouse +2018,4775299,"models/dynamics.py",2719,75,"True) -> Dict[str, Any]:\n # No random masking, just embed the tokens",python,selection_mouse +2019,4776062,"models/dynamics.py",2705,0,"",python,selection_mouse +2020,4776197,"models/dynamics.py",2702,8,"training",python,selection_mouse +2021,4776470,"models/dynamics.py",2702,10,"training: ",python,selection_mouse +2022,4776487,"models/dynamics.py",2702,14,"training: bool",python,selection_mouse +2023,4776540,"models/dynamics.py",2702,15,"training: bool ",python,selection_mouse +2024,4776599,"models/dynamics.py",2702,16,"training: bool =",python,selection_mouse +2025,4776986,"models/dynamics.py",2718,0,"",python,selection_mouse +2026,4786917,"models/dynamics.py",2826,0,"",python,selection_mouse +2027,4787449,"models/dynamics.py",2767,0,"",python,selection_mouse +2028,4829828,"models/dynamics.py",2224,0,"",python,selection_mouse +2029,4830094,"utils/nn.py",0,0,"",python,tab +2030,4838090,"utils/nn.py",3002,0,"",python,selection_mouse +2031,4841298,"utils/nn.py",1465,0,"",python,selection_mouse +2032,4841420,"utils/nn.py",1457,11,"causal_mask",python,selection_mouse +2033,4842416,"utils/nn.py",1432,0,"",python,selection_mouse +2034,4842544,"utils/nn.py",1431,7,"seq_len",python,selection_mouse +2035,4843469,"utils/nn.py",1480,0,"",python,selection_mouse +2036,4843588,"utils/nn.py",1480,3,"jnp",python,selection_mouse +2037,4843846,"utils/nn.py",1480,8,"jnp.ones",python,selection_mouse +2038,4844262,"utils/nn.py",1486,0,"",python,selection_mouse +2039,4844263,"utils/nn.py",1484,4,"ones",python,selection_mouse +2040,4844483,"utils/nn.py",1480,8,"jnp.ones",python,selection_mouse +2041,4844547,"utils/nn.py",1479,9,"(jnp.ones",python,selection_mouse +2042,4844603,"utils/nn.py",1475,13,"tril(jnp.ones",python,selection_mouse +2043,4844797,"utils/nn.py",1474,14,".tril(jnp.ones",python,selection_mouse +2044,4844814,"utils/nn.py",1471,17,"jnp.tril(jnp.ones",python,selection_mouse +2045,4845222,"utils/nn.py",1473,0,"",python,selection_mouse +2046,4845223,"utils/nn.py",1471,3,"jnp",python,selection_mouse +2047,4845423,"utils/nn.py",1471,8,"jnp.tril",python,selection_mouse +2048,4845440,"utils/nn.py",1471,12,"jnp.tril(jnp",python,selection_mouse 
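Records 2030 onward show the author repeatedly sweeping the cursor over the new mask line in utils/nn.py. One nit visible in the captured text: the comment claims the mask is `(1, 1, seq_len, seq_len)`, but `jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))` is 2-D and relies on broadcasting against the attention weights' `(..., heads, query, key)` shape. A quick standalone check (not from the recording):

```python
import jax.numpy as jnp

seq_len = 4
causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))
print(causal_mask.shape)  # (4, 4) -- 2-D, not (1, 1, 4, 4)
print(causal_mask.astype(int))
# [[1 0 0 0]
#  [1 1 0 0]
#  [1 1 1 0]
#  [1 1 1 1]]
# An explicit 4-D (batch, heads, query, key) mask would be causal_mask[None, None].
```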
+2049,4845458,"utils/nn.py",1471,13,"jnp.tril(jnp.",python,selection_mouse +2050,4845512,"utils/nn.py",1471,26,"jnp.tril(jnp.ones((seq_len",python,selection_mouse +2051,4845529,"utils/nn.py",1471,35,"jnp.tril(jnp.ones((seq_len, seq_len",python,selection_mouse +2052,4845598,"utils/nn.py",1471,85,"jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))\n z = nn.MultiHeadAttention(",python,selection_mouse +2053,4846034,"utils/nn.py",1471,50,"jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2054,4846269,"utils/nn.py",1521,0,"",python,selection_mouse +2055,4846533,"utils/nn.py",1521,35,"\n z = nn.MultiHeadAttention(",python,selection_mouse +2056,4846586,"utils/nn.py",1499,22,"seq_len), dtype=bool))",python,selection_mouse +2057,4846597,"utils/nn.py",1490,31,"seq_len, seq_len), dtype=bool))",python,selection_mouse +2058,4846649,"utils/nn.py",1439,82," seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2059,4846650,"utils/nn.py",1430,91," seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2060,4846650,"utils/nn.py",1398,123,"1]\n # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2061,4846664,"utils/nn.py",1392,129,"shape[1]\n # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2062,4846716,"utils/nn.py",1390,131,"z.shape[1]\n # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2063,4846717,"utils/nn.py",1388,133,"= z.shape[1]\n # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2064,4846731,"utils/nn.py",1380,141,"seq_len = z.shape[1]\n # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2065,4846831,"utils/nn.py",1379,142," seq_len = z.shape[1]\n # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2066,4846848,"utils/nn.py",1378,143," seq_len = z.shape[1]\n # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2067,4846903,"utils/nn.py",1377,144," seq_len = z.shape[1]\n # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2068,4846903,"utils/nn.py",1376,145," seq_len = z.shape[1]\n # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2069,4846913,"utils/nn.py",1375,146," seq_len = z.shape[1]\n # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2070,4846972,"utils/nn.py",1374,147," seq_len = z.shape[1]\n # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2071,4847026,"utils/nn.py",1373,148," seq_len = z.shape[1]\n # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2072,4847197,"utils/nn.py",1402,119," # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), 
dtype=bool))",python,selection_mouse +2073,4848107,"utils/nn.py",1402,0,"",python,selection_mouse +2074,4848108,"utils/nn.py",1401,8," ",python,selection_mouse +2075,4848270,"utils/nn.py",1401,48," # Causal mask: (1, 1, seq_len, seq_len)\n",python,selection_mouse +2076,4848534,"utils/nn.py",1401,121," # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))\n",python,selection_mouse +2077,4848593,"utils/nn.py",1401,156," # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))\n z = nn.MultiHeadAttention(\n",python,selection_mouse +2078,4849030,"utils/nn.py",1401,121," # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))\n",python,selection_mouse +2079,4849364,"utils/nn.py",1521,0,"",python,selection_mouse +2080,4849768,"utils/nn.py",1521,35,"\n z = nn.MultiHeadAttention(",python,selection_mouse +2081,4849784,"utils/nn.py",1499,22,"seq_len), dtype=bool))",python,selection_mouse +2082,4849839,"utils/nn.py",1489,32,"(seq_len, seq_len), dtype=bool))",python,selection_mouse +2083,4849840,"utils/nn.py",1429,92,", seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2084,4849840,"utils/nn.py",1423,98," (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2085,4849849,"utils/nn.py",1418,103,"mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2086,4849865,"utils/nn.py",1411,110,"Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2087,4849919,"utils/nn.py",1410,111," Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2088,4850034,"utils/nn.py",1409,112,"# Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2089,4850112,"utils/nn.py",1408,113," # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2090,4850132,"utils/nn.py",1407,114," # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2091,4850147,"utils/nn.py",1406,115," # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2092,4850203,"utils/nn.py",1405,116," # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2093,4850203,"utils/nn.py",1404,117," # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2094,4850266,"utils/nn.py",1403,118," # Causal mask: (1, 1, seq_len, seq_len)\n causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))",python,selection_mouse +2095,4850607,"utils/nn.py",1403,0,"",python,selection_mouse diff --git a/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-ed344163-9eb9-4ff1-a884-39337a6b19681756972582944-2025_09_04-09.56.57.291/source.csv b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-ed344163-9eb9-4ff1-a884-39337a6b19681756972582944-2025_09_04-09.56.57.291/source.csv new file mode 100644 index 
0000000000000000000000000000000000000000..f1fe156ffd0715d448140be8e9a55f52f8204949 --- /dev/null +++ b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-ed344163-9eb9-4ff1-a884-39337a6b19681756972582944-2025_09_04-09.56.57.291/source.csv @@ -0,0 +1,9682 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +2,849,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"9:56:57 AM [info] Activating crowd-code\n9:56:57 AM [info] Recording started\n9:56:57 AM [info] Initializing git provider using file system watchers...\n9:56:57 AM [info] Git repository found\n9:56:57 AM [info] Git provider initialized successfully\n",Log,tab +3,999,"extension-output-pdoom-org.crowd-code-#1-crowd-code",245,0,"9:56:58 AM [info] Initial git state: [object Object]\n",Log,content +4,6367,"TERMINAL",0,0,"undefined[tum_cte0515@hkn1990 jasmine]$ source .venv/bin/activate",,terminal_command +5,6410,"TERMINAL",0,0,"]633;E;2025-09-04 09:57:03 source .venv/bin/activate;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +6,8493,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n\n\n# Log the sbatch script\ncat $0\nsource .venv/bin/activate\n\npython generate_dataset.py \\n --num_episodes 10000 \\n --output_dir /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m \\n --min_episode_length 1000",shellscript,tab +7,10615,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",549,0,"",shellscript,selection_mouse +8,10815,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",549,2,"/h",shellscript,selection_mouse +9,10816,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",549,6,"/hkfs/",shellscript,selection_mouse +10,10817,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",549,16,"/hkfs/work/works",shellscript,selection_mouse +11,10818,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",549,29,"/hkfs/work/workspace/scratch/",shellscript,selection_mouse +12,10819,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",531,18,"\n --output_dir ",shellscript,selection_mouse +13,10896,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",549,82,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episo",shellscript,selection_mouse +14,10925,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",549,84,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episode",shellscript,selection_mouse +15,10947,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",549,87,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_1",shellscript,selection_mouse 
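The utils/nn.py selections in the preceding session circle the causal-mask construction used by the CausalTransformer referenced from models/dynamics.py. A minimal standalone sketch of that pattern, assuming jax and the (1, 1, seq_len, seq_len) broadcast shape noted in the recorded comment; the mask construction matches the recorded line, while the jnp.where application is an illustration, not the recorded attention implementation:

    import jax.numpy as jnp

    def causal_mask(seq_len: int) -> jnp.ndarray:
        # Lower-triangular boolean mask: position i may attend to j <= i.
        mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))
        # Broadcast to (1, 1, seq_len, seq_len) so it applies across batch and heads.
        return mask[None, None, :, :]

    def masked_logits(logits: jnp.ndarray) -> jnp.ndarray:
        # logits: (batch, heads, seq_len, seq_len) attention scores.
        m = causal_mask(logits.shape[-1])
        return jnp.where(m, logits, jnp.finfo(logits.dtype).min)

The same boolean mask can instead be passed directly to a flax attention module, which is what the recorded file does with nn.MultiHeadAttention.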
+16,10978,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",549,88,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10",shellscript,selection_mouse +17,11004,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",549,89,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m",shellscript,selection_mouse +18,11035,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",549,91,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m \",shellscript,selection_mouse +19,11082,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",549,121,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m \\n --min_episode_length 1000",shellscript,selection_mouse +20,11537,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",549,89,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m",shellscript,selection_mouse +21,17348,"TERMINAL",0,0,"queue",,terminal_command +22,17419,"TERMINAL",0,0,"]633;E;2025-09-04 09:57:14 queue;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Thu Sep 4 09:57:14 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3457966 accelerat train_la tum_cte0 PD\t0:00\t 8 (Resources)3457967 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)3457968 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)",,terminal_output +23,18217,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +24,25789,"TERMINAL",0,0,"cd /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m",,terminal_command +25,25835,"TERMINAL",0,0,"]633;E;2025-09-04 09:57:23 cd /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m]633;D;0",,terminal_output +26,32800,"TERMINAL",0,0,"ls -l | wc -l",,terminal_command +27,32841,"TERMINAL",0,0,"]633;E;2025-09-04 09:57:30 ls -l | wc -l;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +28,38077,"TERMINAL",0,0,"10002\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m]633;D;0",,terminal_output +29,44802,"TERMINAL",0,0,"fsacct_week",,terminal_command +30,44878,"TERMINAL",0,0,"]633;E;2025-09-04 09:57:42 fsacct_week;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C JobID JobName Partition All State Elapsed Timelimit \r\n--------------- ------------------------------ ---------------- --- ------------ ---------- ---------- \r\n 3454890 train_lam_minecraft_1node_dev accelerated 24 REQUEUED 00:04:42 00:10:00 \r\n 3454917 train_lam_minecraft_1node_dev accelerated 24 REQUEUED 00:04:36 00:10:00 \r\n 3457966 train_lam_minecraft_8node_dar+ accelerated 0 PENDING 00:00:00 2-00:00:00 \r\n 3457967 train_lam_minecraft_8node_dar+ accelerated 0 PENDING 00:00:00 2-00:00:00 \r\n 3457968 train_lam_minecraft_8node_dar+ accelerated 0 PENDING 00:00:00 2-00:00:00 \r\n 3457969 train_lam_minecraft_8node_dar+ accelerated 192 FAILED 00:05:59 2-00:00:00 \r\n 3463210 train_lam_minecraft_8node_dar+ accelerated 192 FAILED 00:00:28 2-00:00:00 \r\n 3463320 generate_coinrun_dataset_10m cpuonly 
152 COMPLETED 05:00:19 12:00:00 \r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m]633;D;0",,terminal_output +31,72534,"TERMINAL",0,0,"logs",,terminal_command +32,72553,"TERMINAL",0,0,"]633;E;2025-09-04 09:58:09 logs;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir]633;D;0",,terminal_output +33,73045,"TERMINAL",0,0,"ls",,terminal_command +34,73091,"TERMINAL",0,0,"]633;E;2025-09-04 09:58:10 ls;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +35,73338,"TERMINAL",0,0,"atari train_dyn_yolorun_3333026.log train_lam_action_space_scaling_50_3329789.log train_tokenizer_batch_size_scaling_4_node_3321524.log\r\nbig_run train_dyn_yolorun_3333448.log train_lam_action_space_scaling_50_3329804.log train_tokenizer_batch_size_scaling_8_node_3320176.log\r\nbig-runs train_dyn_yolorun_3335345.log train_lam_action_space_scaling_50_3331286.log train_tokenizer_batch_size_scaling_8_node_3321525.log\r\ncausal train_dyn_yolorun_3335362.log train_lam_action_space_scaling_6_3318549.log train_tokenizer_minecraft_overfit_sample_3309656.log\r\ncoinrun train_dyn_yolorun_3348592.log train_lam_action_space_scaling_6_3320178.log train_tokenizer_model_size_scaling_127M_3317233.log\r\ndata_coinrun train_dyn_yolorun_new_arch_3351743.log train_lam_action_space_scaling_6_3321528.log train_tokenizer_model_size_scaling_127M_3318554.log\r\nlam train_dyn_yolorun_new_arch_3352103.log train_lam_action_space_scaling_6_3329790.log train_tokenizer_model_size_scaling_140M_3313562.log\r\nmaskgit train_dyn_yolorun_new_arch_3352115.log train_lam_action_space_scaling_6_3329805.log train_tokenizer_model_size_scaling_140M_3316019.log\r\nmaskgit-maskprob-fix train_dyn_yolorun_new_arch_3358457.log train_lam_action_space_scaling_6_3331287.log train_tokenizer_model_size_scaling_200M_3313563.log\r\npreprocess train_lam_action_space_scaling_10_3320179.log train_lam_action_space_scaling_8_3318550.log train_tokenizer_model_size_scaling_200M_3316020.log\r\ntrain_dyn_causal_180M_3372931.log train_lam_action_space_scaling_10_3321529.log train_lam_action_space_scaling_8_3329791.log train_tokenizer_model_size_scaling_227M_3317234.log\r\ntrain_dyn_causal_180M_3372963.log train_lam_action_space_scaling_10_3329786.log train_lam_action_space_scaling_8_3329806.log train_tokenizer_model_size_scaling_227M_3318555.log\r\ntrain_dyn_causal_180M_3372969.log train_lam_action_space_scaling_10_3329801.log train_lam_action_space_scaling_8_3331288.log train_tokenizer_model_size_scaling_227M_3320173.log\r\ntrain_dyn_causal_180M_3373107.log train_lam_action_space_scaling_10_3331283.log train_lam_minecraft_overfit_sample_3309655.log train_tokenizer_model_size_scaling_227M_3321523.log\r\ntrain_dyn_causal_255M_3372932.log train_lam_action_space_scaling_12_3318546.log train_lam_model_size_scaling_38M_3317098.log train_tokenizer_model_size_scaling_37M_3313565.log\r\ntrain_dyn_causal_255M_3372970.log train_lam_action_space_scaling_12_3320177.log train_lam_model_size_scaling_38M_3317115.log train_tokenizer_model_size_scaling_37M_3316022.log\r\ntrain_dyn_causal_255M_3373108.log train_lam_action_space_scaling_12_3321527.log train_lam_model_size_scaling_38M_3317231.log train_tokenizer_model_size_scaling_37M_3317232.log\r\ntrain_dyn_causal_356M_3372934.log train_lam_action_space_scaling_12_3329787.log train_tokenizer_batch_size_scaling_16_node_3321526.log 
train_tokenizer_model_size_scaling_37M_3317239.log\r\ntrain_dyn_causal_356M_3372971.log train_lam_action_space_scaling_12_3329802.log train_tokenizer_batch_size_scaling_1_node_3318551.log train_tokenizer_model_size_scaling_37M_3318556.log\r\ntrain_dyn_causal_356M_3373109.log train_lam_action_space_scaling_12_3331284.log train_tokenizer_batch_size_scaling_2_node_3318552.log train_tokenizer_model_size_scaling_74M_3318557.log\r\ntrain_dyn_causal_500M_3372936.log train_lam_action_space_scaling_20_3318547.log train_tokenizer_batch_size_scaling_2_node_3330806.log train_tokenizer_model_size_scaling_74M_3320174.log\r\ntrain_dyn_causal_500M_3372972.log train_lam_action_space_scaling_20_3329788.log train_tokenizer_batch_size_scaling_2_node_3330848.log train_tokenizer_model_size_scaling_74M_3321522.log\r\ntrain_dyn_causal_500M_3373110.log train_lam_action_space_scaling_20_3329803.log train_tokenizer_batch_size_scaling_2_node_3331282.log train_tokenizer_model_size_scaling_80M_3313564.log\r\ntrain_dyn_new_arch-bugfixed-spatial-shift_3359343.log train_lam_action_space_scaling_20_3331285.log train_tokenizer_batch_size_scaling_4_node_3318553.log train_tokenizer_model_size_scaling_80M_3316026.log\r\ntrain_dyn_new_arch-bugfixed-temporal-shift_3359349.log train_lam_action_space_scaling_50_3320180.log train_tokenizer_batch_size_scaling_4_node_3320175.log yoloruns\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir]633;D;0",,terminal_output +36,75994,"TERMINAL",0,0,"cd lam/",,terminal_command +37,76041,"TERMINAL",0,0,"]633;E;2025-09-04 09:58:13 cd lam/;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam]633;D;0",,terminal_output +38,76290,"TERMINAL",0,0,"ls",,terminal_command +39,76343,"TERMINAL",0,0,"]633;E;2025-09-04 09:58:13 ls;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +40,76436,"TERMINAL",0,0,"train_lam_minecraft_8node_3431870.log train_lam_minecraft_8node_3431895.log train_lam_minecraft_8node_3454944.log train_lam_minecraft_8node_darkness_filter_37M_3454953.log\r\ntrain_lam_minecraft_8node_3431875.log train_lam_minecraft_8node_3454890.log train_lam_minecraft_8node_3454948.log train_lam_minecraft_8node_darkness_filter_37M_3457969.log\r\ntrain_lam_minecraft_8node_3431876.log train_lam_minecraft_8node_3454917.log train_lam_minecraft_8node_darkness_filter_133M_3454956.log train_lam_minecraft_8node_darkness_filter_37M_3463210.log\r\ntrain_lam_minecraft_8node_3431885.log train_lam_minecraft_8node_3454941.log train_lam_minecraft_8node_darkness_filter_311M_3454955.log train_lam_minecraft_8node_darkness_filter_400M_3454954.log\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam]633;D;0",,terminal_output +41,80487,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/train_lam_minecraft_8node_darkness_filter_37M_3463210.log",0,0,"slurmstepd: error: couldn't chdir to `/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs': No such file or directory: going to /tmp instead\nslurmstepd: error: couldn't chdir to `/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs': No such file or directory: going to /tmp instead\n#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH 
--output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --job-name=train_lam_minecraft_8node_darkness_filter_37M\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\n# slurm_job_id=$SLURM_JOB_ID\nslurm_job_id=3454953\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/lam/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_lam.py \\n --save_ckpt \\n --restore_ckpt \\n --wandb_id $slurm_job_id \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=160 \\n --init_lr=0 \\n --max_lr=1e-4 \\n --darkness_threshold=50 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=lam-minecraft-8-node-darkness-filter-37M-$slurm_job_id \\n --tags lam minecraft 8-node darkness-filter 37M \\n --entity instant-uv \\n --project jafar \\n --num_latents=100 \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n/var/spool/slurmd/job3463210/slurm_script: line 38: .venv/bin/activate: No such file or directory\nSLURM_JOB_USER=tum_cte0515\nSLURM_TASKS_PER_NODE=4(x8)\nSLURM_JOB_UID=999226\nSLURM_TASK_PID=2836931\nSLURM_JOB_GPUS=0,1,2,3\nSLURM_LOCALID=0\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs\nSLURMD_NODENAME=hkn0414\nSLURM_JOB_START_TIME=1756922773\nSLURM_CLUSTER_NAME=hk\nSLURM_JOB_END_TIME=1757095573\nSLURM_CPUS_ON_NODE=24\nSLURM_JOB_CPUS_PER_NODE=24(x8)\nSLURM_GPUS_ON_NODE=4\nSLURM_GTIDS=0\nSLURM_JOB_PARTITION=accelerated\nSLURM_TRES_PER_TASK=cpu=5\nSLURM_OOM_KILL_STEP=0\nSLURM_JOB_NUM_NODES=8\nSLURM_JOBID=3463210\nSLURM_JOB_QOS=normal\nSLURM_PROCID=0\nSLURM_CPUS_PER_TASK=5\nSLURM_NTASKS=32\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e12.hkn0414\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\nSLURM_SCRIPT_CONTEXT=prolog_task\nSLURM_NODELIST=hkn[0414,0518,0620,0624,0627,0716,0730,0804]\nSLURM_JOB_ACCOUNT=hk-project-p0023960\nSLURM_PRIO_PROCESS=0\nSLURM_NPROCS=32\nSLURM_NNODES=8\nSLURM_SUBMIT_HOST=hkn1993.localdomain\nSLURM_JOB_ID=3463210\nSLURM_NODEID=0\nSLURM_CONF=/etc/slurm/slurm.conf\nSLURM_JOB_NAME=train_lam_minecraft_8node_darkness_filter_37M\nSLURM_NTASKS_PER_NODE=4\nSLURM_JOB_GID=502226\nSLURM_JOB_NODELIST=hkn[0414,0518,0620,0624,0627,0716,0730,0804]\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\nGpuFreq=control_disabled\n/usr/bin/python: can't open file '/tmp/train_lam.py': 
[Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\nsrun: error: hkn0414: tasks 0-3: Exited with exit code 2\nsrun: error: hkn0716: tasks 20-23: Exited with exit code 2\nsrun: error: hkn0804: tasks 28-31: Exited with exit code 2\nsrun: error: hkn0620: tasks 8-11: Exited with exit code 2\nsrun: error: hkn0627: tasks 16-19: Exited with exit code 2\nsrun: error: hkn0730: tasks 24-27: Exited with exit code 2\nsrun: error: hkn0518: tasks 4-7: Exited with exit code 2\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\n/usr/bin/python: can't open file '/tmp/train_lam.py': [Errno 2] No such file or directory\nsrun: error: hkn0624: tasks 12-15: Exited with exit code 2\n\n============================= JOB FEEDBACK =============================\n\nJob ID: 3463210\nCluster: hk\nUser/Group: tum_cte0515/hk-project-p0023960\nAccount: hk-project-p0023960\nState: FAILED (exit code 2)\nPartition: accelerated\nNodes: 8\nCores per node: 24\nNodelist: 
hkn[0414,0518,0620,0624,0627,0716,0730,0804]\nCPU Utilized: 00:00:01\nCPU Efficiency: 0.02% of 01:29:36 core-walltime\nJob Wall-clock time: 00:00:28\nStarttime: Wed Sep 3 20:06:13 2025\nEndtime: Wed Sep 3 20:06:41 2025\nMemory Utilized: 10.48 MB\nMemory Efficiency: 0.00% of 0.00 MB\nEnergy Consumed: 2180926 Joule / 605.812777777778 Watthours\nAverage node power draw: 77890.2142857143 Watt\n",log,tab +42,80937,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/train_lam_minecraft_8node_darkness_filter_37M_3463210.log",1280,0,"",log,selection_mouse +43,80959,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/train_lam_minecraft_8node_darkness_filter_37M_3463210.log",1279,0,"",log,selection_command +44,81612,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/train_lam_minecraft_8node_darkness_filter_37M_3463210.log",7850,0,"",log,selection_command +45,129491,"TERMINAL",0,0,"queue",,terminal_command +46,129543,"TERMINAL",0,0,"]633;E;2025-09-04 09:59:06 queue;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +47,129612,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Thu Sep 4 09:59:06 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3457966 accelerat train_la tum_cte0 PD\t0:00\t 8 (Resources)3457967 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)3457968 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)",,terminal_output +48,130683,"TERMINAL",0,0,"7",,terminal_output +49,131708,"TERMINAL",0,0,"8",,terminal_output +50,132730,"TERMINAL",0,0,"9",,terminal_output +51,133776,"TERMINAL",0,0,"10",,terminal_output +52,134828,"TERMINAL",0,0,"1",,terminal_output +53,135867,"TERMINAL",0,0,"3",,terminal_output +54,136950,"TERMINAL",0,0,"4",,terminal_output +55,137963,"TERMINAL",0,0,"5",,terminal_output +56,139004,"TERMINAL",0,0,"6",,terminal_output +57,140052,"TERMINAL",0,0,"7",,terminal_output +58,141085,"TERMINAL",0,0,"8",,terminal_output +59,141415,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam]633;D;0",,terminal_output +60,145414,"TERMINAL",0,0,"runner",,terminal_command +61,145462,"TERMINAL",0,0,"]633;E;2025-09-04 09:59:22 runner;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +62,146227,"TERMINAL",0,0,"ls",,terminal_command +63,146316,"TERMINAL",0,0,"]633;E;2025-09-04 09:59:23 ls;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;Cdata_atari frames killer_partition.sh models requeuer.log scripts_cremers slurm-3359338.out slurm-3400254.out utils\r\ndebug generate_dataset.py killer.sh overfit_dir requirements-franz.txt scripts_horeka slurm-3373409.out tests wandb\r\ndiff.diff genie_fixed_maskgit.py LICENSE overfit_dir.zip requirements.txt slurm slurm-3373410.out train_dynamics.py weekend-job-requeuer.sh\r\ndiff.log genie.py local-logs __pycache__ sample.py slurm-3309772.out slurm-3379613.out train_lam.py weekend-job-starter.sh\r\nframe-knoms.png gifs log.log README.md sample.py_bak slurm-3359333.out slurm-3379615.out train_tokenizer_bak.py\r\nframe.png input_pipeline logs read_tf_record.py samples slurm-3359334.out slurm-3379616.out train_tokenizer.py\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +64,155488,"TERMINAL",0,0,"scancel --me",,terminal_command +65,155538,"TERMINAL",0,0,"]633;E;2025-09-04 09:59:32 scancel 
--me;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +66,195836,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",,terminal_command +67,195874,"TERMINAL",0,0,"]633;E;2025-09-04 10:00:13 sbatch slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;CSubmitted batch job 3465195\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +68,201884,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-133M.sbatch",,terminal_command +69,201949,"TERMINAL",0,0,"]633;E;2025-09-04 10:00:19 sbatch slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-133M.sbatch;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;CSubmitted batch job 3465196\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +70,207462,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-311M.sbatch",,terminal_command +71,207506,"TERMINAL",0,0,"]633;E;2025-09-04 10:00:24 sbatch slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-311M.sbatch;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;CSubmitted batch job 3465197\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +72,211031,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",,terminal_command +73,211097,"TERMINAL",0,0,"]633;E;2025-09-04 10:00:28 sbatch slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;CSubmitted batch job 3465198\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +74,212398,"TERMINAL",0,0,"queue",,terminal_command +75,212517,"TERMINAL",0,0,"]633;E;2025-09-04 10:00:29 queue;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Thu Sep 4 10:00:29 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3465198 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)3465197 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)3465196 accelerat train_la tum_cte0 PD\t0:00\t 8 (Resources)3465195 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)",,terminal_output +76,213530,"TERMINAL",0,0,"30",,terminal_output +77,213774,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +78,215831,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/train_lam_minecraft_8node_darkness_filter_37M_3463210.log",0,0,"",log,tab +79,221388,".venv/lib/python3.10/site-packages/gym3/types_np.py",0,0,"from functools import partial\nfrom typing import Any, Optional, Sequence, Tuple\n\nimport numpy as np\n\nfrom gym3.types import Discrete, Real, TensorType, ValType, multimap\n\n\ndef concat(xs: Sequence[Any], axis: int = 0) -> Any:\n """"""\n Concatenate the (leaf) arrays from xs\n\n :param xs: list of trees with the same shape, where the leaf values are numpy arrays\n :param axis: axis to concatenate along\n """"""\n return multimap(lambda *xs: np.concatenate(xs, axis=axis), *xs)\n\n\ndef stack(xs: Sequence[Any], axis: int = 0) -> Any:\n """"""\n Stack the (leaf) arrays from xs\n\n :param xs: list of trees with the same shape, where the leaf values are numpy arrays\n :param axis: axis to stack along\n """"""\n return multimap(lambda *xs: np.stack(xs, axis=axis), *xs)\n\n\ndef split(x: 
Any, sections: Sequence[int]) -> Sequence[Any]:\n """"""\n Split the (leaf) arrays from the tree x\n\n Examples:\n\n split([1,2,3,4], [1,2,3,4]) => [[1], [2], [3], [4]]\n split([1,2,3,4], [1,3,4]) => [[1], [2, 3], [4]]\n\n :param x: a tree where the leaf values are numpy arrays\n :param sections: list of indices to split at (not sizes of each split)\n\n :returns: list of trees with length `len(sections)` with the same shape as x\n where each leaf is the corresponding section of the leaf in x\n """"""\n result = []\n start = 0\n for end in sections:\n select_tree = multimap(lambda arr: arr[start:end], x)\n start = end\n result.append(select_tree)\n return result\n\n\ndef dtype(tt: TensorType) -> np.dtype:\n """"""\n :param tt: TensorType to get dtype for\n\n :returns: numpy.dtype to use for tt\n """"""\n assert isinstance(tt, TensorType)\n return np.dtype(tt.eltype.dtype_name)\n\n\ndef zeros(vt: ValType, bshape: Tuple) -> Any:\n """"""\n :param vt: ValType to create zeros for\n :param bshape: batch shape to prepend to the shape of each numpy array created by this function\n\n :returns: tree of numpy arrays matching vt\n """"""\n return multimap(\n lambda subdt: np.zeros(bshape + subdt.shape, dtype=dtype(subdt)), vt\n )\n\n\ndef _sample_tensor(\n tt: TensorType, bshape: Tuple, rng: Optional[np.random.RandomState] = None\n) -> np.ndarray:\n """"""\n :param tt: TensorType to create sample for\n :param bshape: batch shape to prepend to the shape of each numpy array created by this function\n :param rng: np.random.RandomState to use for sampling\n\n :returns: numpy array matching tt\n """"""\n if rng is None:\n rng = np.random\n assert isinstance(tt, TensorType)\n eltype = tt.eltype\n shape = bshape + tt.shape\n if isinstance(eltype, Discrete):\n return rng.randint(eltype.n, size=shape, dtype=dtype(tt))\n elif isinstance(eltype, Real):\n return rng.randn(*shape).astype(dtype(tt))\n else:\n raise ValueError(f""Expected ScalarType, got {type(eltype)}"")\n\n\ndef sample(\n vt: ValType, bshape: Tuple, rng: Optional[np.random.RandomState] = None\n) -> Any:\n """"""\n :param vt: ValType to create sample for\n :param bshape: batch shape to prepend to the shape of each numpy array created by this function\n :param rng: np.random.RandomState to use for sampling\n\n :returns: tree of numpy arrays matching vt\n """"""\n return multimap(partial(_sample_tensor, bshape=bshape, rng=rng), vt)\n",python,tab +80,223851,".venv/lib/python3.10/site-packages/procgen/env.py",0,0,"import os\nimport random\nfrom typing import Sequence, Optional, List\n\nimport gym3\nfrom gym3.libenv import CEnv\nimport numpy as np\nfrom .builder import build\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\n\nMAX_STATE_SIZE = 2 ** 20\n\nENV_NAMES = [\n ""bigfish"",\n ""bossfight"",\n ""caveflyer"",\n ""chaser"",\n ""climber"",\n ""coinrun"",\n ""dodgeball"",\n ""fruitbot"",\n ""heist"",\n ""jumper"",\n ""leaper"",\n ""maze"",\n ""miner"",\n ""ninja"",\n ""plunder"",\n ""starpilot"",\n]\n\nEXPLORATION_LEVEL_SEEDS = {\n ""coinrun"": 1949448038,\n ""caveflyer"": 1259048185,\n ""leaper"": 1318677581,\n ""jumper"": 1434825276,\n ""maze"": 158988835,\n ""heist"": 876640971,\n ""climber"": 1561126160,\n ""ninja"": 1123500215,\n}\n\n# should match DistributionMode in game.h, except for 'exploration' which is handled by Python\nDISTRIBUTION_MODE_DICT = {\n ""easy"": 0,\n ""hard"": 1,\n ""extreme"": 2,\n ""memory"": 10,\n ""exploration"": 20,\n}\n\n\ndef create_random_seed():\n rand_seed = random.SystemRandom().randint(0, 2 ** 31 - 1)\n 
try:\n # force MPI processes to definitely choose different random seeds\n from mpi4py import MPI\n\n rand_seed = rand_seed - (rand_seed % MPI.COMM_WORLD.size) + MPI.COMM_WORLD.rank\n except ModuleNotFoundError:\n pass\n return rand_seed\n\n\nclass BaseProcgenEnv(CEnv):\n """"""\n Base procedurally generated environment\n """"""\n\n def __init__(\n self,\n num,\n env_name,\n options,\n debug=False,\n rand_seed=None,\n num_levels=0,\n start_level=0,\n use_sequential_levels=False,\n debug_mode=0,\n resource_root=None,\n num_threads=4,\n render_mode=None,\n ):\n if resource_root is None:\n resource_root = os.path.join(SCRIPT_DIR, ""data"", ""assets"") + os.sep\n assert os.path.exists(resource_root)\n\n lib_dir = os.path.join(SCRIPT_DIR, ""data"", ""prebuilt"")\n if os.path.exists(lib_dir):\n assert any([os.path.exists(os.path.join(lib_dir, name)) for name in [""libenv.so"", ""libenv.dylib"", ""env.dll""]]), ""package is installed, but the prebuilt environment library is missing""\n assert not debug, ""debug has no effect for pre-compiled library""\n else:\n # only compile if we don't find a pre-built binary\n lib_dir = build(debug=debug)\n \n self.combos = self.get_combos()\n\n if render_mode is None:\n render_human = False\n elif render_mode == ""rgb_array"":\n render_human = True\n else:\n raise Exception(f""invalid render mode {render_mode}"")\n\n if rand_seed is None:\n rand_seed = create_random_seed()\n\n options.update(\n {\n ""env_name"": env_name,\n ""num_levels"": num_levels,\n ""start_level"": start_level,\n ""num_actions"": len(self.combos),\n ""use_sequential_levels"": bool(use_sequential_levels),\n ""debug_mode"": debug_mode,\n ""rand_seed"": rand_seed,\n ""num_threads"": num_threads,\n ""render_human"": render_human,\n # these will only be used the first time an environment is created in a process\n ""resource_root"": resource_root,\n }\n )\n\n self.options = options\n\n super().__init__(\n lib_dir=lib_dir,\n num=num,\n options=options,\n c_func_defs=[\n ""int get_state(libenv_env *, int, char *, int);"",\n ""void set_state(libenv_env *, int, char *, int);"",\n ],\n )\n # don't use the dict space for actions\n self.ac_space = self.ac_space[""action""]\n\n def get_state(self):\n length = MAX_STATE_SIZE\n buf = self._ffi.new(f""char[{length}]"")\n result = []\n for env_idx in range(self.num):\n n = self.call_c_func(""get_state"", env_idx, buf, length)\n result.append(bytes(self._ffi.buffer(buf, n)))\n return result\n\n def set_state(self, states):\n assert len(states) == self.num\n for env_idx in range(self.num):\n state = states[env_idx]\n self.call_c_func(""set_state"", env_idx, state, len(state))\n\n def get_combos(self):\n return [\n (""LEFT"", ""DOWN""),\n (""LEFT"",),\n (""LEFT"", ""UP""),\n (""DOWN"",),\n (),\n (""UP"",),\n (""RIGHT"", ""DOWN""),\n (""RIGHT"",),\n (""RIGHT"", ""UP""),\n (""D"",),\n (""A"",),\n (""W"",),\n (""S"",),\n (""Q"",),\n (""E"",),\n ]\n\n def keys_to_act(self, keys_list: Sequence[Sequence[str]]) -> List[Optional[np.ndarray]]:\n """"""\n Convert list of keys being pressed to actions, used in interactive mode\n """"""\n result = []\n for keys in keys_list:\n action = None\n max_len = -1\n for i, combo in enumerate(self.get_combos()):\n pressed = True\n for key in combo:\n if key not in keys:\n pressed = False\n\n if pressed and (max_len < len(combo)):\n action = i\n max_len = len(combo)\n\n if action is not None:\n action = np.array([action])\n result.append(action)\n return result\n\n def act(self, ac):\n # tensorflow may return int64 actions 
(https://github.com/openai/gym/blob/master/gym/spaces/discrete.py#L13)\n # so always cast actions to int32\n return super().act({""action"": ac.astype(np.int32)})\n\n\nclass ProcgenGym3Env(BaseProcgenEnv):\n """"""\n gym3 interface for Procgen\n """"""\n def __init__(\n self,\n num,\n env_name,\n center_agent=True,\n use_backgrounds=True,\n use_monochrome_assets=False,\n restrict_themes=False,\n use_generated_assets=False,\n paint_vel_info=False,\n distribution_mode=""hard"",\n **kwargs,\n ):\n assert (\n distribution_mode in DISTRIBUTION_MODE_DICT\n ), f'""{distribution_mode}"" is not a valid distribution mode.'\n\n if distribution_mode == ""exploration"":\n assert (\n env_name in EXPLORATION_LEVEL_SEEDS\n ), f""{env_name} does not support exploration mode""\n\n distribution_mode = DISTRIBUTION_MODE_DICT[""hard""]\n assert ""num_levels"" not in kwargs, ""exploration mode overrides num_levels""\n kwargs[""num_levels""] = 1\n assert ""start_level"" not in kwargs, ""exploration mode overrides start_level""\n kwargs[""start_level""] = EXPLORATION_LEVEL_SEEDS[env_name]\n else:\n distribution_mode = DISTRIBUTION_MODE_DICT[distribution_mode]\n\n options = {\n ""center_agent"": bool(center_agent),\n ""use_generated_assets"": bool(use_generated_assets),\n ""use_monochrome_assets"": bool(use_monochrome_assets),\n ""restrict_themes"": bool(restrict_themes),\n ""use_backgrounds"": bool(use_backgrounds),\n ""paint_vel_info"": bool(paint_vel_info),\n ""distribution_mode"": distribution_mode,\n }\n super().__init__(num, env_name, options, **kwargs)\n \n \nclass ToBaselinesVecEnv(gym3.ToBaselinesVecEnv):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second' : 15\n }\n def render(self, mode=""human""):\n info = self.env.get_info()[0]\n _, ob, _ = self.env.observe()\n if mode == ""rgb_array"":\n if ""rgb"" in info:\n return info[""rgb""]\n else:\n return ob['rgb'][0] \n\n\ndef ProcgenEnv(num_envs, env_name, **kwargs):\n return ToBaselinesVecEnv(ProcgenGym3Env(num=num_envs, env_name=env_name, **kwargs))\n",python,tab +81,226974,"/home/hk-project-p0023960/tum_cte0515/Projects/jafar/requirements.txt",0,0,"dm_pix>=0.4.3\neinops>=0.8.0\nflax>=0.8.5\njax[cuda12]>=0.4.30\noptax>=0.2.3\nprocgen>=0.10.7\ntorch>=2.0.1\ntyro>=0.8.5\nwandb>=0.17.4",pip-requirements,tab +82,228655,".venv/lib/python3.10/site-packages/procgen/env.py",0,0,"",python,tab +83,231669,"TERMINAL",0,0,"bash",,terminal_focus +84,232649,"TERMINAL",0,0,"bash",,terminal_focus +85,250840,"generate_dataset.py",0,0,"""""""\nGenerates a dataset of random-action CoinRun episodes.\nEpisodes are saved individually as memory-mapped files for efficient loading.\n""""""\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nfrom gym3 import types_np\nimport numpy as np\nfrom procgen import ProcgenGym3Env\nimport tyro\n\n\n@dataclass\nclass Args:\n num_episodes: int = 10000\n output_dir: str = ""data/coinrun_episodes""\n min_episode_length: int = 50\n\n\nargs = tyro.cli(Args)\noutput_dir = Path(args.output_dir)\noutput_dir.mkdir(parents=True, exist_ok=True)\n\n# --- Generate episodes ---\ni = 0\nmetadata = []\nwhile i < args.num_episodes:\n seed = np.random.randint(0, 10000)\n env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed)\n dataseq = []\n\n # --- Run episode ---\n for j in range(1000):\n action = types_np.sample(env.ac_space, bshape=(env.num,))\n breakpoint()\n env.act(action)\n rew, obs, first = env.observe()\n dataseq.append(obs[""rgb""])\n if first:\n break\n\n # --- Save episode ---\n if 
len(dataseq) >= args.min_episode_length:\n episode_data = np.concatenate(dataseq, axis=0)\n episode_path = output_dir / f""episode_{i}.npy""\n np.save(episode_path, episode_data.astype(np.uint8))\n metadata.append({""path"": str(episode_path), ""length"": len(dataseq)})\n print(f""Episode {i} completed, length: {len(dataseq)}"")\n i += 1\n else:\n print(f""Episode too short ({len(dataseq)}), resampling..."")\n\n# --- Save metadata ---\nnp.save(output_dir / ""metadata.npy"", metadata)\nprint(f""Dataset generated with {len(metadata)} valid episodes"")\n",python,tab +86,252715,"generate_dataset.py",870,0,"",python,selection_mouse +87,257314,"generate_dataset.py",856,21,"",python,content +88,257380,"generate_dataset.py",864,0,"",python,selection_command +89,302576,"TERMINAL",0,0,"queue",,terminal_command +90,302658,"TERMINAL",0,0,"]633;E;2025-09-04 10:01:59 queue;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Thu Sep 4 10:01:59 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3465195 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)3465196 accelerat train_la tum_cte0 PD\t0:00\t 8 (Resources)3465197 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)3465198 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)",,terminal_output +91,303702,"TERMINAL",0,0,"2:00",,terminal_output +92,303775,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +93,308531,"TERMINAL",0,0,"idling",,terminal_command +94,308607,"TERMINAL",0,0,"]633;E;2025-09-04 10:02:05 idling;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1990.localdomain: Thu Sep 4 10:02:05 2025Partition dev_cpuonly:\t 8 nodes idle\rPartition cpuonly:\t 1 nodes idle\rPartition dev_accelerated:\t 0 nodes idle\rPartition accelerated:\t 0 nodes idle\rPartition dev_accelerated-h100 :\t 1 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 6 nodes idle\rPartition accelerated-h200:\t 0 nodes idle",,terminal_output +95,309630,"TERMINAL",0,0,"6",,terminal_output +96,310671,"TERMINAL",0,0,"7",,terminal_output +97,311707,"TERMINAL",0,0,"8",,terminal_output +98,312743,"TERMINAL",0,0,"9",,terminal_output +99,313783,"TERMINAL",0,0,"10",,terminal_output +100,314819,"TERMINAL",0,0,"2",,terminal_output +101,315856,"TERMINAL",0,0,"3",,terminal_output +102,316901,"TERMINAL",0,0,"4",,terminal_output +103,317944,"TERMINAL",0,0,"5",,terminal_output +104,318430,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +105,748441,"TERMINAL",0,0,"ls",,terminal_command +106,748480,"TERMINAL",0,0,"]633;E;2025-09-04 10:09:25 ls;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +107,748633,"TERMINAL",0,0,"data_atari frames killer_partition.sh models requeuer.log scripts_cremers slurm-3359338.out slurm-3400254.out utils\r\ndebug generate_dataset.py killer.sh overfit_dir requirements-franz.txt scripts_horeka slurm-3373409.out tests wandb\r\ndiff.diff genie_fixed_maskgit.py LICENSE overfit_dir.zip requirements.txt slurm slurm-3373410.out train_dynamics.py weekend-job-requeuer.sh\r\ndiff.log genie.py local-logs __pycache__ sample.py slurm-3309772.out slurm-3379613.out train_lam.py weekend-job-starter.sh\r\nframe-knoms.png gifs log.log README.md sample.py_bak slurm-3359333.out slurm-3379615.out train_tokenizer_bak.py\r\nframe.png input_pipeline logs read_tf_record.py samples slurm-3359334.out slurm-3379616.out 
train_tokenizer.py\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +108,756196,"TERMINAL",0,0,"dev",,terminal_command +109,772107,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=3-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --partition=gpu_p\n#SBATCH --reservation=haicu_stefan\n#SBATCH --qos=gpu_long\n#SBATCH --gres=gpu:1\n#SBATCH --output=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/lam/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/lam/%x_%j.log\n#SBATCH --job-name=train_lam_coinrun_og_reproduction\n\n# Log the sbatch script\ncat $0\n\nsource .venv/bin/activate\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\ntags=""coinrun_og reproduction 10k_dataset lam helmholtz_reproduction""\n\nnpy_records_dir=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes/coinrun_episodes""\n\nCHECKPOINT_DIR=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/checkpoints/coinrun/lam/${job_name}_${slurm_job_id}""\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --log_image_interval=1000 \\n --log \\n --name=""${job_name}_${slurm_job_id}"" \\n --tags $tags \\n --entity instant-uv \\n --project jafar \\n --data_dir $npy_records_dir\n",shellscript,tab +110,776256,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",962,0,"",shellscript,selection_mouse +111,776929,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",825,0,"",shellscript,selection_mouse +112,779948,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",0,0,"",shellscript,tab +113,781787,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",367,0,"",shellscript,selection_mouse +114,781799,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",366,0,"",shellscript,selection_command +115,787411,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --gres=gpu:1\n#SBATCH --time=3-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --partition=gpu_p\n#SBATCH --reservation=haicu_stefan\n#SBATCH --qos=gpu_long\n#SBATCH --output=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/dynamics/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/dynamics/%x_%j.log\n#SBATCH --job-name=train_dynamics_coinrun_og_reproduction\n\n# Log the sbatch script\ncat $0\n\nsource .venv/bin/activate\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\ntags=""coinrun_og dynanmics 10m_dataset helmholtz_reproduction 
dyn_repro""\n\nnpy_records_dir=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes/coinrun_episodes""\n\ntokenizer_ckpt_dir=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/checkpoints/coinrun/tokenizer/train_tokenizer_coinrun_og_reproduction_28246778/tokenizer_1756303195_110000""\nlam_ckpt_dir=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/checkpoints/coinrun/lam/train_lam_coinrun_og_reproduction_28246647/lam_1756303037_200000""\nCHECKPOINT_DIR=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/checkpoints/coinrun/dynamics/${job_name}/${slurm_job_id}""\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --tokenizer_checkpoint=""${tokenizer_ckpt_dir}"" \\n --lam_checkpoint=""${lam_ckpt_dir}"" \\n --log_image_interval=1000 \\n --log \\n --name=""${job_name}_${slurm_job_id}"" \\n --tags ${tags} \\n --entity instant-uv \\n --project jafar \\n --data_dir $npy_records_dir\n",shellscript,tab +116,813635,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --gres=gpu:1\n#SBATCH --time=3-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --partition=gpu_p\n#SBATCH --reservation=haicu_stefan\n#SBATCH --qos=gpu_long\n#SBATCH --output=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/dynamics/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/dynamics/%x_%j.log\n#SBATCH --job-name=train_dynamics_coinrun_og_reproduction\n\n# Log the sbatch script\ncat $0\n\nsource .venv/bin/activate\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\ntags=""coinrun_og dynanmics 10m_dataset helmholtz_reproduction dyn_repro""\n\nnpy_records_dir=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes/coinrun_episodes""\n\ntokenizer_ckpt_dir=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/checkpoints/coinrun/tokenizer/train_tokenizer_coinrun_og_reproduction_28246778/tokenizer_1756303195_110000""\nlam_ckpt_dir=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/checkpoints/coinrun/lam/train_lam_coinrun_og_reproduction_28246647/lam_1756303037_200000""\nCHECKPOINT_DIR=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/checkpoints/coinrun/dynamics/${job_name}/${slurm_job_id}""\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --tokenizer_checkpoint=""${tokenizer_ckpt_dir}"" \\n --lam_checkpoint=""${lam_ckpt_dir}"" \\n --log_image_interval=1000 \\n --log \\n --name=""${job_name}_${slurm_job_id}"" \\n --tags ${tags} \\n --entity instant-uv \\n --project jafar \\n --data_dir $npy_records_dir\n",shellscript,tab +117,817291,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",727,0,"",shellscript,selection_mouse +118,817890,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",687,0,"",shellscript,selection_mouse +119,819018,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",632,0,"",shellscript,selection_mouse +120,819204,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",631,9,"dynanmics",shellscript,selection_mouse 
+121,819376,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",614,73,"tags=""coinrun_og dynanmics 10m_dataset helmholtz_reproduction dyn_repro""\n",shellscript,selection_mouse +122,819841,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",644,0,"",shellscript,selection_mouse +123,819841,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",641,11,"10m_dataset",shellscript,selection_mouse +124,820657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",685,0,"",shellscript,selection_mouse +125,950474,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=3-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --partition=gpu_p\n#SBATCH --reservation=haicu_stefan\n#SBATCH --qos=gpu_long\n#SBATCH --gres=gpu:1\n#SBATCH --output=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/lam/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/lam/%x_%j.log\n#SBATCH --job-name=train_lam_coinrun_og_reproduction\n\n# Log the sbatch script\ncat $0\n\nsource .venv/bin/activate\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\ntags=""coinrun_og reproduction 10k_dataset lam helmholtz_reproduction""\n\nnpy_records_dir=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes/coinrun_episodes""\n\nCHECKPOINT_DIR=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/checkpoints/coinrun/lam/${job_name}_${slurm_job_id}""\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_lam.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --log_image_interval=1000 \\n --log \\n --name=""${job_name}_${slurm_job_id}"" \\n --tags $tags \\n --entity instant-uv \\n --project jafar \\n --data_dir $npy_records_dir\n",shellscript,tab +126,952092,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",704,0,"",shellscript,selection_mouse +127,953034,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",682,0,"",shellscript,selection_mouse +128,953576,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",693,0,"",shellscript,selection_mouse +129,954185,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",686,0,"",shellscript,selection_mouse +130,955984,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",678,0,"",shellscript,selection_mouse +131,956957,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",694,0,"",shellscript,selection_mouse +132,970328,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",0,0,"",shellscript,tab +133,972235,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",687,0,"",shellscript,selection_mouse +134,972399,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",687,4,"/lus",shellscript,selection_mouse +135,972400,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",687,20,"/lustre/groups/haicu",shellscript,selection_mouse +136,972400,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",669,18,"\nnpy_records_dir=""",shellscript,selection_mouse 
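generate_dataset.py, opened earlier in this session (with the stray breakpoint() removed), writes each episode as episode_{i}.npy plus a metadata.npy index. The docstring's claim about memory-mapped loading corresponds to numpy's mmap_mode; a short sketch assuming the output layout the recorded script produces:

    import numpy as np
    from pathlib import Path

    data_dir = Path("data/coinrun_episodes")  # --output_dir default from the recorded Args
    metadata = np.load(data_dir / "metadata.npy", allow_pickle=True)

    # mmap_mode="r" maps frames lazily instead of reading the whole episode into RAM.
    episode = np.load(metadata[0]["path"], mmap_mode="r")
    print(episode.shape, episode.dtype)  # (length, 64, 64, 3) uint8 for coinrun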
+137,972401,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",687,81,"/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episode",shellscript,selection_mouse +138,972543,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",687,101,"/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes/coinrun_episodes""\n",shellscript,selection_mouse +139,972544,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",687,225,"/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes/coinrun_episodes""\n\nCHECKPOINT_DIR=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/checkpoints/coinrun/lam/${job_name}_${slurm_jo",shellscript,selection_mouse +140,972545,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",687,230,"/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes/coinrun_episodes""\n\nCHECKPOINT_DIR=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/checkpoints/coinrun/lam/${job_name}_${slurm_job_id}",shellscript,selection_mouse +141,972545,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",687,231,"/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes/coinrun_episodes""\n\nCHECKPOINT_DIR=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/checkpoints/coinrun/lam/${job_name}_${slurm_job_id}""",shellscript,selection_mouse +142,972669,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",687,230,"/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes/coinrun_episodes""\n\nCHECKPOINT_DIR=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/checkpoints/coinrun/lam/${job_name}_${slurm_job_id}",shellscript,selection_mouse +143,972696,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",687,101,"/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes/coinrun_episodes""\n",shellscript,selection_mouse +144,972733,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",687,100,"/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes/coinrun_episodes""",shellscript,selection_mouse +145,972812,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",687,99,"/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes/coinrun_episodes",shellscript,selection_mouse +146,972884,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",687,98,"/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes/coinrun_episode",shellscript,selection_mouse +147,973165,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",687,99,"/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes/coinrun_episodes",shellscript,selection_mouse +148,974192,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",687,99,"",shellscript,content +149,974888,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",687,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m",shellscript,content 
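The edit above swaps the Lustre npy_records_dir for the HoreKa coinrun_episodes_10m path. The earlier `ls -l | wc -l` check on that directory printed 10002, which is consistent: `ls -l` emits a `total` header line, leaving 10001 entries, i.e. 10000 episode files plus metadata.npy. A small sanity check along those lines, sketched as a pre-submission step rather than anything in the recorded scripts:

    from pathlib import Path

    data_dir = Path("/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared"
                    "/data_coinrun/coinrun_episodes_10m")
    episodes = list(data_dir.glob("episode_*.npy"))
    assert len(episodes) == 10000, len(episodes)
    assert (data_dir / "metadata.npy").exists()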
+150,977514,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",933,0,"",shellscript,selection_mouse +151,978151,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",815,0,"",shellscript,selection_mouse +152,986632,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",855,0,"d",shellscript,content +153,986633,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",854,0,"r",shellscript,content +154,986633,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",852,2,"",shellscript,content +155,986633,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",851,0,"h",shellscript,content +156,986633,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",850,0,"s_",shellscript,content +157,986633,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",847,3,"",shellscript,content +158,986633,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",844,1,"",shellscript,content +159,986633,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",840,0,"d3695-",shellscript,content +160,986633,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",839,1,"",shellscript,content +161,986633,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",838,0,"m_i",shellscript,content +162,986633,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",836,2,"",shellscript,content +163,986633,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",835,0,"tch/t",shellscript,content +164,986634,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",827,8,"",shellscript,content +165,986634,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",826,0,"scr",shellscript,content +166,986634,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",815,0,"rk",shellscript,content +167,986634,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",806,9,"",shellscript,content +168,986634,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",805,0,"w",shellscript,content +169,986634,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",803,2,"",shellscript,content +170,986634,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",799,3,"",shellscript,content +171,986634,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",798,0,"hkf",shellscript,content +172,986634,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",796,2,"",shellscript,content +173,987306,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",974,0,"",shellscript,selection_mouse +174,987307,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",974,0,"/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes/coinrun_episodes",shellscript,content +175,987307,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1073,0,"",shellscript,selection_keyboard +176,988919,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",946,0,"",shellscript,selection_mouse 
+177,989870,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",974,99,"",shellscript,content +178,993608,"TERMINAL",0,0,"bash",,terminal_focus +179,1001593,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --job-name=train_lam_minecraft_8node_darkness_filter_37M\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\n# slurm_job_id=$SLURM_JOB_ID\nslurm_job_id=3454953\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/lam/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_lam.py \\n --save_ckpt \\n --restore_ckpt \\n --wandb_id $slurm_job_id \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=160 \\n --init_lr=0 \\n --max_lr=1e-4 \\n --darkness_threshold=50 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=lam-minecraft-8-node-darkness-filter-37M-$slurm_job_id \\n --tags lam minecraft 8-node darkness-filter 37M \\n --entity instant-uv \\n --project jafar \\n --num_latents=100 \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +180,1003897,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,0,"",shellscript,selection_mouse +181,1004018,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,1,"/",shellscript,selection_mouse +182,1004019,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,7,"/hkfs/w",shellscript,selection_mouse +183,1004083,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,16,"/hkfs/work/works",shellscript,selection_mouse +184,1004083,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,27,"/hkfs/work/workspace/scratc",shellscript,selection_mouse +185,1004084,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,36,"/hkfs/work/workspace/scratch/tum_ind",shellscript,selection_mouse +186,1004115,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,44,"/hkfs/work/workspace/scratch/tum_ind3695-jaf",shellscript,selection_mouse 
+187,1004149,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,53,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shar",shellscript,selection_mouse +188,1004213,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,59,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/che",shellscript,selection_mouse +189,1004271,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,67,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints",shellscript,selection_mouse +190,1004304,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,126,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/lam/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR",shellscript,selection_mouse +191,1004611,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,91,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/lam/$job_name/$sl",shellscript,selection_mouse +192,1004612,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,93,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/lam/$job_name/$slur",shellscript,selection_mouse +193,1004613,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,95,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/lam/$job_name/$slurm_",shellscript,selection_mouse +194,1004613,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,96,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/lam/$job_name/$slurm_j",shellscript,selection_mouse +195,1004613,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,98,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/lam/$job_name/$slurm_job",shellscript,selection_mouse +196,1004615,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,99,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/lam/$job_name/$slurm_job_",shellscript,selection_mouse +197,1004615,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,100,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/lam/$job_name/$slurm_job_i",shellscript,selection_mouse +198,1004615,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",1342,101,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/lam/$job_name/$slurm_job_id",shellscript,selection_mouse +199,1007803,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",0,0,"",shellscript,tab +200,1009109,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,0,"",shellscript,selection_mouse +201,1009275,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,2,"/h",shellscript,selection_mouse +202,1009276,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",778,17,"\nCHECKPOINT_DIR=""",shellscript,selection_mouse +203,1009692,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,82,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${",shellscript,selection_mouse 
+204,1009693,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,83,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${j",shellscript,selection_mouse +205,1009693,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,84,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${jo",shellscript,selection_mouse +206,1009693,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,85,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job",shellscript,selection_mouse +207,1009833,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,86,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_",shellscript,selection_mouse +208,1009834,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,87,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_n",shellscript,selection_mouse +209,1009834,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,88,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_na",shellscript,selection_mouse +210,1009834,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,89,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_nam",shellscript,selection_mouse +211,1009835,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,90,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_name",shellscript,selection_mouse +212,1009835,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,91,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_name}",shellscript,selection_mouse +213,1009835,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,92,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_name}_",shellscript,selection_mouse +214,1009864,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,94,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_name}_${",shellscript,selection_mouse +215,1009891,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,95,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_name}_${s",shellscript,selection_mouse +216,1009913,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,97,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_name}_${slu",shellscript,selection_mouse +217,1009978,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,99,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_name}_${slurm",shellscript,selection_mouse +218,1009979,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,100,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_name}_${slurm_",shellscript,selection_mouse 
+219,1009986,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,102,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_name}_${slurm_jo",shellscript,selection_mouse +220,1010012,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,103,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_name}_${slurm_job",shellscript,selection_mouse +221,1010084,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,104,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_name}_${slurm_job_",shellscript,selection_mouse +222,1010086,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,105,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_name}_${slurm_job_i",shellscript,selection_mouse +223,1010127,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,106,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_name}_${slurm_job_id",shellscript,selection_mouse +224,1010191,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,107,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/coinrun/lam/${job_name}_${slurm_job_id}",shellscript,selection_mouse +225,1010808,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,107,"",shellscript,content +226,1011128,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",795,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/lam/$job_name/$slurm_job_id",shellscript,content +227,1012607,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",922,0,"",shellscript,selection_mouse +228,1013190,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",778,0,"",shellscript,selection_mouse +229,1014222,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",860,0,"",shellscript,selection_mouse +230,1014973,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",871,0,"",shellscript,selection_mouse +231,1015142,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",869,3,"lam",shellscript,selection_mouse +232,1016591,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",869,3,"j",shellscript,content +233,1016593,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",870,0,"",shellscript,selection_keyboard +234,1016693,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",870,0,"a",shellscript,content +235,1016694,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",871,0,"",shellscript,selection_keyboard +236,1016855,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",871,0,"f",shellscript,content +237,1016856,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",872,0,"",shellscript,selection_keyboard +238,1017033,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",872,0,"a",shellscript,content +239,1017034,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",873,0,"",shellscript,selection_keyboard 
+240,1017167,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",873,0,"r",shellscript,content +241,1017168,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",874,0,"",shellscript,selection_keyboard +242,1018550,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",874,0,"_og_reproduction/lam",shellscript,content +243,1023935,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",770,0,"",shellscript,selection_mouse +244,1024098,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",756,20,"coinrun_episodes_10m",shellscript,selection_mouse +245,1024841,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",963,0,"",shellscript,selection_mouse +246,1025524,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",870,0,"",shellscript,selection_mouse +247,1025670,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",869,21,"jafar_og_reproduction",shellscript,selection_mouse +248,1026764,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",944,0,"",shellscript,selection_mouse +249,1027776,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",475,0,"",shellscript,selection_mouse +250,1030683,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --job-name=train_lam_minecraft_8node_darkness_filter_400M\n#SBATCH --reservation=llmtum\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\n# slurm_job_id=$SLURM_JOB_ID\nslurm_job_id=3454954\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/lam/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_lam.py \\n --save_ckpt \\n --restore_ckpt \\n --wandb_id $slurm_job_id \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=160 \\n --init_lr=0 \\n --max_lr=1e-4 \\n --darkness_threshold=50 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=lam-minecraft-8-node-darkness-filter-400M-$slurm_job_id \\n --tags lam minecraft 8-node darkness-filter 400M \\n --entity instant-uv \\n --project jafar \\n --num_latents=100 \\n --model_dim=1024 \\n --num_blocks=12 \\n 
--num_heads=16 \\n --latent_dim=64 \\n --ffn_dim=4096 \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n",shellscript,tab +251,1033149,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",374,0,"",shellscript,selection_mouse +252,1033151,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",373,0,"",shellscript,selection_command +253,1033248,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",373,1,"g",shellscript,selection_mouse +254,1033252,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",374,0,"",shellscript,selection_command +255,1033378,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",364,10,"/%x_%j.log",shellscript,selection_mouse +256,1033379,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",239,135,"red/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +257,1033379,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",228,146,"jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +258,1033380,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",214,160,"h/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +259,1033380,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",169,205,"\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +260,1033458,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",187,187,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +261,1033470,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",185,189,"t=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +262,1033489,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",184,190,"ut=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +263,1033517,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",183,191,"put=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +264,1033551,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",181,193,"utput=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH 
--error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +265,1033564,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",180,194,"output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +266,1033592,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",178,196,"--output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +267,1033614,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",177,197," --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +268,1033637,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",175,199,"CH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +269,1033662,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",277,97,"TCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +270,1033699,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",276,98,"ATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +271,1033737,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",275,99,"BATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +272,1033815,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",274,100,"SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +273,1033884,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",273,101,"#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +274,1033924,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",170,204,"#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,selection_mouse +275,1035381,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",170,0,"",shellscript,selection_command +276,1036759,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",0,0,"",shellscript,tab +277,1037904,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",342,0,"",shellscript,selection_mouse +278,1039279,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",341,0,"",shellscript,selection_command 
+279,1039745,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",342,0,"#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,content +280,1039793,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",342,0,"",shellscript,selection_command +281,1042262,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",343,0,"",shellscript,selection_command +282,1042686,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",342,0,"",shellscript,selection_command +283,1042889,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",342,0,"\n",shellscript,content +284,1043748,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",329,0,"",shellscript,selection_command +285,1047208,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",342,1,"",shellscript,content +286,1047265,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",342,0,"",shellscript,selection_command +287,1048646,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",342,204,"",shellscript,content +288,1048677,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",341,0,"",shellscript,selection_command +289,1049335,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",432,0,"\n",shellscript,content +290,1050920,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",433,0,"#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log",shellscript,content +291,1050988,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",433,0,"",shellscript,selection_command +292,1051448,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",329,0,"",shellscript,selection_command +293,1052395,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",224,209,"",shellscript,content +294,1053598,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",311,0,"",shellscript,selection_mouse +295,1054157,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",316,0,"",shellscript,selection_mouse +296,1054669,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",314,0,"",shellscript,selection_mouse +297,1055750,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",313,0,"",shellscript,selection_command +298,1057203,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",313,0,"j",shellscript,content +299,1057204,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",314,0,"",shellscript,selection_keyboard +300,1058455,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",314,0,"afar_og_reproduction/",shellscript,content +301,1059931,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",437,0,"jafar_og_reproduction/",shellscript,content 
+302,1059933,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",459,0,"",shellscript,selection_command +303,1060556,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",458,0,"",shellscript,selection_command +304,1060985,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",524,0,"",shellscript,selection_command +305,1061201,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",526,0,"",shellscript,selection_command +306,1061418,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",549,0,"",shellscript,selection_command +307,1063270,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",202,0,"",shellscript,selection_mouse +308,1063275,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",201,0,"",shellscript,selection_command +309,1063696,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",202,0,"",shellscript,selection_mouse +310,1063704,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",201,0,"",shellscript,selection_command +311,1064329,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",166,0,"",shellscript,selection_command +312,1067377,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",155,58,"",shellscript,content +313,1068636,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",129,0,"",shellscript,selection_command +314,1070136,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",119,26,"",shellscript,content +315,1070639,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",93,0,"",shellscript,selection_command +316,1070985,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",67,0,"",shellscript,selection_command +317,1071162,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",39,0,"",shellscript,selection_command +318,1071498,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",67,0,"",shellscript,selection_command +319,1071977,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",93,0,"",shellscript,selection_command +320,1072222,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",119,0,"",shellscript,selection_command +321,1072510,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",93,0,"",shellscript,selection_command +322,1072653,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",67,0,"",shellscript,selection_command +323,1072890,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",68,0,"",shellscript,selection_command +324,1073415,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",69,0,"",shellscript,selection_command +325,1073449,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",70,0,"",shellscript,selection_command +326,1073482,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",71,0,"",shellscript,selection_command +327,1073495,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",72,0,"",shellscript,selection_command 
+328,1073540,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",73,0,"",shellscript,selection_command +329,1073565,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",74,0,"",shellscript,selection_command +330,1073584,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",75,0,"",shellscript,selection_command +331,1073644,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",76,0,"",shellscript,selection_command +332,1073656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",77,0,"",shellscript,selection_command +333,1073714,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",78,0,"",shellscript,selection_command +334,1074001,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",79,0,"",shellscript,selection_command +335,1074141,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",80,0,"",shellscript,selection_command +336,1074282,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",81,0,"",shellscript,selection_command +337,1074596,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",82,0,"",shellscript,selection_command +338,1075910,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",82,1,"2",shellscript,content +339,1076795,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",54,0,"",shellscript,selection_command +340,1077237,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",36,0,"",shellscript,selection_command +341,1077449,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",20,0,"",shellscript,selection_command +342,1077705,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",36,0,"",shellscript,selection_command +343,1077873,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",54,0,"",shellscript,selection_command +344,1078025,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",82,0,"",shellscript,selection_command +345,1078196,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",108,0,"",shellscript,selection_command +346,1078362,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",134,0,"",shellscript,selection_command +347,1078543,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",155,0,"",shellscript,selection_command +348,1079246,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",280,0,"",shellscript,selection_command +349,1079425,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",404,0,"",shellscript,selection_command +350,1079749,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",296,0,"",shellscript,selection_mouse +351,1080384,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",420,0,"",shellscript,selection_command +352,1082491,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",466,0,"",shellscript,selection_mouse +353,1082513,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",465,0,"",shellscript,selection_command 
+354,1083623,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",466,0,"",shellscript,selection_mouse +355,1083624,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",465,0,"",shellscript,selection_command +356,1084320,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",472,0,"",shellscript,selection_command +357,1084475,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",474,0,"",shellscript,selection_command +358,1084637,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",497,0,"",shellscript,selection_command +359,1084809,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",501,0,"",shellscript,selection_command +360,1085063,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",524,0,"",shellscript,selection_command +361,1085779,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",549,0,"",shellscript,selection_command +362,1087216,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",554,0,"",shellscript,selection_command +363,1087398,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",577,0,"",shellscript,selection_command +364,1087579,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",625,0,"",shellscript,selection_command +365,1087777,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",648,0,"",shellscript,selection_command +366,1088414,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",734,0,"",shellscript,selection_command +367,1088559,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",757,0,"",shellscript,selection_command +368,1089799,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",601,0,"",shellscript,selection_mouse +369,1089980,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",601,1,"h",shellscript,selection_mouse +370,1089980,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",601,4,"helm",shellscript,selection_mouse +371,1089981,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",601,7,"helmhol",shellscript,selection_mouse +372,1089981,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",601,9,"helmholtz",shellscript,selection_mouse +373,1090054,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",601,12,"helmholtz_re",shellscript,selection_mouse +374,1090055,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",601,14,"helmholtz_repr",shellscript,selection_mouse +375,1090055,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",601,16,"helmholtz_reprod",shellscript,selection_mouse +376,1090085,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",601,17,"helmholtz_reprodu",shellscript,selection_mouse +377,1090226,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",601,18,"helmholtz_reproduc",shellscript,selection_mouse +378,1090262,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",601,19,"helmholtz_reproduct",shellscript,selection_mouse 
+379,1090288,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",601,20,"helmholtz_reproducti",shellscript,selection_mouse +380,1090348,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",601,21,"helmholtz_reproductio",shellscript,selection_mouse +381,1091314,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",601,22,"helmholtz_reproduction",shellscript,selection_mouse +382,1092532,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",601,22,"",shellscript,content +383,1093183,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",601,0,"r",shellscript,content +384,1093184,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",602,0,"",shellscript,selection_keyboard +385,1093353,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",602,0,"e",shellscript,content +386,1093354,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",603,0,"",shellscript,selection_keyboard +387,1093490,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",603,0,"p",shellscript,content +388,1093491,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",604,0,"",shellscript,selection_keyboard +389,1093643,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",604,0,"o",shellscript,content +390,1093644,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",605,0,"",shellscript,selection_keyboard +391,1094053,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",604,1,"",shellscript,content +392,1094189,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",604,0,"r",shellscript,content +393,1094190,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",605,0,"",shellscript,selection_keyboard +394,1094256,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",605,0,"o",shellscript,content +395,1094258,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",606,0,"",shellscript,selection_keyboard +396,1096051,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",606,0,"_",shellscript,content +397,1096052,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",607,0,"",shellscript,selection_keyboard +398,1096311,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",607,0,"m",shellscript,content +399,1096312,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",608,0,"",shellscript,selection_keyboard +400,1096439,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",608,0,"i",shellscript,content +401,1096440,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",609,0,"",shellscript,selection_keyboard +402,1096544,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",609,0,"h",shellscript,content +403,1096545,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",610,0,"",shellscript,selection_keyboard +404,1096605,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",610,0,"i",shellscript,content 
+405,1096606,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",611,0,"",shellscript,selection_keyboard +406,1096711,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",611,0,"r",shellscript,content +407,1096711,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",612,0,"",shellscript,selection_keyboard +408,1538147,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1034,0,"",shellscript,selection_mouse +409,1538689,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1031,0,"",shellscript,selection_mouse +410,1538831,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1027,8,"job_name",shellscript,selection_mouse +411,1540868,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1046,0,"",shellscript,selection_mouse +412,1541022,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1039,12,"slurm_job_id",shellscript,selection_mouse +413,1544531,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1032,0,"",shellscript,selection_mouse +414,1544693,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1027,8,"job_name",shellscript,selection_mouse +415,1545814,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1035,0,"",shellscript,selection_mouse +416,1545814,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1027,8,"job_name",shellscript,selection_mouse +417,1546452,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1040,0,"",shellscript,selection_mouse +418,1546608,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1039,12,"slurm_job_id",shellscript,selection_mouse +419,1547260,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1031,0,"",shellscript,selection_mouse +420,1547418,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1027,8,"job_name",shellscript,selection_mouse +421,1548080,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1042,0,"",shellscript,selection_mouse +422,1548236,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1039,12,"slurm_job_id",shellscript,selection_mouse +423,1548963,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1033,0,"",shellscript,selection_mouse +424,1549115,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1027,8,"job_name",shellscript,selection_mouse +425,1550369,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1046,0,"",shellscript,selection_mouse +426,1550503,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1039,12,"slurm_job_id",shellscript,selection_mouse +427,1552254,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1068,0,"",shellscript,selection_mouse +428,1553421,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1069,0,"",shellscript,selection_mouse +429,1553596,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1068,4,"tags",shellscript,selection_mouse 
+430,1554424,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1147,0,"",shellscript,selection_mouse +431,1554563,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1139,15,"npy_records_dir",shellscript,selection_mouse +432,1570667,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",84,0,"",shellscript,selection_mouse +433,1644097,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1100,0,"",shellscript,selection_mouse +434,1645854,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",1122,0,"",shellscript,selection_mouse +435,1673365,"TERMINAL",0,0,"undefined[tum_cte0515@hkn1990 jasmine]$ cd slurm/",,terminal_command +436,1673707,"TERMINAL",0,0,"ls",,terminal_command +437,1677745,"TERMINAL",0,0,"git status",,terminal_command +438,1677800,"TERMINAL",0,0,"]633;E;2025-09-04 10:24:54 git status;9618af06-aa7a-44c4-9160-5cfc3f9eaf06]633;C",,terminal_output +439,1678201,"TERMINAL",0,0,"On branch main\r\nYour branch is up to date with 'origin/main'.\r\n\r\nChanges not staged for commit:\r\n (use ""git add ..."" to update what will be committed)\r\n (use ""git restore ..."" to discard changes in working directory)\r\n\tmodified: jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tjobs/mihir/horeka/jafar_og_reproduction/\r\n\tjobs/mihir/horeka/lam/\r\n\tutils/alfred/sqrt_lr_scaling.py\r\n\r\nno changes added to commit (use ""git add"" and/or ""git commit -a"")\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine/slurm]633;D;0",,terminal_output +440,1693040,"TERMINAL",0,0,"git restorejobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch^C",,terminal_command +441,1693065,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;9618af06-aa7a-44c4-9160-5cfc3f9eaf06]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine/slurm]633;D",,terminal_output +442,1696656,"TERMINAL",0,0,"git restore jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch",,terminal_command +443,1696688,"TERMINAL",0,0,"]633;E;2025-09-04 10:25:13 git restore jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch;9618af06-aa7a-44c4-9160-5cfc3f9eaf06]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine/slurm]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jasmine/slurm",,terminal_output +444,1698912,"TERMINAL",0,0,"git status",,terminal_command +445,1698966,"TERMINAL",0,0,"]633;E;2025-09-04 10:25:16 git status;9618af06-aa7a-44c4-9160-5cfc3f9eaf06]633;COn branch main\r\nYour branch is up to date with 'origin/main'.\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tjobs/mihir/horeka/jafar_og_reproduction/\r\n\tjobs/mihir/horeka/lam/\r\n\tutils/alfred/sqrt_lr_scaling.py\r\n\r\nnothing added to commit but untracked files present (use ""git add"" to track)\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine/slurm]633;D;0",,terminal_output +446,1706008,"TERMINAL",0,0,"git add jobs/mihir/",,terminal_command +447,1706058,"TERMINAL",0,0,"]633;E;2025-09-04 10:25:23 git add jobs/mihir/;9618af06-aa7a-44c4-9160-5cfc3f9eaf06]633;C",,terminal_output +448,1706325,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine/slurm]633;D;0",,terminal_output +449,1727160,"TERMINAL",0,0,"# git commit -m ""added og coinrun jobs""",,terminal_command 
+450,1727198,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;;9618af06-aa7a-44c4-9160-5cfc3f9eaf06]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine/slurm]633;D",,terminal_output +451,1732745,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=3-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --partition=gpu_p\n#SBATCH --reservation=haicu_stefan\n#SBATCH --qos=gpu_long\n#SBATCH --gres=gpu:1\n#SBATCH --output=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/tokenizer/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/tokenizer/%x_%j.log\n#SBATCH --job-name=train_tokenizer_coinrun_og_reproduction\n\n# Log the sbatch script\ncat $0\n\nsource .venv/bin/activate\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\ntags=""coinrun_og reproduction 10k_dataset tokenizer helmholtz_reproduction""\n\nnpy_records_dir=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes/coinrun_episodes""\n\nCHECKPOINT_DIR=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/checkpoints/coinrun/tokenizer/${job_name}_${slurm_job_id}""\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --log_image_interval=1000 \\n --log \\n --name=""${job_name}_${slurm_job_id}"" \\n --tags $tags \\n --entity instant-uv \\n --project jafar \\n --data_dir $npy_records_dir\n\n",shellscript,tab +452,1734517,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",399,0,"",shellscript,selection_mouse +453,1736757,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",761,0,"",shellscript,selection_mouse +454,1738755,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",0,0,"",shellscript,tab +455,1739837,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",723,0,"",shellscript,selection_mouse +456,1740333,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",696,0,"",shellscript,selection_mouse +457,1741444,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",695,0,"",shellscript,selection_command +458,1742571,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",615,107,"npy_records_dir=""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m""",shellscript,selection_command +459,1742814,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",615,108,"npy_records_dir=""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m""\n",shellscript,selection_command +460,1742944,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",615,249,"npy_records_dir=""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m""\n\nCHECKPOINT_DIR=""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/jafar_og_reproduction/lam/$job_name/$slurm_job_id""",shellscript,selection_command +461,1743294,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",615,0,"",shellscript,selection_command +462,1745536,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",0,0,"",shellscript,tab 
+463,1746572,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",714,0,"",shellscript,selection_mouse +464,1747925,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",811,0,"\n",shellscript,content +465,1748226,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",812,0,"\nnpy_records_dir=""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m""\n\nCHECKPOINT_DIR=""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/jafar_og_reproduction/lam/$job_name/$slurm_job_id""",shellscript,content +466,1748318,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",813,0,"",shellscript,selection_command +467,1748616,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",812,0,"",shellscript,selection_command +468,1748987,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",694,0,"",shellscript,selection_command +469,1749340,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",694,118,"",shellscript,content +470,1749489,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",695,0,"",shellscript,selection_command +471,1749685,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",803,0,"",shellscript,selection_command +472,1749865,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",804,0,"",shellscript,selection_command +473,1749997,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",803,0,"",shellscript,selection_command +474,1750147,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",695,0,"",shellscript,selection_command +475,1750340,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",694,0,"",shellscript,selection_command +476,1750652,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",694,1,"",shellscript,content +477,1750748,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",802,0,"",shellscript,selection_command +478,1750932,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",803,0,"",shellscript,selection_command +479,1751106,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",944,0,"",shellscript,selection_command +480,1751283,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",945,0,"",shellscript,selection_command +481,1752405,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",945,136,"",shellscript,content +482,1753133,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",944,0,"",shellscript,selection_command +483,1753215,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",803,0,"",shellscript,selection_command +484,1753896,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",918,0,"tokenizer",shellscript,content +485,1753897,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",915,3,"",shellscript,content 
+486,1754790,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",802,0,"",shellscript,selection_command +487,1754964,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",694,0,"",shellscript,selection_command +488,1755579,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",693,0,"",shellscript,selection_command +489,1755735,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",617,0,"",shellscript,selection_command +490,1755736,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",616,0,"",shellscript,selection_command +491,1755736,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",589,0,"",shellscript,selection_command +492,1755832,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",564,0,"",shellscript,selection_command +493,1755833,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",563,0,"",shellscript,selection_command +494,1755833,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",537,0,"",shellscript,selection_command +495,1755833,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",536,0,"",shellscript,selection_command +496,1755834,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",529,0,"",shellscript,selection_command +497,1755834,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",505,0,"",shellscript,selection_command +498,1755867,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",504,0,"",shellscript,selection_command +499,1755867,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",445,0,"",shellscript,selection_command +500,1755958,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",335,0,"",shellscript,selection_command +501,1756438,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",445,0,"",shellscript,selection_command +502,1756827,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",335,0,"",shellscript,selection_command +503,1758250,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",423,0,"ctio",shellscript,content +504,1758251,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",422,0,"_og_reprod",shellscript,content +505,1758251,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",421,0,"hir/jafa",shellscript,content +506,1758251,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",420,1,"",shellscript,content +507,1758251,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",419,0,"gs_m",shellscript,content +508,1758251,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",418,0,"l",shellscript,content +509,1758251,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",417,1,"",shellscript,content +510,1758252,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",411,0,"d",shellscript,content 
+511,1758252,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",410,0,"r",shellscript,content +512,1758252,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",408,2,"",shellscript,content +513,1758252,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",407,0,"_sh",shellscript,content +514,1758252,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",403,3,"",shellscript,content +515,1758252,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",400,1,"",shellscript,content +516,1758252,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",396,0,"d3695-",shellscript,content +517,1758252,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",395,1,"",shellscript,content +518,1758252,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",394,0,"m_i",shellscript,content +519,1758252,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",392,2,"",shellscript,content +520,1758252,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",391,0,"atch/t",shellscript,content +521,1758252,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",386,5,"",shellscript,content +522,1758252,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",385,0,"sc",shellscript,content +523,1758252,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",382,3,"",shellscript,content +524,1758252,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",371,0,"k",shellscript,content +525,1758253,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",361,10,"",shellscript,content +526,1758253,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",360,0,"wo",shellscript,content +527,1758253,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",359,1,"",shellscript,content +528,1758253,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",355,3,"",shellscript,content +529,1758253,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",354,0,"hkf",shellscript,content +530,1758253,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",352,2,"",shellscript,content +531,1758253,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",313,0,"ctio",shellscript,content +532,1758253,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",312,0,"_og_reprod",shellscript,content +533,1758253,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",311,0,"hir/jafa",shellscript,content +534,1758253,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",310,1,"",shellscript,content +535,1758253,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",309,0,"gs_m",shellscript,content +536,1758253,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",308,0,"l",shellscript,content 
+537,1758253,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",307,1,"",shellscript,content +538,1758254,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",301,0,"d",shellscript,content +539,1758254,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",300,0,"r",shellscript,content +540,1758254,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",298,2,"",shellscript,content +541,1758254,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",297,0,"_sh",shellscript,content +542,1758254,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",293,3,"",shellscript,content +543,1758254,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",290,1,"",shellscript,content +544,1758254,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",286,0,"d3695-",shellscript,content +545,1758254,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",285,1,"",shellscript,content +546,1758254,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",284,0,"m_i",shellscript,content +547,1758254,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",282,2,"",shellscript,content +548,1758254,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",281,0,"atch/t",shellscript,content +549,1758254,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",276,5,"",shellscript,content +550,1758254,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",275,0,"sc",shellscript,content +551,1758255,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",272,3,"",shellscript,content +552,1758255,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",261,0,"k",shellscript,content +553,1758255,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",251,10,"",shellscript,content +554,1758255,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",250,0,"wo",shellscript,content +555,1758255,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",249,1,"",shellscript,content +556,1758255,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",245,3,"",shellscript,content +557,1758255,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",244,0,"hkf",shellscript,content +558,1758255,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",242,2,"",shellscript,content +559,1760332,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",485,0,"",shellscript,selection_command +560,1760851,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",544,0,"",shellscript,selection_command +561,1760888,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",545,0,"",shellscript,selection_command +562,1760903,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",569,0,"",shellscript,selection_command 
+563,1760931,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",576,0,"",shellscript,selection_command +564,1760954,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",577,0,"",shellscript,selection_command +565,1761087,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",603,0,"",shellscript,selection_command +566,1761258,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",604,0,"",shellscript,selection_command +567,1761259,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",629,0,"",shellscript,selection_command +568,1761259,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",656,0,"",shellscript,selection_command +569,1761259,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",657,0,"",shellscript,selection_command +570,1761402,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",733,0,"",shellscript,selection_command +571,1761402,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",734,0,"",shellscript,selection_command +572,1761403,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",842,0,"",shellscript,selection_command +573,1761403,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",843,0,"",shellscript,selection_command +574,1761404,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",990,0,"",shellscript,selection_command +575,1761455,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",991,0,"",shellscript,selection_command +576,1761456,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1016,0,"",shellscript,selection_command +577,1761458,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1017,0,"",shellscript,selection_command +578,1761458,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1034,0,"",shellscript,selection_command +579,1761459,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1035,0,"",shellscript,selection_command +580,1765585,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1034,0,"",shellscript,selection_command +581,1766090,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1017,0,"",shellscript,selection_command +582,1766159,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1016,0,"",shellscript,selection_command +583,1766161,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",991,0,"",shellscript,selection_command +584,1766184,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",990,0,"",shellscript,selection_command +585,1766237,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",843,0,"",shellscript,selection_command +586,1766269,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",842,0,"",shellscript,selection_command +587,1766300,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",734,0,"",shellscript,selection_command 
+588,1766319,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",733,0,"",shellscript,selection_command +589,1766363,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",657,0,"",shellscript,selection_command +590,1766942,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",732,0,"",shellscript,selection_command +591,1768081,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",731,0,"",shellscript,selection_command +592,1768920,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",733,0,"",shellscript,selection_command +593,1769538,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",731,0,"",shellscript,selection_command +594,1770208,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",731,0,"hir",shellscript,content +595,1770208,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",729,2,"",shellscript,content +596,1770208,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",728,0,"_m",shellscript,content +597,1770209,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",724,4,"",shellscript,content +598,1770209,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",709,10,"",shellscript,content +599,1779840,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1084,0,"",shellscript,selection_mouse +600,1780431,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1195,0,"",shellscript,selection_mouse +601,1780437,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1194,0,"",shellscript,selection_command +602,1781119,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1117,0,"",shellscript,selection_mouse +603,1782082,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1133,0,"",shellscript,selection_mouse +604,1782083,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1132,0,"",shellscript,selection_command +605,1783120,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1221,0,"",shellscript,selection_mouse +606,1783144,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1220,0,"",shellscript,selection_command +607,1783927,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1160,0,"",shellscript,selection_mouse +608,1785178,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1111,0,"",shellscript,selection_mouse +609,1785808,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1157,0,"",shellscript,selection_mouse +610,1792927,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import 
get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n init_lr: float = 0.0\n max_lr: float = 3e-4\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 20000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n warmup_steps: int = 10000\n darkness_threshold: float = 0.0\n # Tokenizer\n model_dim: int = 512\n ffn_dim: int = 2048\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 4\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(\n model: TokenizerVQVAE, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n # --- Compute loss ---\n # FIXME (f.srambical): Can we even do native int8 training without casting the video at all?\n # FIXME (f.srambical): If the tokenizer is the reason for the dynamics model being memory-bound,\n # should we at least train the tokenizer natively in int8?\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n outputs[""recon""] = outputs[""recon""].astype(jnp.float32)\n mse = jnp.square(gt - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@nnx.jit\ndef train_step(\n tokenizer: TokenizerVQVAE, optimizer: nnx.Optimizer, inputs: dict\n) -> tuple[jax.Array, jax.Array, dict]:\n def loss_fn(model: TokenizerVQVAE) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n return tokenizer_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(\n tokenizer\n )\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n 
metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n ffn_dim=args.ffn_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n\n _, params, _ = nnx.split(tokenizer, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n optimizer = nnx.Optimizer(tokenizer, tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n 
grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n darkness_threshold=args.darkness_threshold,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n\n # --- TRAIN LOOP ---\n dataloader = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in grain_iterator\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n inputs = dict(videos=videos)\n loss, recon, metrics = train_step(tokenizer, optimizer, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # 
type: ignore\n dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +611,1801719,"train_tokenizer.py",1381,0,"",python,selection_mouse +612,1801822,"train_tokenizer.py",1373,10,"patch_size",python,selection_mouse +613,1806157,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",0,0,"",shellscript,tab +614,1807743,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1110,0,"",shellscript,selection_mouse +615,1809656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1121,0,"\n ",shellscript,content +616,1809892,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1126,0,"-",shellscript,content +617,1809893,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1127,0,"",shellscript,selection_keyboard +618,1809996,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1127,0,"-",shellscript,content +619,1809997,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1128,0,"",shellscript,selection_keyboard +620,1810271,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1128,0,"patch_size",shellscript,content +621,1811400,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1138,0,"=",shellscript,content +622,1811401,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1139,0,"",shellscript,selection_keyboard +623,1811630,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1139,0,"1",shellscript,content +624,1811631,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1140,0,"",shellscript,selection_keyboard +625,1812107,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1140,0,"6",shellscript,content +626,1812108,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1141,0,"",shellscript,selection_keyboard +627,1812342,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1141,0," ",shellscript,content +628,1812343,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1142,0,"",shellscript,selection_keyboard +629,1812580,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1142,0,"\",shellscript,content +630,1812581,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1143,0,"",shellscript,selection_keyboard +631,1813464,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",1142,0,"",shellscript,selection_command +632,1820434,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",689,0,"",shellscript,selection_mouse +633,1821366,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",689,1,"m",shellscript,content +634,1827166,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",0,0,"",shellscript,tab +635,1828254,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",587,0,"",shellscript,selection_mouse 
+636,1829151,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",587,1,"m",shellscript,content +637,1833948,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",0,0,"",shellscript,tab +638,1835338,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",0,0,"",shellscript,tab +639,1836426,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",0,0,"",shellscript,tab +640,1841800,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",0,0,"",shellscript,tab +641,1846207,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",653,0,"",shellscript,selection_mouse +642,1846392,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",653,2,"he",shellscript,selection_mouse +643,1846393,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",653,3,"hel",shellscript,selection_mouse +644,1846393,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",653,5,"helmh",shellscript,selection_mouse +645,1846408,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",653,6,"helmho",shellscript,selection_mouse +646,1847333,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",659,0,"",shellscript,selection_mouse +647,1848181,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",662,0,"",shellscript,selection_mouse +648,1849323,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",685,0,"_mihir",shellscript,content +649,1849325,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",691,0,"",shellscript,selection_command +650,1849974,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",690,0,"",shellscript,selection_command +651,1850160,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",689,0,"",shellscript,selection_command +652,1850635,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",688,0,"",shellscript,selection_command +653,1850707,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",687,0,"",shellscript,selection_command +654,1850708,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",686,0,"",shellscript,selection_command +655,1850735,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",685,0,"",shellscript,selection_command +656,1850770,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",684,0,"",shellscript,selection_command +657,1850804,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",683,0,"",shellscript,selection_command +658,1850855,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",682,0,"",shellscript,selection_command +659,1850868,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",681,0,"",shellscript,selection_command +660,1850939,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",680,0,"",shellscript,selection_command +661,1850998,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",679,0,"",shellscript,selection_command 
+662,1851110,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",678,0,"",shellscript,selection_command +663,1851171,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",677,0,"",shellscript,selection_command +664,1851330,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",676,0,"",shellscript,selection_command +665,1851488,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",675,0,"",shellscript,selection_command +666,1851652,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",674,0,"",shellscript,selection_command +667,1852640,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",653,21,"",shellscript,content +668,1854208,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",653,1,"",shellscript,content +669,1854383,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",653,1,"",shellscript,content +670,1855606,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",654,0,"",shellscript,selection_command +671,1855914,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",653,0,"",shellscript,selection_command +672,1856449,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",653,1,"",shellscript,content +673,1856641,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",653,1,"",shellscript,content +674,1856832,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",653,1,"",shellscript,content +675,1856966,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",653,1,"",shellscript,content +676,1857978,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",666,0,"",shellscript,selection_command +677,1858380,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",653,0,"",shellscript,selection_command +678,1859254,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",666,0,"",shellscript,selection_command +679,1860045,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",783,0,"_10m",shellscript,content +680,1860045,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",757,9,"",shellscript,content +681,1860045,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",750,0,"_",shellscript,content +682,1860045,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",749,1,"",shellscript,content +683,1860045,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",744,0,"d",shellscript,content +684,1860046,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",743,0,"r",shellscript,content +685,1860046,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",741,2,"",shellscript,content +686,1860046,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",740,0,"h",shellscript,content +687,1860046,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",739,0,"s_",shellscript,content 
+688,1860046,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",736,3,"",shellscript,content +689,1860046,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",733,1,"",shellscript,content +690,1860046,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",729,0,"d3695-",shellscript,content +691,1860046,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",728,1,"",shellscript,content +692,1860046,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",727,0,"m_i",shellscript,content +693,1860046,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",725,2,"",shellscript,content +694,1860046,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",724,0,"atch/t",shellscript,content +695,1860046,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",719,5,"",shellscript,content +696,1860046,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",718,0,"sc",shellscript,content +697,1860046,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",715,3,"",shellscript,content +698,1860046,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",704,0,"rk",shellscript,content +699,1860047,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",695,9,"",shellscript,content +700,1860047,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",694,0,"w",shellscript,content +701,1860047,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",692,2,"",shellscript,content +702,1860047,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",688,3,"",shellscript,content +703,1860047,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",687,0,"hkf",shellscript,content +704,1860047,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",685,2,"",shellscript,content +705,1864653,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1225,0,"ctio",shellscript,content +706,1864654,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1224,0,"/jafar_og_reprod",shellscript,content +707,1864654,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1223,0,"hi",shellscript,content +708,1864654,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1222,1,"",shellscript,content +709,1864654,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1221,0,"m",shellscript,content +710,1864654,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1219,2,"",shellscript,content +711,1864654,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1206,0,"d",shellscript,content +712,1864654,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1205,0,"r",shellscript,content +713,1864654,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1203,2,"",shellscript,content 
+714,1864654,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1202,0,"h",shellscript,content +715,1864654,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1201,0,"s_",shellscript,content +716,1864655,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1198,3,"",shellscript,content +717,1864655,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1195,1,"",shellscript,content +718,1864655,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1191,0,"d3695-",shellscript,content +719,1864655,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1190,1,"",shellscript,content +720,1864655,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1189,0,"m_i",shellscript,content +721,1864655,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1187,2,"",shellscript,content +722,1864655,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1186,0,"tch/t",shellscript,content +723,1864655,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1178,8,"",shellscript,content +724,1864655,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1177,0,"scr",shellscript,content +725,1864655,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1166,0,"rk",shellscript,content +726,1864655,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1157,9,"",shellscript,content +727,1864655,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1156,0,"w",shellscript,content +728,1864655,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1154,2,"",shellscript,content +729,1864655,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1150,3,"",shellscript,content +730,1864655,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1149,0,"hkf",shellscript,content +731,1864656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1147,2,"",shellscript,content +732,1864656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1058,0,"ctio",shellscript,content +733,1864656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1057,0,"od",shellscript,content +734,1864656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1056,0,"g_rep",shellscript,content +735,1864656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1054,2,"",shellscript,content +736,1864656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1053,0,"mihir/jafar_",shellscript,content +737,1864656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1052,1,"",shellscript,content +738,1864656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1039,0,"d",shellscript,content +739,1864656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1038,0,"r",shellscript,content 
+740,1864656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1036,2,"",shellscript,content +741,1864656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1035,0,"_sh",shellscript,content +742,1864656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1031,3,"",shellscript,content +743,1864656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1028,1,"",shellscript,content +744,1864656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1024,0,"d3695-",shellscript,content +745,1864656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1023,1,"",shellscript,content +746,1864657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1022,0,"m_i",shellscript,content +747,1864657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1020,2,"",shellscript,content +748,1864657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1019,0,"atch/t",shellscript,content +749,1864657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1014,5,"",shellscript,content +750,1864657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1013,0,"sc",shellscript,content +751,1864657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1010,3,"",shellscript,content +752,1864657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",999,0,"rk",shellscript,content +753,1864657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",990,9,"",shellscript,content +754,1864657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",989,0,"w",shellscript,content +755,1864657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",987,2,"",shellscript,content +756,1864657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",983,3,"",shellscript,content +757,1864657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",982,0,"hkf",shellscript,content +758,1864657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",980,2,"",shellscript,content +759,1864657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",875,0,"ctio",shellscript,content +760,1864657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",874,0,"od",shellscript,content +761,1864658,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",873,0,"g_rep",shellscript,content +762,1864658,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",871,2,"",shellscript,content +763,1864658,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",870,0,"mihir/jafar_",shellscript,content +764,1864658,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",869,1,"",shellscript,content +765,1864658,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",856,0,"d",shellscript,content 
+766,1864658,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",855,0,"r",shellscript,content +767,1864658,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",853,2,"",shellscript,content +768,1864658,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",852,0,"_sh",shellscript,content +769,1864658,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",848,3,"",shellscript,content +770,1864658,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",845,1,"",shellscript,content +771,1864658,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",841,0,"d3695-",shellscript,content +772,1864658,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",840,1,"",shellscript,content +773,1864658,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",839,0,"m_i",shellscript,content +774,1864658,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",837,2,"",shellscript,content +775,1864658,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",836,0,"atch/t",shellscript,content +776,1864659,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",831,5,"",shellscript,content +777,1864659,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",830,0,"sc",shellscript,content +778,1864659,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",827,3,"",shellscript,content +779,1864659,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",816,0,"rk",shellscript,content +780,1864659,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",807,9,"",shellscript,content +781,1864659,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",806,0,"w",shellscript,content +782,1864659,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",804,2,"",shellscript,content +783,1864659,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",800,3,"",shellscript,content +784,1864659,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",799,0,"hkf",shellscript,content +785,1864659,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",797,2,"",shellscript,content +786,1867620,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",422,0,"ctio",shellscript,content +787,1867620,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",421,0,"_og_reprod",shellscript,content +788,1867620,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",420,0,"hir/jafa",shellscript,content +789,1867620,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",419,1,"",shellscript,content +790,1867620,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",418,0,"gs_m",shellscript,content +791,1867620,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",417,0,"l",shellscript,content 
+792,1867621,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",416,1,"",shellscript,content +793,1867621,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",410,0,"d",shellscript,content +794,1867621,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",409,0,"r",shellscript,content +795,1867621,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",407,2,"",shellscript,content +796,1867621,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",406,0,"_sh",shellscript,content +797,1867621,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",402,3,"",shellscript,content +798,1867621,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",399,1,"",shellscript,content +799,1867621,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",395,0,"d3695-",shellscript,content +800,1867621,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",394,1,"",shellscript,content +801,1867621,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",393,0,"m_i",shellscript,content +802,1867621,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",391,2,"",shellscript,content +803,1867621,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",390,0,"atch/t",shellscript,content +804,1867621,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",385,5,"",shellscript,content +805,1867621,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",384,0,"sc",shellscript,content +806,1867622,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",381,3,"",shellscript,content +807,1867622,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",370,0,"k",shellscript,content +808,1867622,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",360,10,"",shellscript,content +809,1867622,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",359,0,"wo",shellscript,content +810,1867622,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",358,1,"",shellscript,content +811,1867622,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",354,3,"",shellscript,content +812,1867622,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",353,0,"hkf",shellscript,content +813,1867622,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",351,2,"",shellscript,content +814,1867622,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",313,0,"ctio",shellscript,content +815,1867622,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",312,0,"_og_reprod",shellscript,content +816,1867622,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",311,0,"hir/jafa",shellscript,content +817,1867622,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",310,1,"",shellscript,content 
+818,1867622,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",309,0,"gs_m",shellscript,content +819,1867622,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",308,0,"l",shellscript,content +820,1867623,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",307,1,"",shellscript,content +821,1867623,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",301,0,"d",shellscript,content +822,1867623,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",300,0,"r",shellscript,content +823,1867623,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",298,2,"",shellscript,content +824,1867623,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",297,0,"_sh",shellscript,content +825,1867623,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",293,3,"",shellscript,content +826,1867623,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",290,1,"",shellscript,content +827,1867623,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",286,0,"d3695-",shellscript,content +828,1867623,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",285,1,"",shellscript,content +829,1867623,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",284,0,"m_i",shellscript,content +830,1867623,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",282,2,"",shellscript,content +831,1867623,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",281,0,"atch/t",shellscript,content +832,1867623,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",276,5,"",shellscript,content +833,1867623,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",275,0,"sc",shellscript,content +834,1867623,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",272,3,"",shellscript,content +835,1867623,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",261,0,"k",shellscript,content +836,1867624,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",251,10,"",shellscript,content +837,1867624,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",250,0,"wo",shellscript,content +838,1867624,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",249,1,"",shellscript,content +839,1867624,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",245,3,"",shellscript,content +840,1867624,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",244,0,"hkf",shellscript,content +841,1867624,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",242,2,"",shellscript,content +842,1869733,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",354,0,"",shellscript,selection_command +843,1870189,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",483,0,"",shellscript,selection_command 
+844,1870268,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",541,0,"",shellscript,selection_command +845,1870280,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",542,0,"",shellscript,selection_command +846,1870300,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",566,0,"",shellscript,selection_command +847,1870340,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",573,0,"",shellscript,selection_command +848,1870408,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",574,0,"",shellscript,selection_command +849,1870408,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",600,0,"",shellscript,selection_command +850,1870447,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",601,0,"",shellscript,selection_command +851,1870478,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",626,0,"",shellscript,selection_command +852,1870491,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",653,0,"",shellscript,selection_command +853,1870520,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",654,0,"",shellscript,selection_command +854,1870533,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",706,0,"",shellscript,selection_command +855,1870564,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",707,0,"",shellscript,selection_command +856,1870588,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",815,0,"",shellscript,selection_command +857,1870656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",816,0,"",shellscript,selection_command +858,1870667,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1020,0,"",shellscript,selection_command +859,1870668,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1200,0,"",shellscript,selection_command +860,1870707,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1350,0,"",shellscript,selection_command +861,1870770,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1375,0,"",shellscript,selection_command +862,1870795,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1376,0,"",shellscript,selection_command +863,1870819,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1393,0,"",shellscript,selection_command +864,1870844,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1394,0,"",shellscript,selection_command +865,1871046,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1426,0,"",shellscript,selection_command +866,1871295,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1459,0,"",shellscript,selection_command +867,1871440,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1512,0,"",shellscript,selection_command +868,1871631,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1553,0,"",shellscript,selection_command 
+869,1871814,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1585,0,"",shellscript,selection_command +870,1872039,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1597,0,"",shellscript,selection_command +871,1872329,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1585,0,"",shellscript,selection_command +872,1872810,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1553,0,"",shellscript,selection_command +873,1872926,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1512,0,"",shellscript,selection_command +874,1873090,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1459,0,"",shellscript,selection_command +875,1901261,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1134,0,"",shellscript,selection_mouse +876,1901689,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1134,1,"t",shellscript,selection_mouse +877,1901690,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1134,6,"train_",shellscript,selection_mouse +878,1901690,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1134,190,"train_lam_coinrun_og_reproduction_28246647/lam_1756303037_200000""\nCHECKPOINT_DIR=""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/jafar_og_reproduction/dynamics/${j",shellscript,selection_mouse +879,1902326,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1129,0,"",shellscript,selection_mouse +880,1902482,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1108,21,"jafar_og_reproduction",shellscript,selection_mouse +881,1902873,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",937,0,"",shellscript,selection_mouse +882,1904201,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",869,0,"",shellscript,selection_mouse +883,1904966,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",815,0,"",shellscript,selection_mouse +884,1906025,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",815,0,"\n",shellscript,content +885,1907020,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",816,0,"#",shellscript,content +886,1907021,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",817,0,"",shellscript,selection_keyboard +887,1907235,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",817,0," ",shellscript,content +888,1907236,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",818,0,"",shellscript,selection_keyboard +889,1907492,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",818,0,"T",shellscript,content +890,1907493,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",819,0,"",shellscript,selection_keyboard +891,1907649,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",819,0,"O",shellscript,content +892,1907650,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",820,0,"",shellscript,selection_keyboard 
+893,1907742,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",820,0,"D",shellscript,content +894,1907743,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",821,0,"",shellscript,selection_keyboard +895,1907802,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",821,0,"O",shellscript,content +896,1907803,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",822,0,"",shellscript,selection_keyboard +897,1907900,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",822,0," ",shellscript,content +898,1907901,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",823,0,"",shellscript,selection_keyboard +899,1910944,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",823,0,"m",shellscript,content +900,1910945,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",824,0,"",shellscript,selection_keyboard +901,1911084,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",824,0,"i",shellscript,content +902,1911085,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",825,0,"",shellscript,selection_keyboard +903,1911182,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",825,0,"h",shellscript,content +904,1911183,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",826,0,"",shellscript,selection_keyboard +905,1911284,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",826,0,"i",shellscript,content +906,1911285,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",827,0,"",shellscript,selection_keyboard +907,1911360,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",827,0,"r",shellscript,content +908,1911361,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",828,0,"",shellscript,selection_keyboard +909,1912975,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",828,0,": update the tokenizer and lam checkpoints",shellscript,content +910,1914599,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",869,0,"",shellscript,selection_command +911,1914711,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",924,0,"",shellscript,selection_command +912,1915013,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1128,0,"",shellscript,selection_command +913,1915268,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",924,0,"",shellscript,selection_command +914,1915915,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",923,0,"",shellscript,selection_command +915,1916413,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",922,0,"",shellscript,selection_command +916,1916448,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",921,0,"",shellscript,selection_command +917,1916476,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",920,0,"",shellscript,selection_command 
+918,1916510,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",919,0,"",shellscript,selection_command +919,1916538,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",918,0,"",shellscript,selection_command +920,1916571,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",917,0,"",shellscript,selection_command +921,1916599,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",916,0,"",shellscript,selection_command +922,1916660,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",915,0,"",shellscript,selection_command +923,1916686,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",914,0,"",shellscript,selection_command +924,1916723,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",913,0,"",shellscript,selection_command +925,1916737,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",912,0,"",shellscript,selection_command +926,1916771,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",911,0,"",shellscript,selection_command +927,1916806,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",910,0,"",shellscript,selection_command +928,1916859,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",909,0,"",shellscript,selection_command +929,1916915,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",908,0,"",shellscript,selection_command +930,1916959,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",907,0,"",shellscript,selection_command +931,1916960,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",906,0,"",shellscript,selection_command +932,1916960,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",905,0,"",shellscript,selection_command +933,1916988,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",904,0,"",shellscript,selection_command +934,1917029,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",903,0,"",shellscript,selection_command +935,1917053,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",902,0,"",shellscript,selection_command +936,1917081,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",901,0,"",shellscript,selection_command +937,1917105,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",900,0,"",shellscript,selection_command +938,1917128,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",899,0,"",shellscript,selection_command +939,1917137,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",898,0,"",shellscript,selection_command +940,1917202,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",897,0,"",shellscript,selection_command +941,1917224,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",896,0,"",shellscript,selection_command +942,1917334,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1100,0,"",shellscript,selection_command 
+943,1917719,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1254,0,"\n",shellscript,content +944,1918254,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1256,0,"",shellscript,selection_command +945,1918341,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1255,0,"",shellscript,selection_command +946,1918493,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1075,0,"",shellscript,selection_command +947,1918677,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",871,0,"",shellscript,selection_command +948,1919065,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",872,0,"",shellscript,selection_command +949,1919561,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",873,0,"",shellscript,selection_command +950,1919599,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",874,0,"",shellscript,selection_command +951,1919642,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",875,0,"",shellscript,selection_command +952,1919659,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",876,0,"",shellscript,selection_command +953,1919717,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",877,0,"",shellscript,selection_command +954,1919720,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",878,0,"",shellscript,selection_command +955,1919753,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",879,0,"",shellscript,selection_command +956,1919815,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",880,0,"",shellscript,selection_command +957,1919843,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",881,0,"",shellscript,selection_command +958,1919887,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",882,0,"",shellscript,selection_command +959,1919925,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",883,0,"",shellscript,selection_command +960,1919925,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",884,0,"",shellscript,selection_command +961,1919966,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",885,0,"",shellscript,selection_command +962,1919998,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",886,0,"",shellscript,selection_command +963,1919999,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",887,0,"",shellscript,selection_command +964,1920053,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",888,0,"",shellscript,selection_command +965,1920067,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",889,0,"",shellscript,selection_command +966,1920104,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",890,0,"",shellscript,selection_command +967,1922826,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",890,184,"",shellscript,content 
+968,1922861,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",889,0,"",shellscript,selection_command +969,1924308,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",905,164,"",shellscript,content +970,1924308,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",890,0,"""""",shellscript,content +971,1926236,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1102,0,"",shellscript,selection_mouse +972,1926242,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1101,0,"",shellscript,selection_command +973,1926763,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1085,0,"",shellscript,selection_mouse +974,1927295,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1084,0,"",shellscript,selection_mouse +975,1927306,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1083,0,"",shellscript,selection_command +976,1928298,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1085,0,"",shellscript,selection_mouse +977,1929936,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1102,0,"",shellscript,selection_mouse +978,1929943,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",1101,0,"",shellscript,selection_command +979,1996306,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=2:00:00\n#SBATCH --partition=cpu_p\n#SBATCH --qos=cpu_short\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset\n\n\n# Log the sbatch script\ncat $0\nsource .venv/bin/activate\n\npython generate_dataset.py --num_episodes 10000 --output_dir /lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes_10k",shellscript,tab +980,1998752,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n\n\n# Log the sbatch script\ncat $0\nsource .venv/bin/activate\n\npython generate_dataset.py \\n --num_episodes 10000 \\n --output_dir /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m \\n --min_episode_length 1000",shellscript,tab +981,2003676,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",634,0,"",shellscript,selection_mouse +982,2003854,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",633,1,"s",shellscript,selection_mouse +983,2003855,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",628,6,"isodes",shellscript,selection_mouse 
+984,2003855,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",623,11,"un_episodes",shellscript,selection_mouse +985,2003856,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",618,16,"coinrun_episodes",shellscript,selection_mouse +986,2003858,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",612,22,"inrun/coinrun_episodes",shellscript,selection_mouse +987,2003897,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",605,29,"data_coinrun/coinrun_episodes",shellscript,selection_mouse +988,2003920,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",595,39,"ws_shared/data_coinrun/coinrun_episodes",shellscript,selection_mouse +989,2003951,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",531,103,"\n --output_dir /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes",shellscript,selection_mouse +990,2004256,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",570,64,"scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes",shellscript,selection_mouse +991,2004394,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",568,66,"e/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes",shellscript,selection_mouse +992,2004395,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",566,68,"ace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes",shellscript,selection_mouse +993,2004395,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",564,70,"space/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes",shellscript,selection_mouse +994,2004396,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",562,72,"rkspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes",shellscript,selection_mouse +995,2004396,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",561,73,"orkspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes",shellscript,selection_mouse +996,2004396,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",560,74,"workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes",shellscript,selection_mouse +997,2006160,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,0,"",shellscript,selection_mouse +998,2006374,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,1,"/",shellscript,selection_mouse +999,2006375,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,4,"/hkf",shellscript,selection_mouse +1000,2006375,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,7,"/hkfs/w",shellscript,selection_mouse +1001,2006375,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",531,18,"\n --output_dir ",shellscript,selection_mouse +1002,2006579,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,39,"/hkfs/work/workspace/scratch/tum_ind369",shellscript,selection_mouse +1003,2006580,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,45,"/hkfs/work/workspace/scratch/tum_ind3695-jafa",shellscript,selection_mouse +1004,2006580,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,49,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_",shellscript,selection_mouse 
+1005,2006581,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,54,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_share",shellscript,selection_mouse +1006,2006581,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,59,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/dat",shellscript,selection_mouse +1007,2006614,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,64,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coi",shellscript,selection_mouse +1008,2006640,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,68,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun",shellscript,selection_mouse +1009,2006669,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,70,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/c",shellscript,selection_mouse +1010,2006723,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,72,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coi",shellscript,selection_mouse +1011,2006757,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,73,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coin",shellscript,selection_mouse +1012,2006788,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,74,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinr",shellscript,selection_mouse +1013,2006815,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,75,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinru",shellscript,selection_mouse +1014,2006841,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,77,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_",shellscript,selection_mouse +1015,2006869,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,78,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_e",shellscript,selection_mouse +1016,2006897,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,79,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_ep",shellscript,selection_mouse +1017,2006948,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,80,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_epi",shellscript,selection_mouse +1018,2007016,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,81,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_epis",shellscript,selection_mouse +1019,2007074,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,82,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episo",shellscript,selection_mouse +1020,2007229,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,83,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episod",shellscript,selection_mouse +1021,2007338,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,84,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episode",shellscript,selection_mouse 
+1022,2008189,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",549,85,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes",shellscript,selection_mouse +1023,2011356,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",0,0,"",shellscript,tab +1024,2012520,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",566,0,"",shellscript,selection_mouse +1025,2012695,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",566,6,"/lustr",shellscript,selection_mouse +1026,2012696,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",566,17,"/lustre/groups/ha",shellscript,selection_mouse +1027,2012696,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",566,35,"/lustre/groups/haicu/workspace/alfr",shellscript,selection_mouse +1028,2012696,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",566,86,"/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes_10k",shellscript,selection_mouse +1029,2013166,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",566,83,"/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes_",shellscript,selection_mouse +1030,2013167,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",566,84,"/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes_1",shellscript,selection_mouse +1031,2013167,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",566,85,"/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes_10",shellscript,selection_mouse +1032,2013211,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",566,86,"/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes_10k",shellscript,selection_mouse +1033,2014292,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",505,147,"",shellscript,content +1034,2015403,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",505,0,"python generate_dataset.py --num_episodes 10000 --output_dir /lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes_10k",shellscript,content +1035,2015449,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",505,0,"",shellscript,selection_command +1036,2017130,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",567,0,"",shellscript,selection_mouse +1037,2017389,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",567,6,"lustre",shellscript,selection_mouse +1038,2017663,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",566,7,"/lustre",shellscript,selection_mouse +1039,2017773,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",565,8," /lustre",shellscript,selection_mouse +1040,2018014,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",565,0,"",shellscript,selection_mouse +1041,2018756,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",505,0,"",shellscript,selection_command +1042,2019025,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",505,0," ",shellscript,content +1043,2019027,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",506,0,"",shellscript,selection_keyboard +1044,2020528,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",505,1,"",shellscript,content 
+1045,2021910,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",566,0,"",shellscript,selection_mouse +1046,2022392,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",566,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes",shellscript,content +1047,2023134,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",651,0," ",shellscript,content +1048,2023135,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",652,0,"",shellscript,selection_keyboard +1049,2023893,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",651,0,"",shellscript,selection_command +1050,2025278,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",652,0,"",shellscript,selection_command +1051,2026073,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",738,0,"",shellscript,selection_keyboard +1052,2026519,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",737,0,"",shellscript,selection_command +1053,2028184,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",652,0,"",shellscript,selection_mouse +1054,2030527,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",652,86,"",shellscript,content +1055,2030555,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",651,0,"",shellscript,selection_command +1056,2033626,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",445,0,"",shellscript,selection_mouse +1057,2034171,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",346,0,"",shellscript,selection_mouse +1058,2037570,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",0,0,"",shellscript,tab +1059,2039232,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",416,0,"",shellscript,selection_mouse +1060,2039362,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",415,1,"\n",shellscript,selection_mouse +1061,2039540,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",298,118,"tch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n",shellscript,selection_mouse +1062,2039541,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",291,125,"ce/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n",shellscript,selection_mouse +1063,2039541,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",286,130,"rkspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n",shellscript,selection_mouse +1064,2039542,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",279,137,"work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n",shellscript,selection_mouse +1065,2039542,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",274,142,"hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n",shellscript,selection_mouse 
+1066,2039542,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",269,147,"ror=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n",shellscript,selection_mouse +1067,2039542,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",265,151,"--error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n",shellscript,selection_mouse +1068,2039543,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",261,155,"TCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n",shellscript,selection_mouse +1069,2039543,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",259,157,"BATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n",shellscript,selection_mouse +1070,2039589,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",257,159,"#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n",shellscript,selection_mouse +1071,2039618,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",145,271,"#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n",shellscript,selection_mouse +1072,2042737,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",415,0,"",shellscript,selection_mouse +1073,2042753,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",414,0,"",shellscript,selection_command +1074,2042882,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",414,1,"m",shellscript,selection_mouse +1075,2042886,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",415,0,"",shellscript,selection_command +1076,2042982,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",411,4,"_10m",shellscript,selection_mouse +1077,2042984,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",289,126,"pace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_mouse +1078,2042984,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",140,275,"sk=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_mouse +1079,2043012,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",47,368,"--ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH 
--error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_mouse +1080,2043071,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",23,392,"BATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_mouse +1081,2043120,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",21,394,"#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_mouse +1082,2043160,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",20,395,"\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_mouse +1083,2043560,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",21,394,"#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_mouse +1084,2046633,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",0,0,"",shellscript,tab +1085,2047525,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",445,0,"",shellscript,selection_mouse +1086,2047618,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",444,1,"\n",shellscript,selection_mouse +1087,2047711,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",192,253,"roups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset\n",shellscript,selection_mouse +1088,2047712,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",129,316,"=cpu_short\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset\n",shellscript,selection_mouse 
+1089,2047713,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",99,346,"-partition=cpu_p\n#SBATCH --qos=cpu_short\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset\n",shellscript,selection_mouse +1090,2047713,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",96,349,"H --partition=cpu_p\n#SBATCH --qos=cpu_short\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset\n",shellscript,selection_mouse +1091,2047738,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",94,351,"TCH --partition=cpu_p\n#SBATCH --qos=cpu_short\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset\n",shellscript,selection_mouse +1092,2047766,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",93,352,"ATCH --partition=cpu_p\n#SBATCH --qos=cpu_short\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset\n",shellscript,selection_mouse +1093,2047790,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",69,376,"BATCH --time=2:00:00\n#SBATCH --partition=cpu_p\n#SBATCH --qos=cpu_short\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset\n",shellscript,selection_mouse +1094,2047866,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",41,404,"BATCH --ntasks-per-node=1\n#SBATCH --time=2:00:00\n#SBATCH --partition=cpu_p\n#SBATCH --qos=cpu_short\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset\n",shellscript,selection_mouse +1095,2047886,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",40,405,"SBATCH --ntasks-per-node=1\n#SBATCH --time=2:00:00\n#SBATCH --partition=cpu_p\n#SBATCH --qos=cpu_short\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset\n",shellscript,selection_mouse 
+1096,2047962,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",39,406,"#SBATCH --ntasks-per-node=1\n#SBATCH --time=2:00:00\n#SBATCH --partition=cpu_p\n#SBATCH --qos=cpu_short\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset\n",shellscript,selection_mouse +1097,2048034,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",21,424,"#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=2:00:00\n#SBATCH --partition=cpu_p\n#SBATCH --qos=cpu_short\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/generate_dataset/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset\n",shellscript,selection_mouse +1098,2048800,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",21,425,"",shellscript,content +1099,2049370,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",21,0,"#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,content +1100,2050465,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",415,0,"\n",shellscript,content +1101,2051418,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",415,0,"",shellscript,selection_mouse +1102,2051420,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",414,0,"",shellscript,selection_command +1103,2051679,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",415,0,"",shellscript,selection_command +1104,2051819,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",414,1,"",shellscript,content +1105,2052395,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",414,0,"k",shellscript,content +1106,2052396,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",415,0,"",shellscript,selection_keyboard +1107,2052490,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",414,0,"",shellscript,selection_command +1108,2074130,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",0,0,"",shellscript,tab +1109,2080769,"TERMINAL",0,0,"git add jobs/mihir/",,terminal_command +1110,2080825,"TERMINAL",0,0,"]633;E;2025-09-04 10:31:37 git add jobs/mihir/;9618af06-aa7a-44c4-9160-5cfc3f9eaf06]633;C",,terminal_output +1111,2080836,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine/slurm]633;D;0",,terminal_output +1112,2084093,"TERMINAL",0,0,"git commit -m ""added og coinrun jobs""",,terminal_command +1113,2084141,"TERMINAL",0,0,"]633;E;2025-09-04 10:31:41 git commit -m ""added og coinrun jobs"";9618af06-aa7a-44c4-9160-5cfc3f9eaf06]633;C",,terminal_output +1114,2084439,"TERMINAL",0,0,"[main dda1543] added og coinrun jobs\r\n 11 files changed, 615 insertions(+)\r\n create mode 100644 
jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch\r\n create mode 100644 jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch\r\n create mode 100644 jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch\r\n create mode 100644 jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch\r\n create mode 100644 jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch\r\n create mode 100644 jobs/mihir/horeka/lam/train_lam_minecraft_1node_dev.sbatch\r\n create mode 100644 jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-133M.sbatch\r\n create mode 100644 jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-311M.sbatch\r\n create mode 100644 jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch\r\n create mode 100644 jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch\r\n create mode 100644 jobs/mihir/horeka/lam/train_lam_minecraft_8node.sbatch\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine/slurm]633;D;0",,terminal_output +1115,2086298,"TERMINAL",0,0,"git status",,terminal_command +1116,2086311,"TERMINAL",0,0,"]633;E;2025-09-04 10:31:43 git status;9618af06-aa7a-44c4-9160-5cfc3f9eaf06]633;COn branch main\r\nYour branch is ahead of 'origin/main' by 1 commit.\r\n (use ""git push"" to publish your local commits)\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tutils/alfred/sqrt_lr_scaling.py\r\n\r\nnothing added to commit but untracked files present (use ""git add"" to track)\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine/slurm]633;D;0",,terminal_output +1117,2088514,"TERMINAL",0,0,"git push",,terminal_command +1118,2088564,"TERMINAL",0,0,"]633;E;2025-09-04 10:31:45 git push;9618af06-aa7a-44c4-9160-5cfc3f9eaf06]633;C",,terminal_output +1119,2089848,"TERMINAL",0,0,"To github.com:p-doom/slurm.git\r\n ! [rejected]  main -> main (fetch first)\r\nerror: failed to push some refs to 'github.com:p-doom/slurm.git'\r\nhint: Updates were rejected because the remote contains work that you do not\r\nhint: have locally. This is usually caused by another repository pushing to\r\nhint: the same ref. 
If you want to integrate the remote changes, use\r\nhint: 'git pull' before pushing again.\r\nhint: See the 'Note about fast-forwards' in 'git push --help' for details.\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine/slurm]633;D;1",,terminal_output +1120,2092428,"TERMINAL",0,0,"git pull",,terminal_command +1121,2092481,"TERMINAL",0,0,"]633;E;2025-09-04 10:31:49 git pull;9618af06-aa7a-44c4-9160-5cfc3f9eaf06]633;C",,terminal_output +1122,2094201,"TERMINAL",0,0,"remote: Enumerating objects: 71, done.\r\nremote: Counting objects: 100% (71/71), done.\r\nremote: Compressing objects: 100% (21/21), done.\r\n",,terminal_output +1123,2094254,"TERMINAL",0,0,"remote: Total 53 (delta 35), reused 49 (delta 32), pack-reused 0 (from 0)\r\n",,terminal_output +1124,2094321,"TERMINAL",0,0,"Unpacking objects: 3% (2/53)\r",,terminal_output +1125,2094391,"TERMINAL",0,0,"Unpacking objects: 9% (5/53)\r",,terminal_output +1126,2094748,"TERMINAL",0,0,"Unpacking objects: 90% (48/53)\r",,terminal_output +1127,2094853,"TERMINAL",0,0,"Unpacking objects: 100% (53/53), 6.52 KiB | 11.00 KiB/s, done.\r\n",,terminal_output +1128,2095126,"TERMINAL",0,0,"From github.com:p-doom/slurm\r\n 63f0e09..034c3aa main -> origin/main\r\n",,terminal_output +1129,2095491,"TERMINAL",0,0,"hint: Waiting for your editor to close the file... 
""~/Projects/jasmine/slurm/.git/MERGE_MSG"" 6L, 273B\r\nMerge branch 'main' of github.com:p-doom/slurm\r\n# Please enter a commit message to explain why this merge is necessary,\r\n# especially if it merges an updated upstream into a topic branch.\r\n#\r\n# Lines starting with '#' will be ignored, and an empty message aborts\r\n# the commit.\r\n",,terminal_output +1133,2097158,"TERMINAL",0,0,":",,terminal_output +1134,2097742,"TERMINAL",0,0,"w",,terminal_output +1135,2097870,"TERMINAL",0,0,"q",,terminal_output +1136,2097984,"TERMINAL",0,0,"\r"".git/MERGE_MSG"" 6L, 273B written",,terminal_output +1137,2098063,"TERMINAL",0,0,"\r\nMerge made by the 'ort' strategy.\r\n",,terminal_output +1138,2098188,"TERMINAL",0,0," dev/alfred/berlin/topology/nnx/train_tokenizer_overfit_1.sbatch | 46 ++++++++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/berlin/topology/nnx/train_tokenizer_overfit_2_gpu.sbatch | 42 ++++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/berlin/topology/nnx/train_tokenizer_restore_2gpu_to_1gpu.sbatch | 41 +++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/berlin/topology/nnx/train_tokenizer_restore_2gpu_to_2gpu.sbatch | 41 +++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/berlin/topology/prennx/train_tokenizer_overfit_2_gpu.sbatch | 42 ++++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/berlin/topology/prennx/train_tokenizer_restore_2gpu_to_1gpu.sbatch | 43 +++++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/berlin/topology/prennx/train_tokenizer_restore_2gpu_to_2gpu.sbatch | 43 +++++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/berlin/topology/train_tokenizer_overfit_1gpu.sbatch | 41 +++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/berlin/topology/train_tokenizer_overfit_2_gpu.sbatch | 42 ++++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/berlin/topology/train_tokenizer_restore_1gpu_to_1gpu.sbatch | 41 +++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/berlin/topology/train_tokenizer_restore_1gpu_to_2gpu.sbatch | 40 ++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/berlin/topology/train_tokenizer_restore_2gpu_to_1gpu.sbatch | 42 ++++++++++++++++++++++++++++++++++++++++++\r\n dev/alfred/berlin/topology/train_tokenizer_restore_2gpu_to_2gpu.sbatch | 42 ++++++++++++++++++++++++++++++++++++++++++\r\n jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_baseline.sbatch | 79 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_mixed_prec.sbatch | 79 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_wsd.sbatch | 79 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_wsd_3e-5_3e-6.sbatch | 79 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_wsd_3e-6.sbatch | 79 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_wsd_8e-6.sbatch | 79 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n jobs/alfred/berlin/coinrun/coinrun_bigrun/coinrun_dynamics_reproduction.sbatch | 81 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n jobs/alfred/berlin/coinrun/coinrun_bigrun/coinrun_dynamics_reproduction_cotrain.sbatch | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n jobs/alfred/berlin/coinrun/coinrun_bigrun/coinrun_lam_big_run.sbatch | 79 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n jobs/alfred/berlin/coinrun/coinrun_bigrun/coinrun_tokenizer_repoduction_ffn_512_n_blocks_8_full_prec.sbatch | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n jobs/alfred/berlin/coinrun/coinrun_bigrun/generate_data.sbatch | 15 +++++++++++++++\r\n jobs/alfred/berlin/coinrun/coinrun_bigrun/sample.sbatch | 30 ++++++++++++++++++++++++++++++\r\n jobs/alfred/berlin/coinrun/coinrun_bigrun/sample_cotrain.sbatch | 28 ++++++++++++++++++++++++++++\r\n jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_lam_reproduction_ffn_512_num_blocks_8_full_prec.sbatch | 79 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_tokenizer_repoduction_ffn_512_n_blocks_8_full_prec.sbatch | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n 28 files changed, 1562 insertions(+)\r\n create mode 100644 dev/alfred/berlin/topology/nnx/train_tokenizer_overfit_1.sbatch\r\n create mode 100644 dev/alfred/berlin/topology/nnx/train_tokenizer_overfit_2_gpu.sbatch\r\n create mode 100644 dev/alfred/berlin/topology/nnx/train_tokenizer_restore_2gpu_to_1gpu.sbatch\r\n create mode 100644 dev/alfred/berlin/topology/nnx/train_tokenizer_restore_2gpu_to_2gpu.sbatch\r\n create mode 100644 dev/alfred/berlin/topology/prennx/train_tokenizer_overfit_2_gpu.sbatch\r\n create mode 100644 dev/alfred/berlin/topology/prennx/train_tokenizer_restore_2gpu_to_1gpu.sbatch\r\n create mode 100644 dev/alfred/berlin/topology/prennx/train_tokenizer_restore_2gpu_to_2gpu.sbatch\r\n create mode 100644 dev/alfred/berlin/topology/train_tokenizer_overfit_1gpu.sbatch\r\n create mode 100644 dev/alfred/berlin/topology/train_tokenizer_overfit_2_gpu.sbatch\r\n create mode 100644 dev/alfred/berlin/topology/train_tokenizer_restore_1gpu_to_1gpu.sbatch\r\n create mode 100644 dev/alfred/berlin/topology/train_tokenizer_restore_1gpu_to_2gpu.sbatch\r\n create mode 100644 dev/alfred/berlin/topology/train_tokenizer_restore_2gpu_to_1gpu.sbatch\r\n create mode 100644 dev/alfred/berlin/topology/train_tokenizer_restore_2gpu_to_2gpu.sbatch\r\n create mode 100644 jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_baseline.sbatch\r\n create mode 100644 jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_mixed_prec.sbatch\r\n create mode 100644 jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_wsd.sbatch\r\n create mode 100644 jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_wsd_3e-5_3e-6.sbatch\r\n create mode 100644 jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_wsd_3e-6.sbatch\r\n create mode 100644 jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_wsd_8e-6.sbatch\r\n create mode 100644 
jobs/alfred/berlin/coinrun/coinrun_bigrun/coinrun_dynamics_reproduction.sbatch\r\n create mode 100644 jobs/alfred/berlin/coinrun/coinrun_bigrun/coinrun_dynamics_reproduction_cotrain.sbatch\r\n create mode 100644 jobs/alfred/berlin/coinrun/coinrun_bigrun/coinrun_lam_big_run.sbatch\r\n create mode 100644 jobs/alfred/berlin/coinrun/coinrun_bigrun/coinrun_tokenizer_repoduction_ffn_512_n_blocks_8_full_prec.sbatch\r\n create mode 100644 jobs/alfred/berlin/coinrun/coinrun_bigrun/generate_data.sbatch\r\n create mode 100644 jobs/alfred/berlin/coinrun/coinrun_bigrun/sample.sbatch\r\n create mode 100644 jobs/alfred/berlin/coinrun/coinrun_bigrun/sample_cotrain.sbatch\r\n create mode 100644 jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_lam_reproduction_ffn_512_num_blocks_8_full_prec.sbatch\r\n create mode 100644 jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_tokenizer_repoduction_ffn_512_n_blocks_8_full_prec.sbatch\r\n",,terminal_output +1139,2108553,"TERMINAL",0,0,"git push",,terminal_command +1140,2108601,"TERMINAL",0,0,"]633;E;2025-09-04 10:32:05 git push;9618af06-aa7a-44c4-9160-5cfc3f9eaf06]633;C",,terminal_output +1141,2109814,"TERMINAL",0,0,"Enumerating objects: 29, done.\r\nCounting objects: 100% (27/27), done.\r\nDelta compression using up to 152 threads\r\nCompressing objects: 100% (21/21), done.\r\nWriting objects: 100% (21/21), 3.91 KiB | 800.00 KiB/s, done.\r\nTotal 21 (delta 14), reused 0 (delta 0), pack-reused 0\r\n",,terminal_output 
+1143,2109996,"TERMINAL",0,0,"remote: Resolving deltas: 100% (14/14), completed with 5 local objects.\r\n",,terminal_output +1144,2110072,"TERMINAL",0,0,"To github.com:p-doom/slurm.git\r\n 034c3aa..d7a176c main -> main\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine/slurm]633;D;0",,terminal_output +1145,2115382,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",670,0,"",shellscript,selection_mouse +1146,2115431,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",669,0,"",shellscript,selection_command +1147,2115961,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",475,0,"",shellscript,selection_mouse +1148,2157824,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",670,0,"",shellscript,selection_mouse +1149,2157838,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",669,0,"",shellscript,selection_command +1150,2162213,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch",0,0,"",shellscript,tab +1151,2163681,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",0,0,"",shellscript,tab +1152,2164704,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",0,0,"",shellscript,tab +1153,2166376,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",0,0,"",shellscript,tab +1154,2167100,"TERMINAL",0,0,"bash",,terminal_focus +1155,2170686,"TERMINAL",0,0,"bash",,terminal_focus +1156,2171766,"TERMINAL",0,0,"bash",,terminal_focus +1157,2174176,"TERMINAL",0,0,"jafar",,terminal_command +1158,2175811,"TERMINAL",0,0,"ls",,terminal_command +1159,2175845,"TERMINAL",0,0,"]633;E;2025-09-04 10:33:13 ls;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +1160,2175947,"TERMINAL",0,0,"generate_dataset.py genie.py LICENSE models README.md requirements.txt sample.py train_dynamics.py train_lam.py train_tokenizer.py utils\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1161,2179580,"TERMINAL",0,0,"bash",,terminal_focus +1162,2183315,"TERMINAL",0,0,"git remote -v",,terminal_command +1163,2186832,"TERMINAL",0,0,"bash",,terminal_focus +1164,2187913,"TERMINAL",0,0,"git clone git@github.com:p-doom/slurm.git",,terminal_command +1165,2187995,"TERMINAL",0,0,"]633;E;2025-09-04 10:33:25 git clone git@github.com:p-doom/slurm.git;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;CCloning into 'slurm'...\r\n",,terminal_output +1166,2189500,"TERMINAL",0,0,"remote: Enumerating objects: 1948, done.\r\nremote: Counting objects: 100% (263/263), done.\r\n",,terminal_output 
+1167,2189618,"TERMINAL",0,0,"remote: Compressing objects: 100% (142/142), done.\r\n",,terminal_output +1170,2189924,"TERMINAL",0,0,"remote: Total 1948 (delta 150), reused 219 (delta 114), pack-reused 1685 (from 1)\r\nReceiving objects: 100% (1948/1948), 263.96 KiB | 1.40 MiB/s, done.\r\nResolving deltas: 100% (1182/1182), done.\r\n",,terminal_output 
+1171,2190952,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1172,2201255,"TERMINAL",0,0,"ls",,terminal_command +1173,2201308,"TERMINAL",0,0,"]633;E;2025-09-04 10:33:38 ls;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;Cgenerate_dataset.py genie.py LICENSE models README.md requirements.txt sample.py slurm train_dynamics.py train_lam.py train_tokenizer.py utils\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1174,2216412,"TERMINAL",0,0,"cd slurm/",,terminal_command +1175,2216453,"TERMINAL",0,0,"]633;E;2025-09-04 10:33:53 cd slurm/;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar/slurm]633;D;0",,terminal_output +1176,2217286,"TERMINAL",0,0,"ls",,terminal_command +1177,2245872,"TERMINAL",0,0,"jasmine",,terminal_command +1178,2245921,"TERMINAL",0,0,"]633;E;2025-09-04 10:34:23 jasmine;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;Cbash: jasmine: command not found...\r\n",,terminal_output +1179,2247132,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jafar/slurm]633;D;127",,terminal_output +1180,2248522,"TERMINAL",0,0,"dev",,terminal_command +1181,2257775,"TERMINAL",0,0,"rm -rf jafar_matt/",,terminal_command +1182,2257826,"TERMINAL",0,0,"]633;E;2025-09-04 10:34:34 rm -rf jafar_matt/;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +1183,2264660,"TERMINAL",0,0,"bash",,terminal_focus +1184,2284681,"TERMINAL",0,0,"rm",,terminal_focus +1185,2285725,"TERMINAL",0,0,"^C\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;130",,terminal_output +1186,2289216,"TERMINAL",0,0,"du -h jafar_matt/",,terminal_command +1187,2289266,"TERMINAL",0,0,"]633;E;2025-09-04 10:35:06 du -h jafar_matt/;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C350K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/utils\r\n36K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/monitor\r\n34K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/cuda\r\n",,terminal_output +1188,2289392,"TERMINAL",0,0,"2.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dbr_quantization\r\n",,terminal_output 
+1189,2289749,"TERMINAL",0,0,"5.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onnx/pattern_conversion\r\n58K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onnx\r\n",,terminal_output +1190,2289920,"TERMINAL",0,0,"169K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization\r\n38K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils\r\n464K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes\r\n2.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen/cuda\r\n44K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen/onednn\r\n35K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen/fuser/cuda\r\n",,terminal_output +1191,2289982,"TERMINAL",0,0,"4.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen/fuser/cpu\r\n84K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen/fuser\r\n131K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen\r\n",,terminal_output +1192,2290284,"TERMINAL",0,0,"206K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python\r\n5.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/operator_upgraders\r\n",,terminal_output +1193,2290549,"TERMINAL",0,0,"101K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/interpreter\r\n",,terminal_output +1194,2290706,"TERMINAL",0,0,"231K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/static\r\n707K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime\r\n",,terminal_output +1195,2290831,"TERMINAL",0,0,"401K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization\r\n2.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/coreml/cpp\r\n6.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/coreml/objc\r\n11K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/coreml\r\n",,terminal_output +1196,2290950,"TERMINAL",0,0,"2.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/xnnpack/serialization\r\n2.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/xnnpack/executor\r\n2.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/xnnpack/compiler\r\n10K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/xnnpack\r\n62K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends\r\n",,terminal_output +1197,2291012,"TERMINAL",0,0,"163K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api\r\n",,terminal_output +1198,2291176,"TERMINAL",0,0,"42K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators\r\n832K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr\r\n4.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing\r\n",,terminal_output 
+1199,2291322,"TERMINAL",0,0,"34K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/train/optim\r\n38K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/train\r\n40K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/model_tracer\r\n36K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/nnc\r\n5.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/compatibility\r\n266K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile\r\n",,terminal_output +1200,2291384,"TERMINAL",0,0,"297K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir\r\n",,terminal_output +1201,2291507,"TERMINAL",0,0,"407K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend\r\n4.0M\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/jit\r\n35K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail\r\n",,terminal_output +1202,2291575,"TERMINAL",0,0,"4.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers\r\n76K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim\r\n",,terminal_output +1203,2291698,"TERMINAL",0,0,"2.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/python\r\n",,terminal_output +1204,2291820,"TERMINAL",0,0,"394K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/functional\r\n67K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/utils\r\n",,terminal_output +1205,2291924,"TERMINAL",0,0,"34K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/parallel\r\n",,terminal_output +1206,2292034,"TERMINAL",0,0,"398K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options\r\n",,terminal_output +1207,2292227,"TERMINAL",0,0,"260K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container\r\n876K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules\r\n1.9M\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn\r\n5.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/serialize\r\n",,terminal_output +1208,2292360,"TERMINAL",0,0,"103K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets\r\n36K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/detail\r\n",,terminal_output +1209,2292482,"TERMINAL",0,0,"6.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms\r\n36K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader\r\n",,terminal_output 
+1210,2292549,"TERMINAL",0,0,"40K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers\r\n260K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data\r\n2.6M\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch\r\n2.6M\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api/include\r\n2.6M\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/api\r\n2.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/functorch\r\n2.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/mps\r\n2.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/mtia/profiler\r\n5.0K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/mtia\r\n2.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/multiprocessing\r\n2.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/fx\r\n",,terminal_output +1211,2292677,"TERMINAL",0,0,"174K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo\r\n2.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/cpu\r\n35K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/export\r\n4.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/xpu\r\n66K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context\r\n34K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/engine\r\n",,terminal_output +1212,2292858,"TERMINAL",0,0,"12K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages\r\n",,terminal_output +1213,2292919,"TERMINAL",0,0,"3.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions\r\n",,terminal_output +1214,2293059,"TERMINAL",0,0,"119K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd\r\n2.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/cuda\r\n",,terminal_output +1215,2293165,"TERMINAL",0,0,"4.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/quantization\r\n",,terminal_output +1216,2293226,"TERMINAL",0,0,"3.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/control_plane\r\n",,terminal_output +1217,2293347,"TERMINAL",0,0,"71K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/symm_mem\r\n3.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/control_collectives\r\n1.2M\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d\r\n",,terminal_output +1218,2293651,"TERMINAL",0,0,"35K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/profiler\r\n2.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/metrics\r\n",,terminal_output +1219,2293778,"TERMINAL",0,0,"3.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/testing\r\n381K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc\r\n1.7M\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/distributed\r\n2.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/tensor\r\n",,terminal_output 
+1220,2293927,"TERMINAL",0,0,"2.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/internal_ops\r\n3.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops\r\n249K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core\r\n3.5K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/python\r\n36K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ops\r\n",,terminal_output +1221,2294049,"TERMINAL",0,0,"78K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend\r\n675K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/generated\r\n37K\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend\r\n1.1M\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc/lazy\r\n9.8M\tjafar_matt/lib/python3.10/site-packages/torch/include/torch/csrc\r\n9.8M\tjafar_matt/lib/python3.10/site-packages/torch/include/torch\r\n99K\tjafar_matt/lib/python3.10/site-packages/torch/include/fp16\r\n",,terminal_output +1222,2294368,"TERMINAL",0,0,"105K\tjafar_matt/lib/python3.10/site-packages/torch/include/ATen/cuda/detail\r\n",,terminal_output +1223,2294445,"TERMINAL",0,0,"259K\tjafar_matt/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable\r\n841K\tjafar_matt/lib/python3.10/site-packages/torch/include/ATen/cuda\r\n68K\tjafar_matt/lib/python3.10/site-packages/torch/include/ATen/hip/impl\r\n69K\tjafar_matt/lib/python3.10/site-packages/torch/include/ATen/hip\r\n",,terminal_output +1224,2294604,"TERMINAL",0,0,"39K\tjafar_matt/lib/python3.10/site-packages/torch/include/ATen/cudnn\r\n131K\tjafar_matt/lib/python3.10/site-packages/torch/include/ATen/core/op_registration\r\n",,terminal_output +1225,2294720,"TERMINAL",0,0,"132K\tjafar_matt/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl\r\n",,terminal_output +1226,2294908,"TERMINAL",0,0,"231K\tjafar_matt/lib/python3.10/site-packages/torch/include/ATen/core/boxing\r\n",,terminal_output +1227,2294962,"TERMINAL",0,0,"^C",,terminal_output +1228,2295186,"TERMINAL",0,0,"\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;130",,terminal_output +1229,2296102,"TERMINAL",0,0,"bash",,terminal_focus +1230,2298503,"TERMINAL",0,0,"",,terminal_focus +1231,2302657,"TERMINAL",0,0,"source /home/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/bin/activate",,terminal_command +1232,2302739,"TERMINAL",0,0,"]633;E;2025-09-04 10:35:19 source /home/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/bin/activate;e3f3d151-a063-4c85-891d-0bfb917c5617]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +1233,2305761,"TERMINAL",0,0,"rm -rf jafar_matt/",,terminal_command +1234,2305810,"TERMINAL",0,0,"]633;E;2025-09-04 10:35:22 rm -rf jafar_matt/;e3f3d151-a063-4c85-891d-0bfb917c5617]633;C",,terminal_output +1235,2306938,"TERMINAL",0,0,"bash",,terminal_focus +1236,2309895,"TERMINAL",0,0,"jafar",,terminal_command +1237,2309951,"TERMINAL",0,0,"]633;E;2025-09-04 10:35:27 jafar;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1238,2313154,"TERMINAL",0,0,"deactivate",,terminal_command +1239,2313199,"TERMINAL",0,0,"]633;E;2025-09-04 10:35:30 deactivate;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1240,2322730,"TERMINAL",0,0,"uv venv --python 3.10",,terminal_command +1241,2322780,"TERMINAL",0,0,"]633;E;2025-09-04 10:35:39 uv venv --python 
3.10;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +1242,2323015,"TERMINAL",0,0,"Using CPython 3.10.18\r\nCreating virtual environment at: .venv\r\nActivate with: source .venv/bin/activate\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1243,2327216,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command +1244,2327305,"TERMINAL",0,0,"]633;E;2025-09-04 10:35:44 source .venv/bin/activate;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1245,2334679,"TERMINAL",0,0,"uv pip install -r requirements.txt",,terminal_command +1246,2334902,"TERMINAL",0,0,"]633;E;2025-09-04 10:35:51 uv pip install -r requirements.txt ;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;CResolving dependencies...",,terminal_output +1254,2335808,"TERMINAL",0,0,"\rResolved 93 packages in 1.02s\r\n",,terminal_output 
+1263,2338298,"TERMINAL",0,0,"Prepared 1 package in 1.00s\r\nInstalling wheels...\r\n",,terminal_output 
",,terminal_output +1264,2338499,"TERMINAL",0,0,"\r░░░░░░░░░░░░░░░░░░░░ [0/93] glcontext==3.0.0 \r░░░░░░░░░░░░░░░░░░░░ [1/93] glcontext==3.0.0 \r░░░░░░░░░░░░░░░░░░░░ [1/93] sentry-sdk==2.36.0 \r░░░░░░░░░░░░░░░░░░░░ [2/93] sentry-sdk==2.36.0 \r░░░░░░░░░░░░░░░░░░░░ [2/93] aiofiles==24.1.0 \r░░░░░░░░░░░░░░░░░░░░ [3/93] aiofiles==24.1.0 \r░░░░░░░░░░░░░░░░░░░░ [3/93] pycparser==2.22 \r░░░░░░░░░░░░░░░░░░░░ [4/93] pycparser==2.22 \r░░░░░░░░░░░░░░░░░░░░ [4/93] imageio==2.37.0 \r█░░░░░░░░░░░░░░░░░░░ [5/93] imageio==2.37.0 ",,terminal_output +1265,2338617,"TERMINAL",0,0,"\r█░░░░░░░░░░░░░░░░░░░ [5/93] nvidia-nvtx-cu12==12.8.90 \r█░░░░░░░░░░░░░░░░░░░ [6/93] nvidia-nvtx-cu12==12.8.90 \r█░░░░░░░░░░░░░░░░░░░ [6/93] idna==3.10 \r█░░░░░░░░░░░░░░░░░░░ [7/93] idna==3.10 ",,terminal_output +1266,2338722,"TERMINAL",0,0,"\r█░░░░░░░░░░░░░░░░░░░ [7/93] glfw==1.12.0 \r█░░░░░░░░░░░░░░░░░░░ [8/93] glfw==1.12.0 \r█░░░░░░░░░░░░░░░░░░░ [8/93] ml-dtypes==0.5.3 \r█░░░░░░░░░░░░░░░░░░░ [9/93] ml-dtypes==0.5.3 ",,terminal_output +1267,2338851,"TERMINAL",0,0,"\r█░░░░░░░░░░░░░░░░░░░ [9/93] pydantic==2.11.7 \r██░░░░░░░░░░░░░░░░░░ [10/93] pydantic==2.11.7 \r██░░░░░░░░░░░░░░░░░░ [10/93] annotated-types==0.7.0 \r██░░░░░░░░░░░░░░░░░░ [11/93] annotated-types==0.7.0 \r██░░░░░░░░░░░░░░░░░░ [11/93] nvidia-cufile-cu12==1.13.1.3 \r██░░░░░░░░░░░░░░░░░░ [12/93] nvidia-cufile-cu12==1.13.1.3 ",,terminal_output +1268,2338910,"TERMINAL",0,0,"\r██░░░░░░░░░░░░░░░░░░ [12/93] rich==14.1.0 \r██░░░░░░░░░░░░░░░░░░ [13/93] rich==14.1.0 \r██░░░░░░░░░░░░░░░░░░ [13/93] smmap==5.0.2 \r███░░░░░░░░░░░░░░░░░ [14/93] smmap==5.0.2 ",,terminal_output +1269,2338963,"TERMINAL",0,0,"\r███░░░░░░░░░░░░░░░░░ [14/93] nest-asyncio==1.6.0 \r███░░░░░░░░░░░░░░░░░ [15/93] nest-asyncio==1.6.0 ",,terminal_output +1270,2339256,"TERMINAL",0,0,"\r███░░░░░░░░░░░░░░░░░ [15/93] cffi==1.17.1 \r███░░░░░░░░░░░░░░░░░ [16/93] cffi==1.17.1 \r███░░░░░░░░░░░░░░░░░ [16/93] einops==0.8.1 \r███░░░░░░░░░░░░░░░░░ [17/93] einops==0.8.1 \r███░░░░░░░░░░░░░░░░░ [17/93] nvidia-cublas-cu12==12.8.4.1 \r███░░░░░░░░░░░░░░░░░ [18/93] nvidia-cublas-cu12==12.8.4.1 ",,terminal_output +1271,2339319,"TERMINAL",0,0,"\r███░░░░░░░░░░░░░░░░░ [18/93] docstring-parser==0.17.0 \r████░░░░░░░░░░░░░░░░ [19/93] docstring-parser==0.17.0 ",,terminal_output +1272,2339724,"TERMINAL",0,0,"\r████░░░░░░░░░░░░░░░░ [19/93] filelock==3.19.1 \r████░░░░░░░░░░░░░░░░ [20/93] filelock==3.19.1 ",,terminal_output +1273,2339820,"TERMINAL",0,0,"\r████░░░░░░░░░░░░░░░░ [20/93] etils==1.13.0 \r████░░░░░░░░░░░░░░░░ [21/93] etils==1.13.0 \r████░░░░░░░░░░░░░░░░ [21/93] humanize==4.13.0 \r████░░░░░░░░░░░░░░░░ [22/93] humanize==4.13.0 ",,terminal_output +1274,2340022,"TERMINAL",0,0,"\r████░░░░░░░░░░░░░░░░ [22/93] pygments==2.19.2 \r████░░░░░░░░░░░░░░░░ [23/93] pygments==2.19.2 \r████░░░░░░░░░░░░░░░░ [23/93] click==8.2.1 \r█████░░░░░░░░░░░░░░░ [24/93] click==8.2.1 \r█████░░░░░░░░░░░░░░░ [24/93] dm-pix==0.4.4 \r█████░░░░░░░░░░░░░░░ [25/93] dm-pix==0.4.4 \r█████░░░░░░░░░░░░░░░ [25/93] gym-notices==0.1.0 \r█████░░░░░░░░░░░░░░░ [26/93] gym-notices==0.1.0 ",,terminal_output +1275,2340081,"TERMINAL",0,0,"\r█████░░░░░░░░░░░░░░░ [26/93] mdurl==0.1.2 ",,terminal_output +1276,2340186,"TERMINAL",0,0,"\r█████░░░░░░░░░░░░░░░ [27/93] nvidia-cuda-nvcc-cu12==12.9.86 \r██████░░░░░░░░░░░░░░ [28/93] nvidia-cuda-nvcc-cu12==12.9.86 ",,terminal_output +1277,2340346,"TERMINAL",0,0,"\r██████░░░░░░░░░░░░░░ [28/93] protobuf==6.32.0 \r██████░░░░░░░░░░░░░░ [29/93] protobuf==6.32.0 \r██████░░░░░░░░░░░░░░ [29/93] markupsafe==3.0.2 \r██████░░░░░░░░░░░░░░ [30/93] 
markupsafe==3.0.2 ",,terminal_output +1278,2340483,"TERMINAL",0,0,"\r██████░░░░░░░░░░░░░░ [31/93] optax==0.2.5 \r██████░░░░░░░░░░░░░░ [32/93] optax==0.2.5 ",,terminal_output +1279,2340545,"TERMINAL",0,0,"\r██████░░░░░░░░░░░░░░ [32/93] nvidia-nccl-cu12==2.27.3 ",,terminal_output +1280,2341057,"TERMINAL",0,0,"\r███████░░░░░░░░░░░░░ [33/93] jaxlib==0.6.2 \r███████░░░░░░░░░░░░░ [34/93] jaxlib==0.6.2 ",,terminal_output +1281,2341110,"TERMINAL",0,0,"\r███████░░░░░░░░░░░░░ [34/93] gitpython==3.1.45 \r███████░░░░░░░░░░░░░ [35/93] gitpython==3.1.45 ",,terminal_output +1282,2341224,"TERMINAL",0,0,"\r███████░░░░░░░░░░░░░ [35/93] nvidia-cuda-nvrtc-cu12==12.8.93 \r███████░░░░░░░░░░░░░ [36/93] nvidia-cuda-nvrtc-cu12==12.8.93 \r███████░░░░░░░░░░░░░ [36/93] nvidia-cuda-cupti-cu12==12.8.90 \r███████░░░░░░░░░░░░░ [37/93] nvidia-cuda-cupti-cu12==12.8.90 \r███████░░░░░░░░░░░░░ [37/93] sympy==1.14.0 \r████████░░░░░░░░░░░░ [38/93] sympy==1.14.0 \r████████░░░░░░░░░░░░ [38/93] jax-cuda12-pjrt==0.6.2 \r████████░░░░░░░░░░░░ [39/93] jax-cuda12-pjrt==0.6.2 \r████████░░░░░░░░░░░░ [39/93] cloudpickle==3.1.1 \r████████░░░░░░░░░░░░ [40/93] cloudpickle==3.1.1 ",,terminal_output +1283,2341343,"TERMINAL",0,0,"\r████████░░░░░░░░░░░░ [40/93] typing-inspection==0.4.1 \r████████░░░░░░░░░░░░ [41/93] typing-inspection==0.4.1 ",,terminal_output +1284,2341523,"TERMINAL",0,0,"\r████████░░░░░░░░░░░░ [41/93] markdown-it-py==4.0.0 \r█████████░░░░░░░░░░░ [42/93] markdown-it-py==4.0.0 \r█████████░░░░░░░░░░░ [42/93] nvidia-cuda-runtime-cu12==12.8.90 \r█████████░░░░░░░░░░░ [43/93] nvidia-cuda-runtime-cu12==12.8.90 ",,terminal_output +1285,2341694,"TERMINAL",0,0,"\r█████████░░░░░░░░░░░ [43/93] gym==0.26.2 \r█████████░░░░░░░░░░░ [44/93] gym==0.26.2 \r█████████░░░░░░░░░░░ [44/93] opt-einsum==3.4.0 ",,terminal_output +1286,2341804,"TERMINAL",0,0,"\r█████████░░░░░░░░░░░ [45/93] simplejson==3.20.1 \r█████████░░░░░░░░░░░ [46/93] simplejson==3.20.1 \r█████████░░░░░░░░░░░ [46/93] jinja2==3.1.6 ",,terminal_output +1287,2341908,"TERMINAL",0,0,"\r██████████░░░░░░░░░░ [48/93] tyro==0.9.31 \r██████████░░░░░░░░░░ [49/93] tyro==0.9.31 ",,terminal_output +1288,2342016,"TERMINAL",0,0,"\r██████████░░░░░░░░░░ [49/93] requests==2.32.5 \r██████████░░░░░░░░░░ [50/93] requests==2.32.5 ",,terminal_output +1289,2342079,"TERMINAL",0,0,"\r██████████░░░░░░░░░░ [50/93] nvidia-nvshmem-cu12==3.3.24 ",,terminal_output +1290,2342141,"TERMINAL",0,0,"\r███████████░░░░░░░░░ [52/93] zipp==3.23.0 ",,terminal_output +1291,2342225,"TERMINAL",0,0,"\r███████████░░░░░░░░░ [53/93] packaging==25.0 \r███████████░░░░░░░░░ [54/93] certifi==2025.8.3 ",,terminal_output +1292,2342342,"TERMINAL",0,0,"\r████████████░░░░░░░░ [56/93] platformdirs==4.4.0 \r████████████░░░░░░░░ [57/93] platformdirs==4.4.0 ",,terminal_output +1293,2342403,"TERMINAL",0,0,"\r████████████░░░░░░░░ [57/93] jax-cuda12-plugin==0.6.2 ",,terminal_output +1294,2342536,"TERMINAL",0,0,"\r████████████░░░░░░░░ [58/93] shtab==1.7.2 \r████████████░░░░░░░░ [59/93] shtab==1.7.2 ",,terminal_output +1295,2342670,"TERMINAL",0,0,"\r████████████░░░░░░░░ [59/93] nvidia-cudnn-cu12==9.10.2.21 \r████████████░░░░░░░░ [60/93] nvidia-cudnn-cu12==9.10.2.21 \r████████████░░░░░░░░ [60/93] nvidia-cufft-cu12==11.3.3.83 \r█████████████░░░░░░░ [61/93] typing-extensions==4.15.0 ",,terminal_output +1296,2343636,"TERMINAL",0,0,"\r█████████████░░░░░░░ [62/93] fsspec==2025.9.0 \r█████████████░░░░░░░ [63/93] fsspec==2025.9.0 ",,terminal_output +1297,2343731,"TERMINAL",0,0,"\r█████████████░░░░░░░ [63/93] absl-py==2.3.1 \r█████████████░░░░░░░ [64/93] 
absl-py==2.3.1 ",,terminal_output +1298,2344196,"TERMINAL",0,0,"\r█████████████░░░░░░░ [64/93] pillow==11.3.0 \r█████████████░░░░░░░ [65/93] pillow==11.3.0 ",,terminal_output +1299,2344405,"TERMINAL",0,0,"\r█████████████░░░░░░░ [65/93] imageio-ffmpeg==0.3.0 \r██████████████░░░░░░ [66/93] imageio-ffmpeg==0.3.0 ",,terminal_output +1300,2344944,"TERMINAL",0,0,"\r██████████████░░░░░░ [66/93] jax==0.6.2 \r██████████████░░░░░░ [67/93] jax==0.6.2 \r██████████████░░░░░░ [67/93] nvidia-cusparselt-cu12==0.7.1 \r██████████████░░░░░░ [68/93] nvidia-cusparselt-cu12==0.7.1 ",,terminal_output +1301,2345152,"TERMINAL",0,0,"\r██████████████░░░░░░ [68/93] triton==3.4.0 \r██████████████░░░░░░ [69/93] triton==3.4.0 ",,terminal_output +1302,2345464,"TERMINAL",0,0,"\r██████████████░░░░░░ [69/93] treescope==0.1.10 \r███████████████░░░░░ [70/93] treescope==0.1.10 ",,terminal_output +1303,2346021,"TERMINAL",0,0,"\r███████████████░░░░░ [70/93] flax==0.10.7 \r███████████████░░░░░ [71/93] flax==0.10.7 ",,terminal_output +1304,2346097,"TERMINAL",0,0,"\r███████████████░░░░░ [71/93] nvidia-cusparse-cu12==12.5.8.93 \r███████████████░░░░░ [72/93] nvidia-cusparse-cu12==12.5.8.93 \r███████████████░░░░░ [72/93] chex==0.1.90 \r███████████████░░░░░ [73/93] chex==0.1.90 ",,terminal_output +1305,2346413,"TERMINAL",0,0,"\r███████████████░░░░░ [73/93] typeguard==4.4.4 \r███████████████░░░░░ [74/93] typeguard==4.4.4 ",,terminal_output +1306,2347607,"TERMINAL",0,0,"\r███████████████░░░░░ [74/93] setuptools==80.9.0 \r████████████████░░░░ [75/93] setuptools==80.9.0 ",,terminal_output +1307,2347744,"TERMINAL",0,0,"\r████████████████░░░░ [75/93] wandb==0.21.3 \r████████████████░░░░ [76/93] wandb==0.21.3 ",,terminal_output +1308,2347806,"TERMINAL",0,0,"\r████████████████░░░░ [76/93] nvidia-cusolver-cu12==11.7.3.90 \r████████████████░░░░ [77/93] nvidia-cusolver-cu12==11.7.3.90 ",,terminal_output +1309,2347862,"TERMINAL",0,0,"\r████████████████░░░░ [77/93] pydantic-core==2.33.2 \r████████████████░░░░ [78/93] pydantic-core==2.33.2 ",,terminal_output +1310,2347967,"TERMINAL",0,0,"\r████████████████░░░░ [78/93] urllib3==2.5.0 \r████████████████░░░░ [79/93] urllib3==2.5.0 \r████████████████░░░░ [79/93] nvidia-curand-cu12==10.3.9.90 \r█████████████████░░░ [80/93] nvidia-curand-cu12==10.3.9.90 ",,terminal_output +1311,2348037,"TERMINAL",0,0,"\r█████████████████░░░ [80/93] gitdb==4.0.12 \r█████████████████░░░ [81/93] gitdb==4.0.12 \r█████████████████░░░ [81/93] moderngl==5.12.0 \r█████████████████░░░ [82/93] moderngl==5.12.0 ",,terminal_output +1312,2348166,"TERMINAL",0,0,"\r█████████████████░░░ [82/93] msgpack==1.1.1 \r█████████████████░░░ [83/93] msgpack==1.1.1 ",,terminal_output +1313,2348416,"TERMINAL",0,0,"\r█████████████████░░░ [83/93] charset-normalizer==3.4.3 \r██████████████████░░ [84/93] charset-normalizer==3.4.3 \r██████████████████░░ [84/93] networkx==3.4.2 \r██████████████████░░ [85/93] networkx==3.4.2 ",,terminal_output +1314,2348607,"TERMINAL",0,0,"\r██████████████████░░ [85/93] toolz==1.0.0 \r██████████████████░░ [86/93] toolz==1.0.0 ",,terminal_output +1315,2348733,"TERMINAL",0,0,"\r██████████████████░░ [86/93] mpmath==1.3.0 \r██████████████████░░ [87/93] mpmath==1.3.0 \r██████████████████░░ [87/93] pyyaml==6.0.2 \r██████████████████░░ [88/93] pyyaml==6.0.2 ",,terminal_output +1316,2349043,"TERMINAL",0,0,"\r██████████████████░░ [88/93] orbax-checkpoint==0.11.24 \r███████████████████░ [89/93] orbax-checkpoint==0.11.24 ",,terminal_output +1317,2350784,"TERMINAL",0,0,"\r███████████████████░ [89/93] torch==2.8.0 
\r███████████████████░ [90/93] torch==2.8.0 ",,terminal_output +1318,2353139,"TERMINAL",0,0,"\r███████████████████░ [90/93] numpy==1.26.4 \r███████████████████░ [91/93] numpy==1.26.4 ",,terminal_output +1319,2353595,"TERMINAL",0,0,"\r███████████████████░ [91/93] procgen==0.10.7 \r███████████████████░ [92/93] procgen==0.10.7 ",,terminal_output +1320,2356101,"TERMINAL",0,0,"\r███████████████████░ [92/93] scipy==1.15.3 \r████████████████████ [93/93] scipy==1.15.3 \rInstalled 93 packages in 17.57s\r\n + absl-py==2.3.1\r\n + aiofiles==24.1.0\r\n + annotated-types==0.7.0\r\n + certifi==2025.8.3\r\n + cffi==1.17.1\r\n + charset-normalizer==3.4.3\r\n + chex==0.1.90\r\n + click==8.2.1\r\n + cloudpickle==3.1.1\r\n + dm-pix==0.4.4\r\n + docstring-parser==0.17.0\r\n + einops==0.8.1\r\n + etils==1.13.0\r\n + filelock==3.19.1\r\n + flax==0.10.7\r\n + fsspec==2025.9.0\r\n + gitdb==4.0.12\r\n + gitpython==3.1.45\r\n + glcontext==3.0.0\r\n + glfw==1.12.0\r\n + gym==0.26.2\r\n + gym-notices==0.1.0\r\n + gym3==0.3.3\r\n + humanize==4.13.0\r\n + idna==3.10\r\n + imageio==2.37.0\r\n + imageio-ffmpeg==0.3.0\r\n + importlib-resources==6.5.2\r\n + jax==0.6.2\r\n + jax-cuda12-pjrt==0.6.2\r\n + jax-cuda12-plugin==0.6.2\r\n + jaxlib==0.6.2\r\n + jinja2==3.1.6\r\n + markdown-it-py==4.0.0\r\n + markupsafe==3.0.2\r\n + mdurl==0.1.2\r\n + ml-dtypes==0.5.3\r\n + moderngl==5.12.0\r\n + mpmath==1.3.0\r\n + msgpack==1.1.1\r\n + nest-asyncio==1.6.0\r\n + networkx==3.4.2\r\n + numpy==1.26.4\r\n + nvidia-cublas-cu12==12.8.4.1\r\n + nvidia-cuda-cupti-cu12==12.8.90\r\n + nvidia-cuda-nvcc-cu12==12.9.86\r\n + nvidia-cuda-nvrtc-cu12==12.8.93\r\n + nvidia-cuda-runtime-cu12==12.8.90\r\n + nvidia-cudnn-cu12==9.10.2.21\r\n + nvidia-cufft-cu12==11.3.3.83\r\n + nvidia-cufile-cu12==1.13.1.3\r\n + nvidia-curand-cu12==10.3.9.90\r\n + nvidia-cusolver-cu12==11.7.3.90\r\n + nvidia-cusparse-cu12==12.5.8.93\r\n + nvidia-cusparselt-cu12==0.7.1\r\n + nvidia-nccl-cu12==2.27.3\r\n + nvidia-nvjitlink-cu12==12.8.93\r\n + nvidia-nvshmem-cu12==3.3.24\r\n + nvidia-nvtx-cu12==12.8.90\r\n + opt-einsum==3.4.0\r\n + optax==0.2.5\r\n + orbax-checkpoint==0.11.24\r\n + packaging==25.0\r\n + pillow==11.3.0\r\n + platformdirs==4.4.0\r\n + procgen==0.10.7\r\n + protobuf==6.32.0\r\n + pycparser==2.22\r\n + pydantic==2.11.7\r\n + pydantic-core==2.33.2\r\n + pygments==2.19.2\r\n + pyyaml==6.0.2\r\n + requests==2.32.5\r\n + rich==14.1.0\r\n + scipy==1.15.3\r\n + sentry-sdk==2.36.0\r\n + setuptools==80.9.0\r\n + shtab==1.7.2\r\n + simplejson==3.20.1\r\n + smmap==5.0.2\r\n + sympy==1.14.0\r\n + tensorstore==0.1.76\r\n + toolz==1.0.0\r\n + torch==2.8.0\r\n + treescope==0.1.10\r\n + triton==3.4.0\r\n + typeguard==4.4.4\r\n + typing-extensions==4.15.0\r\n + typing-inspection==0.4.1\r\n + tyro==0.9.31\r\n + urllib3==2.5.0\r\n + wandb==0.21.3\r\n + zipp==3.23.0\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1321,2370124,"TERMINAL",0,0,"clear",,terminal_command +1322,2370168,"TERMINAL",0,0,"]633;E;2025-09-04 10:36:27 clear;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1323,2370518,"TERMINAL",0,0,"ls",,terminal_command +1324,2370555,"TERMINAL",0,0,"]633;E;2025-09-04 10:36:27 ls;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;Cgenerate_dataset.py genie.py LICENSE models README.md requirements.txt sample.py slurm train_dynamics.py train_lam.py train_tokenizer.py utils\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output 
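Editor's note: the install above pulls in the CUDA 12 JAX plugin wheels (jax-cuda12-plugin, jax-cuda12-pjrt) alongside torch==2.8.0. A minimal sanity check before submitting GPU jobs, assuming the freshly installed environment (hypothetical; this step does not appear in the recording):

    python -c "import jax; print(jax.devices())"                 # should list CUDA devices on a GPU node
    python -c "import torch; print(torch.cuda.is_available())"   # confirms the torch CUDA build loads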
+1325,2372843,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +1326,2383017,"TERMINAL",0,0,"bash",,terminal_focus +1327,2391731,"TERMINAL",0,0,"deactivate",,terminal_command +1328,2395709,"TERMINAL",0,0,"rm -rf .venv/",,terminal_command +1329,2395757,"TERMINAL",0,0,"]633;E;2025-09-04 10:36:52 rm -rf .venv/;e3f3d151-a063-4c85-891d-0bfb917c5617]633;C",,terminal_output +1330,2402393,"TERMINAL",0,0,"bash",,terminal_focus +1331,2422792,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",0,0,"",shellscript,tab +1332,2435737,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",,terminal_command +1333,2435896,"TERMINAL",0,0,"]633;E;2025-09-04 10:37:32 sbatch slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;Csbatch: error: Batch job submission failed: No partition specified or system default partition\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;1",,terminal_output +1334,2444648,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",0,0,"",shellscript,tab +1335,2447364,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",469,0,"",shellscript,selection_mouse +1336,2447366,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",468,0,"",shellscript,selection_command +1337,2447973,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",469,0,"",shellscript,selection_mouse +1338,2447990,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",468,0,"",shellscript,selection_command +1339,2448911,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",527,0,"",shellscript,selection_mouse +1340,2450464,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",468,0,"",shellscript,selection_mouse +1341,2451002,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",487,0,"",shellscript,selection_mouse +1342,2451058,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",486,0,"",shellscript,selection_command +1343,2454439,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",139,0,"",shellscript,selection_mouse +1344,2455109,"slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",108,0,"",shellscript,selection_mouse +1345,2458808,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",0,0,"",shellscript,tab +1346,2460065,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",157,0,"",shellscript,selection_mouse +1347,2465910,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",0,0,"",shellscript,tab +1348,2466794,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",136,0,"",shellscript,selection_mouse +1349,2467211,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",139,0,"\n#SBATCH --partition=accelerated",shellscript,content +1350,2467257,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",140,0,"",shellscript,selection_command +1351,2468756,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",0,0,"",shellscript,tab 
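Editor's note: the sbatch submission above fails with "Batch job submission failed: No partition specified or system default partition" because this cluster defines no default partition, so every job script must name one explicitly. The fix applied in the edits that follow is a single preamble directive (the partition name is cluster-specific; this session targets `accelerated`):

    #SBATCH --partition=accelerated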
+1352,2470157,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",197,0,"",shellscript,selection_mouse +1353,2470562,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",202,0,"\n#SBATCH --partition=accelerated",shellscript,content +1354,2470646,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",203,0,"",shellscript,selection_command +1355,2470976,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",180,0,"",shellscript,selection_command +1356,2471371,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",180,23,"",shellscript,content +1357,2471486,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",145,0,"",shellscript,selection_command +1358,2473376,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",145,35,"",shellscript,content +1359,2474059,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",119,0,"",shellscript,selection_command +1360,2474510,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",119,26,"",shellscript,content +1361,2474771,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",93,0,"",shellscript,selection_command +1362,2475449,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",119,0,"",shellscript,selection_command +1363,2475630,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",151,0,"",shellscript,selection_command +1364,2477481,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",0,0,"",shellscript,tab +1365,2478580,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",473,0,"",shellscript,selection_mouse +1366,2478592,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",472,0,"",shellscript,selection_command +1367,2479822,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",420,0,"",shellscript,selection_mouse +1368,2479825,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",419,0,"",shellscript,selection_command +1369,2480054,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",419,1,"g",shellscript,selection_mouse +1370,2480055,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",414,5,"%j.lo",shellscript,selection_mouse +1371,2480055,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",396,23,"production/lam/%x_%j.lo",shellscript,selection_mouse +1372,2480056,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",171,248,"\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/jafar_og_reproduction/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/jafar_og_reproduction/lam/%x_%j.lo",shellscript,selection_mouse +1373,2480084,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",420,0,"",shellscript,selection_command +1374,2480085,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",170,250,"d\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/jafar_og_reproduction/lam/%x_%j.log\n#SBATCH 
--error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/jafar_og_reproduction/lam/%x_%j.log",shellscript,selection_mouse +1375,2480130,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",93,327,"#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --partition=accelerated\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/jafar_og_reproduction/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/jafar_og_reproduction/lam/%x_%j.log",shellscript,selection_mouse +1376,2480231,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",67,353,"#SBATCH --time=2-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --partition=accelerated\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/jafar_og_reproduction/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/jafar_og_reproduction/lam/%x_%j.log",shellscript,selection_mouse +1377,2480391,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",39,381,"#SBATCH --ntasks-per-node=1\n#SBATCH --time=2-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --partition=accelerated\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/jafar_og_reproduction/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/jafar_og_reproduction/lam/%x_%j.log",shellscript,selection_mouse +1378,2480469,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",21,399,"#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=2-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --partition=accelerated\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/jafar_og_reproduction/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/jafar_og_reproduction/lam/%x_%j.log",shellscript,selection_mouse +1379,2483425,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",171,0,"",shellscript,selection_mouse +1380,2483478,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",170,0,"",shellscript,selection_command +1381,2483890,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",171,0,"",shellscript,selection_mouse +1382,2483912,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",170,0,"",shellscript,selection_command +1383,2484123,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",170,1,"d",shellscript,selection_mouse +1384,2484124,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",167,3,"ate",shellscript,selection_mouse +1385,2484124,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",162,8,"celerate",shellscript,selection_mouse +1386,2484125,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",157,13,"on=accelerate",shellscript,selection_mouse +1387,2484125,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",153,17,"tition=accelerate",shellscript,selection_mouse 
+1388,2484153,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",171,0,"",shellscript,selection_command +1389,2484154,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",151,20,"artition=accelerated",shellscript,selection_mouse +1390,2484198,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",147,24," --partition=accelerated",shellscript,selection_mouse +1391,2484244,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",124,47,"CH --gres=gpu:1\n#SBATCH --partition=accelerated",shellscript,selection_mouse +1392,2484263,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",122,49,"ATCH --gres=gpu:1\n#SBATCH --partition=accelerated",shellscript,selection_mouse +1393,2484294,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",121,50,"BATCH --gres=gpu:1\n#SBATCH --partition=accelerated",shellscript,selection_mouse +1394,2484320,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",94,77,"SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --partition=accelerated",shellscript,selection_mouse +1395,2484353,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",93,78,"#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --partition=accelerated",shellscript,selection_mouse +1396,2484381,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",67,104,"#SBATCH --time=2-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --partition=accelerated",shellscript,selection_mouse +1397,2484434,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",39,132,"#SBATCH --ntasks-per-node=1\n#SBATCH --time=2-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --partition=accelerated",shellscript,selection_mouse +1398,2484480,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",21,150,"#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=2-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --partition=accelerated",shellscript,selection_mouse +1399,2487251,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",0,0,"",shellscript,tab +1400,2488906,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",171,0,"",shellscript,selection_mouse +1401,2488910,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",170,0,"",shellscript,selection_command +1402,2489031,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",170,1,"1",shellscript,selection_mouse +1403,2489032,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",171,0,"",shellscript,selection_command +1404,2489107,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",133,38,"ition=accelerated\n#SBATCH --gres=gpu:1",shellscript,selection_mouse +1405,2489108,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",128,43,"-partition=accelerated\n#SBATCH --gres=gpu:1",shellscript,selection_mouse +1406,2489148,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",125,46,"H --partition=accelerated\n#SBATCH --gres=gpu:1",shellscript,selection_mouse 
+1407,2489184,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",122,49,"ATCH --partition=accelerated\n#SBATCH --gres=gpu:1",shellscript,selection_mouse +1408,2489219,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",95,76,"BATCH --cpus-per-task=8\n#SBATCH --partition=accelerated\n#SBATCH --gres=gpu:1",shellscript,selection_mouse +1409,2489251,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",94,77,"SBATCH --cpus-per-task=8\n#SBATCH --partition=accelerated\n#SBATCH --gres=gpu:1",shellscript,selection_mouse +1410,2489319,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",68,103,"SBATCH --time=3-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --partition=accelerated\n#SBATCH --gres=gpu:1",shellscript,selection_mouse +1411,2489354,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",67,104,"#SBATCH --time=3-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --partition=accelerated\n#SBATCH --gres=gpu:1",shellscript,selection_mouse +1412,2489384,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",39,132,"#SBATCH --ntasks-per-node=1\n#SBATCH --time=3-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --partition=accelerated\n#SBATCH --gres=gpu:1",shellscript,selection_mouse +1413,2489757,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",21,150,"#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=3-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --partition=accelerated\n#SBATCH --gres=gpu:1",shellscript,selection_mouse +1414,2490639,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",21,150,"",shellscript,content +1415,2491141,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",21,0,"#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=2-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --partition=accelerated",shellscript,content +1416,2491717,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch",170,0,"",shellscript,selection_command +1417,2492907,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",0,0,"",shellscript,tab +1418,2494218,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",223,0,"",shellscript,selection_mouse +1419,2494230,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",222,0,"",shellscript,selection_command +1420,2494331,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",222,1,"g",shellscript,selection_mouse +1421,2494397,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",184,38,"ion=haicu_stefan\n#SBATCH --qos=gpu_lon",shellscript,selection_mouse +1422,2494398,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",223,0,"",shellscript,selection_command +1423,2494398,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",152,71,"rtition=gpu_p\n#SBATCH --reservation=haicu_stefan\n#SBATCH --qos=gpu_long",shellscript,selection_mouse +1424,2494449,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",147,76," --partition=gpu_p\n#SBATCH --reservation=haicu_stefan\n#SBATCH --qos=gpu_long",shellscript,selection_mouse 
+1425,2494487,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",119,104,"CH --cpus-per-task=8\n#SBATCH --partition=gpu_p\n#SBATCH --reservation=haicu_stefan\n#SBATCH --qos=gpu_long",shellscript,selection_mouse +1426,2494522,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",92,131,"TCH --time=3-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --partition=gpu_p\n#SBATCH --reservation=haicu_stefan\n#SBATCH --qos=gpu_long",shellscript,selection_mouse +1427,2494562,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",70,153,"ATCH --gres=gpu:1\n#SBATCH --time=3-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --partition=gpu_p\n#SBATCH --reservation=haicu_stefan\n#SBATCH --qos=gpu_long",shellscript,selection_mouse +1428,2494591,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",69,154,"BATCH --gres=gpu:1\n#SBATCH --time=3-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --partition=gpu_p\n#SBATCH --reservation=haicu_stefan\n#SBATCH --qos=gpu_long",shellscript,selection_mouse +1429,2494615,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",40,183,"SBATCH --ntasks-per-node=1\n#SBATCH --gres=gpu:1\n#SBATCH --time=3-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --partition=gpu_p\n#SBATCH --reservation=haicu_stefan\n#SBATCH --qos=gpu_long",shellscript,selection_mouse +1430,2494641,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",39,184,"#SBATCH --ntasks-per-node=1\n#SBATCH --gres=gpu:1\n#SBATCH --time=3-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --partition=gpu_p\n#SBATCH --reservation=haicu_stefan\n#SBATCH --qos=gpu_long",shellscript,selection_mouse +1431,2494709,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",21,202,"#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --gres=gpu:1\n#SBATCH --time=3-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --partition=gpu_p\n#SBATCH --reservation=haicu_stefan\n#SBATCH --qos=gpu_long",shellscript,selection_mouse +1432,2495553,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",21,202,"",shellscript,content +1433,2496577,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",21,0,"#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=2-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --partition=accelerated",shellscript,content +1434,2498655,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",357,0,"",shellscript,selection_mouse +1435,2499288,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",488,0,"",shellscript,selection_mouse +1436,2500588,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",489,0,"",shellscript,selection_mouse +1437,2501229,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",373,0,"",shellscript,selection_mouse +1438,2501795,"slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",488,0,"",shellscript,selection_mouse +1439,2505976,"TERMINAL",0,0,"bash",,terminal_focus +1440,2510321,"TERMINAL",0,0,"t commit -m ""added og coinrun jobs""",,terminal_command 
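Editor's note: after the edits above (content events 1349, 1415, and 1433), all three og_coinrun reproduction scripts share the same normalized SLURM preamble:

    #SBATCH --nodes=1
    #SBATCH --ntasks-per-node=1
    #SBATCH --time=2-00:00:00
    #SBATCH --cpus-per-task=8
    #SBATCH --gres=gpu:1
    #SBATCH --partition=accelerated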
+1441,2510332,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;9618af06-aa7a-44c4-9160-5cfc3f9eaf06]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine/slurm]633;D",,terminal_output +1442,2514291,"TERMINAL",0,0,"git add jobs/mihir/",,terminal_command +1443,2514317,"TERMINAL",0,0,"]633;E;2025-09-04 10:38:51 git add jobs/mihir/;9618af06-aa7a-44c4-9160-5cfc3f9eaf06]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine/slurm]633;D;0",,terminal_output +1444,2518417,"TERMINAL",0,0,"git commit -m ""added og coinrun jobs""",,terminal_command +1445,2518466,"TERMINAL",0,0,"]633;E;2025-09-04 10:38:55 git commit -m ""added og coinrun jobs"";9618af06-aa7a-44c4-9160-5cfc3f9eaf06]633;C",,terminal_output +1446,2518582,"TERMINAL",0,0,"[main a0be88c] added og coinrun jobs\r\n 3 files changed, 6 insertions(+), 9 deletions(-)\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine/slurm]633;D;0",,terminal_output +1447,2519944,"TERMINAL",0,0,"git push",,terminal_command +1448,2519994,"TERMINAL",0,0,"]633;E;2025-09-04 10:38:57 git push;9618af06-aa7a-44c4-9160-5cfc3f9eaf06]633;C",,terminal_output +1449,2521307,"TERMINAL",0,0,"Enumerating objects: 15, done.\r\nCounting objects: 6% (1/15)\rCounting objects: 13% (2/15)\rCounting objects: 20% (3/15)\rCounting objects: 26% (4/15)\rCounting objects: 33% (5/15)\rCounting objects: 40% (6/15)\rCounting objects: 46% (7/15)\rCounting objects: 53% (8/15)\rCounting objects: 60% (9/15)\rCounting objects: 66% (10/15)\rCounting objects: 73% (11/15)\rCounting objects: 80% (12/15)\rCounting objects: 86% (13/15)\rCounting objects: 93% (14/15)\rCounting objects: 100% (15/15)\rCounting objects: 100% (15/15), done.\r\nDelta compression using up to 152 threads\r\nCompressing objects: 11% (1/9)\rCompressing objects: 22% (2/9)\rCompressing objects: 33% (3/9)\rCompressing objects: 44% (4/9)\rCompressing objects: 55% (5/9)\rCompressing objects: 66% (6/9)\rCompressing objects: 77% (7/9)\rCompressing objects: 88% (8/9)\rCompressing objects: 100% (9/9)\rCompressing objects: 100% (9/9), done.\r\nWriting objects: 11% (1/9)\rWriting objects: 22% (2/9)\rWriting objects: 33% (3/9)\rWriting objects: 44% (4/9)\rWriting objects: 55% (5/9)\rWriting objects: 66% (6/9)\rWriting objects: 77% (7/9)\rWriting objects: 100% (9/9)\rWriting objects: 100% (9/9), 890 bytes | 445.00 KiB/s, done.\r\nTotal 9 (delta 7), reused 0 (delta 0), pack-reused 0\r\n",,terminal_output +1450,2521473,"TERMINAL",0,0,"remote: Resolving deltas: 0% (0/7)\rremote: Resolving deltas: 14% (1/7)\rremote: Resolving deltas: 28% (2/7)\rremote: Resolving deltas: 42% (3/7)\rremote: Resolving deltas: 57% (4/7)\rremote: Resolving deltas: 71% (5/7)\rremote: Resolving deltas: 85% (6/7)\rremote: Resolving deltas: 100% (7/7)\rremote: Resolving deltas: 100% (7/7), completed with 5 local objects.\r\n",,terminal_output +1451,2521639,"TERMINAL",0,0,"rm",,terminal_focus +1452,2521729,"TERMINAL",0,0,"To github.com:p-doom/slurm.git\r\n d7a176c..a0be88c main -> main\r\n",,terminal_output +1453,2521758,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine/slurm]633;D;0",,terminal_output +1454,2522584,"TERMINAL",0,0,"bash",,terminal_focus +1455,2524965,"TERMINAL",0,0,"cd slurm/",,terminal_command +1456,2524994,"TERMINAL",0,0,"]633;E;2025-09-04 10:39:02 cd slurm/;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar/slurm]633;D;0",,terminal_output +1457,2526531,"TERMINAL",0,0,"git pull",,terminal_command +1458,2526571,"TERMINAL",0,0,"]633;E;2025-09-04 10:39:03 git pull;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output 
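Editor's note: the job scripts live in a separate slurm repository checked out under both ~/Projects/jasmine and ~/Projects/jafar, so the change is committed and pushed from one checkout and pulled into the other (the pull completes below). Condensed from the recorded commands:

    git add jobs/mihir/
    git commit -m "added og coinrun jobs"   # -> [main a0be88c] 3 files changed
    git push                                # github.com:p-doom/slurm  d7a176c..a0be88c
    cd ~/Projects/jafar/slurm && git pull   # second checkout fast-forwards to a0be88c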
+1459,2527960,"TERMINAL",0,0,"remote: Enumerating objects: 15, done.\r\nremote: Counting objects: 6% (1/15)\rremote: Counting objects: 13% (2/15)\rremote: Counting objects: 20% (3/15)\rremote: Counting objects: 26% (4/15)\rremote: Counting objects: 33% (5/15)\rremote: Counting objects: 40% (6/15)\rremote: Counting objects: 46% (7/15)\rremote: Counting objects: 53% (8/15)\rremote: Counting objects: 60% (9/15)\rremote: Counting objects: 66% (10/15)\rremote: Counting objects: 73% (11/15)\rremote: Counting objects: 80% (12/15)\rremote: Counting objects: 86% (13/15)\rremote: Counting objects: 93% (14/15)\rremote: Counting objects: 100% (15/15)\rremote: Counting objects: 100% (15/15), done.\r\nremote: Compressing objects: 50% (1/2)\rremote: Compressing objects: 100% (2/2)\rremote: Compressing objects: 100% (2/2), done.\r\nremote: Total 9 (delta 7), reused 9 (delta 7), pack-reused 0 (from 0)\r\nUnpacking objects: 11% (1/9)\rUnpacking objects: 22% (2/9)\rUnpacking objects: 33% (3/9)\rUnpacking objects: 44% (4/9)\rUnpacking objects: 55% (5/9)\rUnpacking objects: 66% (6/9)\rUnpacking objects: 77% (7/9)\rUnpacking objects: 88% (8/9)\rUnpacking objects: 100% (9/9)\rUnpacking objects: 100% (9/9), 870 bytes | 34.00 KiB/s, done.\r\n",,terminal_output +1460,2528082,"TERMINAL",0,0,"From github.com:p-doom/slurm\r\n d7a176c..a0be88c main -> origin/main\r\nUpdating d7a176c..a0be88c\r\n",,terminal_output +1461,2529267,"TERMINAL",0,0,"Fast-forward\r\n jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch | 8 +++-----\r\n jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch | 1 +\r\n jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch | 6 ++----\r\n 3 files changed, 6 insertions(+), 9 deletions(-)\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar/slurm]633;D;0",,terminal_output +1462,2531865,"TERMINAL",0,0,"cd ..",,terminal_command +1463,2531904,"TERMINAL",0,0,"]633;E;2025-09-04 10:39:09 cd ..;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar",,terminal_output +1464,2535580,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch",,terminal_command +1465,2535645,"TERMINAL",0,0,"]633;E;2025-09-04 10:39:12 sbatch slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;CSubmitted batch job 3465267\r\n]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1466,2543014,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch^C",,terminal_command +1467,2543046,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D",,terminal_output +1468,2545422,"TERMINAL",0,0,"scancel 3465267",,terminal_command +1469,2545481,"TERMINAL",0,0,"]633;E;2025-09-04 10:39:22 scancel 3465267;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1470,2598407,"TERMINAL",0,0,"cursor .",,terminal_command +1471,2598447,"TERMINAL",0,0,"]633;E;2025-09-04 10:40:15 cursor .;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +1472,2598779,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jafar]633;D;0",,terminal_output +1473,2623316,"TERMINAL",0,0,"pwd",,terminal_command +1474,2631755,"TERMINAL",0,0,"cd ..",,terminal_command 
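Editor's note: with the partition directive in place, resubmission succeeds ("Submitted batch job 3465267"); an accidental duplicate submission is aborted with Ctrl-C and the queued job is then cancelled by id:

    sbatch slurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch
    # Submitted batch job 3465267
    scancel 3465267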
+1475,2873247,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +1476,3431497,"TERMINAL",0,0,"dev",,terminal_command +1477,3431557,"TERMINAL",0,0,"]633;E;2025-09-04 10:54:08 dev;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +1478,3438703,"TERMINAL",0,0,"",,terminal_focus +1479,3529119,"TERMINAL",0,0,"bash",,terminal_focus +1480,3560718,"TERMINAL",0,0,"git diff",,terminal_command +1481,3560774,"TERMINAL",0,0,"]633;E;2025-09-04 10:56:17 git diff;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C[?1h=\rdiff --git a/generate_dataset.py b/generate_dataset.py\r\nindex a67c424..9d5af93 100644\r\n--- a/generate_dataset.py\r\n+++ b/generate_dataset.py\r\n@@ -33,7 +33,8 @@ while i < args.num_episodes:\r\n \r\n # --- Run episode ---\r\n for j in range(1000):\r\n- env.act(types_np.sample(env.ac_space, bshape=(env.num,)))\r\n+ action = types_np.sample(env.ac_space, bshape=(env.num,))\r\n+ env.act(action)\r\n rew, obs, first = env.observe()\r\n dataseq.append(obs[""rgb""])\r\n if first:\r\n",,terminal_output +1482,3560789,"TERMINAL",0,0,"\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +1483,3565161,"TERMINAL",0,0,"git checkout mian",,terminal_command +1484,3565233,"TERMINAL",0,0,"]633;E;2025-09-04 10:56:22 git checkout mian;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;Cerror: pathspec 'mian' did not match any file(s) known to git\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;1",,terminal_output +1485,3567211,"TERMINAL",0,0,"git checkout main",,terminal_command +1486,3567260,"TERMINAL",0,0,"]633;E;2025-09-04 10:56:24 git checkout main;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +1487,3567633,"TERMINAL",0,0,"M\tgenerate_dataset.py\r\nSwitched to branch 'main'\r\nYour branch is behind 'origin/main' by 1 commit, and can be fast-forwarded.\r\n (use ""git pull"" to update your local branch)\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +1488,3570664,"",0,0,"Switched from branch 'feat/darkness-filter' to 'main'",,git_branch_checkout +1489,3573207,"TERMINAL",0,0,"git stash",,terminal_command +1490,3573246,"TERMINAL",0,0,"]633;E;2025-09-04 10:56:30 git stash;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +1491,3573490,"TERMINAL",0,0,"Saved working directory and index state WIP on main: b1558ee fix: typo in bibtex (#146)\r\n",,terminal_output +1492,3573561,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +1493,3574869,"TERMINAL",0,0,"git pull",,terminal_command +1494,3574918,"TERMINAL",0,0,"]633;E;2025-09-04 10:56:32 git pull;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +1495,3576764,"TERMINAL",0,0,"remote: Enumerating objects: 233, done.\r\nremote: Counting objects: 1% (1/97)\rremote: Counting objects: 2% (2/97)\rremote: Counting objects: 3% (3/97)\rremote: Counting objects: 4% (4/97)\rremote: Counting objects: 5% (5/97)\rremote: Counting objects: 6% (6/97)\rremote: Counting objects: 7% (7/97)\rremote: Counting objects: 8% (8/97)\rremote: Counting objects: 9% (9/97)\rremote: Counting objects: 10% (10/97)\rremote: Counting objects: 11% (11/97)\rremote: Counting objects: 12% (12/97)\rremote: Counting objects: 13% (13/97)\rremote: Counting objects: 14% (14/97)\rremote: Counting objects: 15% (15/97)\rremote: Counting objects: 16% (16/97)\rremote: Counting objects: 17% (17/97)\rremote: Counting objects: 18% (18/97)\rremote: Counting objects: 19% (19/97)\rremote: Counting objects: 20% 
(20/97)\rremote: Counting objects: 21% (21/97)\rremote: Counting objects: 22% (22/97)\rremote: Counting objects: 23% (23/97)\rremote: Counting objects: 24% (24/97)\rremote: Counting objects: 25% (25/97)\rremote: Counting objects: 26% (26/97)\rremote: Counting objects: 27% (27/97)\rremote: Counting objects: 28% (28/97)\rremote: Counting objects: 29% (29/97)\rremote: Counting objects: 30% (30/97)\rremote: Counting objects: 31% (31/97)\rremote: Counting objects: 32% (32/97)\rremote: Counting objects: 34% (33/97)\rremote: Counting objects: 35% (34/97)\rremote: Counting objects: 36% (35/97)\rremote: Counting objects: 37% (36/97)\rremote: Counting objects: 38% (37/97)\rremote: Counting objects: 39% (38/97)\rremote: Counting objects: 40% (39/97)\rremote: Counting objects: 41% (40/97)\rremote: Counting objects: 42% (41/97)\rremote: Counting objects: 43% (42/97)\rremote: Counting objects: 44% (43/97)\rremote: Counting objects: 45% (44/97)\rremote: Counting objects: 46% (45/97)\rremote: Counting objects: 47% (46/97)\rremote: Counting objects: 48% (47/97)\rremote: Counting objects: 49% (48/97)\rremote: Counting objects: 50% (49/97)\rremote: Counting objects: 51% (50/97)\rremote: Counting objects: 52% (51/97)\rremote: Counting objects: 53% (52/97)\rremote: Counting objects: 54% (53/97)\rremote: Counting objects: 55% (54/97)\rremote: Counting objects: 56% (55/97)\rremote: Counting objects: 57% (56/97)\rremote: Counting objects: 58% (57/97)\rremote: Counting objects: 59% (58/97)\rremote: Counting objects: 60% (59/97)\rremote: Counting objects: 61% (60/97)\rremote: Counting objects: 62% (61/97)\rremote: Counting objects: 63% (62/97)\rremote: Counting objects: 64% (63/97)\rremote: Counting objects: 65% (64/97)\rremote: Counting objects: 67% (65/97)\rremote: Counting objects: 68% (66/97)\rremote: Counting objects: 69% (67/97)\rremote: Counting objects: 70% (68/97)\rremote: Counting objects: 71% (69/97)\rremote: Counting objects: 72% (70/97)\rremote: Counting objects: 73% (71/97)\rremote: Counting objects: 74% (72/97)\rremote: Counting objects: 75% (73/97)\rremote: Counting objects: 76% (74/97)\rremote: Counting objects: 77% (75/97)\rremote: Counting objects: 78% (76/97)\rremote: Counting objects: 79% (77/97)\rremote: Counting objects: 80% (78/97)\rremote: Counting objects: 81% (79/97)\rremote: Counting objects: 82% (80/97)\rremote: Counting objects: 83% (81/97)\rremote: Counting objects: 84% (82/97)\rremote: Counting objects: 85% (83/97)\rremote: Counting objects: 86% (84/97)\rremote: Counting objects: 87% (85/97)\rremote: Counting objects: 88% (86/97)\rremote: Counting objects: 89% (87/97)\rremote: Counting objects: 90% (88/97)\rremote: Counting objects: 91% (89/97)\rremote: Counting objects: 92% (90/97)\rremote: Counting objects: 93% (91/97)\rremote: Counting objects: 94% (92/97)\rremote: Counting objects: 95% (93/97)\rremote: Counting objects: 96% (94/97)\rremote: Counting objects: 97% (95/97)\rremote: Counting objects: 98% (96/97)\rremote: Counting objects: 100% (97/97)\rremote: Counting objects: 100% (97/97), done.\r\nremote: Compressing objects: 2% (1/49)\rremote: Compressing objects: 4% (2/49)\rremote: Compressing objects: 6% (3/49)\rremote: Compressing objects: 8% (4/49)\rremote: Compressing objects: 10% (5/49)\rremote: Compressing objects: 12% (6/49)\rremote: Compressing objects: 14% (7/49)\rremote: Compressing objects: 16% (8/49)\rremote: Compressing objects: 18% (9/49)\rremote: Compressing objects: 20% (10/49)\rremote: Compressing objects: 22% (11/49)\rremote: Compressing objects: 24% 
(12/49)\rremote: Compressing objects: 26% (13/49)\rremote: Compressing objects: 28% (14/49)\rremote: Compressing objects: 30% (15/49)\rremote: Compressing objects: 32% (16/49)\rremote: Compressing objects: 34% (17/49)\rremote: Compressing objects: 36% (18/49)\rremote: Compressing objects: 38% (19/49)\rremote: Compressing objects: 40% (20/49)\rremote: Compressing objects: 42% (21/49)\rremote: Compressing objects: 44% (22/49)\rremote: Compressing objects: 46% (23/49)\rremote: Compressing objects: 48% (24/49)\rremote: Compressing objects: 51% (25/49)\rremote: Compressing objects: 53% (26/49)\rremote: Compressing objects: 55% (27/49)\rremote: Compressing objects: 57% (28/49)\rremote: Compressing objects: 59% (29/49)\rremote: Compressing objects: 61% (30/49)\rremote: Compressing objects: 63% (31/49)\rremote: Compressing objects: 65% (32/49)\r",,terminal_output +1496,3576894,"TERMINAL",0,0,"remote: Compressing objects: 67% (33/49)\rremote: Compressing objects: 69% (34/49)\rremote: Compressing objects: 71% (35/49)\rremote: Compressing objects: 73% (36/49)\rremote: Compressing objects: 75% (37/49)\rremote: Compressing objects: 77% (38/49)\rremote: Compressing objects: 79% (39/49)\rremote: Compressing objects: 81% (40/49)\rremote: Compressing objects: 83% (41/49)\rremote: Compressing objects: 85% (42/49)\rremote: Compressing objects: 87% (43/49)\rremote: Compressing objects: 89% (44/49)\rremote: Compressing objects: 91% (45/49)\rremote: Compressing objects: 93% (46/49)\rremote: Compressing objects: 95% (47/49)\rremote: Compressing objects: 97% (48/49)\rremote: Compressing objects: 100% (49/49)\rremote: Compressing objects: 100% (49/49), done.\r\nReceiving objects: 0% (1/233)\rReceiving objects: 1% (3/233)\rReceiving objects: 2% (5/233)\r",,terminal_output +1497,3577147,"TERMINAL",0,0,"Receiving objects: 3% (7/233)\rReceiving objects: 4% (10/233)\rReceiving objects: 5% (12/233)\rReceiving objects: 6% (14/233)\rReceiving objects: 7% (17/233)\rReceiving objects: 8% (19/233)\rReceiving objects: 9% (21/233)\rReceiving objects: 10% (24/233)\rReceiving objects: 11% (26/233)\rReceiving objects: 12% (28/233)\rReceiving objects: 13% (31/233)\rReceiving objects: 14% (33/233)\rReceiving objects: 15% (35/233)\rReceiving objects: 16% (38/233)\rReceiving objects: 17% (40/233)\rReceiving objects: 18% (42/233)\rReceiving objects: 19% (45/233)\rReceiving objects: 20% (47/233)\rReceiving objects: 21% (49/233)\rReceiving objects: 22% (52/233)\rReceiving objects: 23% (54/233)\rReceiving objects: 24% (56/233)\rReceiving objects: 25% (59/233)\rReceiving objects: 26% (61/233)\rReceiving objects: 27% (63/233)\rReceiving objects: 28% (66/233)\rReceiving objects: 29% (68/233)\rReceiving objects: 30% (70/233)\rReceiving objects: 31% (73/233)\rReceiving objects: 32% (75/233)\rReceiving objects: 33% (77/233)\rReceiving objects: 34% (80/233)\rReceiving objects: 35% (82/233)\rReceiving objects: 36% (84/233)\rReceiving objects: 37% (87/233)\rReceiving objects: 38% (89/233)\rReceiving objects: 39% (91/233)\rReceiving objects: 40% (94/233)\rReceiving objects: 41% (96/233)\rReceiving objects: 42% (98/233)\rReceiving objects: 43% (101/233)\rReceiving objects: 44% (103/233)\rReceiving objects: 45% (105/233)\rReceiving objects: 46% (108/233)\rReceiving objects: 47% (110/233)\rReceiving objects: 48% (112/233)\r",,terminal_output +1498,3577583,"TERMINAL",0,0,"remote: Total 233 (delta 64), reused 48 (delta 48), pack-reused 136 (from 2)\r\nReceiving objects: 49% (115/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 50% (117/233), 2.93 MiB | 
5.74 MiB/s\rReceiving objects: 51% (119/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 52% (122/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 53% (124/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 54% (126/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 55% (129/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 56% (131/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 57% (133/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 58% (136/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 59% (138/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 60% (140/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 61% (143/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 62% (145/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 63% (147/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 64% (150/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 65% (152/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 66% (154/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 67% (157/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 68% (159/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 69% (161/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 70% (164/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 71% (166/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 72% (168/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 73% (171/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 74% (173/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 75% (175/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 76% (178/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 77% (180/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 78% (182/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 79% (185/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 80% (187/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 81% (189/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 82% (192/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 83% (194/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 84% (196/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 85% (199/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 86% (201/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 87% (203/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 88% (206/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 89% (208/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 90% (210/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 91% (213/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 92% (215/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 93% (217/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 94% (220/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 95% (222/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 96% (224/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 97% (227/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 98% (229/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 99% (231/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 100% (233/233), 2.93 MiB | 5.74 MiB/s\rReceiving objects: 100% (233/233), 3.42 MiB | 6.09 MiB/s, done.\r\nResolving deltas: 0% (0/137)\rResolving deltas: 1% (2/137)\rResolving deltas: 2% (3/137)\rResolving deltas: 3% (5/137)\rResolving deltas: 4% (6/137)\rResolving deltas: 5% (7/137)\rResolving deltas: 6% (9/137)\rResolving deltas: 7% (10/137)\rResolving deltas: 8% (11/137)\rResolving deltas: 9% (13/137)\rResolving deltas: 10% (14/137)\rResolving deltas: 11% (16/137)\rResolving deltas: 12% (17/137)\rResolving deltas: 13% (18/137)\rResolving deltas: 14% (20/137)\rResolving deltas: 15% (21/137)\rResolving deltas: 16% (22/137)\rResolving deltas: 17% (24/137)\rResolving deltas: 18% (25/137)\rResolving deltas: 19% 
(27/137)\rResolving deltas: 20% (28/137)\rResolving deltas: 21% (29/137)\rResolving deltas: 22% (31/137)\rResolving deltas: 23% (32/137)\rResolving deltas: 24% (33/137)\rResolving deltas: 25% (35/137)\rResolving deltas: 26% (36/137)\rResolving deltas: 27% (37/137)\rResolving deltas: 28% (39/137)\rResolving deltas: 29% (40/137)\rResolving deltas: 30% (42/137)\rResolving deltas: 31% (43/137)\rResolving deltas: 32% (44/137)\rResolving deltas: 33% (46/137)\rResolving deltas: 34% (47/137)\rResolving deltas: 35% (48/137)\rResolving deltas: 36% (50/137)\rResolving deltas: 37% (51/137)\rResolving deltas: 38% (53/137)\rResolving deltas: 39% (54/137)\rResolving deltas: 40% (55/137)\rResolving deltas: 41% (57/137)\rResolving deltas: 42% (58/137)\rResolving deltas: 43% (59/137)\rResolving deltas: 44% (61/137)\rResolving deltas: 45% (62/137)\rResolving deltas: 46% (64/137)\rResolving deltas: 47% (65/137)\rResolving deltas: 48% (66/137)\rResolving deltas: 49% (68/137)\rResolving deltas: 50% (69/137)\rResolving deltas: 51% (70/137)\rResolving deltas: 52% (72/137)\rResolving deltas: 53% (73/137)\rResolving deltas: 54% (74/137)\rResolving deltas: 55% (76/137)\rResolving deltas: 56% (77/137)\rResolving deltas: 57% (79/137)\rResolving deltas: 58% (80/137)\rResolving deltas: 59% (81/137)\rResolving deltas: 60% (83/137)\rResolving deltas: 61% (84/137)\rResolving deltas: 62% (85/137)\rResolving deltas: 63% (87/137)\rResolving deltas: 64% (88/137)\rResolving deltas: 65% (90/137)\rResolving deltas: 66% (91/137)\rResolving deltas: 67% (92/137)\rResolving deltas: 68% (94/137)\rResolving deltas: 69% (95/137)\rResolving deltas: 70% (96/137)\rResolving deltas: 71% (98/137)\rResolving deltas: 72% (99/137)\rResolving deltas: 73% (101/137)\rResolving deltas: 74% (102/137)\rResolving deltas: 75% (103/137)\rResolving deltas: 76% (105/137)\rResolving deltas: 77% (106/137)\rResolving deltas: 78% (107/137)\rResolving deltas: 79% (109/137)\rResolving deltas: 80% (110/137)\rResolving deltas: 81% (111/137)\rResolving deltas: 82% (113/137)\rResolving deltas: 83% (114/137)\rResolving deltas: 84% (116/137)\rResolving deltas: 85% (117/137)\rResolving deltas: 86% (118/137)\rResolving deltas: 87% (120/137)\rResolving deltas: 88% (121/137)\rResolving deltas: 89% (122/137)\rResolving deltas: 90% (124/137)\rResolving deltas: 91% (125/137)\rResolving deltas: 92% (127/137)\rResolving deltas: 93% (128/137)\rResolving deltas: 94% (129/137)\rResolving deltas: 95% (131/137)\rResolving deltas: 96% (132/137)\rResolving deltas: 97% (133/137)\rResolving deltas: 98% (135/137)\rResolving deltas: 99% (136/137)\rResolving deltas: 100% (137/137)\rResolving deltas: 100% (137/137), completed with 8 local objects.\r\n",,terminal_output +1499,3577930,"TERMINAL",0,0,"From github.com:p-doom/jasmine\r\n cb3826d..bbef694 main -> origin/main\r\n * [new branch] del-lam-decoder-fix-lam-tok-restore -> origin/del-lam-decoder-fix-lam-tok-restore\r\n * [new branch] disable-exit-on-nan -> origin/disable-exit-on-nan\r\n * [new branch] donate-optimizer-buffer-nnx-jit -> origin/donate-optimizer-buffer-nnx-jit\r\n ff9c12e..1429dce dynamics-training-gt-actions -> origin/dynamics-training-gt-actions\r\n * [new branch] exit-on-nan -> origin/exit-on-nan\r\n * [new branch] full-precision-layernorm -> origin/full-precision-layernorm\r\n",,terminal_output +1500,3578130,"TERMINAL",0,0," 4e0846b..eeb24a7 input_pipeline/add-npy2array_record -> origin/input_pipeline/add-npy2array_record\r\n * [new branch] model_inspection_demo -> origin/model_inspection_demo\r\n * [new branch] 
modularize-training-scripts -> origin/modularize-training-scripts\r\n * [new branch] momentum-in-fp32 -> origin/momentum-in-fp32\r\n * [new branch] print-mem-stats-after-param-init -> origin/print-mem-stats-after-param-init\r\n * [new branch] print-mem-stats-after-param-init-2 -> origin/print-mem-stats-after-param-init-2\r\n * [new branch] revert-163-donate-optimizer-buffer-nnx-jit -> origin/revert-163-donate-optimizer-buffer-nnx-jit\r\n * [new branch] revert-164-xla-client-mem-fraction-98 -> origin/revert-164-xla-client-mem-fraction-98\r\n * [new branch] revert-165-print-mem-stats-after-param-init -> origin/revert-165-print-mem-stats-after-param-init\r\n * [new branch] simplified-param-calculation -> origin/simplified-param-calculation\r\n * [new branch] sow-weights-logits-activations -> origin/sow-weights-logits-activations\r\n * [new branch] tokenizer-fwd-half-precision -> origin/tokenizer-fwd-half-precision\r\n * [new branch] xla-client-mem-fraction-98 -> origin/xla-client-mem-fraction-98\r\n * [new branch] xla-cost-memory-analysis -> origin/xla-cost-memory-analysis\r\n * [new branch] xla-mem-frac-98 -> origin/xla-mem-frac-98\r\n",,terminal_output +1501,3578192,"TERMINAL",0,0,"Updating b1558ee..bbef694\r\n",,terminal_output +1502,3578315,"TERMINAL",0,0,"Fast-forward\r\n",,terminal_output +1503,3578429,"TERMINAL",0,0," genie.py | 53 ++++++++++++++++--------\r\n models/dynamics.py | 19 ++++++---\r\n models/lam.py | 7 +++-\r\n models/tokenizer.py | 1 +\r\n sample.py | 12 ++++--\r\n train_dynamics.py | 366 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------------------------------------\r\n train_lam.py | 420 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----------------------------------------------------------------------------\r\n train_tokenizer.py | 390 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------------------------------------------------------\r\n utils/lr_utils.py | 43 --------------------\r\n utils/nn.py | 112 ++++++++++++++++++++++++++++++++++++++------------\r\n utils/parameter_utils.py | 40 ------------------\r\n utils/train_utils.py | 119 +++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n 12 files changed, 962 insertions(+), 620 deletions(-)\r\n delete mode 100644 utils/lr_utils.py\r\n delete mode 100644 utils/parameter_utils.py\r\n create mode 100644 utils/train_utils.py\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +1504,3605846,"TERMINAL",0,0,"git checkout -b ""coinrun-data-generation""",,terminal_command +1505,3605858,"TERMINAL",0,0,"]633;E;2025-09-04 10:57:03 git checkout -b ""coinrun-data-generation"";86057cda-56ec-4e2d-8be5-07b868da4eb8]633;CSwitched to a new branch 'coinrun-data-generation'\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +1506,3610672,"",0,0,"Switched from branch 'main' to 'coinrun-data-generation'",,git_branch_checkout +1507,3614502,"TERMINAL",0,0,"git stash pop",,terminal_command +1508,3614591,"TERMINAL",0,0,"]633;E;2025-09-04 10:57:11 git stash pop;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;COn branch coinrun-data-generation\r\nChanges not staged for commit:\r\n (use ""git add ..."" to update what will be committed)\r\n (use ""git restore ..."" to discard changes in working directory)\r\n\tmodified: generate_dataset.py\r\n\r\nUntracked files:\r\n (use ""git add ..."" to 
include in what will be committed)\r\n\tdata/\r\n\tdata_atari/\r\n\tkiller.sh\r\n\tkiller_partition.sh\r\n\tlog.log\r\n\tlogs/\r\n\toverfit_dir.zip\r\n\tread_tf_record.py\r\n\trequirements-franz.txt\r\n\tsamples/\r\n\tscripts_cremers/\r\n\tslurm/\r\n\tutils/visualizer.py\r\n\r\nno changes added to commit (use ""git add"" and/or ""git commit -a"")\r\nDropped refs/stash@{0} (7c98d84c03b30860908ead3330d400f3e5f3e611)\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +1509,3621981,".venv/lib/python3.10/site-packages/gym3/types_np.py",0,0,"from functools import partial\nfrom typing import Any, Optional, Sequence, Tuple\n\nimport numpy as np\n\nfrom gym3.types import Discrete, Real, TensorType, ValType, multimap\n\n\ndef concat(xs: Sequence[Any], axis: int = 0) -> Any:\n """"""\n Concatenate the (leaf) arrays from xs\n\n :param xs: list of trees with the same shape, where the leaf values are numpy arrays\n :param axis: axis to concatenate along\n """"""\n return multimap(lambda *xs: np.concatenate(xs, axis=axis), *xs)\n\n\ndef stack(xs: Sequence[Any], axis: int = 0) -> Any:\n """"""\n Stack the (leaf) arrays from xs\n\n :param xs: list of trees with the same shape, where the leaf values are numpy arrays\n :param axis: axis to stack along\n """"""\n return multimap(lambda *xs: np.stack(xs, axis=axis), *xs)\n\n\ndef split(x: Any, sections: Sequence[int]) -> Sequence[Any]:\n """"""\n Split the (leaf) arrays from the tree x\n\n Examples:\n\n split([1,2,3,4], [1,2,3,4]) => [[1], [2], [3], [4]]\n split([1,2,3,4], [1,3,4]) => [[1], [2, 3], [4]]\n\n :param x: a tree where the leaf values are numpy arrays\n :param sections: list of indices to split at (not sizes of each split)\n\n :returns: list of trees with length `len(sections)` with the same shape as x\n where each leaf is the corresponding section of the leaf in x\n """"""\n result = []\n start = 0\n for end in sections:\n select_tree = multimap(lambda arr: arr[start:end], x)\n start = end\n result.append(select_tree)\n return result\n\n\ndef dtype(tt: TensorType) -> np.dtype:\n """"""\n :param tt: TensorType to get dtype for\n\n :returns: numpy.dtype to use for tt\n """"""\n assert isinstance(tt, TensorType)\n return np.dtype(tt.eltype.dtype_name)\n\n\ndef zeros(vt: ValType, bshape: Tuple) -> Any:\n """"""\n :param vt: ValType to create zeros for\n :param bshape: batch shape to prepend to the shape of each numpy array created by this function\n\n :returns: tree of numpy arrays matching vt\n """"""\n return multimap(\n lambda subdt: np.zeros(bshape + subdt.shape, dtype=dtype(subdt)), vt\n )\n\n\ndef _sample_tensor(\n tt: TensorType, bshape: Tuple, rng: Optional[np.random.RandomState] = None\n) -> np.ndarray:\n """"""\n :param tt: TensorType to create sample for\n :param bshape: batch shape to prepend to the shape of each numpy array created by this function\n :param rng: np.random.RandomState to use for sampling\n\n :returns: numpy array matching tt\n """"""\n if rng is None:\n rng = np.random\n assert isinstance(tt, TensorType)\n eltype = tt.eltype\n shape = bshape + tt.shape\n if isinstance(eltype, Discrete):\n return rng.randint(eltype.n, size=shape, dtype=dtype(tt))\n elif isinstance(eltype, Real):\n return rng.randn(*shape).astype(dtype(tt))\n else:\n raise ValueError(f""Expected ScalarType, got {type(eltype)}"")\n\n\ndef sample(\n vt: ValType, bshape: Tuple, rng: Optional[np.random.RandomState] = None\n) -> Any:\n """"""\n :param vt: ValType to create sample for\n :param bshape: batch shape to prepend to the shape of each 
numpy array created by this function\n :param rng: np.random.RandomState to use for sampling\n\n :returns: tree of numpy arrays matching vt\n """"""\n return multimap(partial(_sample_tensor, bshape=bshape, rng=rng), vt)\n",python,tab +1510,3623635,"TERMINAL",0,0,"bash",,terminal_focus +1511,3624002,"TERMINAL",0,0,"bash",,terminal_focus +1512,3631855,"TERMINAL",0,0,"deactivate",,terminal_command +1513,3635377,"TERMINAL",0,0,"ls",,terminal_command +1514,3635462,"TERMINAL",0,0,"]633;E;2025-09-04 10:57:32 ls;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;Cdata frame-knoms.png generate_dataset.py input_pipeline LICENSE models __pycache__ requirements-franz.txt samples tests train_tokenizer.py\r\ndata_atari frame.png genie.py killer_partition.sh log.log overfit_dir README.md requirements.txt scripts_cremers train_dynamics.py utils\r\ndebug frames gifs killer.sh logs overfit_dir.zip read_tf_record.py sample.py slurm train_lam.py wandb\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +1515,3643963,"TERMINAL",0,0,"uv venv --python 3.10",,terminal_command +1516,3644001,"TERMINAL",0,0,"]633;E;2025-09-04 10:57:41 uv venv --python 3.10;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +1517,3644078,"TERMINAL",0,0,"Using CPython 3.10.18\r\nCreating virtual environment at: .venv\r\n",,terminal_output +1518,3644090,"TERMINAL",0,0,"Activate with: source .venv/bin/activate\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +1519,3646861,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command +1520,3646962,"TERMINAL",0,0,"]633;E;2025-09-04 10:57:44 source .venv/bin/activate;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jasmine",,terminal_output +1521,3656373,"TERMINAL",0,0,"uv pip install -r requirements.txt",,terminal_command +1522,3656412,"TERMINAL",0,0,"]633;E;2025-09-04 10:57:53 uv pip install -r requirements.txt ;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +1523,3656479,"TERMINAL",0,0,"⠋ Resolving dependencies... ",,terminal_output +1524,3656603,"TERMINAL",0,0,"\r⠙ ffmpeg-python==0.2.0 \r⠙ hf-transfer==0.1.9 \r⠙ dm-pix==0.4.4 \r⠙ einops==0.8.1 \r⠙ flax==0.10.7 ",,terminal_output +1525,3656671,"TERMINAL",0,0,"\r⠹ wandb==0.21.3 \r⠹ grain==0.2.12 ",,terminal_output +1526,3656724,"TERMINAL",0,0,"\r⠹ huggingface-hub==0.34.4 ",,terminal_output +1527,3656785,"TERMINAL",0,0,"\r⠹ inquirerpy==0.3.4 ",,terminal_output +1528,3656894,"TERMINAL",0,0,"\r⠹ jax-cuda12-plugin==0.6.2 ",,terminal_output +1529,3656947,"TERMINAL",0,0,"\r⠸ jax-cuda12-pjrt==0.6.2 ",,terminal_output +1530,3657029,"TERMINAL",0,0,"\r⠸ pydantic-core==2.33.2 \r⠸ charset-normalizer==3.4.3 ",,terminal_output +1531,3657083,"TERMINAL",0,0,"\r⠼ wrapt==1.17.3 ",,terminal_output +1532,3657195,"TERMINAL",0,0,"\r⠼ importlib-resources==6.5.2 \rResolved 103 packages in 697ms\r\n",,terminal_output +1533,3659168,"TERMINAL",0,0,"⠋ Preparing packages... (0/10) ",,terminal_output
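The gym3/types_np.py module opened above is a thin tree-utility layer: every helper uses multimap to apply one NumPy operation to each leaf of a nested structure of arrays. Below is a minimal usage sketch, assuming gym3 imports cleanly in the freshly created .venv; the example space and the TensorType/Discrete/Real constructor arguments are illustrative assumptions inferred from how dtype() and _sample_tensor() read their fields, not something confirmed by this recording.

import numpy as np

from gym3 import types_np
from gym3.types import Discrete, Real, TensorType

# Hypothetical ValType tree: a dict whose leaves are TensorTypes.
space = {
    "rgb": TensorType(Real(), (64, 64, 3)),    # real-valued leaves
    "buttons": TensorType(Discrete(2), (8,)),  # integer leaves in [0, 2)
}

# zeros() and sample() prepend the batch shape to every leaf.
zero_batch = types_np.zeros(space, bshape=(4,))
assert zero_batch["rgb"].shape == (4, 64, 64, 3)

rng = np.random.RandomState(0)
obs = types_np.sample(space, bshape=(4,), rng=rng)

# stack() and concat() combine whole trees leaf-wise.
pair = types_np.stack([obs, obs])
assert pair["buttons"].shape == (2, 4, 8)

# split() takes cut indices, not section sizes (per its docstring).
first, rest = types_np.split(obs, sections=[2, 4])
assert first["rgb"].shape == (2, 64, 64, 3)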
+1534,3659409,"TERMINAL",0,0,"\r⠙ Preparing packages... (0/10)\r\nmore-itertools  ------------------------------ 62.87 KiB/68.03 KiB\r\nwrapt  ------------------------------ 62.91 KiB/80.05 KiB\r\nidentify  ------------------------------ 14.88 KiB/96.83 KiB\r\npre-commit  ------------------------------ 16.00 KiB/215.79 KiB\r\ndistlib  ------------------------------ 30.88 KiB/458.05 KiB\r\ngrain  ------------------------------ 14.91 KiB/487.72 KiB\r\narray-record  ------------------------------ 13.39 KiB/2.35 MiB\r\nvirtualenv  ------------------------------ 14.87 KiB/5.71 MiB\r\nnvidia-nccl-cu12  ------------------------------ 479.89 KiB/307.60 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 16.00 KiB/545.19 MiB ",,terminal_output +1554,3661717,"TERMINAL",0,0,"\r⠼ Preparing packages... (6/10)\r\ngrain  ------------------------------ 204.51 KiB/487.72 KiB\r\nvirtualenv  ------------------------------ 94.83 KiB/5.71 MiB\r\nnvidia-nccl-cu12  ------------------------------ 109.10 MiB/307.60 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 89.14 MiB/545.19 MiB ",,terminal_output +1567,3663110,"TERMINAL",0,0,"\r⠹ Preparing packages... (8/10)\r\nnvidia-nccl-cu12  ------------------------------ 179.51 MiB/307.60 MiB\r\nnvidia-cudnn-cu12  ------------------------------ 152.37 MiB/545.19 MiB ",,terminal_output +1596,3665777,"TERMINAL",0,0,"\r⠦ Preparing packages... (8/10)\r\nnvidia-cudnn-cu12  ------------------------------ 282.67 MiB/545.19 MiB ",,terminal_output +1599,3666027,"TERMINAL",0,0,"\r⠇ Preparing packages... (9/10)\r\nnvidia-cudnn-cu12  ------------------------------ 298.20 MiB/545.19 MiB ",,terminal_output +1633,3668814,"TERMINAL",0,0,"\r⠼ Preparing packages... (9/10)\r\nnvidia-cudnn-cu12  ------------------------------ 544.29 MiB/545.19 MiB ",,terminal_output +1634,3668879,"TERMINAL",0,0,"\r⠼ Preparing packages... (9/10) \rPrepared 10 packages in 9.69s\r\n░░░░░░░░░░░░░░░░░░░░ [0/0] Installing wheels... \r░░░░░░░░░░░░░░░░░░░░ [0/103] Installing wheels... 
",,terminal_output +1635,3668951,"TERMINAL",0,0,"\r░░░░░░░░░░░░░░░░░░░░ [0/103] distlib==0.4.0 \r░░░░░░░░░░░░░░░░░░░░ [1/103] distlib==0.4.0 \r░░░░░░░░░░░░░░░░░░░░ [1/103] more-itertools==10.8.0 \r░░░░░░░░░░░░░░░░░░░░ [2/103] more-itertools==10.8.0 ",,terminal_output +1636,3669073,"TERMINAL",0,0,"\r░░░░░░░░░░░░░░░░░░░░ [2/103] array-record==0.8.1 \r░░░░░░░░░░░░░░░░░░░░ [3/103] array-record==0.8.1 \r░░░░░░░░░░░░░░░░░░░░ [3/103] chex==0.1.90 \r░░░░░░░░░░░░░░░░░░░░ [4/103] chex==0.1.90 \r░░░░░░░░░░░░░░░░░░░░ [4/103] identify==2.6.13 \r░░░░░░░░░░░░░░░░░░░░ [5/103] identify==2.6.13 \r░░░░░░░░░░░░░░░░░░░░ [5/103] wrapt==1.17.3 \r█░░░░░░░░░░░░░░░░░░░ [6/103] wrapt==1.17.3 \r█░░░░░░░░░░░░░░░░░░░ [6/103] virtualenv==20.34.0 \r█░░░░░░░░░░░░░░░░░░░ [7/103] virtualenv==20.34.0 \r█░░░░░░░░░░░░░░░░░░░ [7/103] pre-commit==4.3.0 \r█░░░░░░░░░░░░░░░░░░░ [8/103] pre-commit==4.3.0 \r█░░░░░░░░░░░░░░░░░░░ [8/103] pydantic-core==2.33.2 \r█░░░░░░░░░░░░░░░░░░░ [9/103] pydantic-core==2.33.2 \r█░░░░░░░░░░░░░░░░░░░ [9/103] aiofiles==24.1.0 \r█░░░░░░░░░░░░░░░░░░░ [10/103] aiofiles==24.1.0 \r█░░░░░░░░░░░░░░░░░░░ [10/103] nodeenv==1.9.1 \r██░░░░░░░░░░░░░░░░░░ [11/103] nodeenv==1.9.1 ",,terminal_output +1637,3669184,"TERMINAL",0,0,"\r██░░░░░░░░░░░░░░░░░░ [14/103] ml-dtypes==0.5.3 \r███░░░░░░░░░░░░░░░░░ [16/103] click==8.2.1 ",,terminal_output +1638,3669271,"TERMINAL",0,0,"\r███░░░░░░░░░░░░░░░░░ [18/103] glfw==1.12.0 \r███░░░░░░░░░░░░░░░░░ [19/103] dm-pix==0.4.4 ",,terminal_output +1639,3669378,"TERMINAL",0,0,"\r████░░░░░░░░░░░░░░░░ [21/103] nvidia-cusolver-cu12==11.7.5.82 \r████░░░░░░░░░░░░░░░░ [22/103] nvidia-cusolver-cu12==11.7.5.82 ",,terminal_output +1640,3669485,"TERMINAL",0,0,"\r████░░░░░░░░░░░░░░░░ [22/103] pydantic==2.11.7 \r████░░░░░░░░░░░░░░░░ [23/103] pydantic==2.11.7 ",,terminal_output +1641,3669813,"TERMINAL",0,0,"\r████░░░░░░░░░░░░░░░░ [24/103] urllib3==2.5.0 \r████░░░░░░░░░░░░░░░░ [25/103] urllib3==2.5.0 ",,terminal_output +1642,3670039,"TERMINAL",0,0,"\r████░░░░░░░░░░░░░░░░ [25/103] tqdm==4.67.1 \r█████░░░░░░░░░░░░░░░ [26/103] tqdm==4.67.1 ",,terminal_output +1643,3670103,"TERMINAL",0,0,"\r█████░░░░░░░░░░░░░░░ [26/103] cfgv==3.4.0 \r█████░░░░░░░░░░░░░░░ [27/103] cfgv==3.4.0 ",,terminal_output +1644,3670709,"TERMINAL",0,0,"\r█████░░░░░░░░░░░░░░░ [27/103] pygments==2.19.2 \r█████░░░░░░░░░░░░░░░ [28/103] pygments==2.19.2 \r█████░░░░░░░░░░░░░░░ [28/103] prompt-toolkit==3.0.52 \r█████░░░░░░░░░░░░░░░ [29/103] prompt-toolkit==3.0.52 \r█████░░░░░░░░░░░░░░░ [29/103] idna==3.10 \r█████░░░░░░░░░░░░░░░ [30/103] idna==3.10 ",,terminal_output +1645,3670805,"TERMINAL",0,0,"\r█████░░░░░░░░░░░░░░░ [30/103] optax==0.2.5 \r██████░░░░░░░░░░░░░░ [31/103] optax==0.2.5 ",,terminal_output +1646,3670893,"TERMINAL",0,0,"\r██████░░░░░░░░░░░░░░ [31/103] huggingface-hub==0.34.4 \r██████░░░░░░░░░░░░░░ [32/103] huggingface-hub==0.34.4 \r██████░░░░░░░░░░░░░░ [32/103] docstring-parser==0.17.0 \r██████░░░░░░░░░░░░░░ [33/103] docstring-parser==0.17.0 \r██████░░░░░░░░░░░░░░ [33/103] ffmpeg-python==0.2.0 \r██████░░░░░░░░░░░░░░ [34/103] ffmpeg-python==0.2.0 ",,terminal_output +1647,3670957,"TERMINAL",0,0,"\r██████░░░░░░░░░░░░░░ [34/103] etils==1.13.0 \r██████░░░░░░░░░░░░░░ [35/103] etils==1.13.0 ",,terminal_output +1648,3671019,"TERMINAL",0,0,"\r██████░░░░░░░░░░░░░░ [35/103] filelock==3.19.1 \r██████░░░░░░░░░░░░░░ [36/103] filelock==3.19.1 ",,terminal_output +1649,3671184,"TERMINAL",0,0,"\r██████░░░░░░░░░░░░░░ [36/103] smmap==5.0.2 \r███████░░░░░░░░░░░░░ [37/103] smmap==5.0.2 \r███████░░░░░░░░░░░░░ [37/103] attrs==25.3.0 
\r███████░░░░░░░░░░░░░ [38/103] attrs==25.3.0 \r███████░░░░░░░░░░░░░ [38/103] humanize==4.13.0 \r███████░░░░░░░░░░░░░ [39/103] humanize==4.13.0 ",,terminal_output +1650,3671398,"TERMINAL",0,0,"\r███████░░░░░░░░░░░░░ [39/103] pycparser==2.22 \r███████░░░░░░░░░░░░░ [40/103] pycparser==2.22 ",,terminal_output +1651,3671562,"TERMINAL",0,0,"\r███████░░░░░░░░░░░░░ [40/103] simplejson==3.20.1 \r███████░░░░░░░░░░░░░ [41/103] simplejson==3.20.1 ",,terminal_output +1652,3671718,"TERMINAL",0,0,"\r███████░░░░░░░░░░░░░ [41/103] protobuf==6.32.0 \r████████░░░░░░░░░░░░ [42/103] protobuf==6.32.0 \r████████░░░░░░░░░░░░ [42/103] tensorstore==0.1.76 \r████████░░░░░░░░░░░░ [43/103] tensorstore==0.1.76 ",,terminal_output +1653,3671840,"TERMINAL",0,0,"\r████████░░░░░░░░░░░░ [43/103] nvidia-cuda-cupti-cu12==12.9.79 \r████████░░░░░░░░░░░░ [44/103] nvidia-cuda-cupti-cu12==12.9.79 ",,terminal_output +1654,3672174,"TERMINAL",0,0,"\r████████░░░░░░░░░░░░ [44/103] opt-einsum==3.4.0 \r████████░░░░░░░░░░░░ [45/103] opt-einsum==3.4.0 \r████████░░░░░░░░░░░░ [45/103] imageio==2.37.0 \r████████░░░░░░░░░░░░ [46/103] imageio==2.37.0 \r████████░░░░░░░░░░░░ [46/103] nest-asyncio==1.6.0 \r█████████░░░░░░░░░░░ [47/103] nest-asyncio==1.6.0 ",,terminal_output +1655,3672248,"TERMINAL",0,0,"\r█████████░░░░░░░░░░░ [47/103] imageio-ffmpeg==0.3.0 \r█████████░░░░░░░░░░░ [48/103] imageio-ffmpeg==0.3.0 ",,terminal_output +1656,3672298,"TERMINAL",0,0,"\r█████████░░░░░░░░░░░ [48/103] nvidia-nvjitlink-cu12==12.9.86 \r█████████░░░░░░░░░░░ [49/103] nvidia-nvjitlink-cu12==12.9.86 ",,terminal_output +1657,3672424,"TERMINAL",0,0,"\r█████████░░░░░░░░░░░ [49/103] cffi==1.17.1 \r█████████░░░░░░░░░░░ [50/103] cffi==1.17.1 ",,terminal_output +1658,3672488,"TERMINAL",0,0,"\r█████████░░░░░░░░░░░ [50/103] nvidia-cublas-cu12==12.9.1.4 \r█████████░░░░░░░░░░░ [51/103] nvidia-cublas-cu12==12.9.1.4 ",,terminal_output +1659,3672550,"TERMINAL",0,0,"\r█████████░░░░░░░░░░░ [51/103] jax-cuda12-pjrt==0.6.2 \r██████████░░░░░░░░░░ [52/103] jax-cuda12-pjrt==0.6.2 ",,terminal_output +1660,3672617,"TERMINAL",0,0,"\r██████████░░░░░░░░░░ [52/103] tyro==0.9.31 \r██████████░░░░░░░░░░ [53/103] tyro==0.9.31 ",,terminal_output +1661,3672679,"TERMINAL",0,0,"\r██████████░░░░░░░░░░ [53/103] shtab==1.7.2 \r██████████░░░░░░░░░░ [54/103] shtab==1.7.2 ",,terminal_output +1662,3672926,"TERMINAL",0,0,"\r██████████░░░░░░░░░░ [54/103] jaxlib==0.6.2 \r██████████░░░░░░░░░░ [55/103] jaxlib==0.6.2 ",,terminal_output +1663,3672992,"TERMINAL",0,0,"\r██████████░░░░░░░░░░ [55/103] nvidia-cuda-runtime-cu12==12.9.79 \r██████████░░░░░░░░░░ [56/103] nvidia-cuda-runtime-cu12==12.9.79 ",,terminal_output +1664,3673054,"TERMINAL",0,0,"\r██████████░░░░░░░░░░ [56/103] wcwidth==0.2.13 \r███████████░░░░░░░░░ [57/103] wcwidth==0.2.13 ",,terminal_output +1665,3673140,"TERMINAL",0,0,"\r███████████░░░░░░░░░ [57/103] charset-normalizer==3.4.3 \r███████████░░░░░░░░░ [58/103] charset-normalizer==3.4.3 \r███████████░░░░░░░░░ [58/103] pfzy==0.3.4 \r███████████░░░░░░░░░ [59/103] pfzy==0.3.4 \r███████████░░░░░░░░░ [59/103] typing-extensions==4.15.0 \r███████████░░░░░░░░░ [60/103] typing-extensions==4.15.0 ",,terminal_output +1666,3673239,"TERMINAL",0,0,"\r███████████░░░░░░░░░ [60/103] nvidia-cufft-cu12==11.4.1.4 \r███████████░░░░░░░░░ [61/103] nvidia-cufft-cu12==11.4.1.4 ",,terminal_output +1667,3673737,"TERMINAL",0,0,"\r███████████░░░░░░░░░ [61/103] pillow==11.3.0 \r████████████░░░░░░░░ [62/103] pillow==11.3.0 \r████████████░░░░░░░░ [62/103] future==1.0.0 \r████████████░░░░░░░░ [63/103] future==1.0.0 ",,terminal_output 
+1668,3673800,"TERMINAL",0,0,"\r████████████░░░░░░░░ [63/103] moderngl==5.12.0 \r████████████░░░░░░░░ [64/103] moderngl==5.12.0 ",,terminal_output +1669,3673864,"TERMINAL",0,0,"\r████████████░░░░░░░░ [64/103] fsspec==2025.9.0 \r████████████░░░░░░░░ [65/103] fsspec==2025.9.0 \r████████████░░░░░░░░ [65/103] hf-xet==1.1.9 \r████████████░░░░░░░░ [66/103] hf-xet==1.1.9 ",,terminal_output +1670,3674001,"TERMINAL",0,0,"\r████████████░░░░░░░░ [66/103] pyyaml==6.0.2 \r█████████████░░░░░░░ [67/103] pyyaml==6.0.2 ",,terminal_output +1671,3674320,"TERMINAL",0,0,"\r█████████████░░░░░░░ [67/103] mdurl==0.1.2 \r█████████████░░░░░░░ [68/103] mdurl==0.1.2 \r█████████████░░░░░░░ [68/103] absl-py==2.3.1 \r█████████████░░░░░░░ [69/103] absl-py==2.3.1 ",,terminal_output +1672,3674467,"TERMINAL",0,0,"\r█████████████░░░░░░░ [69/103] gitpython==3.1.45 \r█████████████░░░░░░░ [70/103] gitpython==3.1.45 ",,terminal_output +1673,3674592,"TERMINAL",0,0,"\r█████████████░░░░░░░ [70/103] sentry-sdk==2.36.0 \r█████████████░░░░░░░ [71/103] sentry-sdk==2.36.0 \r█████████████░░░░░░░ [71/103] cloudpickle==3.1.1 \r█████████████░░░░░░░ [72/103] cloudpickle==3.1.1 ",,terminal_output +1674,3674653,"TERMINAL",0,0,"\r█████████████░░░░░░░ [72/103] gym-notices==0.1.0 \r██████████████░░░░░░ [73/103] gym-notices==0.1.0 ",,terminal_output +1675,3674758,"TERMINAL",0,0,"\r██████████████░░░░░░ [73/103] typing-inspection==0.4.1 \r██████████████░░░░░░ [74/103] typing-inspection==0.4.1 ",,terminal_output +1676,3674927,"TERMINAL",0,0,"\r██████████████░░░░░░ [74/103] requests==2.32.5 \r██████████████░░░░░░ [75/103] requests==2.32.5 \r██████████████░░░░░░ [75/103] nvidia-cuda-nvcc-cu12==12.9.86 \r██████████████░░░░░░ [76/103] nvidia-cuda-nvcc-cu12==12.9.86 \r██████████████░░░░░░ [76/103] rich==14.1.0 \r██████████████░░░░░░ [77/103] rich==14.1.0 \r██████████████░░░░░░ [77/103] gitdb==4.0.12 \r███████████████░░░░░ [78/103] gitdb==4.0.12 \r███████████████░░░░░ [78/103] nvidia-cuda-nvrtc-cu12==12.9.86 ",,terminal_output +1677,3675254,"TERMINAL",0,0,"\r███████████████░░░░░ [80/103] packaging==25.0 \r███████████████░░░░░ [81/103] packaging==25.0 \r███████████████░░░░░ [81/103] typeguard==4.4.4 \r███████████████░░░░░ [82/103] typeguard==4.4.4 \r███████████████░░░░░ [82/103] importlib-resources==6.5.2 \r████████████████░░░░ [83/103] importlib-resources==6.5.2 ",,terminal_output +1678,3675326,"TERMINAL",0,0,"\r████████████████░░░░ [83/103] certifi==2025.8.3 \r████████████████░░░░ [84/103] certifi==2025.8.3 ",,terminal_output +1679,3675855,"TERMINAL",0,0,"\r████████████████░░░░ [84/103] inquirerpy==0.3.4 \r████████████████░░░░ [85/103] inquirerpy==0.3.4 ",,terminal_output +1680,3675952,"TERMINAL",0,0,"\r████████████████░░░░ [85/103] platformdirs==4.4.0 \r████████████████░░░░ [86/103] platformdirs==4.4.0 ",,terminal_output +1681,3676264,"TERMINAL",0,0,"\r████████████████░░░░ [86/103] markdown-it-py==4.0.0 \r████████████████░░░░ [87/103] markdown-it-py==4.0.0 ",,terminal_output +1682,3676574,"TERMINAL",0,0,"\r████████████████░░░░ [87/103] gym3==0.3.3 \r█████████████████░░░ [88/103] gym3==0.3.3 ",,terminal_output +1683,3676714,"TERMINAL",0,0,"\r█████████████████░░░ [88/103] nvidia-nvshmem-cu12==3.3.24 \r█████████████████░░░ [89/103] nvidia-nvshmem-cu12==3.3.24 ",,terminal_output +1684,3676913,"TERMINAL",0,0,"\r█████████████████░░░ [89/103] gym==0.26.2 \r█████████████████░░░ [90/103] gym==0.26.2 ",,terminal_output +1685,3676975,"TERMINAL",0,0,"\r█████████████████░░░ [90/103] wandb==0.21.3 \r█████████████████░░░ [91/103] wandb==0.21.3 ",,terminal_output 
+1686,3677057,"TERMINAL",0,0,"\r█████████████████░░░ [91/103] jax-cuda12-plugin==0.6.2 \r█████████████████░░░ [92/103] jax-cuda12-plugin==0.6.2 \r█████████████████░░░ [92/103] hf-transfer==0.1.9 \r██████████████████░░ [93/103] hf-transfer==0.1.9 ",,terminal_output +1687,3677133,"TERMINAL",0,0,"\r██████████████████░░ [93/103] zipp==3.23.0 \r██████████████████░░ [94/103] zipp==3.23.0 ",,terminal_output +1688,3677271,"TERMINAL",0,0,"\r██████████████████░░ [94/103] einops==0.8.1 \r██████████████████░░ [95/103] einops==0.8.1 ",,terminal_output +1689,3677434,"TERMINAL",0,0,"\r██████████████████░░ [95/103] toolz==1.0.0 \r██████████████████░░ [96/103] toolz==1.0.0 ",,terminal_output +1690,3677645,"TERMINAL",0,0,"\r██████████████████░░ [96/103] jax==0.6.2 \r██████████████████░░ [97/103] jax==0.6.2 ",,terminal_output +1691,3678008,"TERMINAL",0,0,"\r██████████████████░░ [97/103] treescope==0.1.10 \r███████████████████░ [98/103] treescope==0.1.10 ",,terminal_output +1692,3678071,"TERMINAL",0,0,"\r███████████████████░ [98/103] flax==0.10.7 \r███████████████████░ [99/103] flax==0.10.7 ",,terminal_output +1693,3678231,"TERMINAL",0,0,"\r███████████████████░ [99/103] orbax-checkpoint==0.11.24 \r███████████████████░ [100/103] orbax-checkpoint==0.11.24 ",,terminal_output +1694,3679783,"TERMINAL",0,0,"\r███████████████████░ [100/103] procgen==0.10.7 \r███████████████████░ [101/103] procgen==0.10.7 ",,terminal_output +1695,3679961,"TERMINAL",0,0,"\r███████████████████░ [101/103] numpy==1.26.4 \r███████████████████░ [102/103] numpy==1.26.4 ",,terminal_output +1696,3686309,"TERMINAL",0,0,"\r███████████████████░ [102/103] scipy==1.15.3 \r████████████████████ [103/103] scipy==1.15.3 \rInstalled 103 packages in 17.13s\r\n + absl-py==2.3.1\r\n + aiofiles==24.1.0\r\n + annotated-types==0.7.0\r\n + array-record==0.8.1\r\n + attrs==25.3.0\r\n + certifi==2025.8.3\r\n + cffi==1.17.1\r\n + cfgv==3.4.0\r\n + charset-normalizer==3.4.3\r\n + chex==0.1.90\r\n + click==8.2.1\r\n + cloudpickle==3.1.1\r\n + distlib==0.4.0\r\n + dm-pix==0.4.4\r\n + dm-tree==0.1.9\r\n + docstring-parser==0.17.0\r\n + einops==0.8.1\r\n + etils==1.13.0\r\n + ffmpeg-python==0.2.0\r\n + filelock==3.19.1\r\n + flax==0.10.7\r\n + fsspec==2025.9.0\r\n + future==1.0.0\r\n + gitdb==4.0.12\r\n + gitpython==3.1.45\r\n + glcontext==3.0.0\r\n + glfw==1.12.0\r\n + grain==0.2.12\r\n + gym==0.26.2\r\n + gym-notices==0.1.0\r\n + gym3==0.3.3\r\n + hf-transfer==0.1.9\r\n + hf-xet==1.1.9\r\n + huggingface-hub==0.34.4\r\n + humanize==4.13.0\r\n + identify==2.6.13\r\n + idna==3.10\r\n + imageio==2.37.0\r\n + imageio-ffmpeg==0.3.0\r\n + importlib-resources==6.5.2\r\n + inquirerpy==0.3.4\r\n + jax==0.6.2\r\n + jax-cuda12-pjrt==0.6.2\r\n + jax-cuda12-plugin==0.6.2\r\n + jaxlib==0.6.2\r\n + markdown-it-py==4.0.0\r\n + mdurl==0.1.2\r\n + ml-dtypes==0.5.3\r\n + moderngl==5.12.0\r\n + more-itertools==10.8.0\r\n + msgpack==1.1.1\r\n + nest-asyncio==1.6.0\r\n + nodeenv==1.9.1\r\n + numpy==1.26.4\r\n + nvidia-cublas-cu12==12.9.1.4\r\n + nvidia-cuda-cupti-cu12==12.9.79\r\n + nvidia-cuda-nvcc-cu12==12.9.86\r\n + nvidia-cuda-nvrtc-cu12==12.9.86\r\n + nvidia-cuda-runtime-cu12==12.9.79\r\n + nvidia-cudnn-cu12==9.12.0.46\r\n + nvidia-cufft-cu12==11.4.1.4\r\n + nvidia-cusolver-cu12==11.7.5.82\r\n + nvidia-cusparse-cu12==12.5.10.65\r\n + nvidia-nccl-cu12==2.27.7\r\n + nvidia-nvjitlink-cu12==12.9.86\r\n + nvidia-nvshmem-cu12==3.3.24\r\n + opt-einsum==3.4.0\r\n + optax==0.2.5\r\n + orbax-checkpoint==0.11.24\r\n + packaging==25.0\r\n + pfzy==0.3.4\r\n + pillow==11.3.0\r\n + 
platformdirs==4.4.0\r\n + pre-commit==4.3.0\r\n + procgen==0.10.7\r\n + prompt-toolkit==3.0.52\r\n + protobuf==6.32.0\r\n + pycparser==2.22\r\n + pydantic==2.11.7\r\n + pydantic-core==2.33.2\r\n + pygments==2.19.2\r\n + pyyaml==6.0.2\r\n + requests==2.32.5\r\n + rich==14.1.0\r\n + scipy==1.15.3\r\n + sentry-sdk==2.36.0\r\n + shtab==1.7.2\r\n + simplejson==3.20.1\r\n + smmap==5.0.2\r\n + tensorstore==0.1.76\r\n + toolz==1.0.0\r\n + tqdm==4.67.1\r\n + treescope==0.1.10\r\n + typeguard==4.4.4\r\n + typing-extensions==4.15.0\r\n + typing-inspection==0.4.1\r\n + tyro==0.9.31\r\n + urllib3==2.5.0\r\n + virtualenv==20.34.0\r\n + wandb==0.21.3\r\n + wcwidth==0.2.13\r\n + wrapt==1.17.3\r\n + zipp==3.23.0\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +1697,3705663,"TERMINAL",0,0,"clear",,terminal_command +1698,3705694,"TERMINAL",0,0,"]633;E;2025-09-04 10:58:42 clear;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +1699,3709139,".venv/lib/python3.10/site-packages/gym3/types_np.py",0,0,"",python,tab +1700,3710331,".venv/lib/python3.10/site-packages/procgen/env.py",0,0,"import os\nimport random\nfrom typing import Sequence, Optional, List\n\nimport gym3\nfrom gym3.libenv import CEnv\nimport numpy as np\nfrom .builder import build\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\n\nMAX_STATE_SIZE = 2 ** 20\n\nENV_NAMES = [\n ""bigfish"",\n ""bossfight"",\n ""caveflyer"",\n ""chaser"",\n ""climber"",\n ""coinrun"",\n ""dodgeball"",\n ""fruitbot"",\n ""heist"",\n ""jumper"",\n ""leaper"",\n ""maze"",\n ""miner"",\n ""ninja"",\n ""plunder"",\n ""starpilot"",\n]\n\nEXPLORATION_LEVEL_SEEDS = {\n ""coinrun"": 1949448038,\n ""caveflyer"": 1259048185,\n ""leaper"": 1318677581,\n ""jumper"": 1434825276,\n ""maze"": 158988835,\n ""heist"": 876640971,\n ""climber"": 1561126160,\n ""ninja"": 1123500215,\n}\n\n# should match DistributionMode in game.h, except for 'exploration' which is handled by Python\nDISTRIBUTION_MODE_DICT = {\n ""easy"": 0,\n ""hard"": 1,\n ""extreme"": 2,\n ""memory"": 10,\n ""exploration"": 20,\n}\n\n\ndef create_random_seed():\n rand_seed = random.SystemRandom().randint(0, 2 ** 31 - 1)\n try:\n # force MPI processes to definitely choose different random seeds\n from mpi4py import MPI\n\n rand_seed = rand_seed - (rand_seed % MPI.COMM_WORLD.size) + MPI.COMM_WORLD.rank\n except ModuleNotFoundError:\n pass\n return rand_seed\n\n\nclass BaseProcgenEnv(CEnv):\n """"""\n Base procedurally generated environment\n """"""\n\n def __init__(\n self,\n num,\n env_name,\n options,\n debug=False,\n rand_seed=None,\n num_levels=0,\n start_level=0,\n use_sequential_levels=False,\n debug_mode=0,\n resource_root=None,\n num_threads=4,\n render_mode=None,\n ):\n if resource_root is None:\n resource_root = os.path.join(SCRIPT_DIR, ""data"", ""assets"") + os.sep\n assert os.path.exists(resource_root)\n\n lib_dir = os.path.join(SCRIPT_DIR, ""data"", ""prebuilt"")\n if os.path.exists(lib_dir):\n assert any([os.path.exists(os.path.join(lib_dir, name)) for name in [""libenv.so"", ""libenv.dylib"", ""env.dll""]]), ""package is installed, but the prebuilt environment library is missing""\n assert not debug, ""debug has no effect for pre-compiled library""\n else:\n # only compile if we don't find a pre-built binary\n lib_dir = build(debug=debug)\n \n self.combos = self.get_combos()\n\n if render_mode is None:\n render_human = False\n elif render_mode == ""rgb_array"":\n render_human = True\n else:\n raise 
Exception(f""invalid render mode {render_mode}"")\n\n if rand_seed is None:\n rand_seed = create_random_seed()\n\n options.update(\n {\n ""env_name"": env_name,\n ""num_levels"": num_levels,\n ""start_level"": start_level,\n ""num_actions"": len(self.combos),\n ""use_sequential_levels"": bool(use_sequential_levels),\n ""debug_mode"": debug_mode,\n ""rand_seed"": rand_seed,\n ""num_threads"": num_threads,\n ""render_human"": render_human,\n # these will only be used the first time an environment is created in a process\n ""resource_root"": resource_root,\n }\n )\n\n self.options = options\n\n super().__init__(\n lib_dir=lib_dir,\n num=num,\n options=options,\n c_func_defs=[\n ""int get_state(libenv_env *, int, char *, int);"",\n ""void set_state(libenv_env *, int, char *, int);"",\n ],\n )\n # don't use the dict space for actions\n self.ac_space = self.ac_space[""action""]\n\n def get_state(self):\n length = MAX_STATE_SIZE\n buf = self._ffi.new(f""char[{length}]"")\n result = []\n for env_idx in range(self.num):\n n = self.call_c_func(""get_state"", env_idx, buf, length)\n result.append(bytes(self._ffi.buffer(buf, n)))\n return result\n\n def set_state(self, states):\n assert len(states) == self.num\n for env_idx in range(self.num):\n state = states[env_idx]\n self.call_c_func(""set_state"", env_idx, state, len(state))\n\n def get_combos(self):\n return [\n (""LEFT"", ""DOWN""),\n (""LEFT"",),\n (""LEFT"", ""UP""),\n (""DOWN"",),\n (),\n (""UP"",),\n (""RIGHT"", ""DOWN""),\n (""RIGHT"",),\n (""RIGHT"", ""UP""),\n (""D"",),\n (""A"",),\n (""W"",),\n (""S"",),\n (""Q"",),\n (""E"",),\n ]\n\n def keys_to_act(self, keys_list: Sequence[Sequence[str]]) -> List[Optional[np.ndarray]]:\n """"""\n Convert list of keys being pressed to actions, used in interactive mode\n """"""\n result = []\n for keys in keys_list:\n action = None\n max_len = -1\n for i, combo in enumerate(self.get_combos()):\n pressed = True\n for key in combo:\n if key not in keys:\n pressed = False\n\n if pressed and (max_len < len(combo)):\n action = i\n max_len = len(combo)\n\n if action is not None:\n action = np.array([action])\n result.append(action)\n return result\n\n def act(self, ac):\n # tensorflow may return int64 actions (https://github.com/openai/gym/blob/master/gym/spaces/discrete.py#L13)\n # so always cast actions to int32\n return super().act({""action"": ac.astype(np.int32)})\n\n\nclass ProcgenGym3Env(BaseProcgenEnv):\n """"""\n gym3 interface for Procgen\n """"""\n def __init__(\n self,\n num,\n env_name,\n center_agent=True,\n use_backgrounds=True,\n use_monochrome_assets=False,\n restrict_themes=False,\n use_generated_assets=False,\n paint_vel_info=False,\n distribution_mode=""hard"",\n **kwargs,\n ):\n assert (\n distribution_mode in DISTRIBUTION_MODE_DICT\n ), f'""{distribution_mode}"" is not a valid distribution mode.'\n\n if distribution_mode == ""exploration"":\n assert (\n env_name in EXPLORATION_LEVEL_SEEDS\n ), f""{env_name} does not support exploration mode""\n\n distribution_mode = DISTRIBUTION_MODE_DICT[""hard""]\n assert ""num_levels"" not in kwargs, ""exploration mode overrides num_levels""\n kwargs[""num_levels""] = 1\n assert ""start_level"" not in kwargs, ""exploration mode overrides start_level""\n kwargs[""start_level""] = EXPLORATION_LEVEL_SEEDS[env_name]\n else:\n distribution_mode = DISTRIBUTION_MODE_DICT[distribution_mode]\n\n options = {\n ""center_agent"": bool(center_agent),\n ""use_generated_assets"": bool(use_generated_assets),\n ""use_monochrome_assets"": 
bool(use_monochrome_assets),\n ""restrict_themes"": bool(restrict_themes),\n ""use_backgrounds"": bool(use_backgrounds),\n ""paint_vel_info"": bool(paint_vel_info),\n ""distribution_mode"": distribution_mode,\n }\n super().__init__(num, env_name, options, **kwargs)\n \n \nclass ToBaselinesVecEnv(gym3.ToBaselinesVecEnv):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second' : 15\n }\n def render(self, mode=""human""):\n info = self.env.get_info()[0]\n _, ob, _ = self.env.observe()\n if mode == ""rgb_array"":\n if ""rgb"" in info:\n return info[""rgb""]\n else:\n return ob['rgb'][0] \n\n\ndef ProcgenEnv(num_envs, env_name, **kwargs):\n return ToBaselinesVecEnv(ProcgenGym3Env(num=num_envs, env_name=env_name, **kwargs))\n",python,tab +1701,3711669,"TERMINAL",0,0,"bash",,terminal_focus +1702,3714117,"generate_dataset.py",0,0,"""""""\nGenerates a dataset of random-action CoinRun episodes.\nEpisodes are saved individually as memory-mapped files for efficient loading.\n""""""\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nfrom gym3 import types_np\nimport numpy as np\nfrom procgen import ProcgenGym3Env\nimport tyro\n\n\n@dataclass\nclass Args:\n num_episodes: int = 10000\n output_dir: str = ""data/coinrun_episodes""\n min_episode_length: int = 50\n\n\nargs = tyro.cli(Args)\noutput_dir = Path(args.output_dir)\noutput_dir.mkdir(parents=True, exist_ok=True)\n\n# --- Generate episodes ---\ni = 0\nmetadata = []\nwhile i < args.num_episodes:\n seed = np.random.randint(0, 10000)\n env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed)\n dataseq = []\n\n # --- Run episode ---\n for j in range(1000):\n action = types_np.sample(env.ac_space, bshape=(env.num,))\n env.act(action)\n rew, obs, first = env.observe()\n dataseq.append(obs[""rgb""])\n if first:\n break\n\n # --- Save episode ---\n if len(dataseq) >= args.min_episode_length:\n episode_data = np.concatenate(dataseq, axis=0)\n episode_path = output_dir / f""episode_{i}.npy""\n np.save(episode_path, episode_data.astype(np.uint8))\n metadata.append({""path"": str(episode_path), ""length"": len(dataseq)})\n print(f""Episode {i} completed, length: {len(dataseq)}"")\n i += 1\n else:\n print(f""Episode too short ({len(dataseq)}), resampling..."")\n\n# --- Save metadata ---\nnp.save(output_dir / ""metadata.npy"", metadata)\nprint(f""Dataset generated with {len(metadata)} valid episodes"")\n",python,tab +1703,3715373,"generate_dataset.py",879,0,"\n ",python,content +1704,3715824,"generate_dataset.py",888,0,"b",python,content +1705,3715825,"generate_dataset.py",889,0,"",python,selection_keyboard +1706,3716012,"generate_dataset.py",889,0,"r",python,content +1707,3716014,"generate_dataset.py",890,0,"",python,selection_keyboard +1708,3716114,"generate_dataset.py",890,0,"e",python,content +1709,3716116,"generate_dataset.py",891,0,"",python,selection_keyboard +1710,3716255,"generate_dataset.py",891,0,"a",python,content +1711,3716257,"generate_dataset.py",892,0,"",python,selection_keyboard +1712,3716866,"generate_dataset.py",888,4,"breakpoint",python,content +1713,3717602,"generate_dataset.py",898,0,"()",python,content +1714,3717605,"generate_dataset.py",899,0,"",python,selection_keyboard +1715,3717691,"generate_dataset.py",899,1,")",python,content +1716,3717692,"generate_dataset.py",900,0,"",python,selection_keyboard +1717,3717788,"generate_dataset.py",899,0,"",python,selection_command +1718,3720314,"TERMINAL",0,0,"bash",,terminal_focus 
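Editor's note: the generate_dataset.py source recorded above writes each CoinRun episode as a standalone `.npy` file plus a `metadata.npy` index, and its docstring promises memory-mapped loading. A minimal sketch of reading that output back, not part of the recording: `mmap_mode="r"` is my assumption for how the "efficient loading" is meant to work, and `allow_pickle=True` is needed because the metadata is a Python list of dicts saved as an object array.

```python
# Minimal sketch (assumption, not shown in the recording): reading episodes
# written by generate_dataset.py.
from pathlib import Path
import numpy as np

output_dir = Path("data/coinrun_episodes")  # default from Args.output_dir

# metadata.npy holds a list of dicts, so it round-trips as a pickled object array
metadata = np.load(output_dir / "metadata.npy", allow_pickle=True)

for entry in metadata:
    # mmap_mode="r" keeps the frames on disk until they are sliced -- the
    # memory-mapped access the script's docstring refers to
    episode = np.load(entry["path"], mmap_mode="r")  # shape (length, H, W, 3), uint8
    first_frame = np.asarray(episode[0])             # materializes a single frame
```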
+1719,3755899,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n\n\n# Log the sbatch script\ncat $0\nsource .venv/bin/activate\n\npython generate_dataset.py \\n --num_episodes 10000 \\n --output_dir /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m \\n --min_episode_length 1000",shellscript,tab +1720,3775829,"slurm/dev/mihir/horeka/generate_dataset_10m.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n\n\n# Log the sbatch script\ncat $0\nsource .venv/bin/activate\n\npython generate_dataset.py \\n --num_episodes 10000 \\n --output_dir /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m \\n --min_episode_length 1000",shellscript,tab +1721,3783737,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n\n\n# Log the sbatch script\ncat $0\nsource .venv/bin/activate\n\npython generate_dataset.py \\n --num_episodes 10000 \\n --output_dir /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m \\n --min_episode_length 1000",shellscript,tab +1722,3812441,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",670,0,"",shellscript,selection_mouse +1723,3812457,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",669,0,"",shellscript,selection_command +1724,3813271,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",475,0,"",shellscript,selection_mouse +1725,3813758,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",413,0,"",shellscript,selection_mouse +1726,3815944,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",368,47,"#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_command +1727,3816119,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",257,158,"#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_command +1728,3816587,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",145,270,"#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_command 
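Editor's note: the sbatch script opened above requests one `cpuonly` node for 12 hours and runs generate_dataset.py with 10000 episodes of minimum length 1000. Later in this session it is exercised locally with `sh`, which treats the `#SBATCH` lines as plain comments; actual cluster submission would go through `sbatch`, sketched here under that assumption.

```python
# Minimal sketch (assumption, not shown in the recording): submitting the batch
# script to SLURM. `sbatch` parses the #SBATCH directives in the file header;
# running the same file with `sh`, as done below, ignores them and just
# executes the body on the login node.
import subprocess

subprocess.run(
    ["sbatch", "slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch"],
    check=True,
)
```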
+1729,3816700,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",119,296,"#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_command +1730,3816701,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,324,"#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_command +1731,3816785,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",67,348,"#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_command +1732,3816786,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",39,376,"#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_command +1733,3816786,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",21,394,"#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_command +1734,3816808,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",20,395,"\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_command +1735,3816862,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,415,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_command +1736,3817128,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",20,395,"\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH 
--output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_command +1737,3817266,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",21,394,"#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m",shellscript,selection_command +1738,3817529,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",21,395,"",shellscript,content +1739,3818086,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",21,1,"",shellscript,content +1740,3818707,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",21,1,"",shellscript,content +1741,3819472,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",45,0,"",shellscript,selection_command +1742,3819662,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",52,0,"",shellscript,selection_command +1743,3820992,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",45,33,"",shellscript,content +1744,3821651,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",21,0,"",shellscript,selection_command +1745,3821980,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",21,24,"",shellscript,content +1746,3822426,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",21,1,"",shellscript,content +1747,3823179,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",50,0,"",shellscript,selection_command +1748,3823769,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",77,0,"",shellscript,selection_command +1749,3823770,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",186,0,"",shellscript,selection_command +1750,3825381,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",77,0,"",shellscript,selection_command +1751,3825554,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",50,0,"",shellscript,selection_command +1752,3825842,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",77,0,"",shellscript,selection_command +1753,3826142,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",78,0,"",shellscript,selection_command +1754,3826636,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",79,0,"",shellscript,selection_command +1755,3826721,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",80,0,"",shellscript,selection_command +1756,3826723,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",81,0,"",shellscript,selection_command +1757,3826781,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",82,0,"",shellscript,selection_command +1758,3826812,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",83,0,"",shellscript,selection_command +1759,3826812,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",84,0,"",shellscript,selection_command +1760,3826823,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",85,0,"",shellscript,selection_command +1761,3826878,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",86,0,"",shellscript,selection_command +1762,3826914,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",87,0,"",shellscript,selection_command +1763,3826927,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",88,0,"",shellscript,selection_command +1764,3826945,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",89,0,"",shellscript,selection_command 
+1765,3826979,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",90,0,"",shellscript,selection_command +1766,3827004,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,0,"",shellscript,selection_command +1767,3827015,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",92,0,"",shellscript,selection_command +1768,3827068,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",93,0,"",shellscript,selection_command +1769,3827070,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",94,0,"",shellscript,selection_command +1770,3827113,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",95,0,"",shellscript,selection_command +1771,3827176,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",96,0,"",shellscript,selection_command +1772,3827187,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",97,0,"",shellscript,selection_command +1773,3827515,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",98,0,"",shellscript,selection_command +1774,3827516,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",99,0,"",shellscript,selection_command +1775,3827516,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",100,0,"",shellscript,selection_command +1776,3827516,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",101,0,"",shellscript,selection_command +1777,3827627,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",102,0,"",shellscript,selection_command +1778,3827627,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",103,0,"",shellscript,selection_command +1779,3827628,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",104,0,"",shellscript,selection_command +1780,3827628,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",105,0,"",shellscript,selection_command +1781,3827628,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",106,0,"",shellscript,selection_command +1782,3827628,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",107,0,"",shellscript,selection_command +1783,3827629,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",108,0,"",shellscript,selection_command +1784,3827681,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",109,0,"",shellscript,selection_command +1785,3827682,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",110,0,"",shellscript,selection_command +1786,3827682,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",111,0,"",shellscript,selection_command +1787,3827682,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",112,0,"",shellscript,selection_command +1788,3827693,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",113,0,"",shellscript,selection_command +1789,3827694,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",114,0,"",shellscript,selection_command +1790,3827724,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",115,0,"",shellscript,selection_command +1791,3827785,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",116,0,"",shellscript,selection_command +1792,3827785,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",117,0,"",shellscript,selection_command +1793,3827811,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",118,0,"",shellscript,selection_command +1794,3827842,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",119,0,"",shellscript,selection_command +1795,3827887,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",120,0,"",shellscript,selection_command +1796,3827898,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",121,0,"",shellscript,selection_command +1797,3827958,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",122,0,"",shellscript,selection_command +1798,3827958,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",123,0,"",shellscript,selection_command 
+1799,3828041,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",124,0,"",shellscript,selection_command +1800,3828042,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",125,0,"",shellscript,selection_command +1801,3828055,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",126,0,"",shellscript,selection_command +1802,3828111,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",127,0,"",shellscript,selection_command +1803,3828112,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",128,0,"",shellscript,selection_command +1804,3828153,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",129,0,"",shellscript,selection_command +1805,3828179,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",130,0,"",shellscript,selection_command +1806,3828234,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",131,0,"",shellscript,selection_command +1807,3828235,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",132,0,"",shellscript,selection_command +1808,3828260,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",133,0,"",shellscript,selection_command +1809,3828307,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",134,0,"",shellscript,selection_command +1810,3828329,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",135,0,"",shellscript,selection_command +1811,3828378,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",136,0,"",shellscript,selection_command +1812,3828390,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",137,0,"",shellscript,selection_command +1813,3828455,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",138,0,"",shellscript,selection_command +1814,3828455,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",139,0,"",shellscript,selection_command +1815,3828467,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",140,0,"",shellscript,selection_command +1816,3828610,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",141,0,"",shellscript,selection_command +1817,3828791,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",142,0,"",shellscript,selection_command +1818,3828792,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",143,0,"",shellscript,selection_command +1819,3828792,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",144,0,"",shellscript,selection_command +1820,3828792,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",145,0,"",shellscript,selection_command +1821,3828847,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",146,0,"",shellscript,selection_command +1822,3828847,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",147,0,"",shellscript,selection_command +1823,3828848,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",148,0,"",shellscript,selection_command +1824,3828848,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",149,0,"",shellscript,selection_command +1825,3828848,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",150,0,"",shellscript,selection_command +1826,3828904,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",151,0,"",shellscript,selection_command +1827,3828904,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",152,0,"",shellscript,selection_command +1828,3828938,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",153,0,"",shellscript,selection_command +1829,3828939,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",154,0,"",shellscript,selection_command +1830,3828964,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",155,0,"",shellscript,selection_command +1831,3828976,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",156,0,"",shellscript,selection_command +1832,3828987,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",157,0,"",shellscript,selection_command 
+1833,3829059,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",158,0,"",shellscript,selection_command +1834,3829167,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",159,0,"",shellscript,selection_command +1835,3829333,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",160,0,"",shellscript,selection_command +1836,3829476,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",161,0,"",shellscript,selection_command +1837,3829631,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",162,0,"",shellscript,selection_command +1838,3829748,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",163,0,"",shellscript,selection_command +1839,3830206,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",163,21,"",shellscript,content +1840,3833392,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",163,0,"d",shellscript,content +1841,3833393,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",164,0,"",shellscript,selection_keyboard +1842,3833482,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",164,0,"e",shellscript,content +1843,3833482,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",165,0,"",shellscript,selection_keyboard +1844,3833623,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",165,0,"v",shellscript,content +1845,3833623,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",166,0,"",shellscript,selection_keyboard +1846,3833716,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",166,0," ",shellscript,content +1847,3833717,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",167,0,"",shellscript,selection_keyboard +1848,3834082,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",166,0,"",shellscript,selection_command +1849,3837865,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",168,0,"",shellscript,selection_mouse +1850,3837866,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",167,0,"",shellscript,selection_command +1851,3838337,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",198,0,"",shellscript,selection_mouse +1852,3838349,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",197,0,"",shellscript,selection_command +1853,3838839,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",198,0,"",shellscript,selection_mouse +1854,3838847,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",197,0,"",shellscript,selection_command +1855,3840061,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",0,0,"",shellscript,tab +1856,3843699,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab +1857,3844441,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",0,0,"",shellscript,tab +1858,3845210,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab +1859,3848174,"generate_dataset.py",0,0,"",python,tab +1860,3849742,"generate_dataset.py",897,0,"",python,selection_mouse +1861,3850359,"generate_dataset.py",754,0,"",python,selection_mouse +1862,3851281,"TERMINAL",0,0,"bash",,terminal_focus +1863,3851829,"TERMINAL",0,0,"bash",,terminal_focus +1864,3853777,"generate_dataset.py",0,0,"",python,tab +1865,3854687,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab +1866,3860576,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/generate_dataset_10m.sh",,terminal_command +1867,3860626,"TERMINAL",0,0,"]633;E;2025-09-04 11:01:17 sh slurm/dev/mihir/horeka/generate_dataset_10m.sh;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +1868,3867933,"TERMINAL",0,0,"Gym has been unmaintained since 2022 and does not support NumPy 2.0 amongst other critical functionality.\r\nPlease upgrade to Gymnasium, the maintained drop-in replacement of Gym, or contact the authors of your software and 
request that they upgrade.\r\nUsers of this version of Gym should be able to simply replace 'import gym' with 'import gymnasium as gym' in the vast majority of cases.\r\nSee the migration guide at https://gymnasium.farama.org/introduction/migration_guide/ for additional information.\r\n",,terminal_output +1869,3868880,"TERMINAL",0,0,"bash",,terminal_focus +1870,3870196,"TERMINAL",0,0,"idling",,terminal_command +1871,3870246,"TERMINAL",0,0,"]633;E;2025-09-04 11:01:27 idling;98e58a9e-4278-4ee6-ae9b-7614f41efecb]633;C",,terminal_output +1872,3870312,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1990.localdomain: Thu Sep 4 11:01:27 2025Partition dev_cpuonly:\t 9 nodes idle\rPartition cpuonly: 13 nodes idle\rPartition dev_accelerated:\t 0 nodes idle\rPartition accelerated:\t 0 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 6 nodes idle\rPartition accelerated-h200:\t 0 nodes idle",,terminal_output +1873,3871317,"TERMINAL",0,0,"8",,terminal_output +1874,3872359,"TERMINAL",0,0,"9",,terminal_output +1875,3873399,"TERMINAL",0,0,"30",,terminal_output +1876,3874441,"TERMINAL",0,0,"1",,terminal_output +1877,3875478,"TERMINAL",0,0,"2",,terminal_output +1878,3876562,"TERMINAL",0,0,"3",,terminal_output +1879,3877560,"TERMINAL",0,0,"4",,terminal_output +1880,3878595,"TERMINAL",0,0,"5",,terminal_output +1881,3879640,"TERMINAL",0,0,"6",,terminal_output +1882,3880677,"TERMINAL",0,0,"7",,terminal_output +1883,3880780,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/generate_dataset.py(39)()\r\n-> rew, obs, first = env.observe()\r\n(Pdb) ",,terminal_output +1884,3881718,"TERMINAL",0,0,"8",,terminal_output +1885,3882320,"TERMINAL",0,0,"sh",,terminal_focus +1886,3882769,"TERMINAL",0,0,"9",,terminal_output +1887,3883001,"TERMINAL",0,0,"a",,terminal_output +1888,3883187,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +1889,3883361,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +1890,3883420,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +1891,3883482,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +1892,3883804,"TERMINAL",0,0,"40",,terminal_output +1893,3883955,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +1894,3884169,"TERMINAL",0,0,"[?25larray([5], dtype=int32)\r\n(Pdb) [?25h",,terminal_output +1895,3884854,"TERMINAL",0,0,"2",,terminal_output +1896,3885920,"TERMINAL",0,0,"3",,terminal_output +1897,3886928,"TERMINAL",0,0,"4",,terminal_output +1898,3887860,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab +1899,3888013,"TERMINAL",0,0,"5",,terminal_output +1900,3889126,"TERMINAL",0,0,"bash",,terminal_focus +1901,3889226,"TERMINAL",0,0,"6",,terminal_output +1902,3889687,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab +1903,3890052,"TERMINAL",0,0,"7",,terminal_output +1904,3890457,"generate_dataset.py",0,0,"",python,tab +1905,3891104,"TERMINAL",0,0,"8",,terminal_output +1906,3892136,"TERMINAL",0,0,"9",,terminal_output +1907,3893185,"TERMINAL",0,0,"50",,terminal_output +1908,3894397,"TERMINAL",0,0,"1",,terminal_output +1909,3895301,"TERMINAL",0,0,"2",,terminal_output +1910,3896377,"TERMINAL",0,0,"3",,terminal_output +1911,3897443,"TERMINAL",0,0,"4",,terminal_output +1912,3898466,"TERMINAL",0,0,"5",,terminal_output +1913,3899408,"TERMINAL",0,0,"6",,terminal_output +1914,3900453,"TERMINAL",0,0,"7",,terminal_output +1915,3901527,"TERMINAL",0,0,"8",,terminal_output +1916,3902532,"TERMINAL",0,0,"9",,terminal_output 
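Editor's note: rows 1712-1717 inserted a `breakpoint()` call into generate_dataset.py, and rows 1883-1894 show it firing under pdb: execution stops at line 39 just before `env.observe()`, the user types `action` and gets `array([5], dtype=int32)`, and the eventual Ctrl-D (row 2179) surfaces as `bdb.BdbQuit`. A minimal sketch of that debugging pattern, with indentation simplified relative to the recorded file:

```python
# Minimal sketch of the pattern recorded above: breakpoint() (built-in since
# Python 3.7) drops into pdb inside the rollout loop, where locals can be inspected.
for j in range(1000):
    action = types_np.sample(env.ac_space, bshape=(env.num,))
    env.act(action)
    breakpoint()  # pdb prompt; typing `action` here printed array([5], dtype=int32)
    rew, obs, first = env.observe()
    # Quitting pdb with Ctrl-D raises bdb.BdbQuit and aborts the run,
    # matching the traceback captured at the end of this session.
```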
+1917,3903594,"TERMINAL",0,0,"2:00",,terminal_output +1918,3904618,"TERMINAL",0,0,"1",,terminal_output +1919,3905744,"TERMINAL",0,0,"2",,terminal_output +1920,3906713,"TERMINAL",0,0,"3",,terminal_output +1921,3907797,"TERMINAL",0,0,"4",,terminal_output +1922,3908817,"TERMINAL",0,0,"5",,terminal_output +1923,3909872,"TERMINAL",0,0,"6",,terminal_output +1924,3910866,"TERMINAL",0,0,"8",,terminal_output +1925,3911893,"TERMINAL",0,0,"9",,terminal_output +1926,3913015,"TERMINAL",0,0,"10",,terminal_output +1927,3915522,"TERMINAL",0,0,"12",,terminal_output +1928,3916573,"TERMINAL",0,0,"3",,terminal_output +1929,3917624,"TERMINAL",0,0,"4",,terminal_output +1930,3918646,"TERMINAL",0,0,"5",,terminal_output +1931,3919669,"TERMINAL",0,0,"6",,terminal_output +1932,3920796,"TERMINAL",0,0,"7",,terminal_output +1933,3921731,"TERMINAL",0,0,"8",,terminal_output +1934,3922888,"TERMINAL",0,0,"9",,terminal_output +1935,3923842,"TERMINAL",0,0,"20",,terminal_output +1936,3924908,"TERMINAL",0,0,"2",,terminal_output +1937,3925925,"TERMINAL",0,0,"3",,terminal_output +1938,3926961,"TERMINAL",0,0,"4",,terminal_output +1939,3927983,"TERMINAL",0,0,"5",,terminal_output +1940,3929114,"TERMINAL",0,0,"6",,terminal_output +1941,3930068,"TERMINAL",0,0,"7",,terminal_output +1942,3931140,"TERMINAL",0,0,"8",,terminal_output +1943,3932168,"TERMINAL",0,0,"9",,terminal_output +1944,3933191,"TERMINAL",0,0,"30",,terminal_output +1945,3934226,"TERMINAL",0,0,"1",,terminal_output +1946,3935265,"TERMINAL",0,0,"2",,terminal_output +1947,3936311,"TERMINAL",0,0,"3",,terminal_output +1948,3937346,"TERMINAL",0,0,"4",,terminal_output +1949,3938407,"TERMINAL",0,0,"5",,terminal_output +1950,3939424,"TERMINAL",0,0,"6",,terminal_output +1951,3940459,"TERMINAL",0,0,"7",,terminal_output +1952,3941498,"TERMINAL",0,0,"8",,terminal_output +1953,3942643,"TERMINAL",0,0,"9",,terminal_output +1954,3943634,"TERMINAL",0,0,"40",,terminal_output +1955,3944624,"TERMINAL",0,0,"1",,terminal_output +1956,3945683,"TERMINAL",0,0,"2",,terminal_output +1957,3946707,"TERMINAL",0,0,"3",,terminal_output +1958,3947832,"TERMINAL",0,0,"4",,terminal_output +1959,3948783,"TERMINAL",0,0,"5",,terminal_output +1960,3949830,"TERMINAL",0,0,"73",,terminal_output +1961,3950880,"TERMINAL",0,0,"8",,terminal_output +1962,3951967,"TERMINAL",0,0,"9",,terminal_output +1963,3952940,"TERMINAL",0,0,"50",,terminal_output +1964,3953982,"TERMINAL",0,0,"1",,terminal_output +1965,3955102,"TERMINAL",0,0,"2",,terminal_output +1966,3956126,"TERMINAL",0,0,"3",,terminal_output +1967,3957150,"TERMINAL",0,0,"4",,terminal_output +1968,3958138,"TERMINAL",0,0,"5",,terminal_output +1969,3959175,"TERMINAL",0,0,"6",,terminal_output +1970,3960225,"TERMINAL",0,0,"7",,terminal_output +1971,3961259,"TERMINAL",0,0,"8",,terminal_output +1972,3962293,"TERMINAL",0,0,"9",,terminal_output +1973,3963405,"TERMINAL",0,0,"3:00",,terminal_output +1974,3964426,"TERMINAL",0,0,"1",,terminal_output +1975,3965447,"TERMINAL",0,0,"2",,terminal_output +1976,3966442,"TERMINAL",0,0,"3",,terminal_output +1977,3967492,"TERMINAL",0,0,"4",,terminal_output +1978,3968526,"TERMINAL",0,0,"5",,terminal_output +1979,3969561,"TERMINAL",0,0,"6",,terminal_output +1980,3970611,"TERMINAL",0,0,"7",,terminal_output +1981,3971701,"TERMINAL",0,0,"8",,terminal_output +1982,3972714,"TERMINAL",0,0,"9",,terminal_output +1983,3973741,"TERMINAL",0,0,"10",,terminal_output +1984,3974867,"TERMINAL",0,0,"1",,terminal_output +1985,3975890,"TERMINAL",0,0,"2",,terminal_output +1986,3976888,"TERMINAL",0,0,"4",,terminal_output 
+1987,3977937,"TERMINAL",0,0,"5",,terminal_output +1988,3978960,"TERMINAL",0,0,"6",,terminal_output +1989,3979987,"TERMINAL",0,0,"7",,terminal_output +1990,3981010,"TERMINAL",0,0,"8",,terminal_output +1991,3982139,"TERMINAL",0,0,"9",,terminal_output +1992,3983081,"TERMINAL",0,0,"20",,terminal_output +1993,3984130,"TERMINAL",0,0,"1",,terminal_output +1994,3985166,"TERMINAL",0,0,"2",,terminal_output +1995,3986210,"TERMINAL",0,0,"3",,terminal_output +1996,3987255,"TERMINAL",0,0,"4",,terminal_output +1997,3988381,"TERMINAL",0,0,"5",,terminal_output +1998,3989331,"TERMINAL",0,0,"6",,terminal_output +1999,3990377,"TERMINAL",0,0,"7",,terminal_output +2000,3991432,"TERMINAL",0,0,"8",,terminal_output +2001,3992456,"TERMINAL",0,0,"9",,terminal_output +2002,3993501,"TERMINAL",0,0,"30",,terminal_output +2003,3994564,"TERMINAL",0,0,"1",,terminal_output +2004,3995576,"TERMINAL",0,0,"2",,terminal_output +2005,3996610,"TERMINAL",0,0,"3",,terminal_output +2006,3997704,"TERMINAL",0,0,"4",,terminal_output +2007,3998725,"TERMINAL",0,0,"5",,terminal_output +2008,3999750,"TERMINAL",0,0,"6",,terminal_output +2009,4000875,"TERMINAL",0,0,"7",,terminal_output +2010,4001807,"TERMINAL",0,0,"8",,terminal_output +2011,4002852,"TERMINAL",0,0,"40",,terminal_output +2012,4003891,"TERMINAL",0,0,"1",,terminal_output +2013,4004935,"TERMINAL",0,0,"2",,terminal_output +2014,4005994,"TERMINAL",0,0,"3",,terminal_output +2015,4007015,"TERMINAL",0,0,"4",,terminal_output +2016,4008161,"TERMINAL",0,0,"5",,terminal_output +2017,4009169,"TERMINAL",0,0,"6",,terminal_output +2018,4010117,"TERMINAL",0,0,"7",,terminal_output +2019,4011151,"TERMINAL",0,0,"8",,terminal_output +2020,4012186,"TERMINAL",0,0,"9",,terminal_output +2021,4013224,"TERMINAL",0,0,"50",,terminal_output +2022,4014289,"TERMINAL",0,0,"1",,terminal_output +2023,4015313,"TERMINAL",0,0,"2",,terminal_output +2024,4016440,"TERMINAL",0,0,"3",,terminal_output +2025,4017382,"TERMINAL",0,0,"4",,terminal_output +2026,4018487,"TERMINAL",0,0,"5",,terminal_output +2027,4019514,"TERMINAL",0,0,"6",,terminal_output +2028,4020498,"TERMINAL",0,0,"7",,terminal_output +2029,4021539,"TERMINAL",0,0,"8",,terminal_output +2030,4022583,"TERMINAL",0,0,"9",,terminal_output +2031,4023398,"TERMINAL",0,0,"sh",,terminal_focus +2032,4023625,"TERMINAL",0,0,"4:00",,terminal_output +2033,4024929,"generate_dataset.py",0,0,"",python,tab +2034,4025007,"TERMINAL",0,0,"1",,terminal_output +2035,4025767,"TERMINAL",0,0,"2",,terminal_output +2036,4026737,"TERMINAL",0,0,"3",,terminal_output +2037,4027807,"TERMINAL",0,0,"4",,terminal_output +2038,4028831,"TERMINAL",0,0,"5",,terminal_output +2039,4029966,"TERMINAL",0,0,"7",,terminal_output +2040,4030981,"TERMINAL",0,0,"8",,terminal_output +2041,4031936,"TERMINAL",0,0,"9",,terminal_output +2042,4033029,"TERMINAL",0,0,"10",,terminal_output +2043,4034044,"TERMINAL",0,0,"1",,terminal_output +2044,4035077,"TERMINAL",0,0,"2",,terminal_output +2045,4037332,"TERMINAL",0,0,"32",,terminal_output +2046,4038464,"TERMINAL",0,0,"5",,terminal_output +2047,4039488,"TERMINAL",0,0,"6",,terminal_output +2048,4040436,"TERMINAL",0,0,"7",,terminal_output +2049,4041477,"TERMINAL",0,0,"8",,terminal_output +2050,4042520,"TERMINAL",0,0,"9",,terminal_output +2051,4043557,"TERMINAL",0,0,"20",,terminal_output +2052,4044592,"TERMINAL",0,0,"1",,terminal_output +2053,4045635,"TERMINAL",0,0,"2",,terminal_output +2054,4046700,"TERMINAL",0,0,"3",,terminal_output +2055,4047774,"TERMINAL",0,0,"4",,terminal_output +2056,4048801,"TERMINAL",0,0,"5",,terminal_output 
+2057,4049784,"TERMINAL",0,0,"6",,terminal_output +2058,4050848,"TERMINAL",0,0,"8",,terminal_output +2059,4051861,"TERMINAL",0,0,"9",,terminal_output +2060,4052998,"TERMINAL",0,0,"30",,terminal_output +2061,4053931,"TERMINAL",0,0,"1",,terminal_output +2062,4055046,"TERMINAL",0,0,"2",,terminal_output +2063,4056071,"TERMINAL",0,0,"3",,terminal_output +2064,4057102,"TERMINAL",0,0,"4",,terminal_output +2065,4058116,"TERMINAL",0,0,"5",,terminal_output +2066,4059156,"TERMINAL",0,0,"6",,terminal_output +2067,4060171,"TERMINAL",0,0,"7",,terminal_output +2068,4061214,"TERMINAL",0,0,"8",,terminal_output +2069,4062251,"TERMINAL",0,0,"9",,terminal_output +2070,4063338,"TERMINAL",0,0,"40",,terminal_output +2071,4064367,"TERMINAL",0,0,"1",,terminal_output +2072,4065392,"TERMINAL",0,0,"2",,terminal_output +2073,4066411,"TERMINAL",0,0,"3",,terminal_output +2074,4067537,"TERMINAL",0,0,"4",,terminal_output +2075,4068476,"TERMINAL",0,0,"5",,terminal_output +2076,4069534,"TERMINAL",0,0,"6",,terminal_output +2077,4070564,"TERMINAL",0,0,"73",,terminal_output +2078,4071601,"TERMINAL",0,0,"8",,terminal_output +2079,4072644,"TERMINAL",0,0,"9",,terminal_output +2080,4073679,"TERMINAL",0,0,"50",,terminal_output +2081,4074808,"TERMINAL",0,0,"1",,terminal_output +2082,4075833,"TERMINAL",0,0,"2",,terminal_output +2083,4077182,"TERMINAL",0,0,"3",,terminal_output +2084,4078254,"TERMINAL",0,0,"5",,terminal_output +2085,4079315,"TERMINAL",0,0,"6",,terminal_output +2086,4080341,"TERMINAL",0,0,"7",,terminal_output +2087,4081464,"TERMINAL",0,0,"8",,terminal_output +2088,4082493,"TERMINAL",0,0,"9",,terminal_output +2089,4083754,"TERMINAL",0,0,"5:00",,terminal_output +2090,4084550,"TERMINAL",0,0,"1",,terminal_output +2091,4085519,"TERMINAL",0,0,"2",,terminal_output +2092,4086566,"TERMINAL",0,0,"3",,terminal_output +2093,4087710,"TERMINAL",0,0,"4",,terminal_output +2094,4088744,"TERMINAL",0,0,"5",,terminal_output +2095,4089688,"TERMINAL",0,0,"6",,terminal_output +2096,4090728,"TERMINAL",0,0,"7",,terminal_output +2097,4091767,"TERMINAL",0,0,"8",,terminal_output +2098,4092831,"TERMINAL",0,0,"9",,terminal_output +2099,4093856,"TERMINAL",0,0,"11",,terminal_output +2100,4094891,"TERMINAL",0,0,"2",,terminal_output +2101,4096005,"TERMINAL",0,0,"3",,terminal_output +2102,4096967,"TERMINAL",0,0,"4",,terminal_output +2103,4098003,"TERMINAL",0,0,"5",,terminal_output +2104,4099078,"TERMINAL",0,0,"6",,terminal_output +2105,4100117,"TERMINAL",0,0,"7",,terminal_output +2106,4101118,"TERMINAL",0,0,"8",,terminal_output +2107,4102155,"TERMINAL",0,0,"9",,terminal_output +2108,4103206,"TERMINAL",0,0,"20",,terminal_output +2109,4104258,"TERMINAL",0,0,"1",,terminal_output +2110,4105289,"TERMINAL",0,0,"2",,terminal_output +2111,4106322,"TERMINAL",0,0,"3",,terminal_output +2112,4107359,"TERMINAL",0,0,"4",,terminal_output +2113,4108498,"TERMINAL",0,0,"5",,terminal_output +2114,4109524,"TERMINAL",0,0,"6",,terminal_output +2115,4110547,"TERMINAL",0,0,"7",,terminal_output +2116,4111519,"TERMINAL",0,0,"8",,terminal_output +2117,4112562,"TERMINAL",0,0,"9",,terminal_output +2118,4113601,"TERMINAL",0,0,"30",,terminal_output +2119,4114643,"TERMINAL",0,0,"1",,terminal_output +2120,4115679,"TERMINAL",0,0,"2",,terminal_output +2121,4116720,"TERMINAL",0,0,"3",,terminal_output +2122,4117901,"TERMINAL",0,0,"4",,terminal_output +2123,4118843,"TERMINAL",0,0,"5",,terminal_output +2124,4119864,"TERMINAL",0,0,"7",,terminal_output +2125,4120994,"TERMINAL",0,0,"8",,terminal_output +2126,4121928,"TERMINAL",0,0,"9",,terminal_output 
+2127,4123041,"TERMINAL",0,0,"40",,terminal_output +2128,4124167,"TERMINAL",0,0,"1",,terminal_output +2129,4125088,"TERMINAL",0,0,"2",,terminal_output +2130,4126125,"TERMINAL",0,0,"3",,terminal_output +2131,4127130,"TERMINAL",0,0,"4",,terminal_output +2132,4128167,"TERMINAL",0,0,"5",,terminal_output +2133,4129212,"TERMINAL",0,0,"6",,terminal_output +2134,4130259,"TERMINAL",0,0,"7",,terminal_output +2135,4131287,"TERMINAL",0,0,"8",,terminal_output +2136,4132358,"TERMINAL",0,0,"9",,terminal_output +2137,4133417,"TERMINAL",0,0,"50",,terminal_output +2138,4134403,"TERMINAL",0,0,"1",,terminal_output +2139,4135535,"TERMINAL",0,0,"2",,terminal_output +2140,4136487,"TERMINAL",0,0,"3",,terminal_output +2141,4137527,"TERMINAL",0,0,"4",,terminal_output +2142,4138571,"TERMINAL",0,0,"5",,terminal_output +2143,4139611,"TERMINAL",0,0,"6",,terminal_output +2144,4140648,"TERMINAL",0,0,"7",,terminal_output +2145,4141709,"TERMINAL",0,0,"8",,terminal_output +2146,4142804,"TERMINAL",0,0,"9",,terminal_output +2147,4143829,"TERMINAL",0,0,"6:00",,terminal_output +2148,4144802,"TERMINAL",0,0,"1",,terminal_output +2149,4145878,"TERMINAL",0,0,"3",,terminal_output +2150,4146889,"TERMINAL",0,0,"4",,terminal_output +2151,4147925,"TERMINAL",0,0,"5",,terminal_output +2152,4148965,"TERMINAL",0,0,"6",,terminal_output +2153,4150019,"TERMINAL",0,0,"7",,terminal_output +2154,4151049,"TERMINAL",0,0,"8",,terminal_output +2155,4152122,"TERMINAL",0,0,"9",,terminal_output +2156,4153152,"TERMINAL",0,0,"10",,terminal_output +2157,4154187,"TERMINAL",0,0,"1",,terminal_output +2158,4155228,"TERMINAL",0,0,"2",,terminal_output +2159,4156269,"TERMINAL",0,0,"3",,terminal_output +2160,4159189,"TERMINAL",0,0,"4",,terminal_output +2161,4160214,"TERMINAL",0,0,"7",,terminal_output +2162,4161217,"TERMINAL",0,0,"8",,terminal_output +2163,4162259,"TERMINAL",0,0,"9",,terminal_output +2164,4163297,"TERMINAL",0,0,"20",,terminal_output +2165,4164336,"TERMINAL",0,0,"1",,terminal_output +2166,4165434,"TERMINAL",0,0,"2",,terminal_output +2167,4166414,"TERMINAL",0,0,"3",,terminal_output +2168,4167483,"TERMINAL",0,0,"4",,terminal_output +2169,4168495,"TERMINAL",0,0,"5",,terminal_output +2170,4169549,"TERMINAL",0,0,"6",,terminal_output +2171,4170564,"TERMINAL",0,0,"7",,terminal_output +2172,4171600,"TERMINAL",0,0,"8",,terminal_output +2173,4172639,"TERMINAL",0,0,"9",,terminal_output +2174,4173680,"TERMINAL",0,0,"30",,terminal_output +2175,4174758,"TERMINAL",0,0,"1",,terminal_output +2176,4175965,"TERMINAL",0,0,"2",,terminal_output +2177,4176799,"TERMINAL",0,0,"3",,terminal_output +2178,4177928,"TERMINAL",0,0,"5",,terminal_output +2179,4178169,"TERMINAL",0,0,"^D\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/generate_dataset.py"", line 39, in \r\n rew, obs, first = env.observe()\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/generate_dataset.py"", line 39, in \r\n rew, obs, first = env.observe()\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output +2180,4178529,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;1",,terminal_output +2181,4178875,"TERMINAL",0,0,"6",,terminal_output 
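The BdbQuit above is the debugger aborting generate_dataset.py: the script (captured in full later in this recording) calls breakpoint() inside its episode loop, and sending EOF (^D) to the resulting pdb prompt makes bdb raise BdbQuit; the ]633;D;1 marker that follows records exit status 1. A minimal sketch of gating such a debug hook behind an environment variable so unattended runs never stop at the prompt (the flag name is illustrative, not taken from the recording):

    import os

    # Only drop into pdb when the operator explicitly opts in.
    if os.environ.get("JASMINE_DEBUG_EPISODES"):  # hypothetical flag name
        breakpoint()
    rew, obs, first = env.observe()  # as in the recorded loop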
+[seq 2182-2250: clock-tick rows elided]
+2251,4251254,"generate_dataset.py",0,0,"",python,tab
+[seq 2252-2391: clock-tick rows elided]
+[seq 2392-2397: clock-tick rows elided]
+2398,4406034,"generate_dataset.py",1629,0,"",python,selection_mouse
+[seq 2399-2426: clock-tick rows elided]
+2427,4435593,"input_pipeline/preprocess/npy_to_arrayrecords.py",0,0,"",python,tab
+2430,4437706,"input_pipeline/preprocess/npy_to_arrayrecords.py",0,0,"import numpy as np\nimport os\nimport tyro\nimport multiprocessing as mp\nfrom dataclasses import dataclass\nimport json\nimport pickle\nfrom array_record.python.array_record_module import ArrayRecordWriter\n\n\n@dataclass\nclass Args:\n input_path: str = ""data/minecraft_npy""\n output_path: str = ""data/minecraft_arrayrecords""\n\ndef preprocess_video(\n idx, in_filename, output_path\n):\n print(f""Processing video {idx}, Filename: {in_filename}"")\n try:\n frames = np.load(in_filename)\n n_frames = frames.shape[0]\n\n output_path = os.path.join(\n output_path,\n os.path.splitext(os.path.basename(in_filename))[0] + "".array_record"",\n )\n\n writer = ArrayRecordWriter(str(output_path), ""group_size:1"")\n\n print(f""Saving video {idx} to {output_path}"")\n record = {""raw_video"": frames.tobytes(), ""sequence_length"": n_frames}\n writer.write(pickle.dumps(record))\n writer.close()\n\n return in_filename, n_frames\n except Exception as e:\n print(f""Error processing video {idx} ({in_filename}): {e}"")\n return in_filename, 0\n\n\ndef main():\n args = tyro.cli(Args)\n\n os.makedirs(args.output_path, exist_ok=True)\n print(f""Output path: {args.output_path}"")\n\n num_processes = mp.cpu_count()\n print(f""Number of processes: {num_processes}"")\n\n print(""Converting npy to array_record files..."")\n pool_args = [\n (\n idx,\n os.path.join(args.input_path, in_filename),\n args.output_path,\n )\n for idx, in_filename in enumerate(os.listdir(args.input_path))\n if in_filename.endswith("".npy"")\n ]\n\n results = []\n with mp.Pool(processes=num_processes) as pool:\n for result in pool.starmap(preprocess_video, pool_args):\n results.append(result)\n print(""Done converting npy to array_record files"")\n\n # count the number of failed videos\n failed_videos = [result for result in results if result[1] == 0]\n short_episodes = [result for result in results if result[1] < 1600]\n print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short episodes: {len(short_episodes)}"")\n print(\n f""Number of successful videos: {len(results) - len(failed_videos) - len(short_episodes)}""\n )\n print(f""Number of total videos: {len(results)}"")\n\n with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.dump(results, f)\n\n\nif __name__ == ""__main__"":\n main()",python,content
+2433,4439981,"generate_dataset.py",0,0,"",python,tab
+2436,4441995,"TERMINAL",0,0,"bash",,terminal_focus
+2439,4443758,"TERMINAL",0,0,"git branch",,terminal_command
+2440,4443797,"TERMINAL",0,0,"]633;E;2025-09-04 11:11:00 git branch;e3f3d151-a063-4c85-891d-0bfb917c5617]633;C[?1h=\r add-wandb-name-and-tags\r\n before-nnx\r\n causal-mem-reduce\r\n causal-spatiotemporal-kv-cache\r\n causal-st-transformer\r\n causal-transformer-dynamics-model\r\n causal-transformer-nnx-no-kv-cache\r\n* coinrun-data-generation\r\n convert-to-jax-array-in-iter\r\n correct-batched-sampling\r\n dev\r\n dont-let-tf-see-gpu\r\n feat/darkness-filter\r\n feat/explicit-image-dims\r\n fix-action-padding-lam-future-information-access\r\n fix-sampling\r\n fix-transformer-forwardpass\r\n fix/spatiotemporal-pe-once-in-STTransformer\r\n grad-norm-log-and-clip\r\n grain-dataloader\r\n input_pipeline/add-npy2array_record\r\n logging-variants\r\n lr-schedules\r\n main\r\n maskgit-different-maskprob-per-sample\r\n maskgit-sampling-iterative-unmasking-fix\r\n metrics-logging-for-dynamics-model\r\n monkey-patch\r\n new-arch-sampling\r\n preprocess_video\r\n refactor-tmp\r\n revised-dataloader\r\n runner\r\n runner-grain\r\n sample-ali-branch\r\n sample-from-different-topologies\r\n sampling-startframe-indexing-fix\r\n speedup-tfrecord-preprocessing\r\n tmp\r\n\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output
+2442,4445153,"input_pipeline/preprocess/npy_to_arrayrecords.py",0,0,"",python,tab
+[seq 2428-2429, 2431-2432, 2434-2435, 2437-2438, 2441, 2443-2459: clock-tick rows elided]
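npy_to_arrayrecords.py above writes each video as a single pickled dict ({"raw_video": bytes, "sequence_length": int}) into an ArrayRecord file opened with "group_size:1". A minimal read-back sketch, assuming the ArrayRecordReader companion class exposed by the same array_record module (verify read_all() against your installed version) and assuming 64x64 RGB uint8 frames, which the record itself does not store:

    import pickle
    import numpy as np
    from array_record.python.array_record_module import ArrayRecordReader  # assumed API

    reader = ArrayRecordReader("data/minecraft_arrayrecords/some_video.array_record")
    record = pickle.loads(reader.read_all()[0])  # group_size:1 -> one record per group
    frames = np.frombuffer(record["raw_video"], dtype=np.uint8)
    # dtype and frame shape are implicit in this format; (T, 64, 64, 3) is an
    # assumption for illustration, not taken from the recording.
    frames = frames.reshape(record["sequence_length"], 64, 64, 3)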
+2460,4462394,"TERMINAL",0,0,"git checkout input_pipeline/add-npy2array_record",,terminal_command
+2461,4462433,"TERMINAL",0,0,"]633;E;2025-09-04 11:11:19 git checkout input_pipeline/add-npy2array_record;e3f3d151-a063-4c85-891d-0bfb917c5617]633;C",,terminal_output
+2462,4462649,"TERMINAL",0,0,"M\tgenerate_dataset.py\r\nSwitched to branch 'input_pipeline/add-npy2array_record'\r\nYour branch is behind 'origin/input_pipeline/add-npy2array_record' by 3 commits, and can be fast-forwarded.\r\n (use ""git pull"" to update your local branch)\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output
+2465,4464866,"TERMINAL",0,0,"git pull",,terminal_command
+2466,4464908,"TERMINAL",0,0,"]633;E;2025-09-04 11:11:22 git pull;e3f3d151-a063-4c85-891d-0bfb917c5617]633;C",,terminal_output
+2468,4465699,"",0,0,"Switched from branch 'coinrun-data-generation' to 'input_pipeline/add-npy2array_record'",,git_branch_checkout
+2470,4466642,"TERMINAL",0,0,"Updating 4e0846b..eeb24a7\r\nFast-forward\r\n",,terminal_output
+2471,4466784,"TERMINAL",0,0," input_pipeline/preprocess/npy_to_array_records.py | 2 +-\r\n input_pipeline/preprocess/pngs_to_array_records.py | 2 +-\r\n input_pipeline/preprocess/video_to_array_records.py | 4 ++--\r\n 3 files changed, 4 insertions(+), 4 deletions(-)\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output
+[seq 2463-2464, 2467, 2469, 2472-2475: clock-tick rows elided]
+2476,4471305,"generate_dataset.py",0,0,"""""""\nGenerates a dataset of random-action CoinRun episodes.\nEpisodes are saved individually as memory-mapped files for efficient loading.\n""""""\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nfrom gym3 import types_np\nimport numpy as np\nfrom procgen import ProcgenGym3Env\nimport tyro\n\n\n@dataclass\nclass Args:\n num_episodes: int = 10000\n output_dir: str = ""data/coinrun_episodes""\n min_episode_length: int = 50\n\n\nargs = tyro.cli(Args)\noutput_dir = Path(args.output_dir)\noutput_dir.mkdir(parents=True, exist_ok=True)\n\n# --- Generate episodes ---\ni = 0\nmetadata = []\nwhile i < args.num_episodes:\n seed = np.random.randint(0, 10000)\n env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed)\n dataseq = []\n\n # --- Run episode ---\n for j in range(1000):\n action = types_np.sample(env.ac_space, bshape=(env.num,))\n env.act(action)\n breakpoint()\n rew, obs, first = env.observe()\n dataseq.append(obs[""rgb""])\n if first:\n break\n\n # --- Save episode ---\n if len(dataseq) >= args.min_episode_length:\n episode_data = np.concatenate(dataseq, axis=0)\n episode_path = output_dir / f""episode_{i}.npy""\n np.save(episode_path, episode_data.astype(np.uint8))\n metadata.append({""path"": str(episode_path), ""length"": len(dataseq)})\n print(f""Episode {i} completed, length: {len(dataseq)}"")\n i += 1\n else:\n print(f""Episode too short ({len(dataseq)}), resampling..."")\n\n# --- Save metadata ---\nnp.save(output_dir / ""metadata.npy"", metadata)\nprint(f""Dataset generated with {len(metadata)} valid episodes"")\n",python,tab
+[seq 2477-2507: clock-tick rows, plus mouse-selection rows (seq 2478-2483, 2485) whose payloads repeat the generate_dataset.py text above, elided]
+[seq 2508-2534: clock-tick rows elided]
+2535,4525370,"generate_dataset.py",0,0,"*You seem to be using an outdated version of Cursor. Please upgrade to the latest version by [downloading Cursor again from our website](https://www.cursor.com/). All your settings will be preserved.*\n",python,content
+2536,4525375,"generate_dataset.py",201,1629,"",python,content
+[seq 2537-2546: clock-tick rows elided]
+2547,4536215,"generate_dataset.py",0,200,"""""""\nGenerates a dataset of random-action CoinRun episodes.\nEpisodes are saved individually as memory-mapped files for efficient loading.\n""""""\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nfrom gym3 import types_np\nimport numpy as np\nfrom procgen import ProcgenGym3Env\nimport tyro\n\n\n@dataclass\nclass Args:\n num_episodes: int = 10000\n output_dir: str = ""data/coinrun_episodes""\n min_episode_length: int = 50\n\n\nargs = tyro.cli(Args)\noutput_dir = Path(args.output_dir)\noutput_dir.mkdir(parents=True, exist_ok=True)\n\n# --- Generate episodes ---\ni = 0\nmetadata = []\nwhile i < args.num_episodes:\n seed = np.random.randint(0, 10000)\n env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed)\n dataseq = []\n\n # --- Run episode ---\n for j in range(1000):\n action = types_np.sample(env.ac_space, bshape=(env.num,))\n env.act(action)\n breakpoint()\n rew, obs, first = env.observe()\n dataseq.append(obs[""rgb""])\n if first:\n break\n\n # --- Save episode ---\n if len(dataseq) >= args.min_episode_length:\n episode_data = np.concatenate(dataseq, axis=0)\n episode_path = output_dir / f""episode_{i}.npy""\n np.save(episode_path, episode_data.astype(np.uint8))\n metadata.append({""path"": str(episode_path), ""length"": len(dataseq)})\n print(f""Episode {i} completed, length: {len(dataseq)}"")\n i += 1\n else:\n print(f""Episode too short ({len(dataseq)}), resampling..."")\n\n# --- Save metadata ---\nnp.save(output_dir / ""metadata.npy"", metadata)\nprint(f""Dataset generated with {len(metadata)} valid episodes"")",python,content
+[seq 2548-2589: clock-tick rows elided]
+2590,4579164,"TERMINAL",0,0,"Every 1.0s: sinfo_t_idlehkn1990.localdomain: Thu Sep 4 11:13:16 2025Partition dev_cpuonly:\t 9 nodes idle\rPartition cpuonly: 12 nodes idle\rPartition dev_accelerated:\t 0 nodes idle\rPartition accelerated:\t 1 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 6 nodes idle\rPartition accelerated-h200:\t 0 nodes idle",,terminal_output
+[seq 2591-2604: clock-tick rows elided]
+[seq 2605-2648: clock-tick rows and mouse-selection rows whose payloads repeat the generate_dataset.py text elided]
+2649,4628675,"generate_dataset.py",534,1094,"",python,content
+2651,4629149,"generate_dataset.py",534,0,"# --- Generate episodes ---\ni = 0\nmetadata = []\nwhile i < args.num_episodes:\n seed = np.random.randint(0, 10000)\n env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed)\n dataseq = []\n\n # --- Run episode ---\n for j in range(1000):\n action = types_np.sample(env.ac_space, bshape=(env.num,))\n env.act(action)\n rew, obs, first = env.observe()\n dataseq.append(obs[""rgb""])\n if first:\n break\n\n # --- Save episode ---\n if len(dataseq) >= args.min_episode_length:\n episode_data = np.concatenate(dataseq, axis=0)\n episode_path = output_dir / f""episode_{i}.array_record"" # <-- change extension\n\n # --- Save as ArrayRecord ---\n writer = ArrayRecordWriter(str(episode_path), ""group_size:1"")\n record = {""raw_video"": episode_data.tobytes(), ""sequence_length"": len(dataseq)}\n writer.write(pickle.dumps(record))\n writer.close()\n\n metadata.append({""path"": str(episode_path), ""length"": len(dataseq)})\n print(f""Episode {i} completed, length: {len(dataseq)}"")\n i += 1\n else:\n print(f""Episode too short ({len(dataseq)}), resampling..."")\n\n# --- Save metadata ---\nnp.save(output_dir / ""metadata.npy"", metadata)\nprint(f""Dataset generated with {len(metadata)} valid episodes"")",python,content
+[seq 2650, 2652-2664: clock-tick and cursor rows elided]
+2665,4634970,"generate_dataset.py",1187,23,"",python,content
+2668,4636301,"generate_dataset.py",1187,0,"\n ",python,content
+2670,4637314,"generate_dataset.py",1188,8,"",python,content
+[seq 2666-2667, 2669, 2671-2678: remaining clock-tick and cursor rows elided; the three edits above strip the ""# <-- change extension"" annotation]
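The block inserted at seq 2651 drops the breakpoint() call and replaces np.save with the same pickled-record encoding used by npy_to_arrayrecords.py. A quick in-memory round-trip check of that encoding, using CoinRun's 64x64x3 uint8 observations (an assumption based on procgen defaults, not stated in the recording):

    import pickle
    import numpy as np

    episode_data = np.zeros((120, 64, 64, 3), dtype=np.uint8)  # stand-in episode
    record = {"raw_video": episode_data.tobytes(), "sequence_length": len(episode_data)}
    blob = pickle.dumps(record)  # what ArrayRecordWriter.write() receives

    decoded = pickle.loads(blob)
    frames = np.frombuffer(decoded["raw_video"], dtype=np.uint8).reshape(
        decoded["sequence_length"], 64, 64, 3
    )
    assert np.array_equal(frames, episode_data)  # lossless round trip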
+[seq 2679-2755: clock-tick rows and cursor-placement rows elided]
+2756,4718308,"generate_dataset.py",293,0,"\n",python,content
+2758,4718589,"generate_dataset.py",294,0,"import pickle # <-- add this\nfrom array_record.python.array_record_module import ArrayRecordWriter # <-- add this\n",python,content
+2767,4721379,"generate_dataset.py",308,15,"",python,content
+2769,4721698,"generate_dataset.py",307,1,"",python,content
+2775,4723213,"generate_dataset.py",378,15,"",python,content
+[seq 2757, 2759-2766, 2768, 2770-2774, 2776-2805: clock-tick, selection, and cursor rows elided; the edits above add the pickle/ArrayRecordWriter imports and strip their ""# <-- add this"" annotations]
+2807,4746639,"generate_dataset.py",1191,0,"",python,selection_mouse +2808,4747179,"generate_dataset.py",1312,0,"",python,selection_mouse +2809,4747189,"generate_dataset.py",1311,0,"",python,selection_command +2811,4747861,"generate_dataset.py",1274,0,"",python,selection_mouse +2812,4748480,"generate_dataset.py",1536,0,"",python,selection_mouse +2813,4748496,"generate_dataset.py",1535,0,"",python,selection_command +2815,4749175,"generate_dataset.py",1274,0,"",python,selection_mouse +2842,4778635,"input_pipeline/preprocess/npy_to_tfrecord.py",0,0,"from dataclasses import dataclass\n\nimport tensorflow as tf\nimport concurrent.futures\nimport numpy as np\nimport logging\nimport tyro\nfrom pathlib import Path\nfrom tqdm import tqdm\n\nlogging.basicConfig(level=logging.INFO)\n\n\n@dataclass\nclass Args:\n source_data_dir: str = ""data/coinrun_episodes""\n output_tfrecords_dir: str = ""data/coinrun_tfrecords""\n num_shards: int = 50\n\n\nargs = tyro.cli(Args)\n\n\ndef _bytes_feature(value):\n if isinstance(value, type(tf.constant(0))):\n value = value.numpy()\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef create_tfrecord_example(episode_numpy_array):\n feature = {\n ""height"": _int64_feature(episode_numpy_array.shape[1]),\n ""width"": _int64_feature(episode_numpy_array.shape[2]),\n ""channels"": _int64_feature(episode_numpy_array.shape[3]),\n ""sequence_length"": _int64_feature(episode_numpy_array.shape[0]),\n ""raw_video"": _bytes_feature(episode_numpy_array.tobytes()),\n }\n return tf.train.Example(features=tf.train.Features(feature=feature))\n\n\ndef process_shard(shard_idx, episode_paths, output_filename):\n """"""Process a single shard: load episodes, write to one TFRecord file.""""""\n with tf.io.TFRecordWriter(output_filename) as writer:\n for npy_path in tqdm(\n episode_paths,\n desc=f""Shard {shard_idx:03d}"",\n leave=False,\n ):\n try:\n episode_data = np.load(npy_path)\n tf_example = 
create_tfrecord_example(episode_data)\n writer.write(tf_example.SerializeToString())\n except Exception as e:\n logging.error(\n f""Shard {shard_idx}: Skipping {npy_path} due to error: {e}""\n )\n\n\ndef main_preprocess(data_dir_str, output_dir_str, num_shards):\n data_dir = Path(data_dir_str)\n output_dir = Path(output_dir_str)\n output_dir.mkdir(parents=True, exist_ok=True)\n\n metadata = np.load(data_dir / ""metadata.npy"", allow_pickle=True)\n episode_source_paths = [Path(item[""path""]) for item in metadata]\n num_total_episodes = len(episode_source_paths)\n\n if num_shards <= 0:\n raise ValueError(""num_shards must be positive."")\n if num_shards > num_total_episodes:\n logging.warning(\n f""Warning: num_shards ({num_shards}) is greater than total episodes ({num_total_episodes}). ""\n f""Setting num_shards to {num_total_episodes}.""\n )\n num_shards = num_total_episodes\n\n logging.info(\n f""Preparing to write {num_total_episodes} episodes to {num_shards} TFRecord shards in {output_dir}...""\n )\n\n output_filenames = [\n str(output_dir / f""shard-{i:05d}-of-{num_shards:05d}.tfrecord"")\n for i in range(num_shards)\n ]\n\n # Split episode paths into shards\n shards = [[] for _ in range(num_shards)]\n for idx, npy_path in enumerate(episode_source_paths):\n shards[idx % num_shards].append(npy_path)\n\n with concurrent.futures.ProcessPoolExecutor(max_workers=num_shards) as executor:\n futures = []\n for shard_idx, (shard_paths, out_fname) in enumerate(\n zip(shards, output_filenames)\n ):\n futures.append(\n executor.submit(process_shard, shard_idx, shard_paths, out_fname)\n )\n for f in tqdm(\n concurrent.futures.as_completed(futures), total=num_shards, desc=""Shards""\n ):\n f.result() # Propagate exceptions\n\n logging.info(\n f""TFRecord sharding complete. 
{num_shards} shards written to {output_dir}.""\n )\n logging.info(""Generated shard files:"")\n for fname in output_filenames:\n logging.info(f"" {fname}"")\n\n\nif __name__ == ""__main__"":\n if (\n not Path(args.source_data_dir).exists()\n or not (Path(args.source_data_dir) / ""metadata.npy"").exists()\n ):\n logging.error(f""Please generate data in '{args.source_data_dir}' first."")\n else:\n main_preprocess(\n args.source_data_dir, args.output_tfrecords_dir, args.num_shards\n )\n",python,tab +2853,4788456,"generate_dataset.py",0,0,"",python,tab +2864,4798707,"TERMINAL",0,0,"bash",,terminal_focus +2874,4808118,"generate_dataset.py",0,0,"",python,tab +2876,4809449,"generate_dataset.py",1076,0,"",python,selection_mouse +2877,4809455,"generate_dataset.py",1075,0,"",python,selection_command
+2906,4838249,"generate_dataset.py",1421,0,"",python,selection_mouse +2907,4838399,"generate_dataset.py",1414,12,"episode_data",python,selection_mouse +2938,4869871,"generate_dataset.py",814,0,"",python,selection_mouse +2947,4878349,"generate_dataset.py",1747,1,"ions_",python,content +2948,4878350,"generate_dataset.py",1744,1,"observ",python,content +2949,4878350,"generate_dataset.py",1670,1,"ions_",python,content +2950,4878350,"generate_dataset.py",1667,1,"observ",python,content +2951,4878350,"generate_dataset.py",1607,1,"ions_",python,content +2952,4878351,"generate_dataset.py",1604,1,"observ",python,content +2953,4878351,"generate_dataset.py",1464,1,"ions_",python,content +2954,4878351,"generate_dataset.py",1461,1,"observ",python,content +2955,4878351,"generate_dataset.py",1194,1,"ions_",python,content +2956,4878351,"generate_dataset.py",1191,1,"observ",python,content +2957,4878351,"generate_dataset.py",1119,1,"ions_",python,content +2958,4878351,"generate_dataset.py",1116,1,"observ",python,content +2959,4878351,"generate_dataset.py",1017,1,"ions_",python,content +2960,4878351,"generate_dataset.py",1014,1,"observ",python,content +2961,4878351,"generate_dataset.py",813,1,"ions_",python,content +2962,4878351,"generate_dataset.py",810,1,"observ",python,content +2964,4879311,"generate_dataset.py",1198,0,"",python,selection_mouse
+2977,4891543,"generate_dataset.py",1454,3,"",python,content +2978,4891544,"generate_dataset.py",1453,0,"on",python,content +2979,4891544,"generate_dataset.py",1451,1,"rvat",python,content +2980,4891544,"generate_dataset.py",1450,0,"obs",python,content +2981,4891544,"generate_dataset.py",1192,3,"",python,content +2982,4891544,"generate_dataset.py",1191,0,"on",python,content +2983,4891544,"generate_dataset.py",1189,1,"rvat",python,content +2984,4891544,"generate_dataset.py",1188,0,"obs",python,content +3002,4910828,"generate_dataset.py",431,0,"",python,selection_mouse +3003,4910975,"generate_dataset.py",429,5,"10000",python,selection_mouse +3009,4916368,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"#!/usr/bin/env bash\n\npython generate_dataset.py \\n --num_episodes 10000 \\n --output_dir /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev \\n --min_episode_length 1000",shellscript,tab +3013,4918368,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",72,0,"",shellscript,selection_mouse +3014,4919218,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",71,0,"",shellscript,selection_command +3016,4919711,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",71,1,"",shellscript,content +3017,4919858,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",71,1,"",shellscript,content +3018,4920011,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",71,1,"",shellscript,content +3021,4922451,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/generate_dataset_10m.sh",,terminal_command
+3023,4922500,"TERMINAL",0,0,"]633;E;2025-09-04 11:18:59 sh slurm/dev/mihir/horeka/generate_dataset_10m.sh;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +3028,4926723,"TERMINAL",0,0,"Gym has been unmaintained since 2022 and does not support NumPy 2.0 amongst other critical functionality.\r\nPlease upgrade to Gymnasium, the maintained drop-in replacement of Gym, or contact the authors of your software and request that they upgrade.\r\nUsers of this version of Gym should be able to simply replace 'import gym' with 'import gymnasium as gym' in the vast majority of cases.\r\nSee the migration guide at https://gymnasium.farama.org/introduction/migration_guide/ for additional information.\r\n",,terminal_output +3039,4938049,"TERMINAL",0,0,"Episode too short (726), resampling...\r\n",,terminal_output +3041,4938125,"TERMINAL",0,0,"Episode too short (493), resampling...\r\n",,terminal_output +3042,4938188,"TERMINAL",0,0,"Episode too short (164), resampling...\r\n",,terminal_output +3043,4938870,"TERMINAL",0,0,"Episode 0 completed, length: 1000\r\n",,terminal_output +3045,4939194,"TERMINAL",0,0,"Episode too short (685), resampling...\r\n",,terminal_output +3046,4939302,"TERMINAL",0,0,"Episode too short (341), resampling...\r\n",,terminal_output +3047,4939997,"TERMINAL",0,0,"Episode 1 completed, length: 1000\r\n",,terminal_output +3049,4940370,"TERMINAL",0,0,"Episode too short (930), resampling...\r\n",,terminal_output +3050,4940579,"TERMINAL",0,0,"Episode too short (426), resampling...\r\n",,terminal_output +3051,4940916,"TERMINAL",0,0,"Episode too short (409), resampling...\r\n",,terminal_output +3053,4941276,"TERMINAL",0,0,"Episode 2 completed, length: 1000\r\n",,terminal_output +3054,4941786,"TERMINAL",0,0,"Episode 3 completed, length: 1000\r\n",,terminal_output +3056,4942259,"TERMINAL",0,0,"Episode too short (854), resampling...\r\n",,terminal_output +3057,4942865,"TERMINAL",0,0,"Episode 4 completed, length: 1000\r\n",,terminal_output +3059,4943293,"TERMINAL",0,0,"Episode 5 completed, length: 1000\r\n",,terminal_output +3060,4943902,"TERMINAL",0,0,"Episode 6 completed, length: 1000\r\nEpisode too short (75), resampling...\r\n",,terminal_output +3062,4944403,"TERMINAL",0,0,"Episode 7 completed, length: 1000\r\n",,terminal_output +3063,4945030,"TERMINAL",0,0,"Episode 8 completed, length: 1000\r\nEpisode too short (132), resampling...\r\n",,terminal_output +3064,4945201,"TERMINAL",0,0,"Episode too short (277), resampling...\r\n",,terminal_output
+3066,4945629,"TERMINAL",0,0,"Episode 9 completed, length: 1000\r\nDataset generated with 10 valid episodes\r\n",,terminal_output +3067,4945688,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jasmine",,terminal_output +3070,4947956,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab +3073,4950256,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,0,"",shellscript,selection_mouse +3074,4950443,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,1,"/",shellscript,selection_mouse +3075,4950444,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,6,"/hkfs/",shellscript,selection_mouse +3076,4950445,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,9,"/hkfs/wor",shellscript,selection_mouse +3077,4950445,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,15,"/hkfs/work/work",shellscript,selection_mouse +3078,4950445,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,17,"/hkfs/work/worksp",shellscript,selection_mouse +3079,4950477,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,25,"/hkfs/work/workspace/scra",shellscript,selection_mouse +3080,4950508,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,34,"/hkfs/work/workspace/scratch/tum_i",shellscript,selection_mouse +3081,4950539,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",73,18,"\n --output_dir ",shellscript,selection_mouse +3083,4950916,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,62,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_c",shellscript,selection_mouse +3084,4950917,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,63,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_co",shellscript,selection_mouse +3085,4950917,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,65,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coin",shellscript,selection_mouse +3086,4950918,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,66,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinr",shellscript,selection_mouse +3087,4950918,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,67,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinru",shellscript,selection_mouse +3088,4950918,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,68,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun",shellscript,selection_mouse +3089,4950954,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,69,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/",shellscript,selection_mouse +3090,4950998,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,70,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/d",shellscript,selection_mouse +3091,4951031,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,71,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/de",shellscript,selection_mouse +3092,4951175,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",91,72,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev",shellscript,selection_mouse
+3098,4956285,"TERMINAL",0,0,"ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev",,terminal_command +3099,4956337,"TERMINAL",0,0,"]633;E;2025-09-04 11:19:33 ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;Cepisode_0.array_record episode_2.array_record episode_4.array_record episode_6.array_record episode_8.array_record metadata.npy\r\nepisode_1.array_record episode_3.array_record episode_5.array_record episode_7.array_record episode_9.array_record\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +3110,4966489,"input_pipeline/preprocess/npy_to_array_records.py",0,0,"import numpy as np\nimport os\nimport tyro\nimport multiprocessing as mp\nfrom dataclasses import dataclass\nimport json\nimport pickle\nfrom array_record.python.array_record_module import ArrayRecordWriter\n\n\n@dataclass\nclass Args:\n input_path: str = ""data/minecraft_npy""\n output_path: str = ""data/minecraft_arrayrecords""\n\ndef preprocess_video(\n idx, in_filename, output_path\n):\n print(f""Processing video {idx}, Filename: {in_filename}"")\n try:\n frames = np.load(in_filename)\n n_frames = frames.shape[0]\n\n output_path = os.path.join(\n output_path,\n os.path.splitext(os.path.basename(in_filename))[0] + "".array_record"",\n )\n\n writer = ArrayRecordWriter(str(output_path), ""group_size:1"")\n\n print(f""Saving video {idx} to {output_path}"")\n record = {""raw_video"": frames.tobytes(), ""sequence_length"": n_frames}\n writer.write(pickle.dumps(record))\n writer.close()\n\n return in_filename, n_frames\n except Exception as e:\n print(f""Error processing video {idx} ({in_filename}): {e}"")\n return in_filename, 0\n\n\ndef main():\n args = tyro.cli(Args)\n\n os.makedirs(args.output_path, exist_ok=True)\n print(f""Output path: {args.output_path}"")\n\n num_processes = mp.cpu_count()\n print(f""Number of processes: {num_processes}"")\n\n print(""Converting npy to array_record files..."")\n pool_args = [\n (\n idx,\n os.path.join(args.input_path, in_filename),\n args.output_path,\n )\n for idx, in_filename in enumerate(os.listdir(args.input_path))\n if in_filename.endswith("".npy"")\n ]\n\n results = []\n with mp.Pool(processes=num_processes) as pool:\n for result in pool.starmap(preprocess_video, pool_args):\n results.append(result)\n print(""Done converting npy to array_record files"")\n\n # count the number of failed videos\n failed_videos = [result for result in results if result[1] == 0]\n short_episodes = [result for result in results if result[1] < 1600]\n print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short episodes: {len(short_episodes)}"")\n print(\n f""Number of successful videos: {len(results) - len(failed_videos) - len(short_episodes)}""\n )\n print(f""Number of total videos: {len(results)}"")\n\n with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as
f:\n json.dump(results, f)\n\n\nif __name__ == ""__main__"":\n main()\n",python,tab +3129,4985151,"TERMINAL",0,0,"ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/array_records/*.npy",,terminal_command +3130,4985235,"TERMINAL",0,0,"]633;E;2025-09-04 11:20:02 ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/array_records/*.npy;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;Cls: cannot access '/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/array_records/*.npy': No such file or directory\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;2",,terminal_output +3135,4990360,"input_pipeline/preprocess/npy_to_array_records.py",0,0,"",python,tab +3139,4992190,"input_pipeline/preprocess/npy_to_array_records.py",2103,0,"",python,selection_mouse +3146,4998755,"TERMINAL",0,0,"ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/array_records/*.json",,terminal_command +3147,4998835,"TERMINAL",0,0,"]633;E;2025-09-04 11:20:15 ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/array_records/*.json;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/array_records/meta_data.json\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +3152,5003062,"TERMINAL",0,0,"cat /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/array_records/meta_data.json",,terminal_command +3153,5003214,"TERMINAL",0,0,"]633;E;2025-09-04 11:20:20 cat /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/array_records/meta_data.json;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C[\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3727.npy"",\r\n 198\r\n
],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_136.npy"",\r\n 694\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7448.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1134.npy"",\r\n 622\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2056.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3111.npy"",\r\n 95\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_573.npy"",\r\n 59\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9599.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_743.npy"",\r\n 68\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8240.npy"",\r\n 617\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5611.npy"",\r\n 629\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9288.npy"",\r\n 573\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5373.npy"",\r\n 870\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4082.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8248.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8503.npy"",\r\n 702\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9606.npy"",\r\n 796\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7152.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2560.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1536.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8143.npy"",\r\n 107\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6758.npy"",\r\n 334\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6621.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2052.npy"",\r\n 461\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4287.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5467.npy"",\r\n 895\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8091.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8462.npy"",\r\n 578\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6041.npy"",\r\n 225\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5124.npy"",\r\n 610\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5519.npy"",\r\n 361\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8645.npy"",\r\n 770\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3167.npy"",\r\n 496\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9779.npy"",\r\n 303\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5105.npy"",\r\n 161\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1982.npy"",\r\n 749\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_269.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2080.npy"",\r\n 779\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8065.npy"",\r\n 350\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4322.npy"",\r\n 105\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6799.npy"",\r\n 85\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8097.npy"",\r\n 293\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6543.npy"",\r\n 307\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8374.npy"",\r\n 523\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_95.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_509.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_618.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3953.npy"",\r\n 881\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2925.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9179.npy"",\r\n 441\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4587.npy"",\r\n 531\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8363.npy"",\r\n 694\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2582.npy"",\r\n 581\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6446.npy"",\r\n 756\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_620.npy"",\r\n 700\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5918.npy"",\r\n 750\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5486.npy"",\r\n 113\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1859.npy"",\r\n 388\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9314.npy"",\r\n 543\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_217.npy"",\r\n 232\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2596.npy"",\r\n 76\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2078.npy"",\r\n 375\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8902.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8952.npy"",\r\n 99\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4487.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4018.npy"",\r\n 237\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3477.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1401.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_765.npy"",\r\n 861\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_562.npy"",\r\n 313\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6752.npy"",\r\n 548\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3389.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7904.npy"",\r\n 365\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8590.npy"",\r\n 328\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2479.npy"",\r\n 655\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9887.npy"",\r\n 241\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4972.npy"",\r\n 321\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1377.npy"",\r\n 221\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6191.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4417.npy"",\r\n 235\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1822.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9844.npy"",\r\n 494\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3599.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4787.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3661.npy"",\r\n 575\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1228.npy"",\r\n 241\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_392.npy"",\r\n 326\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8294.npy"",\r\n 714\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5170.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9463.npy"",\r\n 771\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_301.npy"",\r\n 231\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2422.npy"",\r\n 246\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_559.npy"",\r\n 262\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5842.npy"",\r\n 291\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4524.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_380.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6551.npy"",\r\n 332\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4522.npy"",\r\n 125\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3486.npy"",\r\n 156\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9495.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3570.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3636.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6463.npy"",\r\n 925\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9531.npy"",\r\n 267\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8360.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6120.npy"",\r\n 797\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8598.npy"",\r\n 464\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7783.npy"",\r\n 730\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4445.npy"",\r\n 726\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8641.npy"",\r\n 486\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_583.npy"",\r\n 561\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4505.npy"",\r\n 869\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1150.npy"",\r\n 137\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1788.npy"",\r\n 854\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8620.npy"",\r\n 139\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7593.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1307.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3088.npy"",\r\n 319\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8659.npy"",\r\n 726\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9722.npy"",\r\n 521\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9284.npy"",\r\n 220\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1974.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9027.npy"",\r\n 153\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7108.npy"",\r\n 132\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5362.npy"",\r\n 674\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2205.npy"",\r\n 241\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9696.npy"",\r\n 586\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4211.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5296.npy"",\r\n 799\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4763.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_182.npy"",\r\n 222\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6518.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_297.npy"",\r\n 241\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8359.npy"",\r\n 223\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7779.npy"",\r\n 717\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_210.npy"",\r\n 673\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9552.npy"",\r\n 141\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2023.npy"",\r\n 937\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9475.npy"",\r\n 734\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8005.npy"",\r\n 78\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6329.npy"",\r\n 457\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7556.npy"",\r\n 506\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7374.npy"",\r\n 924\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3459.npy"",\r\n 367\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1979.npy"",\r\n 786\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4218.npy"",\r\n 176\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5596.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3740.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4010.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7559.npy"",\r\n 74\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6442.npy"",\r\n 298\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8933.npy"",\r\n 304\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6000.npy"",\r\n 840\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6057.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7451.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2678.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5023.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2042.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1895.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5442.npy"",\r\n 570\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5393.npy"",\r\n 489\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8204.npy"",\r\n 275\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9983.npy"",\r\n 110\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_816.npy"",\r\n 261\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9483.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9981.npy"",\r\n 72\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1571.npy"",\r\n 145\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4997.npy"",\r\n 928\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3845.npy"",\r\n 56\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4014.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_891.npy"",\r\n 600\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8327.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4031.npy"",\r\n 260\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3473.npy"",\r\n 428\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4340.npy"",\r\n 274\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_147.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2896.npy"",\r\n 376\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5733.npy"",\r\n 177\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3385.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6588.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8665.npy"",\r\n 652\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6787.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3376.npy"",\r\n 181\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9406.npy"",\r\n 802\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3907.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5257.npy"",\r\n 175\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6858.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9053.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5111.npy"",\r\n 351\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2843.npy"",\r\n 401\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1362.npy"",\r\n 152\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2574.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1738.npy"",\r\n 439\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9517.npy"",\r\n 624\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3921.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5410.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8121.npy"",\r\n 525\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4632.npy"",\r\n 552\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2026.npy"",\r\n 781\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9316.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_28.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8684.npy"",\r\n 413\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8298.npy"",\r\n 892\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8761.npy"",\r\n 139\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7527.npy"",\r\n 217\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5556.npy"",\r\n 544\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1548.npy"",\r\n 102\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4160.npy"",\r\n 51\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1467.npy"",\r\n 440\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9262.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7581.npy"",\r\n 638\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1395.npy"",\r\n 781\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4952.npy"",\r\n 68\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_868.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4640.npy"",\r\n 802\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8100.npy"",\r\n 677\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7844.npy"",\r\n 869\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6942.npy"",\r\n 783\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_838.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2981.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4006.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2504.npy"",\r\n 304\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7774.npy"",\r\n 303\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3052.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4023.npy"",\r\n 579\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1970.npy"",\r\n 185\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8636.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1359.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9253.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7713.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2088.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5292.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4595.npy"",\r\n 546\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4007.npy"",\r\n 170\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5441.npy"",\r\n 238\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5939.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_663.npy"",\r\n 414\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7192.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2279.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2467.npy"",\r\n 431\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9824.npy"",\r\n 180\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9667.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8334.npy"",\r\n 852\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3350.npy"",\r\n 275\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2855.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5514.npy"",\r\n 211\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7770.npy"",\r\n 709\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5163.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3068.npy"",\r\n 261\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1287.npy"",\r\n 70\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2269.npy"",\r\n 208\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9165.npy"",\r\n 812\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6332.npy"",\r\n 262\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3276.npy"",\r\n 269\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3463.npy"",\r\n 414\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_370.npy"",\r\n 338\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8216.npy"",\r\n 221\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6666.npy"",\r\n 99\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_707.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1117.npy"",\r\n 250\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5530.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_973.npy"",\r\n 200\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_156.npy"",\r\n 152\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6753.npy"",\r\n 246\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4829.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5527.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9466.npy"",\r\n 857\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7711.npy"",\r\n 253\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8381.npy"",\r\n 541\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3691.npy"",\r\n 136\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2660.npy"",\r\n 341\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8637.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5989.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5151.npy"",\r\n 944\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2293.npy"",\r\n 871\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2792.npy"",\r\n 359\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5536.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_311.npy"",\r\n 160\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7507.npy"",\r\n 369\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3758.npy"",\r\n 409\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9076.npy"",\r\n 577\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9806.npy"",\r\n 695\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7554.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2307.npy"",\r\n 950\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1136.npy"",\r\n 583\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2305.npy"",\r\n 348\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6512.npy"",\r\n 81\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8573.npy"",\r\n 92\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9682.npy"",\r\n 483\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8212.npy"",\r\n 926\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1213.npy"",\r\n 100\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8567.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1984.npy"",\r\n 359\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1535.npy"",\r\n 76\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_798.npy"",\r\n 589\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8913.npy"",\r\n 578\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9170.npy"",\r\n 171\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4311.npy"",\r\n 483\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5253.npy"",\r\n 54\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5775.npy"",\r\n 630\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6776.npy"",\r\n 413\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7674.npy"",\r\n 363\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2210.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5268.npy"",\r\n 571\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4557.npy"",\r\n 364\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4883.npy"",\r\n 701\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7970.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_200.npy"",\r\n 487\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6462.npy"",\r\n 945\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8130.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2003.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3238.npy"",\r\n 618\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8688.npy"",\r\n 858\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2339.npy"",\r\n 943\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6792.npy"",\r\n 583\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2789.npy"",\r\n 193\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_807.npy"",\r\n 872\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7865.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1235.npy"",\r\n 248\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2404.npy"",\r\n 459\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9857.npy"",\r\n 367\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3508.npy"",\r\n 228\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2152.npy"",\r\n 372\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2725.npy"",\r\n 175\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8440.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4609.npy"",\r\n 111\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_436.npy"",\r\n 418\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4484.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1836.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5039.npy"",\r\n 103\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1188.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9464.npy"",\r\n 773\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6045.npy"",\r\n 998\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_195.npy"",\r\n 808\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2061.npy"",\r\n 473\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4955.npy"",\r\n 414\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5653.npy"",\r\n 607\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6880.npy"",\r\n 937\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4870.npy"",\r\n 717\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2784.npy"",\r\n 153\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9391.npy"",\r\n 357\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1039.npy"",\r\n 132\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4036.npy"",\r\n 68\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2084.npy"",\r\n 156\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7366.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3253.npy"",\r\n 107\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6845.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9242.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9662.npy"",\r\n 90\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3516.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7070.npy"",\r\n 815\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8888.npy"",\r\n 214\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2184.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4873.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3049.npy"",\r\n 764\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2884.npy"",\r\n 745\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9827.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2899.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4568.npy"",\r\n 533\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4566.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_456.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5768.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4424.npy"",\r\n 601\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7594.npy"",\r\n 988\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4309.npy"",\r\n 359\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2185.npy"",\r\n 333\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6472.npy"",\r\n 648\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4364.npy"",\r\n 601\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1424.npy"",\r\n 285\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1422.npy"",\r\n 149\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1932.npy"",\r\n 947\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6395.npy"",\r\n 461\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1416.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5468.npy"",\r\n 250\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6864.npy"",\r\n 515\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2875.npy"",\r\n 836\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9690.npy"",\r\n 253\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5233.npy"",\r\n 503\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4705.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4450.npy"",\r\n 581\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4197.npy"",\r\n 799\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7150.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8998.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_622.npy"",\r\n 172\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9953.npy"",\r\n 521\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5477.npy"",\r\n 160\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4976.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5552.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3213.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7503.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6013.npy"",\r\n 707\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6816.npy"",\r\n 304\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9072.npy"",\r\n 288\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1934.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7991.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4742.npy"",\r\n 585\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1500.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5705.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2291.npy"",\r\n 395\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6116.npy"",\r\n 705\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8805.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9746.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6952.npy"",\r\n 502\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4716.npy"",\r\n 324\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7172.npy"",\r\n 314\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2153.npy"",\r\n 894\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2790.npy"",\r\n 603\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2416.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7362.npy"",\r\n 337\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3624.npy"",\r\n 346\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8624.npy"",\r\n 354\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2576.npy"",\r\n 103\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9984.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6380.npy"",\r\n 420\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1956.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8469.npy"",\r\n 240\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3413.npy"",\r\n 792\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6668.npy"",\r\n 533\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4461.npy"",\r\n 137\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_544.npy"",\r\n 529\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1079.npy"",\r\n 585\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3195.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7290.npy"",\r\n 100\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7161.npy"",\r\n 640\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_963.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2209.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4005.npy"",\r\n 457\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8016.npy"",\r\n 77\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9068.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_809.npy"",\r\n 359\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4480.npy"",\r\n 148\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1905.npy"",\r\n 702\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_774.npy"",\r\n 406\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7578.npy"",\r\n 340\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_927.npy"",\r\n 75\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2767.npy"",\r\n 630\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4291.npy"",\r\n 494\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2511.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5933.npy"",\r\n 346\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8762.npy"",\r\n 225\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_31.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5128.npy"",\r\n 361\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7817.npy"",\r\n 587\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3797.npy"",\r\n 635\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1549.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1972.npy"",\r\n 214\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4844.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5699.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9115.npy"",\r\n 811\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1627.npy"",\r\n 130\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6420.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1167.npy"",\r\n 639\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2434.npy"",\r\n 683\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3559.npy"",\r\n 470\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9169.npy"",\r\n 889\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1114.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6579.npy"",\r\n 541\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4027.npy"",\r\n 364\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7158.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4360.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2169.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6724.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4708.npy"",\r\n 235\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1533.npy"",\r\n 556\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6781.npy"",\r\n 131\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6192.npy"",\r\n 453\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3964.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8795.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5498.npy"",\r\n 324\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7225.npy"",\r\n 950\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4723.npy"",\r\n 185\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9694.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2429.npy"",\r\n 130\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9741.npy"",\r\n 246\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3794.npy"",\r\n 495\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6423.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2462.npy"",\r\n 865\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9957.npy"",\r\n 347\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6417.npy"",\r\n 94\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2720.npy"",\r\n 851\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5774.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_624.npy"",\r\n 244\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3664.npy"",\r\n 412\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3782.npy"",\r\n 285\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5433.npy"",\r\n 560\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3776.npy"",\r\n 505\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8760.npy"",\r\n 180\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4892.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7335.npy"",\r\n 253\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3288.npy"",\r\n 61\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1185.npy"",\r\n 131\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3162.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5363.npy"",\r\n 272\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2571.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1008.npy"",\r\n 149\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6748.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9909.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6924.npy"",\r\n 493\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1610.npy"",\r\n 876\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_476.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5218.npy"",\r\n 187\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7178.npy"",\r\n 627\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6573.npy"",\r\n 99\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6737.npy"",\r\n 445\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2459.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6168.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1589.npy"",\r\n 123\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9289.npy"",\r\n 499\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5624.npy"",\r\n 425\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9752.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8168.npy"",\r\n 554\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_938.npy"",\r\n 199\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8575.npy"",\r\n 145\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3814.npy"",\r\n 248\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7165.npy"",\r\n 235\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_199.npy"",\r\n 622\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5569.npy"",\r\n 289\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5915.npy"",\r\n 678\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4827.npy"",\r\n 765\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7912.npy"",\r\n 62\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8487.npy"",\r\n 190\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_724.npy"",\r\n 414\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2507.npy"",\r\n 364\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9727.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5554.npy"",\r\n 72\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4051.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5518.npy"",\r\n 201\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9931.npy"",\r\n 269\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1261.npy"",\r\n 239\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5766.npy"",\r\n 525\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7736.npy"",\r\n 775\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3440.npy"",\r\n 601\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3050.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_496.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_784.npy"",\r\n 611\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2430.npy"",\r\n 675\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6103.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6674.npy"",\r\n 901\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5470.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1677.npy"",\r\n 381\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1012.npy"",\r\n 192\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1522.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9059.npy"",\r\n 710\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5119.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3952.npy"",\r\n 753\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4812.npy"",\r\n 199\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6135.npy"",\r\n 185\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1943.npy"",\r\n 488\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4817.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1619.npy"",\r\n 319\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9749.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2595.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4329.npy"",\r\n 316\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7235.npy"",\r\n 323\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7666.npy"",\r\n 321\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3016.npy"",\r\n 154\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3534.npy"",\r\n 526\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1898.npy"",\r\n 495\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6918.npy"",\r\n 116\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2391.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4382.npy"",\r\n 208\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9435.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8258.npy"",\r\n 627\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7983.npy"",\r\n 404\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7143.npy"",\r\n 388\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8779.npy"",\r\n 352\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1873.npy"",\r\n 136\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9952.npy"",\r\n 219\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9397.npy"",\r\n 445\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3249.npy"",\r\n 436\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7889.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9704.npy"",\r\n 132\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8142.npy"",\r\n 88\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2715.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7693.npy"",\r\n 86\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3112.npy"",\r\n 998\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_840.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7237.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4569.npy"",\r\n 615\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3827.npy"",\r\n 585\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3949.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7205.npy"",\r\n 73\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5439.npy"",\r\n 409\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2091.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1835.npy"",\r\n 157\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6702.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7009.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2070.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6567.npy"",\r\n 569\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2173.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_92.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2288.npy"",\r\n 863\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8126.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4368.npy"",\r\n 225\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4670.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_250.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9111.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_849.npy"",\r\n 715\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5214.npy"",\r\n 265\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8158.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3024.npy"",\r\n 301\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9074.npy"",\r\n 205\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_520.npy"",\r\n 246\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4262.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7854.npy"",\r\n 692\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4874.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_536.npy"",\r\n 346\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9337.npy"",\r\n 341\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8291.npy"",\r\n 238\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1923.npy"",\r\n 475\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2865.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1151.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5919.npy"",\r\n 472\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9945.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8231.npy"",\r\n 206\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1502.npy"",\r\n 362\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2285.npy"",\r\n 590\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9723.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9400.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7804.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4041.npy"",\r\n 340\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9578.npy"",\r\n 781\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6181.npy"",\r\n 79\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3368.npy"",\r\n 351\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2808.npy"",\r\n 200\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6598.npy"",\r\n 550\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7691.npy"",\r\n 217\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3142.npy"",\r\n 839\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_397.npy"",\r\n 122\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9379.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6958.npy"",\r\n 580\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3054.npy"",\r\n 175\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_556.npy"",\r\n 537\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6983.npy"",\r\n 988\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1915.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4727.npy"",\r\n 661\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6336.npy"",\r\n 376\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2505.npy"",\r\n 949\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5729.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2870.npy"",\r\n 490\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6421.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2811.npy"",\r\n 553\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1645.npy"",\r\n 645\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5235.npy"",\r\n 225\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8763.npy"",\r\n 796\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5117.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5062.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3530.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5325.npy"",\r\n 565\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_564.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3958.npy"",\r\n 143\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2533.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2268.npy"",\r\n 170\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3437.npy"",\r\n 605\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2711.npy"",\r\n 380\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9383.npy"",\r\n 160\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7650.npy"",\r\n 240\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1465.npy"",\r\n 401\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_187.npy"",\r\n 146\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4029.npy"",\r\n 169\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2383.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6419.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2345.npy"",\r\n 472\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_338.npy"",\r\n 114\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8851.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4631.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9369.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2969.npy"",\r\n 310\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2741.npy"",\r\n 312\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9047.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9281.npy"",\r\n 622\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8599.npy"",\r\n 194\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8198.npy"",\r\n 825\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_505.npy"",\r\n 55\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2457.npy"",\r\n 145\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2206.npy"",\r\n 916\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5451.npy"",\r\n 721\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3778.npy"",\r\n 869\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8800.npy"",\r\n 260\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2258.npy"",\r\n 633\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7546.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5198.npy"",\r\n 313\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8693.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8035.npy"",\r\n 545\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5302.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8740.npy"",\r\n 97\r\n ],\r\n",,terminal_output +3154,5003299,"TERMINAL",0,0," [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8726.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4604.npy"",\r\n 139\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3732.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7799.npy"",\r\n 383\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8833.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9775.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1553.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1054.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2617.npy"",\r\n 59\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9486.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3561.npy"",\r\n 610\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5095.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4772.npy"",\r\n 739\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8299.npy"",\r\n 459\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6700.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3994.npy"",\r\n 996\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8011.npy"",\r\n 248\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9097.npy"",\r\n 137\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9038.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3264.npy"",\r\n 380\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7014.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2087.npy"",\r\n 508\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_861.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2842.npy"",\r\n 510\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2146.npy"",\r\n 65\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9816.npy"",\r\n 465\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6246.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3026.npy"",\r\n 933\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3256.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2851.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6317.npy"",\r\n 860\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5275.npy"",\r\n 684\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5464.npy"",\r\n 467\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9345.npy"",\r\n 220\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7342.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8817.npy"",\r\n 258\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9247.npy"",\r\n 863\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_683.npy"",\r\n 203\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6403.npy"",\r\n 385\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6633.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1499.npy"",\r\n 225\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8864.npy"",\r\n 89\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4068.npy"",\r\n 153\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7777.npy"",\r\n 608\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6464.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5976.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2272.npy"",\r\n 744\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8971.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8587.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2197.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3338.npy"",\r\n 839\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9781.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_850.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8891.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7934.npy"",\r\n 347\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7421.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3629.npy"",\r\n 175\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9964.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7497.npy"",\r\n 176\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2431.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1473.npy"",\r\n 496\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_827.npy"",\r\n 75\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9210.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4953.npy"",\r\n 330\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1323.npy"",\r\n 323\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5383.npy"",\r\n 906\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2898.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4909.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3464.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1461.npy"",\r\n 263\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1263.npy"",\r\n 511\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7368.npy"",\r\n 404\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7066.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6805.npy"",\r\n 335\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1370.npy"",\r\n 482\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8766.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2341.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1742.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1526.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9354.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5010.npy"",\r\n 460\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3106.npy"",\r\n 162\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8405.npy"",\r\n 636\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_687.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7622.npy"",\r\n 237\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7956.npy"",\r\n 112\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5430.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4158.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8452.npy"",\r\n 127\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7542.npy"",\r\n 699\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3767.npy"",\r\n 363\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1649.npy"",\r\n 286\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6720.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2795.npy"",\r\n 415\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9204.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8903.npy"",\r\n 469\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1116.npy"",\r\n 568\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9626.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8789.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_576.npy"",\r\n 127\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3734.npy"",\r\n 508\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8076.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4781.npy"",\r\n 546\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2226.npy"",\r\n 128\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9212.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8751.npy"",\r\n 726\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9118.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8977.npy"",\r\n 842\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7134.npy"",\r\n 295\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1288.npy"",\r\n 393\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5521.npy"",\r\n 116\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9739.npy"",\r\n 744\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2949.npy"",\r\n 337\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8956.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7075.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6985.npy"",\r\n 956\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2436.npy"",\r\n 529\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6846.npy"",\r\n 118\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_642.npy"",\r\n 659\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7893.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5781.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5389.npy"",\r\n 982\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2883.npy"",\r\n 838\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3822.npy"",\r\n 238\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6564.npy"",\r\n 543\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9082.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_473.npy"",\r\n 438\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8502.npy"",\r\n 269\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5508.npy"",\r\n 602\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3446.npy"",\r\n 414\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_667.npy"",\r\n 780\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6555.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8914.npy"",\r\n 108\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8685.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9380.npy"",\r\n 401\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1010.npy"",\r\n 720\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_100.npy"",\r\n 220\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8561.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6467.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9030.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6053.npy"",\r\n 75\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2211.npy"",\r\n 330\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2378.npy"",\r\n 654\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9730.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9947.npy"",\r\n 993\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7276.npy"",\r\n 139\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8634.npy"",\r\n 461\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5136.npy"",\r\n 460\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9797.npy"",\r\n 586\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5784.npy"",\r\n 408\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4138.npy"",\r\n 259\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6959.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4314.npy"",\r\n 540\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5782.npy"",\r\n 582\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4312.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6529.npy"",\r\n 989\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6855.npy"",\r\n 684\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_581.npy"",\r\n 147\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1241.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4981.npy"",\r\n 456\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3623.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9877.npy"",\r\n 563\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4286.npy"",\r\n 356\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1520.npy"",\r\n 493\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2410.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2665.npy"",\r\n 124\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6468.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2074.npy"",\r\n 458\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4549.npy"",\r\n 841\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3888.npy"",\r\n 207\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1837.npy"",\r\n 545\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5367.npy"",\r\n 191\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2424.npy"",\r\n 493\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_450.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1183.npy"",\r\n 521\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1274.npy"",\r\n 329\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9596.npy"",\r\n 377\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5138.npy"",\r\n 596\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3102.npy"",\r\n 907\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4477.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5646.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7273.npy"",\r\n 586\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2575.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_235.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2706.npy"",\r\n 765\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4056.npy"",\r\n 129\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2125.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8409.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8034.npy"",\r\n 113\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6803.npy"",\r\n 657\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8870.npy"",\r\n 840\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2646.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_825.npy"",\r\n 288\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9622.npy"",\r\n 526\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2503.npy"",\r\n 644\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6729.npy"",\r\n 293\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6004.npy"",\r\n 839\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9114.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1720.npy"",\r\n 343\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2768.npy"",\r\n 302\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9172.npy"",\r\n 224\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2964.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3915.npy"",\r\n 237\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4573.npy"",\r\n 370\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4713.npy"",\r\n 507\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9684.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6360.npy"",\r\n 544\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1417.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9419.npy"",\r\n 445\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2577.npy"",\r\n 248\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4344.npy"",\r\n 648\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5385.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1821.npy"",\r\n 573\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9843.npy"",\r\n 751\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9031.npy"",\r\n 422\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_543.npy"",\r\n 204\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3136.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4977.npy"",\r\n 198\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3335.npy"",\r\n 233\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4319.npy"",\r\n 401\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7201.npy"",\r\n 844\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6093.npy"",\r\n 186\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_66.npy"",\r\n 353\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9770.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3879.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2876.npy"",\r\n 858\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3832.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2243.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4650.npy"",\r\n 575\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3470.npy"",\r\n 985\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8922.npy"",\r\n 288\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1501.npy"",\r\n 363\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9514.npy"",\r\n 238\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3916.npy"",\r\n 809\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9267.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4139.npy"",\r\n 173\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3828.npy"",\r\n 592\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8811.npy"",\r\n 331\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6559.npy"",\r\n 613\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_411.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2495.npy"",\r\n 604\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5395.npy"",\r\n 491\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6739.npy"",\r\n 146\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_129.npy"",\r\n 609\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7272.npy"",\r\n 339\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3571.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6692.npy"",\r\n 786\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8211.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1709.npy"",\r\n 154\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9864.npy"",\r\n 515\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2810.npy"",\r\n 576\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4816.npy"",\r\n 448\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8119.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4310.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7103.npy"",\r\n 575\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6699.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_321.npy"",\r\n 386\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7534.npy"",\r\n 312\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6669.npy"",\r\n 854\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2237.npy"",\r\n 540\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8244.npy"",\r\n 418\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5455.npy"",\r\n 149\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2028.npy"",\r\n 540\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8507.npy"",\r\n 265\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6963.npy"",\r\n 886\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9449.npy"",\r\n 153\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6143.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2419.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_523.npy"",\r\n 394\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7038.npy"",\r\n 895\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_264.npy"",\r\n 328\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9584.npy"",\r\n 410\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5140.npy"",\r\n 791\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3222.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7955.npy"",\r\n 511\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8366.npy"",\r\n 292\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8880.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4661.npy"",\r\n 433\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8837.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9088.npy"",\r\n 564\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4252.npy"",\r\n 450\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5421.npy"",\r\n 438\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2446.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6795.npy"",\r\n 260\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4396.npy"",\r\n 544\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4876.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5372.npy"",\r\n 544\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_260.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_172.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_484.npy"",\r\n 570\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4081.npy"",\r\n 818\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5914.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7120.npy"",\r\n 197\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5402.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5288.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8373.npy"",\r\n 831\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6014.npy"",\r\n 833\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2409.npy"",\r\n 140\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3721.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9308.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7147.npy"",\r\n 513\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1669.npy"",\r\n 286\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6659.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5876.npy"",\r\n 394\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7136.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2045.npy"",\r\n 250\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4734.npy"",\r\n 52\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7735.npy"",\r\n 749\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5443.npy"",\r\n 487\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4387.npy"",\r\n 534\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7979.npy"",\r\n 204\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6516.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1988.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8936.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8169.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_721.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4811.npy"",\r\n 200\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4315.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5923.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7142.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5896.npy"",\r\n 167\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_440.npy"",\r\n 832\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8356.npy"",\r\n 800\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9569.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8284.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7888.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3729.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8813.npy"",\r\n 303\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9716.npy"",\r\n 959\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9243.npy"",\r\n 712\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4520.npy"",\r\n 750\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_314.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7719.npy"",\r\n 263\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9491.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7574.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5329.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3626.npy"",\r\n 239\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_762.npy"",\r\n 426\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5413.npy"",\r\n 239\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9558.npy"",\r\n 154\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6660.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8070.npy"",\r\n 914\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7687.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3942.npy"",\r\n 179\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2902.npy"",\r\n 741\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4196.npy"",\r\n 369\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_761.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3231.npy"",\r\n 625\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4679.npy"",\r\n 373\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_945.npy"",\r\n 216\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_640.npy"",\r\n 451\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3150.npy"",\r\n 931\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4053.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_124.npy"",\r\n 508\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9692.npy"",\r\n 467\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7663.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4636.npy"",\r\n 691\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7997.npy"",\r\n 288\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1596.npy"",\r\n 469\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_873.npy"",\r\n 174\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8729.npy"",\r\n 968\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2227.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2415.npy"",\r\n 493\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_275.npy"",\r\n 219\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6552.npy"",\r\n 861\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6948.npy"",\r\n 839\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8132.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2287.npy"",\r\n 156\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_549.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5654.npy"",\r\n 647\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9216.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4367.npy"",\r\n 461\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2442.npy"",\r\n 560\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5563.npy"",\r\n 489\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4392.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_435.npy"",\r\n 148\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3173.npy"",\r\n 232\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1163.npy"",\r\n 312\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4683.npy"",\r\n 565\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9313.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_771.npy"",\r\n 91\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2363.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3359.npy"",\r\n 451\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5237.npy"",\r\n 217\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8890.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4832.npy"",\r\n 625\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1891.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7017.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4210.npy"",\r\n 661\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6431.npy"",\r\n 773\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8884.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3685.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7414.npy"",\r\n 333\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4758.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_806.npy"",\r\n 836\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2397.npy"",\r\n 188\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8127.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2201.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1722.npy"",\r\n 996\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7798.npy"",\r\n 317\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8496.npy"",\r\n 333\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3549.npy"",\r\n 572\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3634.npy"",\r\n 632\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8673.npy"",\r\n 473\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7163.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2413.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_909.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3538.npy"",\r\n 212\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4240.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7613.npy"",\r\n 343\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9697.npy"",\r\n 519\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8509.npy"",\r\n 141\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4213.npy"",\r\n 204\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6756.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9703.npy"",\r\n 357\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5986.npy"",\r\n 133\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_433.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3467.npy"",\r\n 292\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4633.npy"",\r\n 225\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8794.npy"",\r\n 771\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5691.npy"",\r\n 346\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9513.npy"",\r\n 111\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8537.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2931.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_17.npy"",\r\n 748\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7523.npy"",\r\n 408\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8385.npy"",\r\n 508\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8321.npy"",\r\n 390\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9988.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1543.npy"",\r\n 534\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6197.npy"",\r\n 313\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7492.npy"",\r\n 281\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9554.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7538.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2435.npy"",\r\n 270\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1391.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3125.npy"",\r\n 944\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3866.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8658.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5707.npy"",\r\n 329\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3905.npy"",\r\n 173\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_533.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8621.npy"",\r\n 551\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6300.npy"",\r\n 966\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4295.npy"",\r\n 848\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9353.npy"",\r\n 763\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4697.npy"",\r\n 431\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5694.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6123.npy"",\r\n 514\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_563.npy"",\r\n 477\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6827.npy"",\r\n 471\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6238.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1746.npy"",\r\n 170\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1289.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5759.npy"",\r\n 550\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3182.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1976.npy"",\r\n 727\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8015.npy"",\r\n 202\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9773.npy"",\r\n 975\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8508.npy"",\r\n 61\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8013.npy"",\r\n 303\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4765.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5066.npy"",\r\n 320\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3147.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8784.npy"",\r\n 117\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4172.npy"",\r\n 277\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8754.npy"",\r\n 784\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5277.npy"",\r\n 204\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3430.npy"",\r\n 93\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5153.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7478.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6008.npy"",\r\n 945\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5716.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3235.npy"",\r\n 66\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2845.npy"",\r\n 537\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_229.npy"",\r\n 319\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_94.npy"",\r\n 579\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1931.npy"",\r\n 907\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9089.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1316.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7733.npy"",\r\n 240\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6728.npy"",\r\n 345\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6322.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9042.npy"",\r\n 944\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9161.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5081.npy"",\r\n 854\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_998.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7303.npy"",\r\n 354\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5226.npy"",\r\n 52\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9271.npy"",\r\n 816\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_431.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9150.npy"",\r\n 66\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6337.npy"",\r\n 305\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9790.npy"",\r\n 963\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7596.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8320.npy"",\r\n 351\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2016.npy"",\r\n 158\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6917.npy"",\r\n 801\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3681.npy"",\r\n 907\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7410.npy"",\r\n 471\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7871.npy"",\r\n 701\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9698.npy"",\r\n 70\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5534.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_594.npy"",\r\n 305\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7599.npy"",\r\n 252\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_57.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2393.npy"",\r\n 438\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5197.npy"",\r\n 524\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9666.npy"",\r\n 373\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6600.npy"",\r\n 996\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6613.npy"",\r\n 303\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2188.npy"",\r\n 190\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6454.npy"",\r\n 574\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6607.npy"",\r\n 206\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3630.npy"",\r\n 717\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1512.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5553.npy"",\r\n 569\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8088.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3959.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1069.npy"",\r\n 588\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1516.npy"",\r\n 427\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9693.npy"",\r\n 328\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3416.npy"",\r\n 672\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1440.npy"",\r\n 632\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1124.npy"",\r\n 266\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9511.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8428.npy"",\r\n 241\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6726.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5706.npy"",\r\n 141\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8369.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7056.npy"",\r\n 226\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3365.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2325.npy"",\r\n 381\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3095.npy"",\r\n 608\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9563.npy"",\r\n 864\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7441.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8553.npy"",\r\n 895\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8074.npy"",\r\n 114\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9067.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7211.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7390.npy"",\r\n 221\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3121.npy"",\r\n 184\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4342.npy"",\r\n 278\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1661.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1983.npy"",\r\n 322\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2721.npy"",\r\n 373\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6498.npy"",\r\n 94\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8272.npy"",\r\n 168\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3266.npy"",\r\n 79\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4474.npy"",\r\n 907\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4605.npy"",\r\n 368\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5547.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8786.npy"",\r\n 749\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5035.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7087.npy"",\r\n 598\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9776.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4441.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5643.npy"",\r\n 503\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7144.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1813.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6234.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4945.npy"",\r\n 532\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_617.npy"",\r\n 330\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6665.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_623.npy"",\r\n 206\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8399.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4667.npy"",\r\n 95\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5836.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1986.npy"",\r\n 363\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9738.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6257.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_952.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7220.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5576.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9763.npy"",\r\n 225\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5273.npy"",\r\n 397\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9064.npy"",\r\n 164\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7998.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7939.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6369.npy"",\r\n 118\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_782.npy"",\r\n 500\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4305.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_493.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7635.npy"",\r\n 195\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5501.npy"",\r\n 141\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7083.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_629.npy"",\r\n 283\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4624.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6821.npy"",\r\n 423\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1412.npy"",\r\n 378\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8716.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4843.npy"",\r\n 708\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4492.npy"",\r\n 606\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7344.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6860.npy"",\r\n 188\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9146.npy"",\r\n 591\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4755.npy"",\r\n 120\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8376.npy"",\r\n 314\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3670.npy"",\r\n 425\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3383.npy"",\r\n 123\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1130.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2879.npy"",\r\n 904\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8382.npy"",\r\n 424\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8343.npy"",\r\n 624\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9545.npy"",\r\n 422\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9814.npy"",\r\n 838\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1597.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_606.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_893.npy"",\r\n 691\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9808.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1930.npy"",\r\n 771\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_107.npy"",\r\n 429\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2297.npy"",\r\n 827\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5509.npy"",\r\n 520\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8854.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5568.npy"",\r\n 925\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7521.npy"",\r\n 392\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6150.npy"",\r\n 333\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1654.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7209.npy"",\r\n 496\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_854.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1264.npy"",\r\n 840\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2006.npy"",\r\n 342\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7052.npy"",\r\n 540\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3361.npy"",\r\n 339\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1503.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4750.npy"",\r\n 388\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4838.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3104.npy"",\r\n 200\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2470.npy"",\r\n 261\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5913.npy"",\r\n 634\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7117.npy"",\r\n 349\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3091.npy"",\r\n 391\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9399.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6343.npy"",\r\n 954\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6677.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7781.npy"",\r\n 302\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4648.npy"",\r\n 577\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2532.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9546.npy"",\r\n 207\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1952.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3191.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7543.npy"",\r\n 206\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_63.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5862.npy"",\r\n 142\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1390.npy"",\r\n 439\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8408.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_968.npy"",\r\n 149\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1927.npy"",\r\n 946\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6740.npy"",\r\n 973\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1901.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9502.npy"",\r\n 567\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_760.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_335.npy"",\r\n 214\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7575.npy"",\r\n 250\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9213.npy"",\r\n 159\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7140.npy"",\r\n 322\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3120.npy"",\r\n 320\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7518.npy"",\r\n 528\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4978.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5765.npy"",\r\n 374\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5541.npy"",\r\n 777\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8336.npy"",\r\n 548\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5004.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7340.npy"",\r\n 454\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7813.npy"",\r\n 913\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3002.npy"",\r\n 181\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4664.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5567.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3146.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_670.npy"",\r\n 932\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6847.npy"",\r\n 378\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2569.npy"",\r\n 356\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3650.npy"",\r\n 170\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8725.npy"",\r\n 399\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3058.npy"",\r\n 139\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9013.npy"",\r\n 417\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1110.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5497.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1872.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6083.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5428.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6081.npy"",\r\n 76\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7816.npy"",\r\n 394\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9767.npy"",\r\n 535\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7187.npy"",\r\n 202\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5411.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4916.npy"",\r\n 364\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8541.npy"",\r\n 324\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8535.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7633.npy"",\r\n 493\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_268.npy"",\r\n 509\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1635.npy"",\r\n 687\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9828.npy"",\r\n 313\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9477.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2312.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9985.npy"",\r\n 453\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7751.npy"",\r\n 339\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6734.npy"",\r\n 561\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_116.npy"",\r\n 579\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_789.npy"",\r\n 154\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1663.npy"",\r\n 1000\r\n ],\r\n [\r\n",,terminal_output +3155,5003555,"TERMINAL",0,0,"20",,terminal_output +3156,5003582,"TERMINAL",0,0," ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2151.npy"",\r\n 378\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1926.npy"",\r\n 869\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1642.npy"",\r\n 162\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5967.npy"",\r\n 374\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9702.npy"",\r\n 898\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4960.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3144.npy"",\r\n 219\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7387.npy"",\r\n 480\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5293.npy"",\r\n 138\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4156.npy"",\r\n 519\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6034.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1650.npy"",\r\n 278\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6725.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2250.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_895.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5593.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2411.npy"",\r\n 213\r\n 
],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7529.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7040.npy"",\r\n 362\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4386.npy"",\r\n 289\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1189.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8181.npy"",\r\n 293\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5368.npy"",\r\n 762\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3683.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2626.npy"",\r\n 649\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7267.npy"",\r\n 581\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9956.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4756.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4201.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3023.npy"",\r\n 453\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6744.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9205.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9471.npy"",\r\n 140\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3946.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4497.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4888.npy"",\r\n 402\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5012.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5666.npy"",\r\n 176\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8625.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6823.npy"",\r\n 250\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7174.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2881.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4376.npy"",\r\n 321\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4959.npy"",\r\n 681\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8871.npy"",\r\n 552\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5450.npy"",\r\n 543\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3038.npy"",\r\n 244\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5382.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4749.npy"",\r\n 390\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2220.npy"",\r\n 311\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9834.npy"",\r\n 608\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8264.npy"",\r\n 168\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8483.npy"",\r\n 184\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1741.npy"",\r\n 395\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1394.npy"",\r\n 938\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6426.npy"",\r\n 142\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8560.npy"",\r\n 341\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1957.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7927.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8200.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5550.npy"",\r\n 88\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7039.npy"",\r\n 169\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_845.npy"",\r\n 95\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3188.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4502.npy"",\r\n 954\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2333.npy"",\r\n 558\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5283.npy"",\r\n 580\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_702.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2386.npy"",\r\n 546\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6105.npy"",\r\n 338\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6715.npy"",\r\n 202\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9479.npy"",\r\n 391\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4103.npy"",\r\n 195\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5188.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5822.npy"",\r\n 745\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_985.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3654.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3460.npy"",\r\n 261\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3070.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5638.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7699.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1056.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8750.npy"",\r\n 537\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8145.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5274.npy"",\r\n 399\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7251.npy"",\r\n 761\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1109.npy"",\r\n 803\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3012.npy"",\r\n 217\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9548.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6705.npy"",\r\n 731\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7584.npy"",\r\n 845\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8254.npy"",\r\n 176\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4448.npy"",\r\n 990\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1037.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8718.npy"",\r\n 882\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6185.npy"",\r\n 983\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8994.npy"",\r\n 946\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8577.npy"",\r\n 379\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4519.npy"",\r\n 369\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5844.npy"",\r\n 595\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7167.npy"",\r\n 339\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6742.npy"",\r\n 382\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9771.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9700.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_87.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1444.npy"",\r\n 505\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2613.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8051.npy"",\r\n 608\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4090.npy"",\r\n 198\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1727.npy"",\r\n 775\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1919.npy"",\r\n 808\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2157.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6366.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5282.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6451.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8060.npy"",\r\n 125\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_97.npy"",\r\n 564\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2456.npy"",\r\n 756\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_821.npy"",\r\n 518\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8323.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9147.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6786.npy"",\r\n 488\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2490.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7961.npy"",\r\n 321\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5490.npy"",\r\n 438\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9396.npy"",\r\n 247\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4020.npy"",\r\n 873\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8250.npy"",\r\n 394\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1059.npy"",\r\n 911\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7123.npy"",\r\n 823\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4559.npy"",\r\n 380\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5000.npy"",\r\n 761\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_685.npy"",\r\n 95\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6497.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9246.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9208.npy"",\r\n 299\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1758.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9787.npy"",\r\n 779\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8317.npy"",\r\n 389\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4002.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8666.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4685.npy"",\r\n 62\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9618.npy"",\r\n 641\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8154.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9323.npy"",\r\n 867\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6513.npy"",\r\n 608\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7236.npy"",\r\n 520\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5016.npy"",\r\n 476\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3034.npy"",\r\n 141\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_375.npy"",\r\n 190\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4949.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5525.npy"",\r\n 150\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8708.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7925.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5056.npy"",\r\n 914\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1893.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9830.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5030.npy"",\r\n 666\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8260.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4635.npy"",\r\n 300\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3598.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6606.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6422.npy"",\r\n 394\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2487.npy"",\r\n 529\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_791.npy"",\r\n 428\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8822.npy"",\r\n 744\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4294.npy"",\r\n 700\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8275.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9579.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7098.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6450.npy"",\r\n 370\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8623.npy"",\r\n 138\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8131.npy"",\r\n 79\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9250.npy"",\r\n 166\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5520.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6594.npy"",\r\n 282\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6653.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7577.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6061.npy"",\r\n 243\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2822.npy"",\r\n 175\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2194.npy"",\r\n 294\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8478.npy"",\r\n 288\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8771.npy"",\r\n 324\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1419.npy"",\r\n 652\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3379.npy"",\r\n 570\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4170.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_340.npy"",\r\n 103\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2200.npy"",\r\n 123\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4166.npy"",\r\n 935\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_192.npy"",\r\n 447\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6722.npy"",\r\n 355\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4033.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3528.npy"",\r\n 436\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3637.npy"",\r\n 358\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2825.npy"",\r\n 495\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7460.npy"",\r\n 629\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8572.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4831.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9333.npy"",\r\n 160\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_803.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_320.npy"",\r\n 631\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5760.npy"",\r\n 211\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4539.npy"",\r\n 116\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2916.npy"",\r\n 397\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_741.npy"",\r\n 130\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9244.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9263.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7494.npy"",\r\n 876\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9130.npy"",\r\n 276\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2085.npy"",\r\n 570\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2760.npy"",\r\n 337\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5786.npy"",\r\n 615\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6345.npy"",\r\n 617\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5043.npy"",\r\n 436\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7732.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1337.npy"",\r\n 708\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8257.npy"",\r\n 171\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8351.npy"",\r\n 241\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3497.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1658.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7911.npy"",\r\n 355\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_540.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1723.npy"",\r\n 279\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_853.npy"",\r\n 939\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2168.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8667.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1128.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6374.npy"",\r\n 794\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3369.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3215.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_734.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3603.npy"",\r\n 152\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3145.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8595.npy"",\r\n 464\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4753.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4582.npy"",\r\n 581\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4615.npy"",\r\n 236\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1953.npy"",\r\n 799\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3407.npy"",\r\n 148\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4285.npy"",\r\n 307\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6402.npy"",\r\n 203\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2123.npy"",\r\n 204\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7820.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_283.npy"",\r\n 467\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1790.npy"",\r\n 623\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7240.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5671.npy"",\r\n 71\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3190.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6876.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9673.npy"",\r\n 381\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4681.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9217.npy"",\r\n 361\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1050.npy"",\r\n 938\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1909.npy"",\r\n 622\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5171.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4689.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4891.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9936.npy"",\r\n 100\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5052.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4966.npy"",\r\n 642\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9688.npy"",\r\n 123\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1328.npy"",\r\n 854\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4303.npy"",\r\n 378\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2083.npy"",\r\n 942\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9980.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3492.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6242.npy"",\r\n 466\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7018.npy"",\r\n 200\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6087.npy"",\r\n 518\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7256.npy"",\r\n 333\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6085.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7665.npy"",\r\n 221\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4867.npy"",\r\n 52\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6563.npy"",\r\n 243\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9388.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6307.npy"",\r\n 262\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6274.npy"",\r\n 901\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7805.npy"",\r\n 796\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3731.npy"",\r\n 459\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_719.npy"",\r\n 238\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7863.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2450.npy"",\r\n 598\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1415.npy"",\r\n 137\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5762.npy"",\r\n 221\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5505.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3836.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5437.npy"",\r\n 124\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4654.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2353.npy"",\r\n 165\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4988.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3613.npy"",\r\n 188\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1570.npy"",\r\n 142\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2108.npy"",\r\n 226\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9270.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7183.npy"",\r\n 359\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4476.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4917.npy"",\r\n 158\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_717.npy"",\r\n 781\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7595.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1405.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9888.npy"",\r\n 517\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4869.npy"",\r\n 482\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9565.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8490.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2966.npy"",\r\n 499\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_481.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3896.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6658.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3673.npy"",\r\n 431\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6651.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5909.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1631.npy"",\r\n 139\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_488.npy"",\r\n 189\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2001.npy"",\r\n 196\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1319.npy"",\r\n 503\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_251.npy"",\r\n 215\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_166.npy"",\r\n 292\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6837.npy"",\r\n 279\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1061.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7716.npy"",\r\n 174\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8465.npy"",\r\n 939\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6015.npy"",\r\n 255\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9639.npy"",\r\n 747\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_118.npy"",\r\n 978\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5537.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8253.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2599.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4901.npy"",\r\n 460\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5423.npy"",\r\n 203\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3493.npy"",\r\n 140\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1477.npy"",\r\n 63\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6349.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8898.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3714.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1334.npy"",\r\n 288\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7703.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_291.npy"",\r\n 529\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3511.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5903.npy"",\r\n 54\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3400.npy"",\r\n 126\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2177.npy"",\r\n 289\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5331.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8170.npy"",\r\n 813\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3141.npy"",\r\n 696\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4935.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4032.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5191.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7658.npy"",\r\n 269\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8018.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4656.npy"",\r\n 666\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7637.npy"",\r\n 80\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5454.npy"",\r\n 499\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4288.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4460.npy"",\r\n 77\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6577.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4343.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9701.npy"",\r\n 156\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5086.npy"",\r\n 319\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6112.npy"",\r\n 525\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8801.npy"",\r\n 405\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5930.npy"",\r\n 725\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7195.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8785.npy"",\r\n 379\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4057.npy"",\r\n 967\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1223.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2039.npy"",\r\n 630\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7124.npy"",\r\n 474\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6438.npy"",\r\n 302\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5377.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2289.npy"",\r\n 640\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8973.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8757.npy"",\r\n 572\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1420.npy"",\r\n 793\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_339.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1670.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_522.npy"",\r\n 856\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3393.npy"",\r\n 296\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6711.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1488.npy"",\r\n 398\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5545.npy"",\r\n 326\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6109.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6772.npy"",\r\n 123\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2965.npy"",\r\n 417\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9572.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3137.npy"",\r\n 75\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5353.npy"",\r\n 295\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2453.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_805.npy"",\r\n 98\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7897.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2148.npy"",\r\n 547\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7001.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3317.npy"",\r\n 477\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6909.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4495.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6323.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6303.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4583.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6560.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5927.npy"",\r\n 615\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8523.npy"",\r\n 373\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7146.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2948.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5328.npy"",\r\n 302\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5071.npy"",\r\n 219\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1175.npy"",\r\n 835\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3682.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4893.npy"",\r\n 632\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6063.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4297.npy"",\r\n 705\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_22.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2679.npy"",\r\n 437\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5485.npy"",\r\n 577\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7382.npy"",\r\n 388\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6738.npy"",\r\n 230\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7149.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6762.npy"",\r\n 558\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7469.npy"",\r\n 210\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7645.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4111.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_701.npy"",\r\n 527\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6654.npy"",\r\n 352\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7985.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9865.npy"",\r\n 71\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2473.npy"",\r\n 520\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9608.npy"",\r\n 455\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4191.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4348.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1712.npy"",\r\n 574\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4248.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8033.npy"",\r\n 349\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2680.npy"",\r\n 256\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8461.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4613.npy"",\r\n 168\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9589.npy"",\r\n 965\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3736.npy"",\r\n 288\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9202.npy"",\r\n 160\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5688.npy"",\r\n 107\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_899.npy"",\r\n 553\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9126.npy"",\r\n 518\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_125.npy"",\r\n 93\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_951.npy"",\r\n 345\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8178.npy"",\r\n 215\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4282.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5723.npy"",\r\n 249\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8273.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5494.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2661.npy"",\r\n 239\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1251.npy"",\r\n 341\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8926.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2525.npy"",\r\n 195\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4545.npy"",\r\n 696\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2414.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5714.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3600.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5483.npy"",\r\n 167\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4526.npy"",\r\n 890\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4409.npy"",\r\n 53\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2420.npy"",\r\n 538\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_647.npy"",\r\n 588\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5306.npy"",\r\n 173\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4370.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5841.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7164.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7093.npy"",\r\n 866\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3330.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4931.npy"",\r\n 143\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7177.npy"",\r\n 978\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_823.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5571.npy"",\r\n 141\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9677.npy"",\r\n 171\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1880.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7714.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6698.npy"",\r\n 204\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7277.npy"",\r\n 173\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_325.npy"",\r\n 249\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8459.npy"",\r\n 673\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5529.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5358.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9331.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4802.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_971.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4739.npy"",\r\n 790\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1011.npy"",\r\n 714\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5945.npy"",\r\n 367\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9311.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9799.npy"",\r\n 968\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8378.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9555.npy"",\r\n 170\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8662.npy"",\r\n 616\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8150.npy"",\r\n 290\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7186.npy"",\r\n 243\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2337.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1413.npy"",\r\n 448\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1161.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4187.npy"",\r\n 247\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6333.npy"",\r\n 340\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6714.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8775.npy"",\r\n 254\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6218.npy"",\r\n 442\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4043.npy"",\r\n 666\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_350.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3009.npy"",\r\n 363\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4986.npy"",\r\n 568\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7843.npy"",\r\n 166\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3847.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_524.npy"",\r\n 394\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1197.npy"",\r\n 130\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_793.npy"",\r\n 359\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2370.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4244.npy"",\r\n 193\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6933.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5983.npy"",\r\n 588\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4000.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4093.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3939.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1019.npy"",\r\n 434\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2527.npy"",\r\n 734\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2938.npy"",\r\n 861\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3569.npy"",\r\n 880\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4850.npy"",\r\n 234\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2936.npy"",\r\n 379\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3692.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6495.npy"",\r\n 406\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7782.npy"",\r\n 471\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1211.npy"",\r\n 803\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2872.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_698.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9490.npy"",\r\n 660\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8172.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9341.npy"",\r\n 841\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1284.npy"",\r\n 328\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6056.npy"",\r\n 199\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7975.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6998.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_238.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_570.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8966.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6811.npy"",\r\n 370\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2984.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9600.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6678.npy"",\r\n 809\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4152.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6695.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9922.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2135.npy"",\r\n 423\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6857.npy"",\r\n 327\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9187.npy"",\r\n 822\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8430.npy"",\r\n 339\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4261.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1609.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_372.npy"",\r\n 307\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8125.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7620.npy"",\r\n 820\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_525.npy"",\r\n 677\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8048.npy"",\r\n 849\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9424.npy"",\r\n 263\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4799.npy"",\r\n 118\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3495.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_834.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7034.npy"",\r\n 330\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2376.npy"",\r\n 161\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5803.npy"",\r\n 324\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6652.npy"",\r\n 368\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7160.npy"",\r\n 180\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3166.npy"",\r\n 148\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4979.npy"",\r\n 426\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5037.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4087.npy"",\r\n 794\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5901.npy"",\r\n 210\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_865.npy"",\r\n 164\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8032.npy"",\r\n 339\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7082.npy"",\r\n 746\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6231.npy"",\r\n 510\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8276.npy"",\r\n 893\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9470.npy"",\r\n 586\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4665.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2144.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9201.npy"",\r\n 564\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9468.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1177.npy"",\r\n 116\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3590.npy"",\r\n 220\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9278.npy"",\r\n 364\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2878.npy"",\r\n 815\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1028.npy"",\r\n 677\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5304.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1026.npy"",\r\n 926\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_54.npy"",\r\n 408\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9241.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_438.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_689.npy"",\r\n 249\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9839.npy"",\r\n 67\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2405.npy"",\r\n 455\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1772.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8993.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4466.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4606.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7461.npy"",\r\n 741\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_539.npy"",\r\n 978\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9577.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3478.npy"",\r\n 282\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6214.npy"",\r\n 150\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9003.npy"",\r\n 216\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9069.npy"",\r\n 299\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1146.npy"",\r\n 239\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2198.npy"",\r\n 364\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9148.npy"",\r\n 272\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7686.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5187.npy"",\r\n 736\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3433.npy"",\r\n 98\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8932.npy"",\r\n 350\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5748.npy"",\r\n 91\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2997.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7851.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8855.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8909.npy"",\r\n 79\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5892.npy"",\r\n 907\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5808.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7600.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1787.npy"",\r\n 255\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2756.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6937.npy"",\r\n 876\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8288.npy"",\r\n 165\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4523.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9712.npy"",\r\n 360\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8810.npy"",\r\n 125\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6624.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9494.npy"",\r\n 398\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1778.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_651.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9057.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7715.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4563.npy"",\r\n 270\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7484.npy"",\r\n 151\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9785.npy"",\r\n 312\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_961.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6019.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4542.npy"",\r\n 226\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8828.npy"",\r\n 149\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5578.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1208.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2350.npy"",\r\n 873\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4961.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6377.npy"",\r\n 411\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6872.npy"",\r\n 594\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_150.npy"",\r\n 448\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7173.npy"",\r\n 362\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3983.npy"",\r\n 316\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5607.npy"",\r\n 158\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6475.npy"",\r\n 337\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7585.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_165.npy"",\r\n 99\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8689.npy"",\r\n 535\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3489.npy"",\r\n 463\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3487.npy"",\r\n 481\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7848.npy"",\r\n 268\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9368.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3552.npy"",\r\n 266\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8720.npy"",\r\n 778\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3259.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4618.npy"",\r\n 148\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9832.npy"",\r\n 541\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5897.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5871.npy"",\r\n 437\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3728.npy"",\r\n 582\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1975.npy"",\r\n 642\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8679.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_745.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3816.npy"",\r\n 239\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3103.npy"",\r\n 750\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6978.npy"",\r\n 70\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7857.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7948.npy"",\r\n 119\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8613.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_980.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_416.npy"",\r\n 456\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4283.npy"",\r\n 141\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_422.npy"",\r\n 342\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5033.npy"",\r\n 371\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1098.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8058.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8773.npy"",\r\n 277\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_688.npy"",\r\n 667\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_950.npy"",\r\n 349\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5080.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3574.npy"",\r\n 74\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2241.npy"",\r\n 520\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5584.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9860.npy"",\r\n 373\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9718.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8397.npy"",\r\n 260\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_381.npy"",\r\n 540\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2642.npy"",\r\n 966\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8206.npy"",\r\n 65\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5057.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6176.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9829.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7348.npy"",\r\n 336\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8189.npy"",\r\n 258\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9175.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2551.npy"",\r\n 397\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7013.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9982.npy"",\r\n 256\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_941.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1094.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5868.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3387.npy"",\r\n 600\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5785.npy"",\r\n 188\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8500.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3029.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_730.npy"",\r\n 898\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3474.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8861.npy"",\r\n 457\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4238.npy"",\r\n 286\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9549.npy"",\r\n 537\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_219.npy"",\r\n 568\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3660.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8394.npy"",\r\n 187\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8749.npy"",\r\n 414\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8412.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6186.npy"",\r\n 797\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5019.npy"",\r\n 169\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2189.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8657.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9859.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4108.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1432.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7900.npy"",\r\n 867\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3505.npy"",\r\n 243\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2097.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2390.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8917.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2693.npy"",\r\n 652\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9633.npy"",\r\n 267\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6969.npy"",\r\n 800\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3704.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9871.npy"",\r\n 719\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_389.npy"",\r\n 756\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4507.npy"",\r\n 162\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9245.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8950.npy"",\r\n 161\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7480.npy"",\r\n 262\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_912.npy"",\r\n 407\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8653.npy"",\r\n 907\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1585.npy"",\r\n 525\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1065.npy"",\r\n 178\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_464.npy"",\r\n 223\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8251.npy"",\r\n 373\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6373.npy"",\r\n 338\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_976.npy"",\r\n 925\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4080.npy"",\r\n 301\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7977.npy"",\r\n 404\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9604.npy"",\r\n 858\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8594.npy"",\r\n 368\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2234.npy"",\r\n 196\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9933.npy"",\r\n 208\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7771.npy"",\r\n 581\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_535.npy"",\r\n 116\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1448.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2111.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8365.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5722.npy"",\r\n 96\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2051.npy"",\r\n 273\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1404.npy"",\r\n 563\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3124.npy"",\r\n 802\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7395.npy"",\r\n 78\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9534.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6478.npy"",\r\n 514\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7176.npy"",\r\n 831\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2986.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4122.npy"",\r\n 424\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5008.npy"",\r\n 868\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6408.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_682.npy"",\r\n 726\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8174.npy"",\r\n 859\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9868.npy"",\r\n 841\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5572.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2607.npy"",\r\n 302\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6697.npy"",\r\n 232\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4668.npy"",\r\n 202\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8504.npy"",\r\n 684\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9442.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3622.npy"",\r\n 263\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8047.npy"",\r\n 687\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3523.npy"",\r\n 784\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4359.npy"",\r\n 674\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_233.npy"",\r\n 199\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7129.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9705.npy"",\r\n 159\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2738.npy"",\r\n 258\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4346.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2591.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5580.npy"",\r\n 802\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3168.npy"",\r\n 683\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1935.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6018.npy"",\r\n 132\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3212.npy"",\r\n 196\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8202.npy"",\r\n 804\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7978.npy"",\r\n 127\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5719.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3716.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5053.npy"",\r\n 435\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7742.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6172.npy"",\r\n 170\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3429.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8911.npy"",\r\n 982\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7862.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8506.npy"",\r\n 598\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5594.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6351.npy"",\r\n 765\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6822.npy"",\r\n 358\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6774.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6750.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4950.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_140.npy"",\r\n 169\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7338.npy"",\r\n 553\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3642.npy"",\r\n 212\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1381.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3240.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6470.npy"",\r\n 328\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3554.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5582.npy"",\r\n 300\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6655.npy"",\r\n 115\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9264.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3148.npy"",\r\n 217\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_818.npy"",\r\n 290\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_104.npy"",\r\n 645\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4968.npy"",\r\n 213\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3247.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6090.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_759.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5127.npy"",\r\n 83\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5840.npy"",\r\n 964\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6921.npy"",\r\n 415\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4411.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_379.npy"",\r\n 375\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_740.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2905.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3513.npy"",\r\n 373\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_723.npy"",\r\n 569\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6916.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3883.npy"",\r\n 421\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7759.npy"",\r\n 695\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4956.npy"",\r\n 312\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6348.npy"",\r\n 284\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2406.npy"",\r\n 321\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4548.npy"",\r\n 796\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2321.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5742.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5044.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5181.npy"",\r\n 565\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4205.npy"",\r\n 141\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4134.npy"",\r\n 374\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8386.npy"",\r\n 524\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3726.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_237.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2578.npy"",\r\n 776\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8043.npy"",\r\n 901\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9496.npy"",\r\n 452\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2303.npy"",\r\n 360\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5386.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_588.npy"",\r\n 97\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5665.npy"",\r\n 372\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7840.npy"",\r\n 948\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7000.npy"",\r\n 675\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1702.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6540.npy"",\r\n 329\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1881.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7015.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9428.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7958.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_600.npy"",\r\n 298\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3220.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_472.npy"",\r\n 734\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5863.npy"",\r\n 450\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4485.npy"",\r\n 295\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2854.npy"",\r\n 385\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4374.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9029.npy"",\r\n 262\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_285.npy"",\r\n 777\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4748.npy"",\r\n 64\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5917.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7502.npy"",\r\n 189\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5263.npy"",\r\n 576\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1884.npy"",\r\n 857\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2866.npy"",\r\n 707\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2957.npy"",\r\n 381\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8571.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6027.npy"",\r\n 733\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2921.npy"",\r\n 651\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4567.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9663.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7837.npy"",\r\n 534\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5991.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9740.npy"",\r\n 332\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8330.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6182.npy"",\r\n 357\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4620.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4908.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5756.npy"",\r\n 664\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1759.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3614.npy"",\r\n 341\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3484.npy"",\r\n 60\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1165.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5880.npy"",\r\n 145\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6923.npy"",\r\n 392\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6539.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7739.npy"",\r\n 157\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7428.npy"",\r\n 685\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8631.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2374.npy"",\r\n 782\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4054.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6956.npy"",\r\n 96\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4273.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3244.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1043.npy"",\r\n 496\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8328.npy"",\r\n 151\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6397.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1306.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8979.npy"",\r\n 309\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4185.npy"",\r\n 445\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9586.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4298.npy"",\r\n 145\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8752.npy"",\r\n 406\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9529.npy"",\r\n 350\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8954.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8182.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7965.npy"",\r\n 251\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1676.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3775.npy"",\r\n 946\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7755.npy"",\r\n 659\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8237.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6619.npy"",\r\n 970\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3100.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7617.npy"",\r\n 167\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2099.npy"",\r\n 279\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_987.npy"",\r\n 991\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9001.npy"",\r\n 399\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8559.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_85.npy"",\r\n 942\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6989.npy"",\r\n 238\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1164.npy"",\r\n 188\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5040.npy"",\r\n 312\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5462.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7501.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6766.npy"",\r\n 250\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8157.npy"",\r\n 179\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1805.npy"",\r\n 73\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8780.npy"",\r\n 369\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1770.npy"",\r\n 202\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4644.npy"",\r\n 352\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5107.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_405.npy"",\r\n 124\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2897.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1427.npy"",\r\n 334\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3978.npy"",\r\n 509\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4584.npy"",\r\n 780\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9492.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9236.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7049.npy"",\r\n 379\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_202.npy"",\r\n 328\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5092.npy"",\r\n 121\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1852.npy"",\r\n 117\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6261.npy"",\r\n 156\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2326.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6747.npy"",\r\n 577\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8404.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6524.npy"",\r\n 198\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8687.npy"",\r\n 796\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4180.npy"",\r\n 336\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1290.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8758.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8456.npy"",\r\n 659\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9927.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4443.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7037.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7673.npy"",\r\n 709\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9856.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4721.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1814.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9266.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_516.npy"",\r\n 660\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1141.npy"",\r\n 386\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4660.npy"",\r\n 944\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7936.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1303.npy"",\r\n 214\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4096.npy"",\r\n 176\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3705.npy"",\r\n 885\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2299.npy"",\r\n 718\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3076.npy"",\r\n 348\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3929.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_384.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2862.npy"",\r\n 757\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3097.npy"",\r\n 808\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9805.npy"",\r\n 389\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2274.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4807.npy"",\r\n 389\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6362.npy"",\r\n 775\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2927.npy"",\r\n 840\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8531.npy"",\r\n 639\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3257.npy"",\r\n 274\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6852.npy"",\r\n 312\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1875.npy"",\r\n 159\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_582.npy"",\r\n 275\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9181.npy"",\r\n 655\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1978.npy"",\r\n 917\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6765.npy"",\r\n 768\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4181.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5764.npy"",\r\n 191\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1038.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_180.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5540.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7800.npy"",\r\n 195\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2849.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8129.npy"",\r\n 987\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3610.npy"",\r\n 734\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7180.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2489.npy"",\r\n 557\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8713.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3284.npy"",\r\n 423\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_290.npy"",\r\n 368\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2535.npy"",\r\n 191\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_319.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7346.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5672.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5493.npy"",\r\n 149\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7298.npy"",\r\n 537\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1600.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2909.npy"",\r\n 171\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5427.npy"",\r\n 72\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1131.npy"",\r\n 62\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8297.npy"",\r\n 121\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2300.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2526.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4188.npy"",\r\n 275\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2011.npy"",\r\n 555\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1084.npy"",\r\n 810\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5731.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8906.npy"",\r\n 405\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9707.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8605.npy"",\r\n 335\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6997.npy"",\r\n 76\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1615.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7023.npy"",\r\n 977\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1347.npy"",\r\n 182\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6161.npy"",\r\n 766\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2165.npy"",\r\n 162\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5343.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5746.npy"",\r\n 763\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1566.npy"",\r\n 282\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7481.npy"",\r\n 911\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2735.npy"",\r\n 462\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1564.npy"",\r\n 464\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4619.npy"",\r\n 956\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2338.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3837.npy"",\r\n 70\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9796.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8326.npy"",\r\n 324\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6406.npy"",\r\n 774\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1767.npy"",\r\n 106\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9769.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_390.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7653.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4034.npy"",\r\n 896\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1160.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2781.npy"",\r\n 493\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8153.npy"",\r\n 163\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6631.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1292.npy"",\r\n 189\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4884.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_160.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2109.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2636.npy"",\r\n 400\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1820.npy"",\r\n 274\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1939.npy"",\r\n 59\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_326.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8843.npy"",\r\n 265\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4580.npy"",\r\n 198\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3909.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7045.npy"",\r\n 133\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1299.npy"",\r\n 531\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9822.npy"",\r\n 115\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_988.npy"",\r\n 88\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3811.npy"",\r\n 135\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4882.npy"",\r\n 855\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5683.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4934.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5946.npy"",\r\n 218\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4932.npy"",\r\n 228\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4199.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7035.npy"",\r\n 453\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2967.npy"",\r\n 409\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7033.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5802.npy"",\r\n 470\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6892.npy"",\r\n 475\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4327.npy"",\r\n 324\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2382.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_558.npy"",\r\n 567\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1673.npy"",\r\n 398\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_828.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1294.npy"",\r\n 573\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2437.npy"",\r\n 517\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_312.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8618.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1817.npy"",\r\n 211\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2515.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6992.npy"",\r\n 424\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_518.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8329.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2357.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2059.npy"",\r\n 228\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6330.npy"",\r\n 672\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4511.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4657.npy"",\r\n 874\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6496.npy"",\r\n 743\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7386.npy"",\r\n 353\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6308.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1256.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3436.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3799.npy"",\r\n 139\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5348.npy"",\r\n 697\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2913.npy"",\r\n 196\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5858.npy"",\r\n 606\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3855.npy"",\r\n 650\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9878.npy"",\r\n 135\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2314.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5670.npy"",\r\n 693\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6790.npy"",\r\n 113\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1210.npy"",\r\n 254\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4444.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3769.npy"",\r\n 986\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3280.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9993.npy"",\r\n 291\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9911.npy"",\r\n 100\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2238.npy"",\r\n 452\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4707.npy"",\r\n 579\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3434.npy"",\r\n 623\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3790.npy"",\r\n 530\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4659.npy"",\r\n 581\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3130.npy"",\r\n 863\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_918.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8723.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2093.npy"",\r\n 742\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8841.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_151.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1272.npy"",\r\n 299\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6095.npy"",\r\n 406\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3857.npy"",\r\n 329\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1507.npy"",\r\n 531\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1611.npy"",\r\n 740\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4754.npy"",\r\n 150\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_39.npy"",\r\n 931\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2703.npy"",\r\n 186\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6696.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6671.npy"",\r\n 277\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8518.npy"",\r\n 506\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9480.npy"",\r\n 463\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2731.npy"",\r\n 771\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4440.npy"",\r\n 591\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3542.npy"",\r\n 98\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2671.npy"",\r\n 501\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1629.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4555.npy"",\r\n 630\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5669.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7278.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2002.npy"",\r\n 146\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2402.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5606.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6710.npy"",\r\n 957\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_48.npy"",\r\n 171\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_215.npy"",\r\n 600\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1929.npy"",\r\n 248\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_171.npy"",\r\n 318\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_772.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7909.npy"",\r\n 570\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9614.npy"",\r\n 771\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9687.npy"",\r\n 296\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5767.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8448.npy"",\r\n 948\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6693.npy"",\r\n 333\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6733.npy"",\r\n 123\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6489.npy"",\r\n 375\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1674.npy"",\r\n 205\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3128.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3153.npy"",\r\n 596\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_441.npy"",\r\n 437\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_633.npy"",\r\n 153\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7089.npy"",\r\n 468\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4131.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8026.npy"",\r\n 199\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9008.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2483.npy"",\r\n 104\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_439.npy"",\r\n 521\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1046.npy"",\r\n 650\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3161.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9240.npy"",\r\n 225\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_126.npy"",\r\n 362\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_388.npy"",\r\n 469\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1681.npy"",\r\n 513\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2769.npy"",\r\n 83\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9683.npy"",\r\n 475\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8527.npy"",\r\n 475\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4991.npy"",\r\n 503\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_190.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_957.npy"",\r\n 274\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3521.npy"",\r\n 717\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6898.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3353.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8136.npy"",\r\n 291\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8159.npy"",\r\n 296\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1603.npy"",\r\n 835\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2110.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6024.npy"",\r\n 323\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_792.npy"",\r\n 534\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9149.npy"",\r\n 313\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2982.npy"",\r\n 430\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2695.npy"",\r\n 443\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2871.npy"",\r\n 791\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2451.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1225.npy"",\r\n 907\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3578.npy"",\r\n 223\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_167.npy"",\r\n 179\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4912.npy"",\r\n 228\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4872.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8314.npy"",\r\n 353\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1291.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9359.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6764.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9593.npy"",\r\n 131\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4798.npy"",\r\n 379\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1861.npy"",\r\n 370\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4115.npy"",\r\n 740\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1646.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4073.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1431.npy"",\r\n 572\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8830.npy"",\r\n 489\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3677.npy"",\r\n 184\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7490.npy"",\r\n 477\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6625.npy"",\r\n 462\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3911.npy"",\r\n 194\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7688.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6911.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7392.npy"",\r\n 380\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5422.npy"",\r\n 74\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4425.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6968.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9268.npy"",\r\n 123\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_454.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7867.npy"",\r\n 758\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4655.npy"",\r\n 502\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2823.npy"",\r\n 235\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3061.npy"",\r\n 607\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9456.npy"",\r\n 465\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8532.npy"",\r\n 488\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3984.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7632.npy"",\r\n 999\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6476.npy"",\r\n 269\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_864.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_750.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1969.npy"",\r\n 379\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4094.npy"",\r\n 754\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4963.npy"",\r\n 250\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6869.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9870.npy"",\r\n 89\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3133.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1.npy"",\r\n 271\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3219.npy"",\r\n 376\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5661.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9734.npy"",\r\n 338\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4281.npy"",\r\n 873\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4168.npy"",\r\n 879\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_108.npy"",\r\n 346\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3063.npy"",\r\n 395\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6052.npy"",\r\n 99\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8166.npy"",\r\n 58\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5007.npy"",\r\n 778\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9048.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4833.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5603.npy"",\r\n 289\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7988.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1885.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5949.npy"",\r\n 952\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9610.npy"",\r\n 822\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9559.npy"",\r\n 115\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3718.npy"",\r\n 597\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1487.npy"",\r\n 334\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5210.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7308.npy"",\r\n 247\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5069.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9324.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6789.npy"",\r\n 284\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_434.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9658.npy"",\r\n 524\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6718.npy"",\r\n 489\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2464.npy"",\r\n 843\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1055.npy"",\r\n 72\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7300.npy"",\r\n 331\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7252.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9941.npy"",\r\n 442\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1220.npy"",\r\n 601\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3917.npy"",\r\n 289\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6502.npy"",\r\n 474\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2224.npy"",\r\n 591\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_504.npy"",\r\n 638\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4347.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9744.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4418.npy"",\r\n 213\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3806.npy"",\r\n 462\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6017.npy"",\r\n 574\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_270.npy"",\r\n 929\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_867.npy"",\r\n 74\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8432.npy"",\r\n 745\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7265.npy"",\r\n 207\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9139.npy"",\r\n 708\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_487.npy"",\r\n 195\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7892.npy"",\r\n 588\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7465.npy"",\r\n 138\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5215.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_289.npy"",\r\n 839\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2837.npy"",\r\n 491\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7987.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5884.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7322.npy"",\r\n 393\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4736.npy"",\r\n 739\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_787.npy"",\r\n 761\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7096.npy"",\r\n 526\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8296.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9409.npy"",\r\n 651\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8857.npy"",\r\n 800\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6519.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1936.npy"",\r\n 577\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9938.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5609.npy"",\r\n 471\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1066.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5788.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7698.npy"",\r\n 241\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6709.npy"",\r\n 247\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1033.npy"",\r\n 143\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2486.npy"",\r\n 504\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2235.npy"",\r\n 245\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2309.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7772.npy"",\r\n 607\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1080.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3499.npy"",\r\n 159\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2748.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8023.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3757.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4860.npy"",\r\n 257\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5852.npy"",\r\n 740\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9233.npy"",\r\n 530\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1343.npy"",\r\n 659\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1259.npy"",\r\n 58\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5987.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2428.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4062.npy"",\r\n 864\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8059.npy"",\r\n 380\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7505.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6871.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8194.npy"",\r\n 715\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3334.npy"",\r\n 704\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8969.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1443.npy"",\r\n 911\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4266.npy"",\r\n 635\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8050.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5195.npy"",\r\n 111\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1921.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1888.npy"",\r\n 101\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4936.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7337.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2758.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3048.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7080.npy"",\r\n 685\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5308.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1828.npy"",\r\n 437\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_956.npy"",\r\n 226\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2894.npy"",\r\n 129\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6672.npy"",\r\n 286\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9883.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9949.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5065.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9019.npy"",\r\n 458\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8105.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6534.npy"",\r\n 383\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8997.npy"",\r\n 685\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3370.npy"",\r\n 917\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9654.npy"",\r\n 495\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6078.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3846.npy"",\r\n 159\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8912.npy"",\r\n 394\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_860.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2371.npy"",\r\n 169\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3127.npy"",\r\n 316\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3035.npy"",\r\n 774\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8868.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7929.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3662.npy"",\r\n 615\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1897.npy"",\r\n 196\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1139.npy"",\r\n 373\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1470.npy"",\r\n 418\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4190.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9472.npy"",\r\n 599\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5324.npy"",\r\n 198\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_83.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5859.npy"",\r\n 799\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_73.npy"",\r\n 896\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3248.npy"",\r\n 209\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3223.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6628.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3392.npy"",\r\n 220\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3501.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_920.npy"",\r\n 93\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9966.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9433.npy"",\r\n 263\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_330.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_246.npy"",\r\n 396\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8860.npy"",\r\n 384\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1073.npy"",\r\n 885\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4701.npy"",\r\n 205\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2275.npy"",\r\n 223\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3295.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_329.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6460.npy"",\r\n 239\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7349.npy"",\r\n 367\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4634.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3987.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2183.npy"",\r\n 144\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9427.npy"",\r\n 76\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2538.npy"",\r\n 403\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2958.npy"",\r\n 604\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7627.npy"",\r\n 250\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2193.npy"",\r\n 152\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5064.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6128.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5346.npy"",\r\n 582\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6515.npy"",\r\n 292\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1612.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_153.npy"",\r\n 489\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9122.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7029.npy"",\r\n 58\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7027.npy"",\r\n 430\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_500.npy"",\r\n 197\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1127.npy"",\r\n 345\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1703.npy"",\r\n 507\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7668.npy"",\r\n 95\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3066.npy"",\r\n 461\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2764.npy"",\r\n 683\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9455.npy"",\r\n 231\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9819.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8648.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4405.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8996.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_888.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4008.npy"",\r\n 595\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5856.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7682.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5084.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7415.npy"",\r\n 610\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4452.npy"",\r\n 521\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5779.npy"",\r\n 463\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_444.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8455.npy"",\r\n 532\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9106.npy"",\r\n 629\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2988.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9793.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9178.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3973.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1367.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2117.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3609.npy"",\r\n 151\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6854.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5435.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7088.npy"",\r\n 102\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2734.npy"",\r\n 442\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2555.npy"",\r\n 160\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4401.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9094.npy"",\r\n 102\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4639.npy"",\r\n 875\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5336.npy"",\r\n 539\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3326.npy"",\r\n 844\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6530.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4752.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6975.npy"",\r\n 443\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3169.npy"",\r\n 257\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8746.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4593.npy"",\r\n 708\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3194.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8516.npy"",\r\n 638\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1845.npy"",\r\n 940\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7138.npy"",\r\n 407\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8109.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1718.npy"",\r\n 893\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7850.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_644.npy"",\r\n 499\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4652.npy"",\r\n 627\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3039.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7773.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3281.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3321.npy"",\r\n 228\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3177.npy"",\r\n 287\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_128.npy"",\r\n 980\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3028.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2394.npy"",\r\n 85\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3040.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9394.npy"",\r\n 694\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2069.npy"",\r\n 851\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5855.npy"",\r\n 921\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2644.npy"",\r\n 241\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2723.npy"",\r\n 214\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8176.npy"",\r\n 581\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_65.npy"",\r\n 603\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7643.npy"",\r\n 147\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1799.npy"",\r\n 93\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9249.npy"",\r\n 737\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1544.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8235.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1221.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9896.npy"",\r\n 321\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9962.npy"",\r\n 477\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_157.npy"",\r\n 367\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5697.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7413.npy"",\r\n 407\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_387.npy"",\r\n 458\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3283.npy"",\r\n 859\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6444.npy"",\r\n 360\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_551.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9758.npy"",\r\n 836\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3444.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4630.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5217.npy"",\r\n 633\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6818.npy"",\r\n 239\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9937.npy"",\r\n 392\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9021.npy"",\r\n 101\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5465.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5060.npy"",\r\n 142\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9568.npy"",\r\n 379\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8263.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9708.npy"",\r\n 224\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_515.npy"",\r\n 148\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_775.npy"",\r\n 649\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_341.npy"",\r\n 313\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6339.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6634.npy"",\r\n 138\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2033.npy"",\r\n 841\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9237.npy"",\r\n 88\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6878.npy"",\r\n 975\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7902.npy"",\r\n 276\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5773.npy"",\r\n 81\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7064.npy"",\r\n 981\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3018.npy"",\r\n 818\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1637.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9083.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6248.npy"",\r\n 151\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5832.npy"",\r\n 139\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7435.npy"",\r\n 174\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3576.npy"",\r\n 442\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3912.npy"",\r\n 221\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2106.npy"",\r\n 689\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2675.npy"",\r\n 399\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1205.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6947.npy"",\r\n 102\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2372.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4271.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8411.npy"",\r\n 95\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4699.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6865.npy"",\r\n 582\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4101.npy"",\r\n 650\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9339.npy"",\r\n 957\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5703.npy"",\r\n 540\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2439.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4725.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1699.npy"",\r\n 152\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1858.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_231.npy"",\r\n 788\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9102.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3217.npy"",\r\n 711\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9023.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6850.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4482.npy"",\r\n 293\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7084.npy"",\r\n 196\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6310.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9274.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1592.npy"",\r\n 467\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_684.npy"",\r\n 447\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2128.npy"",\r\n 450\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3322.npy"",\r\n 615\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1745.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2818.npy"",\r\n 858\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7629.npy"",\r\n 69\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5988.npy"",\r\n 299\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3165.npy"",\r\n 273\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6887.npy"",\r\n 757\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2904.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1868.npy"",\r\n 526\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_362.npy"",\r\n 410\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5025.npy"",\r\n 218\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3834.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6147.npy"",\r\n 64\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8714.npy"",\r\n 507\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5950.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4565.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9020.npy"",\r\n 471\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2619.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6326.npy"",\r\n 436\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_76.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4921.npy"",\r\n 75\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3043.npy"",\r\n 299\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5969.npy"",\r\n 344\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3218.npy"",\r\n 934\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2502.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_90.npy"",\r\n 207\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4479.npy"",\r\n 591\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_394.npy"",\r\n 931\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3667.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3306.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9648.npy"",\r\n 164\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7533.npy"",\r\n 882\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2912.npy"",\r\n 987\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4553.npy"",\r\n 163\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3702.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9141.npy"",\r\n 59\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6340.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2648.npy"",\r\n 606\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3207.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4410.npy"",\r\n 441\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2218.npy"",\r\n 389\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3089.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2073.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5693.npy"",\r\n 838\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4277.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3087.npy"",\r\n 563\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9206.npy"",\r\n 168\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5180.npy"",\r\n 766\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9754.npy"",\r\n 133\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7216.npy"",\r\n 978\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4475.npy"",\r\n 152\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5391.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9199.npy"",\r\n 321\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5721.npy"",\r\n 282\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3065.npy"",\r\n 639\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5093.npy"",\r\n 142\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7019.npy"",\r\n 436\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1547.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8536.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_33.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6783.npy"",\r\n 302\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2327.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2669.npy"",\r\n 85\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4121.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1508.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4864.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9585.npy"",\r\n 129\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7707.npy"",\r\n 486\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9005.npy"",\r\n 967\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2702.npy"",\r\n 80\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3746.npy"",\r\n 461\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3224.npy"",\r\n 458\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5893.npy"",\r\n 376\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7073.npy"",\r\n 418\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4575.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3724.npy"",\r\n 853\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3014.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4378.npy"",\r\n 478\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9493.npy"",\r\n 304\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6943.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3812.npy"",\r\n 225\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4154.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7917.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7853.npy"",\r\n 576\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_271.npy"",\r\n 729\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9063.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8162.npy"",\r\n 195\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_742.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3323.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6193.npy"",\r\n 439\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_458.npy"",\r\n 624\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4941.npy"",\r\n 464\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5207.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6409.npy"",\r\n 317\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3657.npy"",\r\n 151\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4845.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7302.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1255.npy"",\r\n 775\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_266.npy"",\r\n 557\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9371.npy"",\r\n 743\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2512.npy"",\r\n 340\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5718.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3551.npy"",\r\n 950\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9714.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_708.npy"",\r\n 487\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2724.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_931.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9000.npy"",\r\n 541\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5164.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8853.npy"",\r\n 122\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3468.npy"",\r\n 457\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8591.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1757.npy"",\r\n 349\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9825.npy"",\r\n 375\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2600.npy"",\r\n 106\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_306.npy"",\r\n 70\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3821.npy"",\r\n 306\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5203.npy"",\r\n 381\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5425.npy"",\r\n 444\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2352.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2.npy"",\r\n 236\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9899.npy"",\r\n 296\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4075.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4251.npy"",\r\n 187\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3809.npy"",\r\n 160\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7835.npy"",\r\n 580\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2932.npy"",\r\n 140\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7379.npy"",\r\n 444\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7323.npy"",\r\n 861\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2593.npy"",\r\n 457\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8265.npy"",\r\n 648\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4854.npy"",\r\n 630\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7281.npy"",\r\n 344\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3346.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9329.npy"",\r\n 455\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7626.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3830.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8377.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8745.npy"",\r\n 903\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6029.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_753.npy"",\r\n 88\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_739.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3118.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6537.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6713.npy"",\r\n 566\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1330.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5585.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8958.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4856.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6035.npy"",\r\n 229\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2031.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7046.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5220.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_591.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9950.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1528.npy"",\r\n 237\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3725.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6499.npy"",\r\n 177\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_537.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1774.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3619.npy"",\r\n 973\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1616.npy"",\r\n 527\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8300.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1004.npy"",\r\n 711\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3547.npy"",\r\n 550\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1829.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9803.npy"",\r\n 65\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7041.npy"",\r\n 618\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5096.npy"",\r\n 177\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8479.npy"",\r\n 203\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6265.npy"",\r\n 350\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8772.npy"",\r\n 203\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1802.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1731.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8809.npy"",\r\n 461\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2232.npy"",\r\n 987\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6107.npy"",\r\n 374\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5374.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_347.npy"",\r\n 751\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7849.npy"",\r\n 376\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3001.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5309.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6694.npy"",\r\n 598\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7077.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_332.npy"",\r\n 92\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_471.npy"",\r\n 323\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3591.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4494.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7.npy"",\r\n 426\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8014.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5877.npy"",\r\n 322\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3709.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4195.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8680.npy"",\r\n 122\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_577.npy"",\r\n 170\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4355.npy"",\r\n 387\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3327.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6919.npy"",\r\n 312\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8943.npy"",\r\n 261\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4863.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1204.npy"",\r\n 210\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8173.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4473.npy"",\r\n 330\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_55.npy"",\r\n 484\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_744.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9605.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4614.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5642.npy"",\r\n 383\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4226.npy"",\r\n 121\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5205.npy"",\r\n 433\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3485.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3483.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1780.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6400.npy"",\r\n 210\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6657.npy"",\r\n 441\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6538.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5590.npy"",\r\n 353\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9043.npy"",\r\n 881\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_571.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5101.npy"",\r\n 141\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5954.npy"",\r\n 308\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9346.npy"",\r\n 393\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1190.npy"",\r\n 527\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_822.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6367.npy"",\r\n 166\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_870.npy"",\r\n 788\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7891.npy"",\r\n 123\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8859.npy"",\r\n 236\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3342.npy"",\r\n 76\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2398.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1049.npy"",\r\n 696\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6684.npy"",\r\n 210\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_393.npy"",\r\n 698\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3886.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7621.npy"",\r\n 169\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8974.npy"",\r\n 555\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2722.npy"",\r\n 237\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4274.npy"",\r\n 71\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5347.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5581.npy"",\r\n 610\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7526.npy"",\r\n 906\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9065.npy"",\r\n 650\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8505.npy"",\r\n 960\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1181.npy"",\r\n 536\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5112.npy"",\r\n 453\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3792.npy"",\r\n 444\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9556.npy"",\r\n 275\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4147.npy"",\r\n 349\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6344.npy"",\r\n 642\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2027.npy"",\r\n 583\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7592.npy"",\r\n 864\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8904.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1942.npy"",\r\n 730\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6568.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7612.npy"",\r\n 514\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1195.npy"",\r\n 838\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7532.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8701.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2330.npy"",\r\n 920\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4058.npy"",\r\n 808\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6542.npy"",\r\n 122\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5698.npy"",\r\n 521\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7242.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4740.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4910.npy"",\r\n 787\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3676.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9583.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5575.npy"",\r\n 731\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7011.npy"",\r\n 253\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8690.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1707.npy"",\r\n 290\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8703.npy"",\r\n 400\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8054.npy"",\r\n 105\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3545.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3901.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1806.npy"",\r\n 483\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3032.npy"",\r\n 156\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_832.npy"",\r\n 512\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8086.npy"",\r\n 480\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2062.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7483.npy"",\r\n 121\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3982.npy"",\r\n 117\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8148.npy"",\r\n 518\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4489.npy"",\r\n 285\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_240.npy"",\r\n 244\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4363.npy"",\r\n 212\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1298.npy"",\r\n 164\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_53.npy"",\r\n 189\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8655.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8042.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7092.npy"",\r\n 229\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4761.npy"",\r\n 296\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5115.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6996.npy"",\r\n 919\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5960.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1887.npy"",\r\n 982\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9211.npy"",\r\n 154\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4433.npy"",\r\n 584\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4610.npy"",\r\n 202\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9845.npy"",\r\n 849\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4766.npy"",\r\n 742\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6390.npy"",\r\n 836\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3289.npy"",\r\n 826\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4222.npy"",\r\n 540\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9900.npy"",\r\n 636\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5445.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8179.npy"",\r\n 536\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5196.npy"",\r\n 760\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9084.npy"",\r\n 325\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4577.npy"",\r\n 267\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6457.npy"",\r\n 234\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2318.npy"",\r\n 232\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3932.npy"",\r\n 965\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9837.npy"",\r\n 778\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2316.npy"",\r\n 370\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8348.npy"",\r\n 138\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1169.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3999.npy"",\r\n 308\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2365.npy"",\r\n 725\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1194.npy"",\r\n 299\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2010.npy"",\r\n 803\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5889.npy"",\r\n 692\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8678.npy"",\r\n 629\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8961.npy"",\r\n 400\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9436.npy"",\r\n 108\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2900.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4454.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1122.npy"",\r\n 97\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9689.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_367.npy"",\r\n 62\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7508.npy"",\r\n 155\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5289.npy"",\r\n 158\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2086.npy"",\r\n 458\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6393.npy"",\r\n 118\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1302.npy"",\r\n 231\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_627.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4532.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6387.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3401.npy"",\r\n 100\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_824.npy"",\r\n 167\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1662.npy"",\r\n 946\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_925.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_323.npy"",\r\n 204\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9525.npy"",\r\n 973\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7522.npy"",\r\n 504\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6458.npy"",\r\n 232\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1827.npy"",\r\n 517\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1078.npy"",\r\n 877\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4777.npy"",\r\n 308\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7217.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_78.npy"",\r\n 634\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8555.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_797.npy"",\r\n 233\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6269.npy"",\r\n 185\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8743.npy"",\r\n 436\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_513.npy"",\r\n 340\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9541.npy"",\r\n 582\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3585.npy"",\r\n 445\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1003.npy"",\r\n 496\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4206.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7060.npy"",\r\n 435\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8188.npy"",\r\n 574\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4576.npy"",\r\n 715\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_561.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7616.npy"",\r\n 459\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6638.npy"",\r\n 146\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8699.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8839.npy"",\r\n 245\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1463.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3562.npy"",\r\n 860\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7068.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4666.npy"",\r\n 793\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5835.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8683.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5399.npy"",\r\n 326\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2343.npy"",\r\n 98\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5155.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3698.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3297.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8832.npy"",\r\n 377\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1874.npy"",\r\n 54\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4275.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4769.npy"",\r\n 985\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4432.npy"",\r\n 205\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9100.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_913.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8652.npy"",\r\n 845\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_209.npy"",\r\n 326\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4743.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6469.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1145.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_519.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3925.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5981.npy"",\r\n 252\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3688.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6920.npy"",\r\n 495\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2797.npy"",\r\n 141\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4406.npy"",\r\n 295\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2759.npy"",\r\n 374\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_303.npy"",\r\n 787\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1849.npy"",\r\n 182\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2455.npy"",\r\n 371\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8388.npy"",\r\n 498\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9537.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6346.npy"",\r\n 236\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8046.npy"",\r\n 729\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2013.npy"",\r\n 446\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7519.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8037.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_866.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3312.npy"",\r\n 236\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6141.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9986.npy"",\r\n 388\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8062.npy"",\r\n 164\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1534.npy"",\r\n 144\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6227.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5380.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5639.npy"",\r\n 513\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9848.npy"",\r\n 392\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2485.npy"",\r\n 80\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_747.npy"",\r\n 632\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2610.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4671.npy"",\r\n 216\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_175.npy"",\r\n 504\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8858.npy"",\r\n 690\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2361.npy"",\r\n 950\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3711.npy"",\r\n 688\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7334.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1683.npy"",\r\n 110\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7486.npy"",\r\n 840\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3287.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4897.npy"",\r\n 80\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8425.npy"",\r\n 904\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6250.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_652.npy"",\r\n 216\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8423.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7846.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9154.npy"",\r\n 260\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_292.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_977.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6492.npy"",\r\n 586\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8671.npy"",\r\n 120\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2035.npy"",\r\n 142\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8612.npy"",\r\n 361\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6381.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_201.npy"",\r\n 205\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_98.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5285.npy"",\r\n 129\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3772.npy"",\r\n 383\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4212.npy"",\r\n 626\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7464.npy"",\r\n 245\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3382.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4462.npy"",\r\n 70\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6755.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1556.npy"",\r\n 472\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3084.npy"",\r\n 766\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9874.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3708.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5266.npy"",\r\n 66\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6435.npy"",\r\n 982\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6953.npy"",\r\n 181\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4902.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5332.npy"",\r\n 120\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3099.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4459.npy"",\r\n 362\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7966.npy"",\r\n 494\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5294.npy"",\r\n 781\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2247.npy"",\r\n 419\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6814.npy"",\r\n 687\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9759.npy"",\r\n 690\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8588.npy"",\r\n 490\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5557.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2012.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5974.npy"",\r\n 241\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6817.npy"",\r\n 386\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2017.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9997.npy"",\r\n 432\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4202.npy"",\r\n 466\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2652.npy"",\r\n 575\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2105.npy"",\r\n 609\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1688.npy"",\r\n 238\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8522.npy"",\r\n 143\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2856.npy"",\r\n 157\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_692.npy"",\r\n 498\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2140.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4472.npy"",\r\n 954\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_42.npy"",\r\n 857\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5403.npy"",\r\n 380\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9357.npy"",\r\n 433\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5636.npy"",\r\n 281\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9538.npy"",\r\n 538\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2118.npy"",\r\n 117\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4924.npy"",\r\n 162\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2627.npy"",\r\n 259\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4662.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5831.npy"",\r\n 413\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3815.npy"",\r\n 127\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_720.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_432.npy"",\r\n 440\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_943.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1870.npy"",\r\n 864\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7474.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5846.npy"",\r\n 153\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7514.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9153.npy"",\r\n 395\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_14.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6732.npy"",\r\n 213\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2609.npy"",\r\n 107\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8654.npy"",\r\n 882\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6690.npy"",\r\n 167\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8152.npy"",\r\n 1000\r\n ],\r\n [\r\n",,terminal_output +3157,5003657,"TERMINAL",0,0," ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8450.npy"",\r\n 173\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5456.npy"",\r\n 317\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_382.npy"",\r\n 191\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6201.npy"",\r\n 135\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1682.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7249.npy"",\r\n 255\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6716.npy"",\r\n 115\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7247.npy"",\r\n 430\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2755.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1324.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_844.npy"",\r\n 108\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9127.npy"",\r\n 568\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9362.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_915.npy"",\r\n 464\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6295.npy"",\r\n 166\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1035.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8569.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6763.npy"",\r\n 324\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_635.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4881.npy"",\r\n 861\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7746.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7812.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8915.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8030.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6251.npy"",\r\n 945\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8940.npy"",\r\n 658\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6304.npy"",\r\n 516\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1252.npy"",\r\n 218\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2155.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5777.npy"",\r\n 283\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6778.npy"",\r\n 460\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4307.npy"",\r\n 448\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9017.npy"",\r\n 105\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5854.npy"",\r\n 554\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3851.npy"",\r\n 253\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_287.npy"",\r\n 58\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2290.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8562.npy"",\r\n 505\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2075.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3859.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1468.npy"",\r\n 887\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4420.npy"",\r\n 726\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7171.npy"",\r\n 326\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3239.npy"",\r\n 456\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9012.npy"",\r\n 806\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3826.npy"",\r\n 322\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_227.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_654.npy"",\r\n 238\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8824.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8564.npy"",\r\n 702\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9733.npy"",\r\n 73\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1445.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9086.npy"",\r\n 102\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6038.npy"",\r\n 636\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2614.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_935.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3357.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_415.npy"",\r\n 238\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3873.npy"",\r\n 472\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4214.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4570.npy"",\r\n 87\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_965.npy"",\r\n 741\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3853.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7063.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2699.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8923.npy"",\r\n 307\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3140.npy"",\r\n 328\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7591.npy"",\r\n 131\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7980.npy"",\r\n 370\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2528.npy"",\r\n 464\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7326.npy"",\r\n 234\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7908.npy"",\r\n 81\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2783.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2603.npy"",\r\n 231\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_284.npy"",\r\n 820\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_553.npy"",\r\n 417\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_410.npy"",\r\n 207\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7962.npy"",\r\n 505\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9755.npy"",\r\n 681\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7274.npy"",\r\n 736\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4849.npy"",\r\n 98\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_768.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5579.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2766.npy"",\r\n 231\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1925.npy"",\r\n 208\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2622.npy"",\r\n 786\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4785.npy"",\r\n 458\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4371.npy"",\r\n 728\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8207.npy"",\r\n 723\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8230.npy"",\r\n 116\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1514.npy"",\r\n 499\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5763.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4117.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8444.npy"",\r\n 398\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2362.npy"",\r\n 127\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1363.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4246.npy"",\r\n 620\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5036.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2625.npy"",\r\n 893\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_954.npy"",\r\n 637\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3156.npy"",\r\n 337\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7964.npy"",\r\n 385\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2623.npy"",\r\n 604\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4200.npy"",\r\n 336\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3872.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8731.npy"",\r\n 406\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7114.npy"",\r\n 340\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2286.npy"",\r\n 557\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2195.npy"",\r\n 476\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6493.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7760.npy"",\r\n 886\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4184.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2606.npy"",\r\n 870\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5962.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6020.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3072.npy"",\r\n 117\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_981.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9140.npy"",\r\n 767\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2952.npy"",\r\n 68\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1818.npy"",\r\n 149\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7069.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_542.npy"",\r\n 180\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3397.npy"",\r\n 573\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4894.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9800.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6550.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3055.npy"",\r\n 54\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8311.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3992.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7737.npy"",\r\n 397\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5479.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2917.npy"",\r\n 51\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1573.npy"",\r\n 983\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5787.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4737.npy"",\r\n 125\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1387.npy"",\r\n 744\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6525.npy"",\r\n 719\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_67.npy"",\r\n 289\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2742.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4704.npy"",\r\n 821\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7231.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9774.npy"",\r\n 727\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3117.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9526.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9452.npy"",\r\n 374\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4626.npy"",\r\n 992\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2556.npy"",\r\n 201\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3843.npy"",\r\n 722\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1793.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1218.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2859.npy"",\r\n 429\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7660.npy"",\r\n 448\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5907.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1965.npy"",\r\n 676\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_149.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1708.npy"",\r\n 591\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2919.npy"",\r\n 199\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3849.npy"",\r\n 435\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_425.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7189.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1946.npy"",\r\n 166\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2477.npy"",\r\n 428\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4157.npy"",\r\n 290\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8863.npy"",\r\n 175\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1593.npy"",\r\n 662\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_886.npy"",\r\n 828\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_597.npy"",\r\n 152\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6612.npy"",\r\n 596\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7918.npy"",\r\n 317\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2334.npy"",\r\n 294\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4066.npy"",\r\n 640\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2835.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2762.npy"",\r\n 281\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4085.npy"",\r\n 317\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_655.npy"",\r\n 545\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6429.npy"",\r\n 243\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8228.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3263.npy"",\r\n 558\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5817.npy"",\r\n 101\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8426.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_369.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6427.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3597.npy"",\r\n 338\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8089.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1260.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4267.npy"",\r\n 798\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5461.npy"",\r\n 380\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7657.npy"",\r\n 301\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3788.npy"",\r\n 590\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6785.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9641.npy"",\r\n 145\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3786.npy"",\r\n 281\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9104.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6703.npy"",\r\n 301\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1268.npy"",\r\n 497\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2229.npy"",\r\n 682\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_206.npy"",\r\n 427\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2460.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8744.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9350.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8869.npy"",\r\n 534\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9542.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_409.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3722.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1051.npy"",\r\n 289\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8602.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9974.npy"",\r\n 108\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7952.npy"",\r\n 399\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2933.npy"",\r\n 865\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_468.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9152.npy"",\r\n 301\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2712.npy"",\r\n 458\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6.npy"",\r\n 514\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5833.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3804.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9575.npy"",\r\n 479\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3285.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8266.npy"",\r\n 222\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_955.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1369.npy"",\r\n 96\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9225.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9821.npy"",\r\n 655\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7569.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9710.npy"",\r\n 559\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9135.npy"",\r\n 279\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_400.npy"",\r\n 528\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2563.npy"",\r\n 391\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8627.npy"",\r\n 221\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6236.npy"",\r\n 461\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7065.npy"",\r\n 528\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9676.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6681.npy"",\r\n 520\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8893.npy"",\r\n 303\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5211.npy"",\r\n 503\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8338.npy"",\r\n 141\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4021.npy"",\r\n 197\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8239.npy"",\r\n 424\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5276.npy"",\r\n 182\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3969.npy"",\r\n 812\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_93.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8286.npy"",\r\n 803\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1250.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8098.npy"",\r\n 158\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5379.npy"",\r\n 817\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8727.npy"",\r\n 99\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6309.npy"",\r\n 532\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6170.npy"",\r\n 776\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7949.npy"",\r\n 371\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_975.npy"",\r\n 188\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8451.npy"",\r\n 217\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1529.npy"",\r\n 447\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5341.npy"",\r\n 221\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5398.npy"",\r\n 557\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3996.npy"",\r\n 474\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2310.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2081.npy"",\r\n 379\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1339.npy"",\r\n 119\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6808.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1024.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_258.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2639.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1022.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1869.npy"",\r\n 303\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_324.npy"",\r\n 484\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8649.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8400.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9598.npy"",\r\n 567\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4536.npy"",\r\n 922\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2482.npy"",\r\n 324\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1732.npy"",\r\n 507\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7221.npy"",\r\n 306\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5857.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9553.npy"",\r\n 613\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2160.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4571.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_477.npy"",\r\n 142\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7293.npy"",\r\n 575\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1247.npy"",\r\n 97\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_643.npy"",\r\n 795\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3656.npy"",\r\n 512\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7097.npy"",\r\n 855\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6416.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9177.npy"",\r\n 372\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4598.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4729.npy"",\r\n 155\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7697.npy"",\r\n 633\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6207.npy"",\r\n 542\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7822.npy"",\r\n 462\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_604.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7678.npy"",\r\n 111\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4895.npy"",\r\n 205\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4125.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7785.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2065.npy"",\r\n 654\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6158.npy"",\r\n 975\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9582.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_96.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1937.npy"",\r\n 630\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8422.npy"",\r\n 543\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3861.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_131.npy"",\r\n 924\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7197.npy"",\r\n 524\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2839.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5173.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_674.npy"",\r\n 353\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3607.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6914.npy"",\r\n 368\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2973.npy"",\r\n 229\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9192.npy"",\r\n 700\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4801.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2253.npy"",\r\n 605\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6775.npy"",\r\n 108\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4694.npy"",\r\n 77\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5238.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1489.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1685.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2472.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_921.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9650.npy"",\r\n 745\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4423.npy"",\r\n 169\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8777.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1554.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9971.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3347.npy"",\r\n 320\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9366.npy"",\r\n 502\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3027.npy"",\r\n 920\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5900.npy"",\r\n 55\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1095.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5405.npy"",\r\n 259\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8519.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4123.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5281.npy"",\r\n 324\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7283.npy"",\r\n 153\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5480.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1411.npy"",\r\n 308\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4680.npy"",\r\n 442\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5551.npy"",\r\n 203\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6533.npy"",\r\n 984\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9145.npy"",\r\n 974\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3320.npy"",\r\n 305\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1031.npy"",\r\n 409\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5272.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3396.npy"",\r\n 347\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1360.npy"",\r\n 163\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7219.npy"",\r\n 766\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_712.npy"",\r\n 192\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5184.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1717.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3291.npy"",\r\n 548\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3892.npy"",\r\n 581\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8467.npy"",\r\n 292\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9423.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3225.npy"",\r\n 868\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4728.npy"",\r\n 503\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6449.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5725.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1375.npy"",\r\n 373\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3509.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4578.npy"",\r\n 404\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6972.npy"",\r\n 153\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6877.npy"",\r\n 952\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5067.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1866.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1295.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2438.npy"",\r\n 916\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5772.npy"",\r\n 430\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4499.npy"",\r\n 502\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4868.npy"",\r\n 314\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1636.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5246.npy"",\r\n 787\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9595.npy"",\r\n 494\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5708.npy"",\r\n 415\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4264.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7580.npy"",\r\n 230\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2048.npy"",\r\n 626\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_429.npy"",\r\n 304\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1832.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4527.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7006.npy"",\r\n 841\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6372.npy"",\r\n 443\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9451.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9815.npy"",\r\n 589\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1903.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1082.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5146.npy"",\r\n 224\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2827.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8087.npy"",\r\n 763\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9520.npy"",\r\n 626\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9916.npy"",\r\n 575\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7411.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_991.npy"",\r\n 511\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_773.npy"",\r\n 470\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9174.npy"",\r\n 544\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7941.npy"",\r\n 633\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5549.npy"",\r\n 425\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1099.npy"",\r\n 448\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2068.npy"",\r\n 275\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2579.npy"",\r\n 229\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7937.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8735.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1311.npy"",\r\n 460\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4938.npy"",\r\n 348\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6282.npy"",\r\n 84\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6391.npy"",\r\n 320\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5234.npy"",\r\n 873\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_304.npy"",\r\n 360\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6971.npy"",\r\n 863\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7729.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8247.npy"",\r\n 337\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6219.npy"",\r\n 790\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7951.npy"",\r\n 645\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7809.npy"",\r\n 361\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5054.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4597.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_278.npy"",\r\n 319\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_44.npy"",\r\n 667\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_881.npy"",\r\n 920\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4994.npy"",\r\n 226\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2323.npy"",\r\n 596\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3232.npy"",\r\n 307\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_550.npy"",\r\n 957\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1091.npy"",\r\n 705\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8420.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3645.npy"",\r\n 320\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3643.npy"",\r\n 154\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3703.npy"",\r\n 966\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2133.npy"",\r\n 549\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4552.npy"",\r\n 104\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9437.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3810.npy"",\r\n 87\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3344.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9812.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5851.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3906.npy"",\r\n 457\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2159.npy"",\r\n 758\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3044.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7516.npy"",\r\n 362\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9239.npy"",\r\n 608\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6388.npy"",\r\n 703\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3668.npy"",\r\n 550\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9892.npy"",\r\n 97\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1106.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2993.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7215.npy"",\r\n 623\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6596.npy"",\r\n 430\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5544.npy"",\r\n 421\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7394.npy"",\r\n 408\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1476.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6683.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_226.npy"",\r\n 140\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5213.npy"",\r\n 452\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8083.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6890.npy"",\r\n 559\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1797.npy"",\r\n 771\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5489.npy"",\r\n 433\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_714.npy"",\r\n 374\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4574.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9564.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6961.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1357.npy"",\r\n 433\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9036.npy"",\r\n 859\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4925.npy"",\r\n 180\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3922.npy"",\r\n 890\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6751.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6479.npy"",\r\n 225\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8802.npy"",\r\n 161\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6156.npy"",\r\n 208\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2697.npy"",\r\n 290\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4260.npy"",\r\n 956\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_911.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1555.npy"",\r\n 158\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6244.npy"",\r\n 358\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5502.npy"",\r\n 951\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2554.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4325.npy"",\r\n 214\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5757.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3098.npy"",\r\n 291\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7016.npy"",\r\n 255\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1201.npy"",\r\n 755\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3107.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2230.npy"",\r\n 125\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3143.npy"",\r\n 279\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2191.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8486.npy"",\r\n 304\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9680.npy"",\r\n 495\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4375.npy"",\r\n 396\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8852.npy"",\r\n 598\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3742.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1249.npy"",\r\n 540\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4299.npy"",\r\n 123\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8748.npy"",\r\n 606\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_168.npy"",\r\n 479\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4700.npy"",\r\n 856\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_732.npy"",\r\n 471\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_916.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_414.npy"",\r\n 201\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5474.npy"",\r\n 517\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1315.npy"",\r\n 542\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7125.npy"",\r\n 361\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3763.npy"",\r\n 414\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6288.npy"",\r\n 483\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7625.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6286.npy"",\r\n 953\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6215.npy"",\r\n 529\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2273.npy"",\r\n 220\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1297.npy"",\r\n 53\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8020.npy"",\r\n 288\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_820.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2199.npy"",\r\n 935\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7254.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_208.npy"",\r\n 381\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1331.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3319.npy"",\r\n 67\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4796.npy"",\r\n 129\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7558.npy"",\r\n 286\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_877.npy"",\r\n 179\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8437.npy"",\r\n 103\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9973.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2833.npy"",\r\n 224\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7601.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_101.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7269.npy"",\r\n 799\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3640.npy"",\r\n 457\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5965.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1346.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5434.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1344.npy"",\r\n 288\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9168.npy"",\r\n 305\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2590.npy"",\r\n 218\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8262.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9004.npy"",\r\n 491\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1120.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8424.npy"",\r\n 267\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3680.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3302.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_846.npy"",\r\n 266\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7866.npy"",\r\n 893\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1775.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8803.npy"",\r\n 639\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3616.npy"",\r\n 292\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1102.npy"",\r\n 681\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3646.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7397.npy"",\r\n 676\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4842.npy"",\r\n 507\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9881.npy"",\r\n 161\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_757.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7224.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3503.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8929.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1595.npy"",\r\n 141\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4773.npy"",\r\n 222\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1537.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6566.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9660.npy"",\r\n 535\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9561.npy"",\r\n 386\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7062.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6488.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1085.npy"",\r\n 554\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5466.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8825.npy"",\r\n 209\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7325.npy"",\r\n 160\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3036.npy"",\r\n 302\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3010.npy"",\r\n 237\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4130.npy"",\r\n 288\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7399.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5231.npy"",\r\n 286\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1638.npy"",\r\n 87\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6523.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_830.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4284.npy"",\r\n 155\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1270.npy"",\r\n 495\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_665.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9958.npy"",\r\n 358\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_857.npy"",\r\n 93\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6708.npy"",\r\n 73\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4133.npy"",\r\n 707\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4083.npy"",\r\n 688\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5602.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5720.npy"",\r\n 65\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8806.npy"",\r\n 137\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9299.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8907.npy"",\r\n 316\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4173.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2942.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7181.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2256.npy"",\r\n 272\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1232.npy"",\r\n 680\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4436.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6922.npy"",\r\n 373\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7640.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2961.npy"",\r\n 156\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7405.npy"",\r\n 536\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9385.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3633.npy"",\r\n 557\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5684.npy"",\r\n 389\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7430.npy"",\r\n 92\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4366.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6999.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_404.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7375.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7121.npy"",\r\n 768\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6798.npy"",\r\n 283\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_659.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6037.npy"",\r\n 702\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8563.npy"",\r\n 154\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5980.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5148.npy"",\r\n 725\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3527.npy"",\r\n 724\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6259.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1848.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1227.npy"",\r\n 569\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5771.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3482.npy"",\r\n 368\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9498.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8307.npy"",\r\n 927\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_365.npy"",\r\n 469\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2417.npy"",\r\n 308\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1342.npy"",\r\n 345\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2548.npy"",\r\n 98\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7605.npy"",\r\n 116\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6278.npy"",\r\n 397\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5958.npy"",\r\n 114\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7784.npy"",\r\n 122\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1577.npy"",\r\n 333\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8407.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6641.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1524.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3345.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_668.npy"",\r\n 731\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_848.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1634.npy"",\r\n 872\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3934.npy"",\r\n 289\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1513.npy"",\r\n 147\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5229.npy"",\r\n 124\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4815.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_754.npy"",\r\n 91\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1000.npy"",\r\n 331\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6980.npy"",\r\n 586\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1262.npy"",\r\n 221\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2813.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6824.npy"",\r\n 635\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6616.npy"",\r\n 405\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1006.npy"",\r\n 813\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_38.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_924.npy"",\r\n 327\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5159.npy"",\r\n 667\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1735.npy"",\r\n 364\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9636.npy"",\r\n 537\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2858.npy"",\r\n 69\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7112.npy"",\r\n 239\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8783.npy"",\r\n 804\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6663.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_181.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1801.npy"",\r\n 704\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9869.npy"",\r\n 160\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8187.npy"",\r\n 676\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2009.npy"",\r\n 853\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7091.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6995.npy"",\r\n 578\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4503.npy"",\r\n 83\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4280.npy"",\r\n 558\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1149.npy"",\r\n 353\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6591.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9474.npy"",\r\n 220\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1809.npy"",\r\n 477\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8732.npy"",\r\n 171\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2834.npy"",\r\n 259\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9631.npy"",\r\n 194\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7555.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7456.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9319.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_81.npy"",\r\n 162\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3659.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2874.npy"",\r\n 178\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6507.npy"",\r\n 850\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3304.npy"",\r\n 430\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7734.npy"",\r\n 460\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4255.npy"",\r\n 647\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6944.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_86.npy"",\r\n 62\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_123.npy"",\r\n 561\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6494.npy"",\r\n 499\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8876.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5692.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1057.npy"",\r\n 788\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6990.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2317.npy"",\r\n 420\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9836.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3539.npy"",\r\n 916\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_993.npy"",\r\n 252\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1076.npy"",\r\n 196\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1776.npy"",\r\n 177\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3564.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2476.npy"",\r\n 498\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2687.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1267.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_680.npy"",\r\n",,terminal_output +3158,5003750,"TERMINAL",0,0," 315\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8660.npy"",\r\n 612\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6967.npy"",\r\n 383\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2245.npy"",\r\n 505\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_678.npy"",\r\n 447\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_383.npy"",\r\n 222\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2934.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6794.npy"",\r\n 142\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6306.npy"",\r\n 367\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1090.npy"",\r\n 983\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6059.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8205.npy"",\r\n 958\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_492.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2137.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9060.npy"",\r\n 491\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8427.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3500.npy"",\r\n 120\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8580.npy"",\r\n 294\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9238.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3889.npy"",\r\n 432\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4561.npy"",\r\n 148\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4904.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3094.npy"",\r\n 996\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8107.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8692.npy"",\r\n 429\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_354.npy"",\r\n 519\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8511.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3672.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4533.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2260.npy"",\r\n 760\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5298.npy"",\r\n 297\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3314.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5789.npy"",\r\n 965\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1191.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4550.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1587.npy"",\r\n 371\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1042.npy"",\r\n 86\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8056.npy"",\r\n 400\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3341.npy"",\r\n 346\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1630.npy"",\r\n 256\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6545.npy"",\r\n 470\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6217.npy"",\r\n 173\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8280.npy"",\r\n 809\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2055.npy"",\r\n 110\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4900.npy"",\r\n 219\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4097.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8965.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6592.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2342.npy"",\r\n 367\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4543.npy"",\r\n 519\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1333.npy"",\r\n 131\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2351.npy"",\r\n 654\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5299.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3760.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4431.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2908.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1504.npy"",\r\n 424\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1907.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9093.npy"",\r\n 57\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8084.npy"",\r\n 973\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_897.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4049.npy"",\r\n 260\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8144.npy"",\r\n 335\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1308.npy"",\r\n 178\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9356.npy"",\r\n 369\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9105.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5189.npy"",\r\n 779\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6239.npy"",\r\n 682\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8118.npy"",\r\n 991\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2331.npy"",\r\n 100\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7947.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2529.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6576.npy"",\r\n 239\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7330.npy"",\r\n 56\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7945.npy"",\r\n 716\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5834.npy"",\r\n 133\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6503.npy"",\r\n 353\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9576.npy"",\r\n 84\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9616.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6993.npy"",\r\n 221\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3311.npy"",\r\n 902\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2640.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7443.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1599.npy"",\r\n 108\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6769.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9544.npy"",\r\n 868\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1072.npy"",\r\n 281\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2294.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3960.npy"",\r\n 360\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5586.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8549.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8534.npy"",\r\n 662\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5452.npy"",\r\n 168\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9293.npy"",\r\n 360\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2298.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7373.npy"",\r\n 281\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5807.npy"",\r\n 101\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3842.npy"",\r\n 966\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7495.npy"",\r\n 219\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6962.npy"",\r\n 568\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_995.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8568.npy"",\r\n 377\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1285.npy"",\r\n 919\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2375.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9737.npy"",\r\n 369\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8670.npy"",\r\n 594\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6623.npy"",\r\n 56\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1096.npy"",\r\n 989\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3388.npy"",\r\n 959\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4330.npy"",\r\n 331\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8209.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3550.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_224.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5947.npy"",\r\n 214\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6108.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2082.npy"",\r\n 993\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9234.npy"",\r\n 191\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9637.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_308.npy"",\r\n 212\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5888.npy"",\r\n 240\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9011.npy"",\r\n 921\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7406.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4483.npy"",\r\n 218\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5685.npy"",\r\n 164\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3316.npy"",\r\n 362\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6908.npy"",\r\n 698\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9851.npy"",\r\n 70\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9078.npy"",\r\n 484\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7473.npy"",\r\n 193\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5652.npy"",\r\n 375\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7213.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_135.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4518.npy"",\r\n 294\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1063.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9918.npy"",\r\n 541\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5047.npy"",\r\n 489\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5957.npy"",\r\n 356\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8677.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8767.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5711.npy"",\r\n 850\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4702.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6411.npy"",\r\n 447\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_634.npy"",\r\n 930\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9197.npy"",\r\n 280\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6549.npy"",\r\n 702\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6945.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3003.npy"",\r\n 801\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_851.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4171.npy"",\r\n 900\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7921.npy"",\r\n 706\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6159.npy"",\r\n 244\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2113.npy"",\r\n 742\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4922.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4513.npy"",\r\n 320\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9484.npy"",\r\n 687\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9999.npy"",\r\n 829\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_547.npy"",\r\n 245\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4290.npy"",\r\n 369\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5599.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5457.npy"",\r\n 386\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5940.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_232.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5185.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5360.npy"",\r\n 166\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_756.npy"",\r\n 809\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1494.npy"",\r\n 489\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6016.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4365.npy"",\r\n 183\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2295.npy"",\r\n 86\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_871.npy"",\r\n 327\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_835.npy"",\r\n 383\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1423.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8114.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2926.npy"",\r\n 887\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6151.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6072.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9182.npy"",\r\n 452\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2447.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_710.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6473.npy"",\r\n 103\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5438.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4996.npy"",\r\n 205\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_179.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2216.npy"",\r\n 714\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7793.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3021.npy"",\r\n 515\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5481.npy"",\r\n 380\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_947.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1365.npy"",\r\n 100\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1876.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6021.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_254.npy"",\r\n 306\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8031.npy"",\r\n 197\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8967.npy"",\r\n 738\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8269.npy"",\r\n 301\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4768.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1867.npy"",\r\n 135\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8823.npy"",\r\n 302\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9085.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2514.npy"",\r\n 256\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3602.npy"",\r\n 428\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9726.npy"",\r\n 593\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_901.npy"",\r\n 445\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2057.npy"",\r\n 785\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4116.npy"",\r\n 187\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4862.npy"",\r\n 213\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3455.npy"",\r\n 229\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7319.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1092.npy"",\r\n 357\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8421.npy"",\r\n 75\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6169.npy"",\r\n 289\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3226.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8387.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3644.npy"",\r\n 329\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_580.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3611.npy"",\r\n 268\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4948.npy"",\r\n 850\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7419.npy"",\r\n 487\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1530.npy"",\r\n 941\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6759.npy"",\r\n 369\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9026.npy"",\r\n 503\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6688.npy"",\r\n 265\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9603.npy"",\r\n 537\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9592.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4455.npy"",\r\n 154\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8122.npy"",\r\n 383\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1997.npy"",\r\n 300\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3457.npy"",\r\n 459\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5290.npy"",\r\n 629\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2714.npy"",\r\n 556\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1700.npy"",\r\n 867\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9855.npy"",\r\n 835\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_948.npy"",\r\n 244\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6886.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4778.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1317.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5656.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3995.npy"",\r\n 323\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2749.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9434.npy"",\r\n 654\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8953.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4269.npy"",\r\n 244\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2038.npy"",\r\n 277\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5717.npy"",\r\n 116\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4990.npy"",\r\n 147\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2278.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_272.npy"",\r\n 254\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9108.npy"",\r\n 254\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7329.npy"",\r\n 178\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3798.npy"",\r\n 455\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5147.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1305.npy"",\r\n 103\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6276.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2021.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9478.npy"",\r\n 514\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4830.npy"",\r\n 433\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8736.npy"",\r\n 649\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3479.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3655.npy"",\r\n 764\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7931.npy"",\r\n 415\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_758.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1246.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4878.npy"",\r\n 467\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2911.npy"",\r\n 176\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9343.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5194.npy"",\r\n 309\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5837.npy"",\r\n 221\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2605.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1672.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4775.npy"",\r\n 206\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8947.npy"",\r\n 119\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9157.npy"",\r\n 165\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5287.npy"",\r\n 284\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1748.npy"",\r\n 497\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8156.npy"",\r\n 89\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7115.npy"",\r\n 168\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8111.npy"",\r\n 902\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9014.npy"",\r\n 317\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3720.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6636.npy"",\r\n 186\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_294.npy"",\r\n 52\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9260.npy"",\r\n 282\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1518.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4150.npy"",\r\n 76\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2249.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4446.npy"",\r\n 326\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5244.npy"",\r\n 625\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1466.npy"",\r\n 376\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9510.npy"",\r\n 740\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3663.npy"",\r\n 87\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6490.npy"",\r\n 892\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8546.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3997.npy"",\r\n 121\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5020.npy"",\r\n 311\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1987.npy"",\r\n 738\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1361.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_669.npy"",\r\n 339\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3913.npy"",\r\n 439\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6511.npy"",\r\n 717\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9532.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6465.npy"",\r\n 607\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5487.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1666.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7706.npy"",\r\n 148\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8978.npy"",\r\n 403\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7315.npy"",\r\n 299\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1606.npy"",\r\n 772\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1714.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9414.npy"",\r\n 101\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8992.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6620.npy"",\r\n 141\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3805.npy"",\r\n 364\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5617.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5301.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8372.npy"",\r\n 358\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8927.npy"",\r\n 307\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_611.npy"",\r\n 426\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3203.npy"",\r\n 682\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5873.npy"",\r\n 437\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4065.npy"",\r\n 783\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8635.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9131.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6487.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5017.npy"",\r\n 442\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4628.npy"",\r\n 505\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5730.npy"",\r\n 265\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7789.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1763.npy"",\r\n 184\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2231.npy"",\r\n 173\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9765.npy"",\r\n 484\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8776.npy"",\r\n 601\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3865.npy"",\r\n 632\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2494.npy"",\r\n 79\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3863.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4828.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8203.npy"",\r\n 368\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_727.npy"",\r\n 772\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5597.npy"",\r\n 547\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5796.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8335.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8949.npy"",\r\n 755\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_705.npy"",\r\n 438\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8184.npy"",\r\n 199\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4242.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2621.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4427.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1296.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7588.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9728.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3391.npy"",\r\n 258\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1446.npy"",\r\n 114\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8096.npy"",\r\n 243\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5404.npy"",\r\n 885\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8368.npy"",\r\n 621\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9853.npy"",\r\n 584\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_875.npy"",\r\n 268\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8528.npy"",\r\n 402\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5284.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5337.npy"",\r\n 451\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6010.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6934.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2852.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6863.npy"",\r\n 394\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1171.npy"",\r\n 291\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2941.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1808.npy"",\r\n 883\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6788.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_194.npy"",\r\n 556\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_281.npy"",\r\n 74\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1510.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2329.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5895.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8533.npy"",\r\n 869\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_615.npy"",\r\n 495\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1495.npy"",\r\n 188\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5966.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7289.npy"",\r\n 76\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3282.npy"",\r\n 655\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1364.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3593.npy"",\r\n 701\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5042.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8704.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7701.npy"",\r\n 245\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2928.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4300.npy"",\r\n 616\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8108.npy"",\r\n 412\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_437.npy"",\r\n 638\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3051.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_932.npy"",\r\n 302\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6987.npy"",\r\n 329\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2130.npy"",\r\n 258\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5267.npy"",\r\n 199\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6335.npy"",\r\n 106\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3466.npy"",\r\n 502\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6461.npy"",\r\n 462\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4338.npy"",\r\n 216\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7801.npy"",\r\n 298\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1126.npy"",\r\n 799\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6931.npy"",\r\n 312\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5532.npy"",\r\n 726\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2481.npy"",\r\n 342\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_567.npy"",\r\n 939\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4107.npy"",\r\n 819\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3450.npy"",\r\n 428\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3504.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4622.npy"",\r\n 632\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6528.npy"",\r\n 247\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9410.npy"",\r\n 436\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_111.npy"",\r\n 102\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1214.npy"",\r\n 805\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1386.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2311.npy"",\r\n 666\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6801.npy"",\r\n 321\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4537.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1086.npy"",\r\n 535\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5125.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7393.npy"",\r\n 696\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2864.npy"",\r\n 701\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8640.npy"",\r\n 64\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4944.npy"",\r\n 164\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4428.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2178.npy"",\r\n 706\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5126.npy"",\r\n 376\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7462.npy"",\r\n 634\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1580.npy"",\r\n 334\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9761.npy"",\r\n 936\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5813.npy"",\r\n 357\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4089.npy"",\r\n 991\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3476.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8225.npy"",\r\n 940\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9826.npy"",\r\n 304\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4818.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_467.npy"",\r\n 138\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5158.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8650.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2791.npy"",\r\n 407\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4853.npy"",\r\n 305\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2182.npy"",\r\n 201\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7182.npy"",\r\n 446\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9219.npy"",\r\n 395\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2257.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4192.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3981.npy"",\r\n 321\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_729.npy"",\r\n 833\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2114.npy"",\r\n 902\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7498.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6229.npy"",\r\n 360\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1442.npy"",\r\n 243\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_19.npy"",\r\n 574\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7085.npy"",\r\n 135\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3621.npy"",\r\n 377\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5088.npy"",\r\n 268\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1075.npy"",\r\n 657\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1060.npy"",\r\n 959\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3800.npy"",\r\n 322\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9062.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1928.npy"",\r\n 891\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5280.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4710.npy"",\r\n 601\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1798.npy"",\r\n 803\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1687.npy"",\r\n 890\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6930.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3819.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2244.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9215.npy"",\r\n 376\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_342.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4556.npy"",\r\n 114\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3348.npy"",\r\n 230\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7061.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6375.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1121.npy"",\r\n 300\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5031.npy"",\r\n 228\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2500.npy"",\r\n 518\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7675.npy"",\r\n 252\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7285.npy"",\r\n 906\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5558.npy"",\r\n 327\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7050.npy"",\r\n 311\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8282.npy"",\r\n 396\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_374.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6160.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5375.npy"",\r\n 304\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6305.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5850.npy"",\r\n 754\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3438.npy"",\r\n 322\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8249.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1052.npy"",\r\n 193\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2267.npy"",\r\n 824\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8793.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1525.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3134.npy"",\r\n 117\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2032.npy"",\r\n 395\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_778.npy"",\r\n 441\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4871.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4334.npy"",\r\n 386\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5503.npy"",\r\n 60\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_483.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8639.npy"",\r\n 385\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3926.npy"",\r\n 379\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1034.npy"",\r\n 358\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9570.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1542.npy"",\r\n 458\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1902.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_91.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8622.npy"",\r\n 788\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9833.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8017.npy"",\r\n 278\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5555.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3498.npy"",\r\n 253\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_462.npy"",\r\n 667\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9061.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2727.npy"",\r\n 659\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6051.npy"",\r\n 319\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9016.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2261.npy"",\r\n 204\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5853.npy"",\r\n 277\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9119.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7553.npy"",\r\n 422\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8722.npy"",\r\n 54\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1453.npy"",\r\n 350\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2733.npy"",\r\n 88\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3170.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3581.npy"",\r\n 239\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1667.npy"",\r\n 159\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9736.npy"",\r\n 534\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8022.npy"",\r\n 499\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_61.npy"",\r\n 144\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2090.npy"",\r\n 543\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7391.npy"",\r\n 798\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3652.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9173.npy"",\r\n 684\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4594.npy"",\r\n 107\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7942.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_903.npy"",\r\n 689\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6941.npy"",\r\n 261\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7090.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6203.npy"",\r\n 453\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_847.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7307.npy"",\r\n 403\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5704.npy"",\r\n 208\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8609.npy"",\r\n 906\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1551.npy"",\r\n 591\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2040.npy"",\r\n 213\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6891.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6437.npy"",\r\n 660\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8982.npy"",\r\n 548\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8398.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9811.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9571.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2037.npy"",\r\n 361\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8221.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3957.npy"",\r\n 716\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7898.npy"",\r\n 925\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2910.npy"",\r\n 380\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3204.npy"",\r\n 578\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3371.npy"",\r\n 883\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2466.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5255.npy"",\r\n 170\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6838.npy"",\r\n 169\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6913.npy"",\r\n 370\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7999.npy"",\r\n 242\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8413.npy"",\r\n 107\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6022.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4918.npy"",\r\n 891\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7258.npy"",\r\n 81\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9144.npy"",\r\n 173\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4999.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1924.npy"",\r\n 637\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8223.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3403.npy"",\r\n 337\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3377.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4164.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7992.npy"",\r\n 101\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2594.npy"",\r\n 337\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1231.npy"",\r\n 136\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7515.npy"",\r\n 812\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6949.npy"",\r\n 529\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3803.npy"",\r\n 669\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2233.npy"",\r\n 683\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2857.npy"",\r\n 319\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2425.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1879.npy"",\r\n 215\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8101.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9729.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3765.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1159.npy"",\r\n 178\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8435.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_869.npy"",\r\n 224\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9075.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5806.npy"",\r\n 306\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6517.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_349.npy"",\r\n 545\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1582.npy"",\r\n 587\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4429.npy"",\r\n 127\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_333.npy"",\r\n 603\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4140.npy"",\r\n 429\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6670.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2491.npy"",\r\n 559\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5416.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5199.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6413.npy"",\r\n 104\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9418.npy"",\r\n 951\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6637.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3374.npy"",\r\n 73\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6689.npy"",\r\n 338\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5219.npy"",\r\n 850\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1679.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9374.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8980.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2773.npy"",\r\n 631\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7661.npy"",\r\n 279\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1455.npy"",\r\n 253\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3791.npy"",\r\n 823\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3678.npy"",\r\n 765\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5305.npy"",\r\n 179\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7989.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4253.npy"",\r\n 126\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1258.npy"",\r\n 522\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2427.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1971.npy"",\r\n 243\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6844.npy"",\r\n 372\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3139.npy"",\r\n 194\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5948.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1242.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6106.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3780.npy"",\r\n 291\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6988.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_419.npy"",\r\n 588\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1505.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1704.npy"",\r\n 625\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9784.npy"",\r\n 320\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1457.npy"",\r\n 489\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_421.npy"",\r\n 515\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5046.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3069.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_584.npy"",\r\n 514\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5394.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8565.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2449.npy"",\r\n 775\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9372.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1408.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4923.npy"",\r\n 965\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7434.npy"",\r\n 307\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7894.npy"",\r\n 196\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_548.npy"",\r\n 939\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3920.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_967.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_566.npy"",\r\n 175\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5630.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2047.npy"",\r\n 431\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7725.npy"",\r\n 81\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8243.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2719.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9335.npy"",\r\n 269\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8155.npy"",\r\n 142\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8232.npy"",\r\n 467\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5050.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2215.npy"",\r\n 446\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9401.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6137.npy"",\r\n 257\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5695.npy"",\r\n 104\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2689.npy"",\r\n 675\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6412.npy"",\r\n 456\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6485.npy"",\r\n 841\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7044.npy"",\r\n 95\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2465.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5015.npy"",\r\n 399\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3149.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1286.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6055.npy"",\r\n 864\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4349.npy"",\r\n 443\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2728.npy"",\r\n 568\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4469.npy"",\r\n 611\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4249.npy"",\r\n 422\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_482.npy"",\r\n 201\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6760.npy"",\r\n 572\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1933.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3601.npy"",\r\n 203\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_296.npy"",\r\n 909\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7570.npy"",\r\n 899\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7609.npy"",\r\n 400\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_529.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7020.npy"",\r\n 125\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9235.npy"",\r\n 142\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4558.npy"",\r\n 418\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5345.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2935.npy"",\r\n 249\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8340.npy"",\r\n 84\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8406.npy"",\r\n 571\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9071.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4819.npy"",\r\n 374\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8220.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9375.npy"",\r\n 266\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2751.npy"",\r\n 446\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2553.npy"",\r\n 947\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4675.npy"",\r\n 934\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7233.npy"",\r\n 789\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5130.npy"",\r\n 467\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8905.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4794.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2164.npy"",\r\n 133\r\n",,terminal_output +3159,5003834,"TERMINAL",0,0," ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6430.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1254.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2423.npy"",\r\n 722\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8309.npy"",\r\n 930\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2488.npy"",\r\n 259\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9073.npy"",\r\n 271\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9503.npy"",\r\n 246\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4592.npy"",\r\n 606\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6835.npy"",\r\n 309\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8886.npy"",\r\n 271\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5350.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5996.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7012.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_619.npy"",\r\n 431\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_887.npy"",\r\n 621\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1584.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4951.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2558.npy"",\r\n 219\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6862.npy"",\r\n 281\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7560.npy"",\r\n 215\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4004.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2753.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1756.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2186.npy"",\r\n 773\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3355.npy"",\r\n 363\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7051.npy"",\r\n 178\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1747.npy"",\r\n 213\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7982.npy"",\r\n 655\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3618.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_794.npy"",\r\n 809\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6069.npy"",\r\n 186\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4098.npy"",\r\n 136\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7608.npy"",\r\n 219\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2922.npy"",\r\n 117\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4451.npy"",\r\n 270\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3462.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6508.npy"",\r\n 374\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4169.npy"",\r\n 328\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7794.npy"",\r\n 183\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6284.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4589.npy"",\r\n 155\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2717.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4714.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_639.npy"",\r\n 302\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4026.npy"",\r\n 668\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9685.npy"",\r\n 483\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_728.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3067.npy"",\r\n 732\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2559.npy"",\r\n 558\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8308.npy"",\r\n 651\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8728.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5484.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9398.npy"",\r\n 285\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2704.npy"",\r\n 784\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4400.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9252.npy"",\r\n 423\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2461.npy"",\r\n 722\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6122.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3990.npy"",\r\n 668\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4859.npy"",\r\n 395\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1340.npy"",\r\n 571\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2212.npy"",\r\n 646\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7442.npy"",\r\n 284\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9349.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8515.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_630.npy"",\r\n 267\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5431.npy"",\r\n 400\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6907.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7705.npy"",\r\n 374\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4028.npy"",\r\n 296\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5904.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6602.npy"",\r\n 100\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3481.npy"",\r\n 471\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3612.npy"",\r\n 898\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4554.npy"",\r\n 276\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2880.npy"",\r\n 72\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1918.npy"",\r\n 621\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6338.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6974.npy"",\r\n 420\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7671.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4175.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3649.npy"",\r\n 91\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_697.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9465.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5021.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5355.npy"",\r\n 577\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3961.npy"",\r\n 209\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8224.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6292.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1701.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_602.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5931.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2355.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3532.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4061.npy"",\r\n 775\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_26.npy"",\r\n 355\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6484.npy"",\r\n 282\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_508.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2412.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6129.npy"",\r\n 224\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5152.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_936.npy"",\r\n 207\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_310.npy"",\r\n 175\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8747.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1148.npy"",\r\n 59\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5094.npy"",\r\n 753\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5737.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3395.npy"",\r\n 500\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3950.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2788.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2187.npy"",\r\n 446\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5129.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2831.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6321.npy"",\r\n 121\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4331.npy"",\r\n 903\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6976.npy"",\r\n 347\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1853.npy"",\r\n 527\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3658.npy"",\r\n 233\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4600.npy"",\r\n 595\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1009.npy"",\r\n 120\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9954.npy"",\r\n 101\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4770.npy"",\r\n 882\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_286.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_130.npy"",\r\n 409\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8167.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4886.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9363.npy"",\r\n 186\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6546.npy"",\r\n 421\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_672.npy"",\r\n 290\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8149.npy"",\r\n 243\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2521.npy"",\r\n 563\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_889.npy"",\r\n 358\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1032.npy"",\r\n 573\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_733.npy"",\r\n 126\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1749.npy"",\r\n 376\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7537.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8112.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2441.npy"",\r\n 256\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1497.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3639.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1425.npy"",\r\n 325\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_282.npy"",\r\n 248\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6065.npy"",\r\n 115\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9392.npy"",\r\n 727\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3985.npy"",\r\n 289\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4353.npy"",\r\n 753\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9969.npy"",\r\n 67\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5621.npy"",\r\n 354\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9977.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4998.npy"",\r\n 909\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4890.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5271.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6826.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9389.npy"",\r\n 582\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1660.npy"",\r\n 482\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1561.npy"",\r\n 607\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6314.npy"",\r\n 249\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_170.npy"",\r\n 691\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8818.npy"",\r\n 140\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9460.npy"",\r\n 995\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5376.npy"",\r\n 536\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9794.npy"",\r\n 358\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7541.npy"",\r\n 944\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7797.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2308.npy"",\r\n 555\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8807.npy"",\r\n 713\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7922.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2138.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8606.npy"",\r\n 121\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4857.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4855.npy"",\r\n 388\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3332.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5795.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1626.npy"",\r\n 367\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7426.npy"",\r\n 312\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7312.npy"",\r\n 523\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6615.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8039.npy"",\r\n 330\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7641.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6190.npy"",\r\n 654\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6073.npy"",\r\n 745\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6163.npy"",\r\n 681\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6831.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6382.npy"",\r\n 317\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5677.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9058.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9378.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_52.npy"",\r\n 885\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9055.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4463.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9566.npy"",\r\n 803\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6334.npy"",\r\n 96\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3930.npy"",\r\n 529\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9132.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8010.npy"",\r\n 576\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3019.npy"",\r\n 519\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5632.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3808.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4458.npy"",\r\n 921\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3046.npy"",\r\n 690\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5735.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6820.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7212.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8589.npy"",\r\n 464\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1648.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2951.npy"",\r\n 245\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3114.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_650.npy"",\r\n 536\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8301.npy"",\r\n 188\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_62.npy"",\r\n 177\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3199.npy"",\r\n 426\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9355.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7360.npy"",\r\n 409\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5797.npy"",\r\n 202\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6687.npy"",\r\n 554\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7696.npy"",\r\n 542\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2737.npy"",\r\n 691\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7623.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9344.npy"",\r\n 126\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3318.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9817.npy"",\r\n 322\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4943.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9943.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1498.npy"",\r\n 229\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7946.npy"",\r\n 598\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4638.npy"",\r\n 867\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5079.npy"",\r\n 996\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4663.npy"",\r\n 751\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3745.npy"",\r\n 233\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_102.npy"",\r\n 686\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1678.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1433.npy"",\r\n 417\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6155.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6932.npy"",\r\n 424\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9336.npy"",\r\n 208\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9186.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8019.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1871.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9898.npy"",\r\n 722\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7343.npy"",\r\n 899\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9886.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5747.npy"",\r\n 169\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4341.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1493.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6867.npy"",\r\n 298\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5378.npy"",\r\n 318\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2683.npy"",\r\n 205\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5997.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5972.npy"",\r\n 176\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7086.npy"",\r\n 793\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2924.npy"",\r\n 716\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3838.npy"",\r\n 268\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_59.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1371.npy"",\r\n 331\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4161.npy"",\r\n 132\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1914.npy"",\r\n 549\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9315.npy"",\r\n 303\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7352.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8379.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4397.npy"",\r\n 271\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7350.npy"",\r\n 581\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2730.npy"",\r\n 493\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2112.npy"",\r\n 872\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8836.npy"",\r\n 449\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6355.npy"",\r\n 338\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5829.npy"",\r\n 432\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6812.npy"",\r\n 230\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8711.npy"",\r\n 764\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3315.npy"",\r\n 599\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2266.npy"",\r\n 107\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9117.npy"",\r\n 527\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2367.npy"",\r\n 371\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2597.npy"",\r\n 471\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1187.npy"",\r\n 801\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4227.npy"",\r\n 436\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3090.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8103.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6145.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5994.npy"",\r\n 143\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2686.npy"",\r\n 886\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1795.npy"",\r\n 320\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5157.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_601.npy"",\r\n 883\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1737.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6140.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4362.npy"",\r\n 497\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2718.npy"",\r\n 771\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_298.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6583.npy"",\r\n 127\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2968.npy"",\r\n 429\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_328.npy"",\r\n 116\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4412.npy"",\r\n 547\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8491.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5038.npy"",\r\n 151\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6230.npy"",\r\n 921\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6174.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7920.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2777.npy"",\r\n 865\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5032.npy"",\r\n 570\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3631.npy"",\r\n 324\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5354.npy"",\r\n 85\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_173.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9046.npy"",\r\n 178\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9756.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8585.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7829.npy"",\r\n 893\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7365.npy"",\r\n 481\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2141.npy"",\r\n 478\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2240.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6830.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5245.npy"",\r\n 141\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2726.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5686.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6861.npy"",\r\n 299\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2806.npy"",\r\n 205\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8080.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3242.npy"",\r\n 62\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9081.npy"",\r\n 72\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4045.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3333.npy"",\r\n 211\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7738.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4468.npy"",\r\n 230\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7347.npy"",\r\n 184\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4804.npy"",\r\n 357\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6735.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_32.npy"",\r\n 192\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5116.npy"",\r\n 207\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6829.npy"",\r\n 120\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8582.npy"",\r\n 357\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8115.npy"",\r\n 128\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5588.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4106.npy"",\r\n 977\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1608.npy"",\r\n 633\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4228.npy"",\r\n 235\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8526.npy"",\r\n 568\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3269.npy"",\r\n 208\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9612.npy"",\r\n 268\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7906.npy"",\r\n 153\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3956.npy"",\r\n 340\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_77.npy"",\r\n 764\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5743.npy"",\r\n 455\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1068.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6883.npy"",\r\n 344\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4969.npy"",\r\n 131\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1300.npy"",\r\n 61\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4504.npy"",\r\n 672\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7449.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8545.npy"",\r\n 52\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5370.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3243.npy"",\r\n 108\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9580.npy"",\r\n 969\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2501.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3701.npy"",\r\n 890\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5633.npy"",\r\n 311\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9256.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5738.npy"",\r\n 689\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5426.npy"",\r\n 263\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6067.npy"",\r\n 780\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1777.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5317.npy"",\r\n 175\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6627.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1961.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8429.npy"",\r\n 528\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8349.npy"",\r\n 272\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9609.npy"",\r\n 666\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2700.npy"",\r\n 75\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3874.npy"",\r\n 275\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6359.npy"",\r\n 367\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7057.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2098.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8520.npy"",\r\n 463\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3227.npy"",\r\n 160\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8970.npy"",\r\n 724\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8514.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6043.npy"",\r\n 459\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5648.npy"",\r\n 634\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6609.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3096.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_900.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7868.npy"",\r\n 393\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3841.npy"",\r\n 541\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6807.npy"",\r\n 191\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9914.npy"",\r\n 236\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2359.npy"",\r\n 193\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8403.npy"",\r\n 473\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1694.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4712.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2846.npy"",\r\n 412\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5513.npy"",\r\n 250\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6640.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7229.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5644.npy"",\r\n 103\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_158.npy"",\r\n 405\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7145.npy"",\r\n 110\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3700.npy"",\r\n 685\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5999.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_722.npy"",\r\n 78\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6277.npy"",\r\n 343\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4529.npy"",\r\n 974\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8371.npy"",\r\n 301\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1552.npy"",\r\n 273\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3719.npy"",\r\n 245\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1280.npy"",\r\n 820\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_466.npy"",\r\n 425\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_585.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8222.npy"",\r\n 199\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4774.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3653.npy"",\r\n 868\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6074.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7826.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9792.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_905.npy"",\r\n 205\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3184.npy"",\r\n 670\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7873.npy"",\r\n 258\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9310.npy"",\r\n 697\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1751.npy"",\r\n 911\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9560.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8210.npy"",\r\n 650\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7841.npy"",\r\n 358\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1765.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3336.npy"",\r\n 618\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9587.npy"",\r\n 720\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3779.npy"",\r\n 613\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6404.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2443.npy"",\r\n 140\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1209.npy"",\r\n 245\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9747.npy"",\r\n 650\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3404.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4071.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_288.npy"",\r\n 701\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8310.npy"",\r\n 307\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7359.npy"",\r\n 252\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1460.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2014.npy"",\r\n 491\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7914.npy"",\r\n 137\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_621.npy"",\r\n 332\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8919.npy"",\r\n 383\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3398.npy"",\r\n 172\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4508.npy"",\r\n 238\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5206.npy"",\r\n 559\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1053.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_691.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1421.npy"",\r\n 197\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7024.npy"",\r\n 335\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7540.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5381.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8501.npy"",\r\n 907\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_738.npy"",\r\n 595\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5160.npy"",\r\n 66\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3553.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8246.npy"",\r\n 126\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4540.npy"",\r\n 870\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5800.npy"",\r\n 919\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7444.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7263.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3465.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9768.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7447.npy"",\r\n 273\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7203.npy"",\r\n 786\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3515.npy"",\r\n 385\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7053.npy"",\r\n 874\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1794.npy"",\r\n 75\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_641.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3707.npy"",\r\n 413\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3615.npy"",\r\n 476\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4898.npy"",\r\n 136\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2803.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8633.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_396.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2895.npy"",\r\n 55\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2690.npy"",\r\n 881\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1993.npy"",\r\n 662\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3453.npy"",\r\n 520\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_451.npy"",\r\n 126\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5406.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_89.npy"",\r\n 402\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7614.npy"",\r\n 651\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1182.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_898.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3991.npy"",\r\n 520\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_105.npy"",\r\n 936\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3113.npy"",\r\n 476\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_273.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1705.npy"",\r\n 495\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1980.npy"",\r\n 274\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9232.npy"",\r\n 699\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7141.npy"",\r\n 347\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9320.npy"",\r\n 872\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_587.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4625.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4070.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4219.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8700.npy"",\r\n 620\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1301.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4808.npy"",\r\n 133\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7552.npy"",\r\n 205\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8721.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3015.npy"",\r\n 156\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1726.npy"",\r\n 283\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2549.npy"",\r\n 850\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_796.npy"",\r\n 165\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3475.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3651.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6802.npy"",\r\n 604\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4820.npy"",\r\n 500\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4760.npy"",\r\n 331\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7310.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7861.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1578.npy"",\r\n 120\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5190.npy"",\r\n 101\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8468.npy"",\r\n 851\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2841.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9841.npy"",\r\n 408\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2054.npy"",\r\n 276\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9101.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3935.npy"",\r\n 878\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1755.npy"",\r\n 59\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9010.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3867.npy"",\r\n 363\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6379.npy"",\r\n 552\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7288.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4647.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4481.npy"",\r\n 334\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5816.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8574.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_703.npy"",\r\n 783\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/metadata.npy"",\r\n 0\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6187.npy"",\r\n 312\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5830.npy"",\r\n 114\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5240.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1462.npy"",\r\n 226\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1855.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7548.npy"",\r\n 780\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7919.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4791.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_193.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4879.npy"",\r\n 687\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7510.npy"",\r\n 968\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6780.npy"",\r\n 573\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9934.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4649.npy"",\r\n 261\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8274.npy"",\r\n 89\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3567.npy"",\r\n 716\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3158.npy"",\r\n 234\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3245.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9006.npy"",\r\n 185\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8756.npy"",\r\n 169\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6370.npy"",\r\n 195\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5674.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_575.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_25.npy"",\r\n 481\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5613.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3684.npy"",\r\n 345\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9621.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_979.npy"",\r\n 847\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4052.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6483.npy"",\r\n 121\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5013.npy"",\r\n 445\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4069.npy"",\r\n 694\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_262.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7610.npy"",\r\n 600\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5734.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_198.npy"",\r\n 221\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2589.npy"",\r\n 738\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2221.npy"",\r\n 160\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8584.npy"",\r\n 86\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6939.npy"",\r\n 672\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8053.npy"",\r\n 406\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7228.npy"",\r\n 85\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5397.npy"",\r\n 510\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9301.npy"",\r\n 399\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2497.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3445.npy"",\r\n 729\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4324.npy"",\r\n 461\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3541.npy"",\r\n 983\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4060.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1740.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_358.npy"",\r\n 69\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2701.npy"",\r\n 164\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3928.npy"",\r\n 237\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3796.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4232.npy"",\r\n 199\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9222.npy"",\r\n 624\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2103.npy"",\r\n 471\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_811.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4149.npy"",\r\n 147\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5364.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5809.npy"",\r\n 320\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5259.npy"",\r\n 565\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8004.npy"",\r\n 612\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5469.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9724.npy"",\r\n 111\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8417.npy"",\r\n 253\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9381.npy"",\r\n 577\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_497.npy"",\r\n 308\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4099.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1686.npy"",\r\n 398\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8629.npy"",\r\n 447\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2572.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1174.npy"",\r\n 305\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2677.npy"",\r\n 60\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8227.npy"",\r\n 171\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1207.npy"",\r\n 135\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6414.npy"",\r\n 261\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9462.npy"",\r\n 106\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3420.npy"",\r\n 400\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2716.npy"",\r\n 919\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_589.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3977.npy"",\r\n 197\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3931.npy"",\r\n 192\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1563.npy"",\r\n 180\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7008.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_831.npy"",\r\n 82\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4361.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9852.npy"",\r\n 150\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6025.npy"",\r\n 140\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7550.npy"",\r\n 442\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5891.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2732.npy"",\r\n 805\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8827.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1491.npy"",\r\n 676\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7417.npy"",\r\n 207\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8538.npy"",\r\n 777\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8557.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8439.npy"",\r\n 95\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4789.npy"",\r\n 54\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8664.npy"",\r\n 347\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8104.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8717.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_763.npy"",\r\n 427\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4866.npy"",\r\n 441\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_907.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1558.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9309.npy"",\r\n 462\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7589.npy"",\r\n 215\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7345.npy"",\r\n 593\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5177.npy"",\r\n 801\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4776.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3572.npy"",\r\n 716\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2071.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7202.npy"",\r\n 274\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1896.npy"",\r\n 339\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_502.npy"",\r\n 417\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6358.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1459.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3155.npy"",\r\n 167\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2510.npy"",\r\n 291\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4612.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6047.npy"",\r\n 173\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3241.npy"",\r\n 984\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_315.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1441.npy"",\r\n 633\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5623.npy"",\r\n 143\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8106.npy"",\r\n 162\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3255.npy"",\r\n 912\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9760.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8686.npy"",\r\n 63\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8790.npy"",\r\n 100\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3310.npy"",\r\n 376\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5953.npy"",\r\n 734\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_491.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2772.npy"",\r\n 843\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1754.npy"",\r\n 771\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2774.npy"",\r\n 465\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3472.npy"",\r\n 175\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7367.npy"",\r\n 942\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9788.npy"",\r\n 316\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4136.npy"",\r\n 626\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4067.npy"",\r\n 368\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7297.npy"",\r\n 296\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_446.npy"",\r\n 379\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5076.npy"",\r\n 378\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4048.npy"",\r\n 814\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8921.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3525.npy"",\r\n 81\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4326.npy"",\r\n 406\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8675.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5631.npy"",\r\n 351\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2036.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_13.npy"",\r\n 909\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9116.npy"",\r\n 741\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2284.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2025.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7662.npy"",\r\n 846\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3271.npy"",\r\n 825\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4834.npy"",\r\n 152\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1253.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5352.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_923.npy"",\r\n 643\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7369.npy"",\r\n 620\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3924.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7827.npy"",\r\n 335\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8895.npy"",\r\n 845\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6501.npy"",\r\n 373\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5798.npy"",\r\n 346\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1689.npy"",\r\n 323\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3971.npy"",\r\n 896\r\n ],",,terminal_output +3160,5003955,"TERMINAL",0,0,"\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8362.npy"",\r\n 650\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8415.npy"",\r\n 837\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_68.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5910.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8071.npy"",\r\n 66\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3986.npy"",\r\n 958\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5106.npy"",\r\n 435\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5641.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5104.npy"",\r\n 852\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3172.npy"",\r\n 814\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2058.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7048.npy"",\r\n 184\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1410.npy"",\r\n 234\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6154.npy"",\r\n 157\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9054.npy"",\r\n 54\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1680.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3108.npy"",\r\n 478\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6806.npy"",\r\n 252\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7528.npy"",\r\n 254\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1304.npy"",\r\n 301\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1125.npy"",\r\n 155\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6275.npy"",\r\n 410\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5349.npy"",\r\n 882\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1567.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2736.npy"",\r\n 440\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6142.npy"",\r\n 780\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1550.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7811.npy"",\r\n 499\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9519.npy"",\r\n 575\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5663.npy"",\r\n 443\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7731.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8383.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9373.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7547.npy"",\r\n 317\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8245.npy"",\r\n 93\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8364.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_99.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2499.npy"",\r\n 540\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6091.npy"",\r\n 496\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6164.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4398.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3877.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8094.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5678.npy"",\r\n 640\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3406.npy"",\r\n 904\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_142.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9533.npy"",\r\n 718\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6354.npy"",\r\n 185\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2873.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1135.npy"",\r\n 959\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1469.npy"",\r\n 331\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6131.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2638.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2170.npy"",\r\n 994\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2663.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4954.npy"",\r\n 494\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7512.npy"",\r\n 353\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3579.npy"",\r\n 306\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8102.npy"",\r\n 158\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2971.npy"",\r\n 280\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3490.npy"",\r\n 298\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9607.npy"",\r\n 715\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1485.npy"",\r\n 713\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9255.npy"",\r\n 159\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2271.npy"",\r\n 695\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1750.npy"",\r\n 257\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8052.npy"",\r\n 568\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2077.npy"",\r\n 658\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6951.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7241.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2757.npy"",\r\n 114\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_418.npy"",\r\n 509\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6541.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3555.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3246.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5072.npy"",\r\n 190\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7728.npy"",\r\n 396\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5700.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6642.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_364.npy"",\r\n 309\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_398.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2192.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4035.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9668.npy"",\r\n 346\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2280.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4967.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3947.npy"",\r\n 277\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2838.npy"",\r\n 81\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3848.npy"",\r\n 657\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8375.npy"",\r\n 135\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8586.npy"",\r\n 635\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6754.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3529.npy"",\r\n 89\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6782.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1559.npy"",\r\n 504\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1070.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4824.npy"",\r\n 369\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7994.npy"",\r\n 254\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7823.npy"",\r\n 570\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9795.npy"",\r\n 192\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4764.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3200.npy"",\r\n 937\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1938.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5970.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9807.npy"",\r\n 572\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5251.npy"",\r\n 442\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1137.npy"",\r\n 383\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1193.npy"",\r\n 784\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8540.npy"",\r\n 199\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2306.npy"",\r\n 196\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9998.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5973.npy"",\r\n 118\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2433.npy"",\r\n 520\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4984.npy"",\r\n 542\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1523.npy"",\r\n 518\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_305.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5610.npy"",\r\n 255\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2565.npy"",\r\n 424\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4989.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4987.npy"",\r\n 802\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6071.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4899.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7409.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7280.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9644.npy"",\r\n 876\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_154.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5533.npy"",\r\n 658\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7709.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8712.npy"",\r\n 786\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2963.npy"",\r\n 138\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2537.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_501.npy"",\r\n 494\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4272.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5254.npy"",\r\n 609\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2421.npy"",\r\n 681\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9725.npy"",\r\n 261\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6080.npy"",\r\n 201\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9209.npy"",\r\n 860\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2302.npy"",\r\n 350\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4603.npy"",\r\n 54\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9719.npy"",\r\n 262\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4858.npy"",\r\n 374\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9574.npy"",\r\n 924\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9515.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5110.npy"",\r\n 409\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3522.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8804.npy"",\r\n 467\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3364.npy"",\r\n 678\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9151.npy"",\r\n 339\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7544.npy"",\r\n 665\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5359.npy"",\r\n 815\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3965.npy"",\r\n 533\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5248.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8241.npy"",\r\n 351\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6177.npy"",\r\n 192\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5024.npy"",\r\n 83\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6066.npy"",\r\n 135\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3093.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6685.npy"",\r\n 334\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_751.npy"",\r\n 443\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1834.npy"",\r\n 427\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7704.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7814.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8289.npy"",\r\n 687\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_527.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6622.npy"",\r\n 142\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6679.npy"",\r\n 159\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_625.npy"",\r\n 528\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3595.npy"",\r\n 582\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4947.npy"",\r\n 475\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3648.npy"",\r\n 189\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6042.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2581.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6614.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1111.npy"",\r\n 303\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9512.npy"",\r\n 76\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4421.npy"",\r\n 903\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6082.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9040.npy"",\r\n 380\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1481.npy"",\r\n 683\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_486.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6648.npy"",\r\n 368\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5906.npy"",\r\n 300\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_413.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_176.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7196.npy"",\r\n 352\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5491.npy"",\r\n 403\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2332.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_933.npy"",\r\n 151\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4521.npy"",\r\n 925\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4805.npy"",\r\n 350\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3520.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9226.npy"",\r\n 984\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9847.npy"",\r\n 60\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7078.npy"",\r\n 118\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_184.npy"",\r\n 494\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8581.npy"",\r\n 307\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2445.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4102.npy"",\r\n 578\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6434.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4385.npy"",\r\n 263\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8036.npy"",\r\n 330\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9521.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_982.npy"",\r\n 426\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2131.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_427.npy"",\r\n 78\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3989.npy"",\r\n 135\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_58.npy"",\r\n 714\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7831.npy"",\r\n 871\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1418.npy"",\r\n 126\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2043.npy"",\r\n 269\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8261.npy"",\r\n 215\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6389.npy"",\r\n 447\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6341.npy"",\r\n 298\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6884.npy"",\r\n 106\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_813.npy"",\r\n 380\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1664.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5881.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6318.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9994.npy"",\r\n 111\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5371.npy"",\r\n 476\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4110.npy"",\r\n 185\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7749.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1804.npy"",\r\n 373\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3918.npy"",\r\n 987\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5869.npy"",\r\n 189\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8306.npy"",\r\n 828\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6721.npy"",\r\n 329\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9798.npy"",\r\n 258\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2685.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5634.npy"",\r\n 912\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6481.npy"",\r\n 170\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9352.npy"",\r\n 306\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_499.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5011.npy"",\r\n 254\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2561.npy"",\r\n 284\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_203.npy"",\r\n 303\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8380.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9348.npy"",\r\n 354\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7677.npy"",\r\n 515\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4345.npy"",\r\n 229\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6062.npy"",\r\n 591\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9976.npy"",\r\n 833\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5680.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5143.npy"",\r\n 400\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7488.npy"",\r\n 145\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_15.npy"",\r\n 574\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8295.npy"",\r\n 226\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4762.npy"",\r\n 198\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6843.npy"",\r\n 159\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5546.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_766.npy"",\r\n 219\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_511.npy"",\r\n 509\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6312.npy"",\r\n 457\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2388.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3443.npy"",\r\n 564\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_930.npy"",\r\n 774\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8354.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2156.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8733.npy"",\r\n 787\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8797.npy"",\r\n 172\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6570.npy"",\r\n 618\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7876.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_578.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5507.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4783.npy"",\r\n 769\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9959.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8475.npy"",\r\n 181\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_72.npy"",\r\n 443\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6084.npy"",\r\n 194\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5998.npy"",\r\n 497\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2740.npy"",\r\n 265\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5009.npy"",\r\n 271\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8626.npy"",\r\n 403\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1653.npy"",\r\n 966\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3577.npy"",\r\n 657\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7131.npy"",\r\n 418\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_532.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7672.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_12.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4627.npy"",\r\n 928\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4732.npy"",\r\n 436\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8285.npy"",\r\n 458\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7845.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5926.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4120.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7818.npy"",\r\n 188\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2484.npy"",\r\n 838\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4490.npy"",\r\n 288\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2709.npy"",\r\n 361\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9657.npy"",\r\n 225\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4535.npy"",\r\n 182\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9231.npy"",\r\n 724\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8878.npy"",\r\n 583\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1115.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3216.npy"",\r\n 454\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2044.npy"",\r\n 642\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_452.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6211.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2847.npy"",\r\n 745\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6833.npy"",\r\n 309\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9522.npy"",\r\n 209\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6173.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4688.npy"",\r\n 541\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7246.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3507.npy"",\r\n 213\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1591.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3119.npy"",\r\n 260\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9305.npy"",\r\n 146\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1752.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5928.npy"",\r\n 583\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9303.npy"",\r\n 184\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3351.npy"",\r\n 272\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8899.npy"",\r\n 104\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1744.npy"",\r\n 301\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4439.npy"",\r\n 533\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2541.npy"",\r\n 250\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7364.npy"",\r\n 194\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7185.npy"",\r\n 543\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1967.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9364.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7420.npy"",\r\n 540\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6842.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8165.npy"",\r\n 240\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2785.npy"",\r\n 72\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2120.npy"",\r\n 115\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_521.npy"",\r\n 599\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7094.npy"",\r\n 519\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4037.npy"",\r\n 793\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6504.npy"",\r\n 915\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7790.npy"",\r\n 666\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8021.npy"",\r\n 73\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6385.npy"",\r\n 100\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7438.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7778.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1064.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5916.npy"",\r\n 419\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4782.npy"",\r\n 187\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7327.npy"",\r\n 125\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8040.npy"",\r\n 675\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_815.npy"",\r\n 690\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6378.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1336.npy"",\r\n 723\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8002.npy"",\r\n 613\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6452.npy"",\r\n 567\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3944.npy"",\r\n 808\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_679.npy"",\r\n 231\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4706.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2780.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8139.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7819.npy"",\r\n 520\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3340.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5985.npy"",\r\n 410\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3936.npy"",\r\n 734\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9302.npy"",\r\n 261\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_829.npy"",\r\n 787\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6574.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6970.npy"",\r\n 116\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3856.npy"",\r\n 222\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2598.npy"",\r\n 391\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4929.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_313.npy"",\r\n 291\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_127.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7924.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_810.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2101.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9955.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3250.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4293.npy"",\r\n 121\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7670.npy"",\r\n 282\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2508.npy"",\r\n 695\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6474.npy"",\r\n 891\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6115.npy"",\r\n 422\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4623.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5249.npy"",\r\n 889\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9459.npy"",\r\n 626\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9661.npy"",\r\n 163\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8910.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_378.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1326.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5090.npy"",\r\n 234\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6118.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_453.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1954.npy"",\r\n 793\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6225.npy"",\r\n 181\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9220.npy"",\r\n 135\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3795.npy"",\r\n 630\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_474.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3037.npy"",\r\n 110\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5780.npy"",\r\n 575\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9635.npy"",\r\n 547\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2654.npy"",\r\n 103\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7664.npy"",\r\n 390\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3584.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8866.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1321.npy"",\r\n 189\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8146.npy"",\r\n 479\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8482.npy"",\r\n 384\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4323.npy"",\r\n 604\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5924.npy"",\r\n 968\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6771.npy"",\r\n 125\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3221.npy"",\r\n 629\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7667.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5186.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8901.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4435.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3927.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9557.npy"",\r\n 644\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6994.npy"",\r\n 216\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9360.npy"",\r\n 417\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4940.npy"",\r\n 373\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9910.npy"",\r\n 883\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1400.npy"",\r\n 613\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2219.npy"",\r\n 450\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8719.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8418.npy"",\r\n 171\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9965.npy"",\r\n 923\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6188.npy"",\r\n 118\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6126.npy"",\r\n 641\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6028.npy"",\r\n 262\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2262.npy"",\r\n 347\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4357.npy"",\r\n 451\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3230.npy"",\r\n 153\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7870.npy"",\r\n 655\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2708.npy"",\r\n 908\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7648.npy"",\r\n 213\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_243.npy"",\r\n 194\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5333.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7969.npy"",\r\n 114\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3237.npy"",\r\n 628\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3976.npy"",\r\n 785\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1684.npy"",\r\n 519\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2248.npy"",\r\n 420\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8583.npy"",\r\n 582\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1318.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9441.npy"",\r\n 342\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1030.npy"",\r\n 441\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9540.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5791.npy"",\r\n 964\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6096.npy"",\r\n 943\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4861.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3773.npy"",\r\n 564\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_442.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7396.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2172.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1129.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6127.npy"",\r\n 413\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8816.npy"",\r\n 348\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_879.npy"",\r\n 599\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2063.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9416.npy"",\r\n 527\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7880.npy"",\r\n 670\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1856.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1541.npy"",\r\n 994\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1796.npy"",\r\n 998\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8000.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_657.npy"",\r\n 264\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9051.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2869.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6136.npy"",\r\n 205\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6745.npy"",\r\n 679\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8796.npy"",\r\n 859\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9721.npy"",\r\n 772\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4875.npy"",\r\n 461\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9629.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5616.npy"",\r\n 430\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2653.npy"",\r\n 525\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4611.npy"",\r\n 116\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5482.npy"",\r\n 117\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6046.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9207.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_958.npy"",\r\n 512\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4493.npy"",\r\n 220\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_21.npy"",\r\n 96\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_711.npy"",\r\n 135\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3110.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5668.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8892.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1655.npy"",\r\n 115\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_769.npy"",\r\n 250\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2805.npy"",\r\n 318\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3254.npy"",\r\n 350\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4263.npy"",\r\n 579\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8734.npy"",\r\n 626\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1329.npy"",\r\n 493\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4384.npy"",\r\n 129\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6736.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1579.npy"",\r\n 250\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2015.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1322.npy"",\r\n 218\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2775.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3294.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8972.npy"",\r\n 540\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5495.npy"",\r\n 153\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5041.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7692.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6675.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1067.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9340.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1950.npy"",\r\n 656\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_786.npy"",\r\n 93\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6587.npy"",\r\n 450\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2523.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6664.npy"",\r\n 507\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1862.npy"",\r\n 243\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5075.npy"",\r\n 215\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_660.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4333.npy"",\r\n 146\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8548.npy"",\r\n 334\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9717.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4841.npy"",\r\n 429\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3033.npy"",\r\n 358\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7453.npy"",\r\n 450\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8313.npy"",\r\n 779\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2650.npy"",\r\n 256\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2283.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6717.npy"",\r\n 297\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7104.npy"",\r\n 291\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8603.npy"",\r\n 512\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5339.npy"",\r\n 864\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7834.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4119.npy"",\r\n 529\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1743.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9904.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_274.npy"",\r\n 204\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1478.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_420.npy"",\r\n 154\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5097.npy"",\r\n 710\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8558.npy"",\r\n 328\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6266.npy"",\r\n 136\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4151.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8955.npy"",\r\n 674\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7381.npy"",\r\n 73\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3531.npy"",\r\n 378\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1479.npy"",\r\n 779\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6280.npy"",\r\n 410\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5320.npy"",\r\n 888\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4393.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9509.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1021.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4889.npy"",\r\n 365\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8513.npy"",\r\n 417\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4224.npy"",\r\n 399\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4735.npy"",\r\n 942\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5825.npy"",\r\n 305\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5598.npy"",\r\n 295\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6767.npy"",\r\n 327\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7730.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5150.npy"",\r\n 242\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7207.npy"",\r\n 260\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9951.npy"",\r\n 187\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8737.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5956.npy"",\r\n 448\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9906.npy"",\r\n 88\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3080.npy"",\r\n 628\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9113.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6255.npy"",\r\n 113\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6597.npy"",\r\n 335\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3535.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_426.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5169.npy"",\r\n 247\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3025.npy"",\r\n 384\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3017.npy"",\r\n 77\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7995.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5990.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2682.npy"",\r\n 751\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7493.npy"",\r\n 673\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1791.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2694.npy"",\r\n 421\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6979.npy"",\r\n 299\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8303.npy"",\r\n 195\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9505.npy"",\r\n 52\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6386.npy"",\r\n 177\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1186.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_309.npy"",\r\n 551\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8350.npy"",\r\n 155\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2980.npy"",\r\n 948\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5028.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1449.npy"",\r\n 438\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2618.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1789.npy"",\r\n 489\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1243.npy"",\r\n 808\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7371.npy"",\r\n 121\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5645.npy"",\r\n 171\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7642.npy"",\r\n 424\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1496.npy"",\r\n 720\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6749.npy"",\r\n 725\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1403.npy"",\r\n 629\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7021.npy"",\r\n 426\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_295.npy"",\r\n 494\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9095.npy"",\r\n 545\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4629.npy"",\r\n 64\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5992.npy"",\r\n 275\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9022.npy"",\r\n 492\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3761.npy"",\r\n 77\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9507.npy"",\r\n 180\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7825.npy"",\r\n 235\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1908.npy"",\r\n 187\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7191.npy"",\r\n 200\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_64.npy"",\r\n 77\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9990.npy"",\r\n 148\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7059.npy"",\r\n 338\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_788.npy"",\r\n 547\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4380.npy"",\r\n 947\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6480.npy"",\r\n 334\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_430.npy"",\r\n 825\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5921.npy"",\r\n 924\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8976.npy"",\r\n 807\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6138.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4809.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7245.npy"",\r\n 394\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_726.npy"",\r\n 617\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1320.npy"",\r\n 106\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3730.npy"",\r\n 380\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7102.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2124.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7986.npy"",\r\n 854\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2474.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1176.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3042.npy"",\r\n 543\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7913.npy"",\r\n 141\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7095.npy"",\r\n 189\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2807.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1001.npy"",\r\n 717\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8887.npy"",\r\n 443\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8753.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4715.npy"",\r\n 192\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4795.npy"",\r\n 864\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2608.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3628.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6405.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2296.npy"",\r\n 238\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_43.npy"",\r\n 187\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5145.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5453.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3109.npy"",\r\n 908\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9387.npy"",\r\n 442\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7935.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3421.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6110.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5964.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1474.npy"",\r\n 205\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7250.npy"",\r\n 955\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4437.npy"",\r\n 495\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4100.npy"",\r\n 496\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6643.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2008.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8554.npy"",\r\n 518\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7571.npy"",\r\n 226\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1206.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_526.npy"",\r\n 210\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3458.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5449.npy"",\r\n 484\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9506.npy"",\r\n 431\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9926.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6723.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_35.npy"",\r\n 670\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2139.npy"",\r\n 622\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4318.npy"",\r\n 628\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_826.npy"",\r\n 321\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9265.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3425.npy"",\r\n 234\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_614.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9440.npy"",\r\n 508\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4399.npy"",\r\n 774\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5728.npy"",\r\n 299\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7694.npy"",\r\n 339\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6593.npy"",\r\n 87\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6342.npy"",\r\n 420\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2658.npy"",\r\n 103\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_412.npy"",\r\n 218\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5165.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5843.npy"",\r\n 230\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5908.npy"",\r\n 139\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7957.npy"",\r\n 71\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5476.npy"",\r\n 429\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5664.npy"",\r\n 173\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2134.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6605.npy"",\r\n 497\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9835.npy"",\r\n 483\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5077.npy"",\r\n 505\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2000.npy"",\r\n 563\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7864.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5752.npy"",\r\n 151\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4669.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7206.npy"",\r\n 545\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1947.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3833.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6509.npy"",\r\n 468\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3546.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9627.npy"",\r\n 352\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2344.npy"",\r\n 78\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1282.npy"",\r\n 692\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4373.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8850.npy"",\r\n 666\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_212.npy"",\r\n 500\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4703.npy"",\r\n 802\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6119.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5538.npy"",\r\n 841\r\n ],\r\n [\r\n",,terminal_output +3161,5004081,"TERMINAL",0,0," ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9370.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9332.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2264.npy"",\r\n 204\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6986.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3452.npy"",\r\n 830\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5804.npy"",\r\n 534\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2158.npy"",\r\n 328\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9091.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9920.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8510.npy"",\r\n 233\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3715.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3589.npy"",\r\n 155\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9395.npy"",\r\n 224\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2674.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9923.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2814.npy"",\r\n 318\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3954.npy"",\r\n 999\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9706.npy"",\r\n 570\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6635.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2937.npy"",\r\n 521\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2987.npy"",\r\n 171\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1517.npy"",\r\n 195\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9485.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9647.npy"",\r\n 912\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6294.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5971.npy"",\r\n 589\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_205.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_75.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8706.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3422.npy"",\r\n 935\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3557.npy"",\r\n 667\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8815.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8787.npy"",\r\n 279\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4906.npy"",\r\n 248\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4516.npy"",\r\n 98\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1172.npy"",\r\n 286\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4165.npy"",\r\n 92\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2401.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5912.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2604.npy"",\r\n 352\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2379.npy"",\r\n 409\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_939.npy"",\r\n 949\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2506.npy"",\r\n 204\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1200.npy"",\r\n 140\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4767.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6936.npy"",\r\n 502\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5783.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_345.npy"",\r\n 769\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1590.npy"",\r\n 181\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4135.npy"",\r\n 173\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5968.npy"",\r\n 188\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8896.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8715.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5070.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9547.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6796.npy"",\r\n 107\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3454.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4491.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6398.npy"",\r\n 564\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7491.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2816.npy"",\r\n 210\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7628.npy"",\r\n 230\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7545.npy"",\r\n 189\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5875.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3494.npy"",\r\n 259\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7808.npy"",\r\n 784\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5137.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8691.npy"",\r\n 471\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9107.npy"",\r\n 969\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9764.npy"",\r\n 61\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9615.npy"",\r\n 809\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4790.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7644.npy"",\r\n 412\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9159.npy"",\r\n 673\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3596.npy"",\r\n 262\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2666.npy"",\r\n 207\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5827.npy"",\r\n 457\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6582.npy"",\r\n 333\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5448.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2885.npy"",\r\n 641\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9960.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3735.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4408.npy"",\r\n 581\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5600.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9623.npy"",\r\n 452\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9300.npy"",\r\n 102\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_498.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6649.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5635.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5099.npy"",\r\n 90\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8476.npy"",\r\n 841\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9670.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_592.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2977.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4231.npy"",\r\n 531\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4691.npy"",\r\n 168\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2145.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9488.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_510.npy"",\r\n 685\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8834.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6731.npy"",\r\n 83\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4621.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7214.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3975.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5702.npy"",\r\n 561\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2522.npy"",\r\n 190\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7126.npy"",\r\n 167\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3011.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6167.npy"",\r\n 263\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1173.npy"",\r\n 77\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8012.npy"",\r\n 361\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2239.npy"",\r\n 628\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7452.npy"",\r\n 628\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5108.npy"",\r\n 886\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_814.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9367.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3491.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5089.npy"",\r\n 205\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1147.npy"",\r\n 237\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_16.npy"",\r\n 949\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_781.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_402.npy"",\r\n 407\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7389.npy"",\r\n 680\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8724.npy"",\r\n 962\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4470.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1490.npy"",\r\n 473\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5993.npy"",\r\n 593\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4643.npy"",\r\n 117\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5812.npy"",\r\n 286\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7135.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9196.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7127.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5424.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3337.npy"",\r\n 484\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6206.npy"",\r\n 357\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2691.npy"",\r\n 458\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_141.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3840.npy"",\r\n 309\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7489.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9052.npy"",\r\n 176\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3793.npy"",\r\n 979\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_50.npy"",\r\n 278\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3880.npy"",\r\n 647\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8447.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_478.npy"",\r\n 199\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7842.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6272.npy"",\r\n 786\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5922.npy"",\r\n 78\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5673.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7022.npy"",\r\n 688\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2641.npy"",\r\n 439\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6198.npy"",\r\n 952\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_554.npy"",\r\n 242\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7458.npy"",\r\n 342\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3881.npy"",\r\n 389\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3305.npy"",\r\n 922\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4645.npy"",\r\n 154\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5587.npy"",\r\n 252\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1020.npy"",\r\n 552\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2667.npy"",\r\n 295\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4911.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2544.npy"",\r\n 997\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6964.npy"",\r\n 235\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8081.npy"",\r\n 554\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8991.npy"",\r\n 247\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7439.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7976.npy"",\r\n 347\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7974.npy"",\r\n 420\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_255.npy"",\r\n 59\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_746.npy"",\r\n 533\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8135.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8344.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9880.npy"",\r\n 330\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6531.npy"",\r\n 295\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6955.npy"",\r\n 322\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1698.npy"",\r\n 152\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8542.npy"",\r\n 931\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3123.npy"",\r\n 58\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9861.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2662.npy"",\r\n 162\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5003.npy"",\r\n 319\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_252.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8674.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2729.npy"",\r\n 604\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3092.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3890.npy"",\r\n 310\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3138.npy"",\r\n 356\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4039.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3864.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6856.npy"",\r\n 53\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3898.npy"",\r\n 116\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1483.npy"",\r\n 300\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1017.npy"",\r\n 361\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4258.npy"",\r\n 349\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5446.npy"",\r\n 76\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4320.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_395.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8709.npy"",\r\n 458\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1074.npy"",\r\n 278\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9699.npy"",\r\n 138\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5712.npy"",\r\n 608\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1691.npy"",\r\n 889\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1312.npy"",\r\n 767\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7304.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9176.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4182.npy"",\r\n 182\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8988.npy"",\r\n 328\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3329.npy"",\r\n 129\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8764.npy"",\r\n 824\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9858.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3343.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3432.npy"",\r\n 273\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4055.npy"",\r\n 748\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5975.npy"",\r\n 73\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1910.npy"",\r\n 252\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8695.npy"",\r\n 372\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4337.npy"",\r\n 747\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8578.npy"",\r\n 271\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8925.npy"",\r\n 663\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6601.npy"",\r\n 296\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4771.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4851.npy"",\r\n 695\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4389.npy"",\r\n 913\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2444.npy"",\r\n 590\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7385.npy"",\r\n 388\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4926.npy"",\r\n 328\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7954.npy"",\r\n 336\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8814.npy"",\r\n 64\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7328.npy"",\r\n 200\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1447.npy"",\r\n 104\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3405.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4328.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_595.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3869.npy"",\r\n 855\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5709.npy"",\r\n 384\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2254.npy"",\r\n 327\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8259.npy"",\r\n 945\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3442.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6202.npy"",\r\n 385\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1309.npy"",\r\n 330\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_385.npy"",\r\n 369\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6950.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9018.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2946.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1277.npy"",\r\n 853\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3105.npy"",\r\n 495\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1785.npy"",\r\n 428\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5055.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1892.npy"",\r\n 269\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7598.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3151.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2978.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4118.npy"",\r\n 279\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3910.npy"",\r\n 88\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9230.npy"",\r\n 143\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5605.npy"",\r\n 756\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3878.npy"",\r\n 409\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9162.npy"",\r\n 359\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_336.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9200.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9838.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4517.npy"",\r\n 777\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4127.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6618.npy"",\r\n 471\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6611.npy"",\r\n 840\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2800.npy"",\r\n 193\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4079.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3251.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9327.npy"",\r\n 539\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3739.npy"",\r\n 355\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9646.npy"",\r\n 389\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6101.npy"",\r\n 362\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3186.npy"",\r\n 238\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7753.npy"",\r\n 270\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_220.npy"",\r\n 275\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7318.npy"",\r\n 270\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6535.npy"",\r\n 641\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3449.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_748.npy"",\r\n 292\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5667.npy"",\r\n 538\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8642.npy"",\r\n 596\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1843.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6719.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4044.npy"",\r\n 449\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3524.npy"",\r\n 430\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2324.npy"",\r\n 404\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9090.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7446.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3787.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4813.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6874.npy"",\r\n 321\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3159.npy"",\r\n 664\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3894.npy"",\r\n 600\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9831.npy"",\r\n 404\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9112.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3181.npy"",\r\n 406\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5870.npy"",\r\n 114\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5681.npy"",\r\n 990\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9015.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7262.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2064.npy"",\r\n 225\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3233.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_904.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7188.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8547.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4304.npy"",\r\n 63\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2754.npy"",\r\n 721\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1257.npy"",\r\n 651\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9420.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3802.npy"",\r\n 820\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8945.npy"",\r\n 742\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8233.npy"",\r\n 411\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5755.npy"",\r\n 480\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4992.npy"",\r\n 74\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2126.npy"",\r\n 444\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1838.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8419.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8346.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3197.npy"",\r\n 217\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9643.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9050.npy"",\r\n 673\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6290.npy"",\r\n 94\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4372.npy"",\r\n 635\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4920.npy"",\r\n 704\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_204.npy"",\r\n 582\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5749.npy"",\r\n 819\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9882.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3713.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6316.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2776.npy"",\r\n 765\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1279.npy"",\r\n 480\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2448.npy"",\r\n 241\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9109.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5867.npy"",\r\n 905\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8322.npy"",\r\n 181\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7388.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_631.npy"",\r\n 384\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_112.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3423.npy"",\r\n 216\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6100.npy"",\r\n 296\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3006.npy"",\r\n 432\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9307.npy"",\r\n 764\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_228.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_230.npy"",\r\n 283\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3126.npy"",\r\n 239\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9417.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_530.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_377.npy"",\r\n 728\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8341.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_989.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1273.npy"",\r\n 192\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4394.npy"",\r\n 78\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4142.npy"",\r\n 204\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1781.npy"",\r\n 294\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7042.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2954.npy"",\r\n 145\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_858.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1104.npy"",\r\n 243\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1693.npy"",\r\n 331\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5342.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2349.npy"",\r\n 402\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3081.npy"",\r\n 90\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7611.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9280.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2983.npy"",\r\n 56\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7109.npy"",\r\n 456\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6064.npy"",\r\n 797\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_113.npy"",\r\n 989\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3129.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5262.npy"",\r\n 640\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2550.npy"",\r\n 604\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9543.npy"",\r\n 944\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6003.npy"",\r\n 280\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1733.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8820.npy"",\r\n 446\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4744.npy"",\r\n 809\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7479.npy"",\r\n 233\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3752.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5412.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1713.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_731.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5174.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8213.npy"",\r\n 101\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5061.npy"",\r\n 760\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8061.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9155.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5172.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6676.npy"",\r\n 505\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3606.npy"",\r\n 980\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1482.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6836.npy"",\r\n 229\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4430.npy"",\r\n 125\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9439.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4693.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_222.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8838.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9664.npy"",\r\n 365\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2545.npy"",\r\n 504\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7903.npy"",\r\n 271\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_568.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9804.npy"",\r\n 236\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8333.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8630.npy"",\r\n 316\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7184.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3519.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7990.npy"",\r\n 340\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6565.npy"",\r\n 209\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4308.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_969.npy"",\r\n 916\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1840.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2225.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2223.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6356.npy"",\r\n 392\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2744.npy"",\r\n 774\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2129.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9905.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7259.npy"",\r\n 238\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9351.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8769.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7432.npy"",\r\n 531\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9975.npy"",\r\n 269\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4525.npy"",\r\n 246\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9686.npy"",\r\n 709\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5390.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_946.npy"",\r\n 704\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3604.npy"",\r\n 391\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_242.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1769.npy"",\r\n 151\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3174.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2377.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3471.npy"",\r\n 796\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1044.npy"",\r\n 108\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9325.npy"",\r\n 277\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3587.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9457.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7168.npy"",\r\n 617\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7776.npy"",\r\n 824\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1143.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_110.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7500.npy"",\r\n 702\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1771.npy"",\r\n 632\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6432.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4757.npy"",\r\n 662\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7586.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5754.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5250.npy"",\r\n 443\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2635.npy"",\r\n 681\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9715.npy"",\r\n 145\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3835.npy"",\r\n 220\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2389.npy"",\r\n 445\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3122.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6506.npy"",\r\n 262\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3955.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2492.npy"",\r\n 543\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7130.npy"",\r\n 77\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1506.npy"",\r\n 174\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3558.npy"",\r\n 245\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7416.npy"",\r\n 151\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5384.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8113.npy"",\r\n 370\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1271.npy"",\r\n 471\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7535.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_401.npy"",\r\n 291\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_188.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2517.npy"",\r\n 700\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5559.npy"",\r\n 497\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_376.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1830.npy"",\r\n 62\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4442.npy"",\r\n 238\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4784.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7539.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5959.npy"",\r\n 127\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1081.npy"",\r\n 350\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1244.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8161.npy"",\r\n 474\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9929.npy"",\r\n 259\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2094.npy"",\r\n 809\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5475.npy"",\r\n 335\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4733.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2585.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9876.npy"",\r\n 300\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6644.npy"",\r\n 94\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5902.npy"",\r\n 334\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3938.npy"",\r\n 115\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2519.npy"",\r\n 56\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_300.npy"",\r\n 271\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5247.npy"",\r\n 701\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9304.npy"",\r\n 240\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8470.npy"",\r\n 236\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9295.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_579.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6212.npy"",\r\n 805\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_371.npy"",\r\n 385\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2265.npy"",\r\n 446\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1695.npy"",\r\n 73\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7471.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7074.npy"",\r\n 269\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7266.npy"",\r\n 160\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9921.npy"",\r\n 484\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5941.npy"",\r\n 318\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8342.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4381.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_695.npy"",\r\n 80\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2801.npy"",\r\n 209\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9028.npy"",\r\n 413\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8256.npy"",\r\n 53\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9425.npy"",\r\n 238\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7437.npy"",\r\n 232\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1414.npy"",\r\n 909\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1811.npy"",\r\n 186\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5753.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8141.npy"",\r\n 374\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8391.npy"",\r\n 832\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2615.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6130.npy"",\r\n 311\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5865.npy"",\r\n 457\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5120.npy"",\r\n 349\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5001.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7803.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9846.npy"",\r\n 118\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7572.npy"",\r\n 321\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6453.npy"",\r\n 76\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8741.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8057.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2373.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6743.npy"",\r\n 283\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4189.npy"",\r\n 266\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2524.npy"",\r\n 950\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1040.npy"",\r\n 191\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4995.npy"",\r\n 342\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4780.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8238.npy"",\r\n 336\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_152.npy"",\r\n 362\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5340.npy"",\r\n 198\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3426.npy"",\r\n 747\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3844.npy"",\r\n 414\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9421.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9377.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7482.npy"",\r\n 157\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2163.npy"",\r\n 366\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7427.npy"",\r\n 629\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5861.npy"",\r\n 284\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_351.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5750.npy"",\r\n 281\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4741.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3831.npy"",\r\n 160\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3774.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5526.npy"",\r\n 232\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9158.npy"",\r\n 67\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4786.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8138.npy"",\r\n 400\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7028.npy"",\r\n 966\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1521.npy"",\r\n 551\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3566.npy"",\r\n 362\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8844.npy"",\r\n 220\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5955.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1632.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_480.npy"",\r\n 904\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7440.npy"",\r\n 785\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8281.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1675.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1451.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6889.npy"",\r\n 487\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6224.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4144.npy"",\r\n 246\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2860.npy"",\r\n 951\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2468.npy"",\r\n 247\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6157.npy"",\r\n 287\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2778.npy"",\r\n 998\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2707.npy"",\r\n 323\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9940.npy"",\r\n 523\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5937.npy"",\r\n 392\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1641.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2705.npy"",\r\n 194\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5826.npy"",\r\n 467\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4531.npy"",\r\n 182\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5595.npy"",\r\n 690\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7768.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8038.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3135.npy"",\r\n 167\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1904.npy"",\r\n 755\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2196.npy"",\r\n 932\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8499.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9041.npy"",\r\n 211\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3679.npy"",\r\n 497\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_447.npy"",\r\n 165\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6433.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9002.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1613.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3588.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1812.npy"",\r\n 50\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4077.npy"",\r\n 160\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4684.npy"",\r\n 484\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4001.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7072.npy"",\r\n 562\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1539.npy"",\r\n 381\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5799.npy"",\r\n 551\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1721.npy"",\r\n 160\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8619.npy"",\r\n 992\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2050.npy"",\r\n 434\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8707.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2313.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3375.npy"",\r\n 444\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5121.npy"",\r\n 747\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_839.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8128.npy"",\r\n 501\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6327.npy"",\r\n 236\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3083.npy"",\r\n 679\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1456.npy"",\r\n 273\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9987.npy"",\r\n 123\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_60.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7067.npy"",\r\n 97\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9322.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5886.npy"",\r\n 396\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1977.npy"",\r\n 437\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7649.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1396.npy"",\r\n 507\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9979.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7647.npy"",\r\n 75\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1527.npy"",\r\n 146\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7320.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8694.npy"",\r\n 169\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3741.npy"",\r\n 982\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1958.npy"",\r\n 617\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9863.npy"",\r\n 552\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6905.npy"",\r\n 504\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_299.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6439.npy"",\r\n 472\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4059.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7407.npy"",\r\n 84\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7002.npy"",\r\n 743\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5577.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4509.npy"",\r\n 255\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9156.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3686.npy"",\r\n 684\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9895.npy"",\r\n 205\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2611.npy"",\r\n 423\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6928.npy"",\r\n 360\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4078.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3870.npy"",\r\n 529\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_109.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8517.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6113.npy"",\r\n 217\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_475.npy"",\r\n 118\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_970.npy"",\r\n 688\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_557.npy"",\r\n 155\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7926.npy"",\r\n 284\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3060.npy"",\r\n 400\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8234.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3940.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6213.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1671.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9087.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7815.npy"",\r\n 54\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6200.npy"",\r\n 613\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5183.npy"",\r\n 608\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4696.npy"",\r\n 311\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6727.npy"",\r\n 233\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6194.npy"",\r\n 428\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9296.npy"",\r\n 682\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7520.npy"",\r\n 293\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_162.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7967.npy"",\r\n 789\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7568.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3228.npy"",\r\n 891\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3852.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6012.npy"",\r\n 186\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_448.npy"",\r\n 602\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9675.npy"",\r\n 145\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1354.npy"",\r\n 491\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3575.npy"",\r\n 357\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7378.npy"",\r\n 202\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5522.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2602.npy"",\r\n 198\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9166.npy"",\r\n 753\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7928.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4692.npy"",\r\n 197\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7132.npy"",\r\n 903\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1951.npy"",\r\n 244\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8325.npy"",\r\n 223\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3941.npy"",\r\n 513\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8316.npy"",\r\n 104\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9258.npy"",\r\n 958\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8410.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_690.npy"",\r\n 243\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4092.npy"",\r\n 671\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2471.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_978.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8305.npy"",\r\n 779\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6144.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9438.npy"",\r\n 270\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3671.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4840.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1973.npy"",\r\n 746\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5524.npy"",\r\n 182\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1900.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_36.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7517.npy"",\r\n 573\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8352.",,terminal_output +3162,5004175,"TERMINAL",0,0,"npy"",\r\n 191\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7296.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_546.npy"",\r\n 186\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6761.npy"",\r\n 307\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_79.npy"",\r\n 465\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7597.npy"",\r\n 307\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4719.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4198.npy"",\r\n 378\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7722.npy"",\r\n 54\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7720.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_144.npy"",\r\n 710\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_605.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4993.npy"",\r\n 387\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2651.npy"",\r\n 322\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5149.npy"",\r\n 699\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6834.npy"",\r\n 123\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_139.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_790.npy"",\r\n 151\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_776.npy"",\r\n 765\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6166.npy"",\r\n 163\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6779.npy"",\r\n 399\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6114.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8867.npy"",\r\n 389\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4388.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7226.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4930.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_882.npy"",\r\n 513\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6977.npy"",\r\n 597\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8183.npy"",\r\n 194\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1392.npy"",\r\n 815\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8064.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_368.npy"",\r\n 224\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4500.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_929.npy"",\r\n 291\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9283.npy"",\r\n 243\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8698.npy"",\r\n 215\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8196.npy"",\r\n 133\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_983.npy"",\r\n 270\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2469.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9099.npy"",\r\n 394\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_855.npy"",\r\n 241\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5156.npy"",\r\n 152\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6325.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2673.npy"",\r\n 128\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1968.npy"",\r\n 448\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_470.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9628.npy"",\r\n 102\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4012.npy"",\r\n 581\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9867.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9530.npy"",\r\n 135\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4927.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5535.npy"",\r\n 201\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2681.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7821.npy"",\r\n 257\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3268.npy"",\r\n 52\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3272.npy"",\r\n 678\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9142.npy"",\r\n 536\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2400.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9504.npy"",\r\n 130\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_391.npy"",\r\n 216\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3625.npy"",\r\n 196\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7311.npy"",\r\n 483\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8396.npy"",\r\n 606\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9412.npy"",\r\n 176\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4607.npy"",\r\n 111\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6256.npy"",\r\n 56\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_27.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_51.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5357.npy"",\r\n 693\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6832.npy"",\r\n 249\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5801.npy"",\r\n 516\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7639.npy"",\r\n 704\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5542.npy"",\r\n 188\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1107.npy"",\r\n 115\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9133.npy"",\r\n 153\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6088.npy"",\r\n 282\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7257.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1792.npy"",\r\n 241\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_490.npy"",\r\n 469\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3296.npy"",\r\n 418\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7118.npy"",\r\n 95\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5736.npy"",\r\n 663\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7708.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9928.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5612.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8358.npy"",\r\n 344\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9527.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5058.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5566.npy"",\r\n 199\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3766.npy"",\r\n 498\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9672.npy"",\r\n 988\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7383.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8353.npy"",\r\n 201\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3290.npy"",\r\n 160\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9601.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3747.npy"",\r\n 550\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5236.npy"",\r\n 412\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8616.npy"",\r\n 531\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5261.npy"",\r\n 417\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_696.npy"",\r\n 319\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6973.npy"",\r\n 864\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7244.npy"",\r\n 897\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5418.npy"",\r\n 554\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1995.npy"",\r\n 179\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4239.npy"",\r\n 965\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9007.npy"",\r\n 791\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1372.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8544.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9713.npy"",\r\n 604\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9448.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4124.npy"",\r\n 319\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6098.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6667.npy"",\r\n 545\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8092.npy"",\r\n 107\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7292.npy"",\r\n 315\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1944.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7100.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5335.npy"",\r\n 532\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9429.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4836.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3115.npy"",\r\n 538\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5761.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4072.npy"",\r\n 516\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2007.npy"",\r\n 205\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1844.npy"",\r\n 639\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_735.npy"",\r\n 752\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2255.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1963.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5885.npy"",\r\n 95\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1706.npy"",\r\n 385\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5436.npy"",\r\n 405\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4256.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7162.npy"",\r\n 161\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7156.npy"",\r\n 310\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1602.npy"",\r\n 664\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2516.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8458.npy"",\r\n 146\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3638.npy"",\r\n 276\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8449.npy"",\r\n 530\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4488.npy"",\r\n 725\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5344.npy"",\r\n 945\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1101.npy"",\r\n 764\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1719.npy"",\r\n 479\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8292.npy"",\r\n 424\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4885.npy"",\r\n 348\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4220.npy"",\r\n 180\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1036.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9358.npy"",\r\n 734\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4653.npy"",\r\n 125\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9902.npy"",\r\n 609\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7950.npy"",\r\n 810\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9306.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2552.npy"",\r\n 74\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7504.npy"",\r\n 246\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1025.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7509.npy"",\r\n 173\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1358.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_120.npy"",\r\n 266\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4379.npy"",\r\n 375\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9481.npy"",\r\n 464\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8392.npy"",\r\n 208\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8024.npy"",\r\n 137\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4155.npy"",\r\n 240\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6271.npy"",\r\n 670\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3543.npy"",\r\n 551\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1922.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1624.npy"",\r\n 915\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5361.npy"",\r\n 129\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7806.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2804.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7253.npy"",\r\n 618\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1994.npy"",\r\n 290\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8236.npy"",\r\n 313\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9405.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9674.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9430.npy"",\r\n 242\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3331.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4695.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8006.npy"",\r\n 307\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5209.npy"",\r\n 583\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4109.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1492.npy"",\r\n 107\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1058.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5447.npy"",\r\n 159\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9924.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4279.npy"",\r\n 941\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8829.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1380.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7646.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1356.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5098.npy"",\r\n 208\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6293.npy"",\r\n 513\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6086.npy"",\r\n 687\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1007.npy"",\r\n 444\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5819.npy"",\r\n 477\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6915.npy"",\r\n 648\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9642.npy"",\r\n 867\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3062.npy"",\r\n 241\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1617.npy"",\r\n 526\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8840.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3829.npy"",\r\n 484\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2844.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3325.npy"",\r\n 116\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9885.npy"",\r\n 572\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7004.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2202.npy"",\r\n 116\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8318.npy"",\r\n 266\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_586.npy"",\r\n 56\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6221.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1839.npy"",\r\n 473\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8821.npy"",\r\n 878\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4296.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6124.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7106.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3580.npy"",\r\n 303\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1168.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8597.npy"",\r\n 552\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_693.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9454.npy"",\r\n 528\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5241.npy"",\r\n 448\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7047.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1572.npy"",\r\n 221\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1912.npy"",\r\n 342\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2612.npy"",\r\n 546\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7767.npy"",\r\n 515\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2228.npy"",\r\n 254\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5319.npy"",\r\n 429\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6298.npy"",\r\n 780\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3424.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3309.npy"",\r\n 690\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8201.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3086.npy"",\r\n 534\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6134.npy"",\r\n 252\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9098.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4426.npy"",\r\n 440\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7549.npy"",\r\n 122\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7305.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_552.npy"",\r\n 193\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7807.npy"",\r\n 260\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2920.npy"",\r\n 96\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4186.npy"",\r\n 268\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_610.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8199.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8454.npy"",\r\n 243\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3583.npy"",\r\n 523\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1335.npy"",\r\n 716\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2176.npy"",\r\n 643\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1962.npy"",\r\n 471\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1710.npy"",\r\n 235\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8995.npy"",\r\n 919\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1960.npy"",\r\n 700\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3768.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3512.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3586.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_613.npy"",\r\n 841\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7043.npy"",\r\n 886\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6746.npy"",\r\n 429\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3608.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2974.npy"",\r\n 354\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1105.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7690.npy"",\r\n 112\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_686.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4711.npy"",\r\n 667\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9640.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3206.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7400.npy"",\r\n 243\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6581.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2292.npy"",\r\n 587\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_677.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8552.npy"",\r\n 701\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4678.npy"",\r\n 107\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5073.npy"",\r\n 389\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6440.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7638.npy"",\r\n 449\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9991.npy"",\r\n 822\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_164.npy"",\r\n 713\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_29.npy"",\r\n 122\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8990.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6578.npy"",\r\n 945\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8324.npy"",\r\n 417\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6505.npy"",\r\n 410\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8566.npy"",\r\n 656\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_161.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7175.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3189.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7968.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8710.npy"",\r\n 59\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4377.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1659.npy"",\r\n 453\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1657.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5034.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7679.npy"",\r\n 419\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9820.npy"",\r\n 711\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5821.npy"",\r\n 188\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2242.npy"",\r\n 847\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4438.npy"",\r\n 478\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2670.npy"",\r\n 151\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5109.npy"",\r\n 963\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5528.npy"",\r\n 536\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1604.npy"",\r\n 307\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1860.npy"",\r\n 537\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7317.npy"",\r\n 783\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_713.npy"",\r\n 512\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6875.npy"",\r\n 912\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4209.npy"",\r\n 730\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7536.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6586.npy"",\r\n 51\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6263.npy"",\r\n 218\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2127.npy"",\r\n 76\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9783.npy"",\r\n 177\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3394.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8730.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6368.npy"",\r\n 278\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8524.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7243.npy"",\r\n 847\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1397.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5500.npy"",\r\n 848\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7099.npy"",\r\n 268\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2817.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1472.npy"",\r\n 399\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6291.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2745.npy"",\r\n 451\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9294.npy"",\r\n 255\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2739.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1568.npy"",\r\n 114\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2336.npy"",\r\n 445\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8175.npy"",\r\n 454\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2149.npy"",\r\n 469\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2566.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3409.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7433.npy"",\r\n 281\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8894.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7972.npy"",\r\n 485\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6050.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_666.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3349.npy"",\r\n 435\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2005.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2770.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2513.npy"",\r\n 447\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6273.npy"",\r\n 517\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8959.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8593.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_885.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6896.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_69.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_662.npy"",\r\n 55\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7587.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2901.npy"",\r\n 366\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5315.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_155.npy"",\r\n 132\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8781.npy"",\r\n 253\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9033.npy"",\r\n 97\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5002.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_992.npy"",\r\n 758\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9009.npy"",\r\n 156\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6394.npy"",\r\n 911\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4579.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3164.npy"",\r\n 546\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8278.npy"",\r\n 81\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6810.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5510.npy"",\r\n 250\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5048.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7680.npy"",\r\n 383\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2518.npy"",\r\n 393\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3427.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4265.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5232.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1029.npy"",\r\n 185\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9611.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_962.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9638.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4528.npy"",\r\n 792\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6249.npy"",\r\n 549\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3993.npy"",\r\n 654\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9070.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1623.npy"",\r\n 108\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4806.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1198.npy"",\r\n 810\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3897.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3974.npy"",\r\n 582\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5154.npy"",\r\n 965\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2381.npy"",\r\n 476\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1016.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2174.npy"",\r\n 745\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4003.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_479.npy"",\r\n 775\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5242.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_974.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3185.npy"",\r\n 249\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5874.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6984.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8471.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2824.npy"",\r\n 175\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8663.npy"",\r\n 375\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_245.npy"",\r\n 548\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_676.npy"",\r\n 349\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_736.npy"",\r\n 516\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5123.npy"",\r\n 414\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1690.npy"",\r\n 209\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7576.npy"",\r\n 586\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2356.npy"",\r\n 256\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9404.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8770.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5627.npy"",\r\n 53\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9903.npy"",\r\n 132\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_88.npy"",\r\n 202\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1162.npy"",\r\n 411\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4143.npy"",\r\n 269\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3945.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2779.npy"",\r\n 85\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7122.npy"",\r\n 811\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7218.npy"",\r\n 662\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_658.npy"",\r\n 525\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4183.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2475.npy"",\r\n 52\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3548.npy"",\r\n 364\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4413.npy"",\r\n 444\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6099.npy"",\r\n 99\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1071.npy"",\r\n 330\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6773.npy"",\r\n 239\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1374.npy"",\r\n 299\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1393.npy"",\r\n 84\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_715.npy"",\r\n 899\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5085.npy"",\r\n 143\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2976.npy"",\r\n 72\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6189.npy"",\r\n 198\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3868.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7358.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6347.npy"",\r\n 171\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1464.npy"",\r\n 276\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2633.npy"",\r\n 374\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9282.npy"",\r\n 209\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_494.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7752.npy"",\r\n 197\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5440.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7750.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5204.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8180.npy"",\r\n 149\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6500.npy"",\r\n 172\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6617.npy"",\r\n 244\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8614.npy"",\r\n 911\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5637.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8443.npy"",\r\n 569\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7727.npy"",\r\n 104\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9025.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9894.npy"",\r\n 490\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9935.npy"",\r\n 732\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5651.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1108.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8215.npy"",\r\n 572\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5182.npy"",\r\n 519\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_399.npy"",\r\n 427\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7468.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3085.npy"",\r\n 757\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6784.npy"",\r\n 153\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6960.npy"",\r\n 265\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3496.npy"",\r\n 527\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4865.npy"",\r\n 434\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9032.npy"",\r\n 503\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8242.npy"",\r\n 637\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4846.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9573.npy"",\r\n 101\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_353.npy"",\r\n 585\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3435.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6599.npy"",\r\n 138\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_528.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_843.npy"",\r\n 911\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1083.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8177.npy"",\r\n 132\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1428.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8217.npy"",\r\n 611\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8068.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8163.npy"",\r\n 644\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7761.npy"",\r\n 231\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2384.npy"",\r\n 190\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4590.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1486.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5223.npy"",\r\n 638\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5221.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6610.npy"",\r\n 267\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7769.npy"",\r\n 344\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7306.npy"",\r\n 371\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2647.npy"",\r\n 488\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6673.npy"",\r\n 111\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2385.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7154.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8702.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1027.npy"",\r\n 113\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1989.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1863.npy"",\r\n 55\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3751.npy"",\r\n 311\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6879.npy"",\r\n 454\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2688.npy"",\r\n 118\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5690.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_928.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7230.npy"",\r\n 203\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6557.npy"",\r\n 369\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4541.npy"",\r\n 629\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_883.npy"",\r\n 176\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6815.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5068.npy"",\r\n 365\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7757.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1245.npy"",\r\n 207\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7877.npy"",\r\n 387\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6262.npy"",\r\n 234\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8339.npy"",\r\n 370\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5517.npy"",\r\n 446\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4672.npy"",\r\n 387\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4193.npy"",\r\n 466\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1920.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7159.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7354.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5338.npy"",\r\n 770\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_263.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5961.npy"",\r\n 493\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2998.npy"",\r\n 878\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8255.npy"",\r\n 108\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1313.npy"",\r\n 155\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2496.npy"",\r\n 537\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7166.npy"",\r\n 218\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7723.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1851.npy"",\r\n 307\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6233.npy"",\r\n 106\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5388.npy"",\r\n 245\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8117.npy"",\r\n 489\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6313.npy"",\r\n 515\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8999.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9123.npy"",\r\n 876\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3101.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5051.npy"",\r\n 395\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1941.npy"",\r\n 383\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2270.npy"",\r\n 969\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1203.npy"",\r\n 298\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4544.npy"",\r\n 578\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6410.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5713.npy"",\r\n 453\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8792.npy"",\r\n 806\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_186.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3592.npy"",\r\n 204\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_648.npy"",\r\n 726\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5420.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8219.npy"",\r\n 329\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6466.npy"",\r\n 988\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6477.npy"",\r\n 191\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9203.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7287.npy"",\r\n 474\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_612.npy"",\r\n 530\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7055.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4391.npy"",\r\n 769\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_878.npy"",\r\n 312\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4276.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1402.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_896.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2536.npy"",\r\n 389\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4137.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3273.npy"",\r\n 808\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6899.npy"",\r\n 582\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5878.npy"",\r\n 526\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9259.npy"",\r\n 162\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5396.npy"",\r\n 912\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8457.npy"",\r\n 977\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2380.npy"",\r\n 319\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_906.npy"",\r\n 701\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_261.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2643.npy"",\r\n 166\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2985.npy"",\r\n 734\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2320.npy"",\r\n 448\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1515.npy"",\r\n 146\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6870.npy"",\r\n 989\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_279.npy"",\r\n 570\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8576.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1847.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4126.npy"",\r\n 651\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1018.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3415.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8543.npy"",\r\n 462\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2889.npy"",\r\n 644\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6639.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4022.npy"",\r\n 631\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9251.npy"",\r\n 430\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7721.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9044.npy"",\r\n 884\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_512.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7740.npy"",\r\n 642\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9347.npy"",\r\n 62\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9189.npy"",\r\n 696\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8395.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8151.npy"",\r\n 111\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4822.npy"",\r\n 60\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5078.npy"",\r\n 174\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6232.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5620.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_755.npy"",\r\n 369\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7531.npy"",\r\n 992\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_148.npy"",\r\n 236\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7286.npy"",\r\n 332\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9989.npy"",\r\n 679\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7436.npy"",\r\n 927\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1327.npy"",\r\n 440\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9897.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4148.npy"",\r\n 118\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4414.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9487.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4146.npy"",\r\n 671\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5334.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_503.npy"",\r\n 223\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2991.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4928.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8672.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1622.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2996.npy"",\r\n 281\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3180.npy"",\r\n 970\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2668.npy"",\r\n 955\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2994.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9066.npy"",\r\n 215\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2122.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1179.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7882.npy"",\r\n 291\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_56.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2119.npy"",\r\n 373\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9342.npy"",\r\n 144\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2649.npy"",\r\n 822\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7030.npy"",\r\n 340\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2747.npy"",\r\n 627\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3689.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4970.npy"",\r\n 422\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8946.npy"",\r\n 825\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_862.npy"",\r\n 413\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9413.npy"",\r\n 449\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4983.npy"",\r\n 575\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6828.npy"",\r\n 465\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1015.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4546.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9432.npy"",\r\n 92\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7139.npy"",\r\n 96\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5741.npy"",\r\n 406\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4176.npy"",\r\n 383\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3506.npy"",\r\n 342\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5879.npy"",\r\n 499\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6981.npy"",\r\n 407\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3202.npy"",\r\n 849\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8160.npy"",\r\n 885\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8384.npy"",\r\n 166\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1100.npy"",\r\n 846\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9743.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3178.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7583.npy"",\r\n 60\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1002.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_986.npy"",\r\n 520\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8987.npy"",\r\n 539\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9535.npy"",\r\n 315\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7511.npy"",\r\n 343\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6032.npy"",\r\n 391\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8596.npy"",\r\n 684\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1382.npy"",\r\n 920\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_994.npy"",\r\n 552\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8007.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9188.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2868.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_178.npy"",\r\n 185\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4046.npy"",\r\n 253\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4564.npy"",\r\n 222\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9447.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_804.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2143.npy"",\r\n 389\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4562.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9328.npy"",\r\n 749\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8682.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6848.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7384.npy"",\r\n 858\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_424.npy"",\r\n 294\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6520.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3801.npy"",\r\n 294\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7113.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9802.npy"",\r\n 536\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8085.npy"",\r\n 55\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8453.npy"",\r\n 610\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_445.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8774.npy"",\r\n 637\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3923.npy"",\r\n 545\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5330.npy"",\r\n 128\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7856.npy"",\r\n 210\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9183.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_214.npy"",\r\n 466\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3488.npy"",\r\n 342\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2030.npy"",\r\n 875\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7748.npy"",\r\n 118\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5982.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6680.npy"",\r\n 291\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5660.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6966.npy"",\r\n 117\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2953.npy"",\r\n 126\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4302.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5898.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3895.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8093.npy"",\r\n 446\r\n ],\r\n [\r\n",,terminal_output +3163,5004260,"TERMINAL",0,0," ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3469.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2836.npy"",\r\n 306\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6873.npy"",\r\n 407\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2364.npy"",\r\n 149\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4095.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2956.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_628.npy"",\r\n 379\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_247.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9536.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3399.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3116.npy"",\r\n 78\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8812.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_569.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2713.npy"",\r\n 759\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1119.npy"",\r\n 211\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7618.npy"",\r\n 150\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4163.npy"",\r\n 411\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_863.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6730.npy"",\r\n 117\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8985.npy"",\r\n 83\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2886.npy"",\r\n 537\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_514.npy"",\r\n 867\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7157.npy"",\r\n 164\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9382.npy"",\r\n 254\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2107.npy"",\r\n 663\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_632.npy"",\r\n 104\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6279.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1878.npy"",\r\n 235\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9645.npy"",\r\n 834\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5100.npy"",\r\n 862\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2347.npy"",\r\n 217\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5860.npy"",\r\n 303\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4050.npy"",\r\n 142\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_902.npy"",\r\n 660\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8293.npy"",\r\n 230\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8856.npy"",\r\n 392\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_555.npy"",\r\n 215\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5063.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4456.npy"",\r\n 85\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5625.npy"",\r\n 307\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6179.npy"",\r\n 152\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7356.npy"",\r\n 423\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5327.npy"",\r\n 660\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7524.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2631.npy"",\r\n 289\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2115.npy"",\r\n 197\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8464.npy"",\r\n 112\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5326.npy"",\r\n 256\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5848.npy"",\r\n 843\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1538.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1142.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9889.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1594.npy"",\r\n 71\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3885.npy"",\r\n 393\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3951.npy"",\r\n 285\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4958.npy"",\r\n 922\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3563.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_914.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1583.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3408.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2763.npy"",\r\n 710\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4086.npy"",\r\n 325\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4877.npy"",\r\n 133\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_49.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4915.npy"",\r\n 438\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5818.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2752.npy"",\r\n 614\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3193.npy"",\r\n 471\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7689.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3205.npy"",\r\n 419\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9925.npy"",\r\n 310\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5647.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1144.npy"",\r\n 429\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3687.npy"",\r\n 937\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9184.npy"",\r\n 282\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1614.npy"",\r\n 307\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8434.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2861.npy"",\r\n 270\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_996.npy"",\r\n 675\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_777.npy"",\r\n 272\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1224.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6935.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8579.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9469.npy"",\r\n 72\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7619.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_256.npy"",\r\n 515\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3275.npy"",\r\n 703\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8738.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9753.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7724.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4145.npy"",\r\n 495\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_517.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9907.npy"",\r\n 291\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2655.npy"",\r\n 895\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4745.npy"",\r\n 663\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6558.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3536.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1166.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9665.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3627.npy"",\r\n 196\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_352.npy"",\r\n 83\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8882.npy"",\r\n 681\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_366.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4965.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5179.npy"",\r\n 841\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_565.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7470.npy"",\r\n 108\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7404.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6957.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2387.npy"",\r\n 268\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9782.npy"",\r\n 530\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5979.npy"",\r\n 280\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6777.npy"",\r\n 476\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5307.npy"",\r\n 363\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1438.npy"",\r\n 339\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6553.npy"",\r\n 863\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1436.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2632.npy"",\r\n 229\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9291.npy"",\r\n 147\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3858.npy"",\r\n 557\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4496.npy"",\r\n 217\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2882.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6324.npy"",\r\n 461\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2171.npy"",\r\n 160\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_40.npy"",\r\n 538\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2540.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6521.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_917.npy"",\r\n 294\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8073.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4602.npy"",\r\n 451\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9942.npy"",\r\n 969\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7472.npy"",\r\n 248\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7485.npy"",\r\n 328\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9508.npy"",\r\n 854\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4236.npy"",\r\n 842\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8446.npy"",\r\n 390\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7993.npy"",\r\n 210\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5619.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6522.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5570.npy"",\r\n 633\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6954.npy"",\r\n 271\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_236.npy"",\r\n 206\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6800.npy"",\r\n 495\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7454.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8460.npy"",\r\n 739\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_465.npy"",\r\n 822\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8390.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_47.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1697.npy"",\r\n 568\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5687.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4217.npy"",\r\n 156\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8218.npy"",\r\n 463\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1283.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_949.npy"",\r\n 448\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2629.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4720.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4204.npy"",\r\n 345\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3876.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3817.npy"",\r\n 155\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1826.npy"",\r\n 157\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5029.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7700.npy"",\r\n 706\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6220.npy"",\r\n 664\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2557.npy"",\r\n 808\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4132.npy"",\r\n 641\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2832.npy"",\r\n 92\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4810.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6132.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3005.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6175.npy"",\r\n 771\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8900.npy"",\r\n 958\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6060.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7248.npy"",\r\n 546\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2049.npy"",\r\n 75\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7766.npy"",\r\n 193\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8935.npy"",\r\n 81\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5591.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8778.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7764.npy"",\r\n 224\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5696.npy"",\r\n 175\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8495.npy"",\r\n 357\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_485.npy"",\r\n 545\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7475.npy"",\r\n 92\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1376.npy"",\r\n 284\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1132.npy"",\r\n 464\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7295.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2301.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8319.npy"",\r\n 173\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7615.npy"",\r\n 119\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1062.npy"",\r\n 807\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5776.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4194.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3706.npy"",\r\n 355\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4673.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2782.npy"",\r\n 276\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2319.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_406.npy"",\r\n 658\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1325.npy"",\r\n 672\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6296.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1588.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4011.npy"",\r\n 254\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8229.npy"",\r\n 758\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6554.npy"",\r\n 249\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1575.npy"",\r\n 479\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8831.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9757.npy"",\r\n 926\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6252.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_779.npy"",\r\n 173\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1353.npy"",\r\n 416\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6153.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7194.npy"",\r\n 613\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1452.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4826.npy"",\r\n 502\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_785.npy"",\r\n 312\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4434.npy"",\r\n 187\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9669.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1222.npy"",\r\n 98\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3293.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7860.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7398.npy"",\r\n 560\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5303.npy"",\r\n 80\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8290.npy"",\r\n 819\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3556.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9891.npy"",\r\n 319\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_134.npy"",\r\n 319\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4939.npy"",\r\n 720\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4637.npy"",\r\n 840\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5243.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3540.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_177.npy"",\r\n 196\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5583.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3480.npy"",\r\n 433\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1819.npy"",\r\n 942\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1409.npy"",\r\n 939\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_603.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8494.npy"",\r\n 352\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_317.npy"",\r\n 344\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7791.npy"",\r\n 110\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9321.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1833.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9453.npy"",\r\n 56\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5511.npy"",\r\n 392\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4797.npy"",\r\n 224\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5102.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1846.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_919.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3514.npy"",\r\n 297\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6682.npy"",\r\n 414\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5492.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_10.npy"",\r\n 97\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3045.npy"",\r\n 467\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3201.npy"",\r\n 333\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3884.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2203.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7963.npy"",\r\n 231\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3171.npy"",\r\n 142\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9528.npy"",\r\n 708\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4896.npy"",\r\n 666\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_259.npy"",\r\n 121\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4779.npy"",\r\n 937\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6117.npy"",\r\n 768\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4114.npy"",\r\n 641\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5166.npy"",\r\n 115\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6075.npy"",\r\n 409\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8287.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5592.npy"",\r\n 321\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1350.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8214.npy"",\r\n 673\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4313.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8611.npy"",\r\n 594\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_267.npy"",\r\n 683\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5429.npy"",\r\n 616\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1133.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_709.npy"",\r\n 456\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6270.npy"",\r\n 601\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7372.npy"",\r\n 158\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8049.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9731.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9194.npy"",\r\n 687\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2304.npy"",\r\n 617\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2637.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9501.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_344.npy"",\r\n 124\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1480.npy"",\r\n 297\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2943.npy"",\r\n 806\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3211.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_534.npy"",\r\n 291\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1398.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9160.npy"",\r\n 647\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7081.npy"",\r\n 276\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1955.npy"",\r\n 315\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6691.npy"",\r\n 300\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_18.npy"",\r\n 242\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2698.npy"",\r\n 503\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4208.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_959.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5805.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9292.npy"",\r\n 869\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3839.npy"",\r\n 197\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4040.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7762.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8931.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_593.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2567.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4617.npy"",\r\n 145\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6097.npy"",\r\n 978\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8484.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9653.npy"",\r\n 314\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9581.npy"",\r\n 570\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7401.npy"",\r\n 669\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6940.npy"",\r\n 382\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5689.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9443.npy"",\r\n 550\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7513.npy"",\r\n 499\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_223.npy"",\r\n 280\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1996.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_183.npy"",\r\n 794\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8939.npy"",\r\n 720\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1430.npy"",\r\n 238\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9277.npy"",\r\n 79\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2877.npy"",\r\n 278\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6139.npy"",\r\n 971\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1407.npy"",\r\n 151\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5820.npy"",\r\n 185\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_495.npy"",\r\n 997\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7025.npy"",\r\n 59\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6005.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7916.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_331.npy"",\r\n 108\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1138.npy"",\r\n 303\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1123.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7838.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_191.npy"",\r\n 314\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6791.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7744.npy"",\r\n 376\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1276.npy"",\r\n 518\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3560.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9467.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8989.npy"",\r\n 845\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5265.npy"",\r\n 70\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8312.npy"",\r\n 517\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3783.npy"",\r\n 200\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7309.npy"",\r\n 694\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2281.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8782.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1379.npy"",\r\n 663\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2918.npy"",\r\n 639\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8192.npy"",\r\n 305\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7833.npy"",\r\n 579\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9361.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2930.npy"",\r\n 439\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1966.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1429.npy"",\r\n 221\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1964.npy"",\r\n 637\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7890.npy"",\r\n 282\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8632.npy"",\r\n 894\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9624.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6223.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_322.npy"",\r\n 297\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2478.npy"",\r\n 614\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5401.npy"",\r\n 159\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9.npy"",\r\n 376\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5629.npy"",\r\n 113\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8025.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5286.npy"",\r\n 916\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6455.npy"",\r\n 292\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_276.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8498.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3582.npy"",\r\n 726\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1668.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_675.npy"",\r\n 411\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6428.npy"",\r\n 896\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3510.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9482.npy"",\r\n 336\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3041.npy"",\r\n 87\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5387.npy"",\r\n 488\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1439.npy"",\r\n 334\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9124.npy"",\r\n 544\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2950.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5745.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9137.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1784.npy"",\r\n 959\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4038.npy"",\r\n 999\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8001.npy"",\r\n 944\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4063.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8197.npy"",\r\n 138\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4731.npy"",\r\n 458\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8277.npy"",\r\n 611\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7887.npy"",\r\n 197\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5161.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4074.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9523.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1355.npy"",\r\n 72\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8604.npy"",\r\n 844\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2396.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4407.npy"",\r\n 663\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_799.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5316.npy"",\r\n 113\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7652.npy"",\r\n 525\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5139.npy"",\r\n 151\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5601.npy"",\r\n 247\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7155.npy"",\r\n 344\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4167.npy"",\r\n 595\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5935.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1618.npy"",\r\n 324\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3372.npy"",\r\n 284\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6589.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8477.npy"",\r\n 906\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6007.npy"",\r\n 835\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1864.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6365.npy"",\r\n 776\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1640.npy"",\r\n 129\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5866.npy"",\r\n 278\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1546.npy"",\r\n 283\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7455.npy"",\r\n 794\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2960.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9809.npy"",\r\n 387\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7370.npy"",\r\n 447\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2458.npy"",\r\n 289\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9550.npy"",\r\n 311\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3967.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5103.npy"",\r\n 217\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1991.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_346.npy"",\r\n 455\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6445.npy"",\r\n 184\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8480.npy"",\r\n 144\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6068.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9298.npy"",\r\n 155\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9180.npy"",\r\n 911\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1048.npy"",\r\n 177\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6235.npy"",\r\n 808\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_407.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5318.npy"",\r\n 285\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8488.npy"",\r\n 241\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8920.npy"",\r\n 651\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1761.npy"",\r\n 196\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7105.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4880.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1940.npy"",\r\n 862\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9326.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8027.npy"",\r\n 368\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7355.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6297.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_638.npy"",\r\n 219\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7636.npy"",\r\n 175\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8615.npy"",\r\n 859\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4914.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9290.npy"",\r\n 206\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2829.npy"",\r\n 291\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1023.npy"",\r\n 210\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_174.npy"",\r\n 739\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8195.npy"",\r\n 192\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1233.npy"",\r\n 626\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6319.npy"",\r\n 814\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9171.npy"",\r\n 232\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8918.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8445.npy"",\r\n 181\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_327.npy"",\r\n 660\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9163.npy"",\r\n 664\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2358.npy"",\r\n 199\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7208.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3384.npy"",\r\n 618\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4203.npy"",\r\n 57\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8883.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1601.npy"",\r\n 157\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2992.npy"",\r\n 513\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3948.npy"",\r\n 512\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2990.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9334.npy"",\r\n 296\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4698.npy"",\r\n 83\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8846.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8600.npy"",\r\n 538\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9276.npy"",\r\n 695\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6208.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8041.npy"",\r\n 147\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8493.npy"",\r\n 297\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6927.npy"",\r\n 550\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9190.npy"",\r\n 423\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_960.npy"",\r\n 814\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8304.npy"",\r\n 517\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4369.npy"",\r\n 467\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3056.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8302.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4316.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4751.npy"",\r\n 912\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3860.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6459.npy"",\r\n 292\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7294.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3428.npy"",\r\n 123\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5951.npy"",\r\n 321\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9193.npy"",\r\n 524\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1990.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1435.npy"",\r\n 295\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_122.npy"",\r\n 151\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9191.npy"",\r\n 735\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9992.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4599.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7874.npy"",\r\n 689\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_459.npy"",\r\n 355\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4724.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8610.npy"",\r\n 729\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9695.npy"",\r\n 316\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4718.npy"",\r\n 247\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8877.npy"",\r\n 210\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5313.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3354.npy"",\r\n 712\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2392.npy"",\r\n 927\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4403.npy"",\r\n 777\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6662.npy"",\r\n 471\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_234.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5531.npy"",\r\n 509\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2539.npy"",\r\n 840\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6571.npy"",\r\n 734\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7792.npy"",\r\n 625\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1352.npy"",\r\n 602\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7271.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3157.npy"",\r\n 894\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6851.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_197.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8941.npy"",\r\n 136\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1651.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_837.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_656.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9995.npy"",\r\n 126\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8067.npy"",\r\n 127\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6180.npy"",\r\n 243\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2890.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8347.npy"",\r\n 572\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_115.npy"",\r\n 679\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2204.npy"",\r\n 206\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_307.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3963.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7741.npy"",\r\n 356\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1625.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6443.npy"",\r\n 225\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9818.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6441.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7971.npy"",\r\n 193\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2809.npy"",\r\n 83\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1724.npy"",\r\n 213\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3258.npy"",\r\n 146\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5640.npy"",\r\n 431\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2480.npy"",\r\n 499\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3324.npy"",\r\n 447\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8110.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5208.npy"",\r\n 50\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1883.npy"",\r\n 471\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4179.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6026.npy"",\r\n 172\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8963.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9872.npy"",\r\n 327\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6897.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1349.npy"",\r\n 103\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_71.npy"",\r\n 139\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9450.npy"",\r\n 259\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5499.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3234.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7763.npy"",\r\n 307\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1560.npy"",\r\n 131\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9939.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4907.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7566.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4887.npy"",\r\n 512\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8191.npy"",\r\n 223\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_934.npy"",\r\n 223\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6315.npy"",\r\n 393\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9789.npy"",\r\n 696\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8647.npy"",\r\n 598\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1598.npy"",\r\n 516\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8441.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_408.npy"",\r\n 63\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4905.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7905.npy"",\r\n 233\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5224.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3764.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5087.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1475.npy"",\r\n 107\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9591.npy"",\r\n 449\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6162.npy"",\r\n 104\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3380.npy"",\r\n 428\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9946.npy"",\r\n 65\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7758.npy"",\r\n 85\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2067.npy"",\r\n 486\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5769.npy"",\r\n 410\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2962.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8489.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1199.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5626.npy"",\r\n 391\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1265.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7506.npy"",\r\n 163\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3236.npy"",\r\n 328\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3697.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7333.npy"",\r\n 986\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4746.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5649.npy"",\r\n 781\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2787.npy"",\r\n 728\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3013.npy"",\r\n 995\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6133.npy"",\r\n 534\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1338.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9810.npy"",\r\n 573\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9823.npy"",\r\n 275\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6849.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6868.npy"",\r\n 128\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1202.npy"",\r\n 495\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4942.npy"",\r\n 362\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7279.npy"",\r\n 671\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4506.npy"",\r\n 208\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5618.npy"",\r\n 386\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6925.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3292.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8551.npy"",\r\n 53\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9594.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8124.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6580.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7938.npy"",\r\n 181\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3363.npy"",\r\n 139\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9562.npy"",\r\n 279\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_469.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3605.npy"",\r\n 994\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9720.npy"",\r\n 611\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1373.npy"",\r\n 875\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3132.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_386.npy"",\r\n 413\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6770.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1158.npy"",\r\n 157\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7659.npy"",\r\n 190\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4230.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3390.npy"",\r\n 669\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_833.npy"",\r\n 446\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6184.npy"",\r\n 968\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1047.npy"",\r\n 82\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1389.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6009.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4964.npy"",\r\n 118\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6447.npy"",\r\n 71\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8938.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8171.npy"",\r\n 242\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3274.npy"",\r\n 453\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6894.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_694.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3537.npy"",\r\n 206\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9651.npy"",\r\n 678\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5432.npy"",\r\n 135\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9813.npy"",\r\n 640\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4350.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4019.npy"",\r\n 989\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_489.npy"",\r\n 272\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4402.npy"",\r\n 275\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7403.npy"",\r\n 601\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9365.npy"",\r\n 191\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6532.npy"",\r\n 545\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5200.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7301.npy"",\r\n 322\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8879.npy"",\r\n 593\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8099.npy"",\r\n 155\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_990.npy"",\r\n 405\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7530.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8416.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7357.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1378.npy"",\r\n 349\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_598.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2116.npy"",\r\n 202\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6704.npy"",\r\n 867\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3544.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8981.npy"",\r\n 326\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2547.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9037.npy"",\r\n 313\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3417.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_646.npy"",\r\n 603\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9750.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7756.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7754.npy"",\r\n 689\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_637.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5365.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4395.npy"",\r\n 173\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1087.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5216.npy"",\r\n 648\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8808.npy"",\r\n 926\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3749.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9656.npy"",\r\n 358\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8485.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_244.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2418.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9681.npy"",\r\n 189\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7743.npy"",\r\n 953\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5222.npy"",\r\n 480\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3933.npy"",\r\n 583\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7321.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_876.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3030.npy"",\r\n 649\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2573.npy"",\r\n 175\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9917.npy"",\r\n 326\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4847.npy"",\r\n 727\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6102.npy"",\r\n 585\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1426.npy"",\r\n 624\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3082.npy"",\r\n 514\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1557.npy"",\r\n 552\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3893.npy"",\r\n 631\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3750.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3744.npy"",\r\n 806\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1734.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3057.npy"",\r\n 477\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8872.npy"",\r\n 52\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4270.npy"",\r\n 306\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_277.npy"",\r\n 524\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4013.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9049.npy"",\r\n 727\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_357.npy"",\r\n 388\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2079.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6556.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2154.npy"",\r\n 369\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3410.npy"",\r\n 229\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9446.npy"",\r\n 388\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7234.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9709.npy"",\r\n 872\r\n ],\r\n [\r\n",,terminal_output +3164,5004374,"TERMINAL",0,0," ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5823.npy"",\r\n 722\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3047.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8008.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5472.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2236.npy"",\r\n 274\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3904.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5227.npy"",\r\n 310\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9948.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7193.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8875.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7984.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3079.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6853.npy"",\r\n 168\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1332.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1948.npy"",\r\n 482\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1437.npy"",\r\n 546\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3666.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2167.npy"",\r\n 493\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7054.npy"",\r\n 161\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_457.npy"",\r\n 410\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8393.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6357.npy"",\r\n 486\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9458.npy"",\r\n 803\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3820.npy"",\r\n 445\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7788.npy"",\r\n 499\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8791.npy"",\r\n 69\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8656.npy"",\r\n 780\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5458.npy"",\r\n 273\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_506.npy"",\r\n 127\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9655.npy"",\r\n 210\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2587.npy"",\r\n 646\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1118.npy"",\r\n 514\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4112.npy"",\r\n 769\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4104.npy"",\r\n 394\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6630.npy"",\r\n 598\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_812.npy"",\r\n 162\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9890.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7830.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2132.npy"",\r\n 448\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1565.npy"",\r\n 203\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3022.npy"",\r\n 511\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7361.npy"",\r\n 580\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5740.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1825.npy"",\r\n 283\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_590.npy"",\r\n 436\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_137.npy"",\r\n 184\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5512.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7573.npy"",\r\n 133\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8742.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7624.npy"",\r\n 94\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8826.npy"",\r\n 121\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6595.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9590.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1112.npy"",\r\n 256\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8120.npy"",\r\n 605\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2588.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4030.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3439.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2166.npy"",\r\n 253\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1639.npy"",\r\n 164\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8082.npy"",\r\n 824\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7282.npy"",\r\n 152\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3308.npy"",\r\n 415\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9128.npy"",\r\n 735\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9678.npy"",\r\n 497\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5167.npy"",\r\n 291\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8345.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_737.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7765.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9619.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5291.npy"",\r\n 92\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6407.npy"",\r\n 228\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4404.npy"",\r\n 215\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_121.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5920.npy"",\r\n 76\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6361.npy"",\r\n 670\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1730.npy"",\r\n 169\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3053.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9679.npy"",\r\n 60\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6645.npy"",\r\n 808\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3899.npy"",\r\n 708\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4686.npy"",\r\n 535\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4726.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1913.npy"",\r\n 211\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7681.npy"",\r\n 379\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7747.npy"",\r\n 711\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8916.npy"",\r\n 150\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3303.npy"",\r\n 523\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2684.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7561.npy"",\r\n 742\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1644.npy"",\r\n 170\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2208.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1894.npy"",\r\n 181\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6002.npy"",\r\n 394\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1406.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7179.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1576.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8190.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5201.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5942.npy"",\r\n 465\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1911.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2746.npy"",\r\n 420\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2989.npy"",\r\n 782\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1519.npy"",\r\n 226\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4572.npy"",\r\n 607\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9649.npy"",\r\n 690\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_117.npy"",\r\n 999\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1621.npy"",\r\n 761\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5864.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4835.npy"",\r\n 224\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6353.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3770.npy"",\r\n 832\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2634.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9338.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6287.npy"",\r\n 412\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6418.npy"",\r\n 526\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1899.npy"",\r\n 857\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4128.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8607.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7467.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7606.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3031.npy"",\r\n 367\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8739.npy"",\r\n 182\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9908.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3737.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2914.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6392.npy"",\r\n 230\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1786.npy"",\r\n 143\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3777.npy"",\r\n 153\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4025.npy"",\r\n 113\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9862.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4233.npy"",\r\n 895\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7943.npy"",\r\n 896\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8055.npy"",\r\n 784\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5952.npy"",\r\n 277\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9056.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8668.npy"",\r\n 60\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3176.npy"",\r\n 429\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6076.npy"",\r\n 533\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3301.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1605.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5141.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3209.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1384.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5682.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4616.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6264.npy"",\r\n 245\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2142.npy"",\r\n 324\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2251.npy"",\r\n 527\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3789.npy"",\r\n 996\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3160.npy"",\r\n 837\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8530.npy"",\r\n 198\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9732.npy"",\r\n 183\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_859.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6527.npy"",\r\n 540\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_922.npy"",\r\n 914\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6647.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3192.npy"",\r\n 432\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7669.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2820.npy"",\r\n 597\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4352.npy"",\r\n 350\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6895.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9766.npy"",\r\n 121\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9229.npy"",\r\n 819\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2975.npy"",\r\n 696\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1180.npy"",\r\n 256\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7702.npy"",\r\n 308\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3270.npy"",\r\n 601\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5838.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8848.npy"",\r\n 211\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2850.npy"",\r\n 401\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1889.npy"",\r\n 457\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3198.npy"",\r\n 800\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_984.npy"",\r\n 570\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6946.npy"",\r\n 665\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3970.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9919.npy"",\r\n 456\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7695.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5943.npy"",\r\n 304\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9444.npy"",\r\n 650\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6707.npy"",\r\n 378\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8768.npy"",\r\n 61\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5295.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3352.npy"",\r\n 564\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3882.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9297.npy"",\r\n 742\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6575.npy"",\r\n 271\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_802.npy"",\r\n 505\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3419.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8705.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9489.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6183.npy"",\r\n 147\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6425.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9786.npy"",\r\n 126\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5739.npy"",\r\n 400\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8984.npy"",\r\n 599\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7745.npy"",\r\n 525\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5074.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6178.npy"",\r\n 513\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_718.npy"",\r\n 216\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7032.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6044.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_185.npy"",\r\n 382\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3937.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6819.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6328.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1511.npy"",\r\n 480\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_706.npy"",\r\n 416\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3784.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5133.npy"",\r\n 231\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1760.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2282.npy"",\r\n 427\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3071.npy"",\r\n 811\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4534.npy"",\r\n 121\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3533.npy"",\r\n 528\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9024.npy"",\r\n 484\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3328.npy"",\r\n 151\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_541.npy"",\r\n 335\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8185.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7910.npy"",\r\n 378\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6302.npy"",\r\n 97\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6415.npy"",\r\n 356\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5662.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_716.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7424.npy"",\r\n 635\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7137.npy"",\r\n 750\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2630.npy"",\r\n 155\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7324.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5574.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3431.npy"",\r\n 288\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6804.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6797.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9791.npy"",\r\n 622\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_119.npy"",\r\n 511\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8512.npy"",\r\n 676\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_316.npy"",\r\n 374\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5321.npy"",\r\n 407\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5887.npy"",\r\n 148\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_852.npy"",\r\n 838\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6054.npy"",\r\n 247\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7872.npy"",\r\n 825\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2645.npy"",\r\n 130\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8539.npy"",\r\n 273\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6383.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5018.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4464.npy"",\r\n 106\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8267.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2692.npy"",\r\n 293\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6258.npy"",\r\n 250\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7525.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1782.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8570.npy"",\r\n 89\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9772.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9077.npy"",\r\n 842\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9497.npy"",\r\n 345\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5005.npy"",\r\n 164\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2955.npy"",\r\n 498\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1341.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9125.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3412.npy"",\r\n 469\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2970.npy"",\r\n 755\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7932.npy"",\r\n 274\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_699.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5228.npy"",\r\n 732\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7915.npy"",\r\n 71\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9269.npy"",\r\n 223\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7684.npy"",\r\n 467\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4962.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6285.npy"",\r\n 328\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9224.npy"",\r\n 144\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5845.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6965.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8401.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_704.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8521.npy"",\r\n 114\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2369.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_800.npy"",\r\n 662\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5655.npy"",\r\n 877\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_302.npy"",\r\n 749\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3717.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9035.npy"",\r\n 381\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4560.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8069.npy"",\r\n 779\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4301.npy"",\r\n 308\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_359.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9762.npy"",\r\n 486\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5027.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7314.npy"",\r\n 896\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2277.npy"",\r\n 798\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_41.npy"",\r\n 373\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_609.npy"",\r\n 247\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4837.npy"",\r\n 527\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4177.npy"",\r\n 184\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6866.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7852.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2853.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3738.npy"",\r\n 200\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6268.npy"",\r\n 80\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7210.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6982.npy"",\r\n 205\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6562.npy"",\r\n 198\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9617.npy"",\r\n 586\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7884.npy"",\r\n 471\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3891.npy"",\r\n 963\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6089.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2360.npy"",\r\n 405\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3367.npy"",\r\n 373\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6209.npy"",\r\n 391\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5049.npy"",\r\n 339\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5573.npy"",\r\n 290\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8473.npy"",\r\n 309\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6401.npy"",\r\n 200\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2710.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4234.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5589.npy"",\r\n 146\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7336.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4289.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1692.npy"",\r\n 318\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_45.npy"",\r\n 348\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3755.npy"",\r\n 511\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5460.npy"",\r\n 484\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_37.npy"",\r\n 269\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4141.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6888.npy"",\r\n 413\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8072.npy"",\r\n 615\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6040.npy"",\r\n 276\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4690.npy"",\r\n 380\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9138.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9431.npy"",\r\n 164\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6376.npy"",\r\n 180\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5310.npy"",\r\n 68\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4530.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_356.npy"",\r\n 741\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_836.npy"",\r\n 368\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8697.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1216.npy"",\r\n 230\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9634.npy"",\r\n 348\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_196.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7133.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7710.npy"",\r\n 928\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5144.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9866.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7959.npy"",\r\n 217\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3008.npy"",\r\n 270\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4250.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1275.npy"",\r\n 601\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3887.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6793.npy"",\r\n 456\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5323.npy"",\r\n 761\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6536.npy"",\r\n 946\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5407.npy"",\r\n 293\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6561.npy"",\r\n 466\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9879.npy"",\r\n 494\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3175.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8957.npy"",\r\n 339\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3998.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2761.npy"",\r\n 401\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6079.npy"",\r\n 319\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5059.npy"",\r\n 533\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5297.npy"",\r\n 233\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5014.npy"",\r\n 379\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2531.npy"",\r\n 236\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4317.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1981.npy"",\r\n 360\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8270.npy"",\r\n 198\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3914.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8885.npy"",\r\n 114\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9110.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5814.npy"",\r\n 178\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7875.npy"",\r\n 212\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9198.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3004.npy"",\r\n 279\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3077.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5488.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5369.npy"",\r\n 223\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7316.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1715.npy"",\r\n 648\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_146.npy"",\r\n 214\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1140.npy"",\r\n 601\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3208.npy"",\r\n 418\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9849.npy"",\r\n 476\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7026.npy"",\r\n 105\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8550.npy"",\r\n 184\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9711.npy"",\r\n 589\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9930.npy"",\r\n 533\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7859.npy"",\r\n 733\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_531.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2657.npy"",\r\n 900\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6149.npy"",\r\n 296\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_221.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3694.npy"",\r\n 633\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4498.npy"",\r\n 459\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7376.npy"",\r\n 338\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1725.npy"",\r\n 304\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_842.npy"",\r\n 842\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1156.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6146.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3699.npy"",\r\n 145\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2907.npy"",\r\n 869\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7402.npy"",\r\n 355\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9884.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5175.npy"",\r\n 686\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8414.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5045.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7003.npy"",\r\n 915\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2821.npy"",\r\n 498\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2601.npy"",\r\n 328\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7227.npy"",\r\n 696\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9218.npy"",\r\n 157\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4216.npy"",\r\n 761\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1865.npy"",\r\n 303\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2252.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3854.npy"",\r\n 325\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1581.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5506.npy"",\r\n 209\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7567.npy"",\r\n 254\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4586.npy"",\r\n 342\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4335.npy"",\r\n 102\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1773.npy"",\r\n 496\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5504.npy"",\r\n 766\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5408.npy"",\r\n 706\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_942.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2102.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4225.npy"",\r\n 456\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3710.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6768.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3386.npy"",\r\n 606\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_538.npy"",\r\n 255\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2053.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7607.npy"",\r\n 429\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2616.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8436.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9630.npy"",\r\n 755\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8332.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_664.npy"",\r\n 577\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7450.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3447.npy"",\r\n 434\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4153.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7944.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6237.npy"",\r\n 82\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7655.npy"",\r\n 285\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9500.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5565.npy"",\r\n 156\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4247.npy"",\r\n 660\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7839.npy"",\r\n 399\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2534.npy"",\r\n 531\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2076.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7412.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8676.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7886.npy"",\r\n 370\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9970.npy"",\r\n 718\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1803.npy"",\r\n 415\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1783.npy"",\r\n 364\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3451.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_560.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5515.npy"",\r\n 584\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1711.npy"",\r\n 130\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2796.npy"",\r\n 179\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7116.npy"",\r\n 665\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1005.npy"",\r\n 159\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6171.npy"",\r\n 438\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4241.npy"",\r\n 246\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1196.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_649.npy"",\r\n 565\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7291.npy"",\r\n 487\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3152.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6152.npy"",\r\n 100\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6548.npy"",\r\n 273\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9620.npy"",\r\n 970\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6901.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5899.npy"",\r\n 485\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7855.npy"",\r\n 344\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2940.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_572.npy"",\r\n 140\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_661.npy"",\r\n 722\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8367.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7110.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_334.npy"",\r\n 533\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8944.npy"",\r\n 85\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2276.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7463.npy"",\r\n 739\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8279.npy"",\r\n 77\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2798.npy"",\r\n 312\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9473.npy"",\r\n 469\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_884.npy"",\r\n 217\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3980.npy"",\r\n 522\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7726.npy"",\r\n 396\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6712.npy"",\r\n 625\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8389.npy"",\r\n 762\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7190.npy"",\r\n 441\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1230.npy"",\r\n 768\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9136.npy"",\r\n 700\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5894.npy"",\r\n 194\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1959.npy"",\r\n 125\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2426.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6048.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_207.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7832.npy"",\r\n 594\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8951.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9214.npy"",\r\n 331\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6486.npy"",\r\n 187\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9402.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5122.npy"",\r\n 104\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8874.npy"",\r\n 74\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3871.npy"",\r\n 113\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7847.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3078.npy"",\r\n 678\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7499.npy"",\r\n 533\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1450.npy"",\r\n 679\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5400.npy"",\r\n 123\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_681.npy"",\r\n 318\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3943.npy"",\r\n 93\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8928.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3850.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_428.npy"",\r\n 342\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2315.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5932.npy"",\r\n 788\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2750.npy"",\r\n 450\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4839.npy"",\r\n 660\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1815.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6910.npy"",\r\n 93\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4221.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9248.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_801.npy"",\r\n 149\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6058.npy"",\r\n 856\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8029.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2395.npy"",\r\n 964\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2520.npy"",\r\n 180\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7198.npy"",\r\n 76\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6456.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8137.npy"",\r\n 467\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1088.npy"",\r\n 486\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8355.npy"",\r\n 428\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5977.npy"",\r\n 497\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7128.npy"",\r\n 647\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2213.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3265.npy"",\r\n 195\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9524.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1170.npy"",\r\n 191\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8283.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5322.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3456.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_318.npy"",\r\n 253\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4585.npy"",\r\n 790\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3418.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_910.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9539.npy"",\r\n 183\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5311.npy"",\r\n 70\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5561.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2162.npy"",\r\n 247\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2530.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2072.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2580.npy"",\r\n 58\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4547.npy"",\r\n 78\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2066.npy"",\r\n 416\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8873.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9445.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_770.npy"",\r\n 156\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5984.npy"",\r\n 419\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4514.npy"",\r\n 496\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7299.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7895.npy"",\r\n 87\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6881.npy"",\r\n 422\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4076.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7071.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_626.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4113.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6885.npy"",\r\n 249\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8865.npy"",\r\n 378\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3502.npy"",\r\n 379\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6656.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2181.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5794.npy"",\r\n 334\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1807.npy"",\r\n 171\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1192.npy"",\r\n 451\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1998.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8897.npy"",\r\n 768\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_80.npy"",\r\n 927\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_856.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8009.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4803.npy"",\r\n 257\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1992.npy"",\r\n 862\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9842.npy"",\r\n 500\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5258.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9275.npy"",\r\n 419\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9273.npy"",\r\n 891\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8908.npy"",\r\n 198\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9330.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_216.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2906.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8472.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1716.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2923.npy"",\r\n 845\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4358.npy"",\r\n 543\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_767.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8123.npy"",\r\n 823\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2794.npy"",\r\n 255\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_926.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7930.npy"",\r\n 389\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3733.npy"",\r\n 784\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1816.npy"",\r\n 951\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9901.npy"",\r\n 133\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1696.npy"",\r\n 487\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1266.npy"",\r\n 484\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7076.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2454.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5414.npy"",\r\n 948\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_23.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6092.npy"",\r\n 869\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3279.npy"",\r\n 86\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7676.npy"",\r\n 74\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4215.npy"",\r\n 684\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9912.npy"",\r\n 392\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8661.npy"",\r\n 184\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4538.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8924.npy"",\r\n 133\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8601.npy"",\r\n 748\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8845.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3723.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6893.npy"",\r\n 60\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5732.npy"",\r\n 479\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5131.npy"",\r\n 480\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6926.npy"",\r\n 382\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7377.npy"",\r\n 317\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2161.npy"",\r\n 588\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_780.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2542.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_636.npy"",\r\n 76\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1800.npy"",\r\n 111\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3154.npy"",\r\n 992\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5351.npy"",\r\n 257\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9257.npy"",\r\n 142\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1916.npy"",\r\n 154\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7222.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8133.npy"",\r\n 443\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2340.npy"",\r\n 294\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8889.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2564.npy"",\r\n 447\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8252.npy"",\r\n 818\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3261.npy"",\r\n 308\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8147.npy"",\r\n 339\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9318.npy"",\r\n 196\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1586.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9129.npy"",\r\n 592\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8842.npy"",\r\n 440\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8193.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4581.npy"",\r\n 80\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5168.npy"",\r\n 283\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4383.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3641.npy"",\r\n 281\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1945.npy"",\r\n 93\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1351.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9228.npy"",\r\n 546\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8075.npy"",\r\n 66\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3785.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8431.npy"",\r\n 242\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6228.npy"",\r\n 103\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5135.npy"",\r\n 329\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7824.npy"",\r\n 490\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6077.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3902.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8651.npy"",\r\n 642\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5676.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6585.npy"",\r\n 492\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4759.npy"",\r\n 443\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6436.npy"",\r\n 125\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8497.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4416.npy"",\r\n 718\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4687.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4332.npy"",\r\n 924\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4510.npy"",\r\n 923\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7380.npy"",\r\n 834\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8438.npy"",\r\n 672\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1823.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3448.npy"",\r\n 274\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2586.npy"",\r\n 592\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1540.npy"",\r\n 237\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6226.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9996.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1729.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1652.npy"",",,terminal_output +3165,5004478,"TERMINAL",0,0,"\r\n 247\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_894.npy"",\r\n 316\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5314.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5083.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2263.npy"",\r\n 183\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7101.npy"",\r\n 78\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8315.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8962.npy"",\r\n 620\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4091.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1348.npy"",\r\n 213\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2620.npy"",\r\n 179\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4512.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9279.npy"",\r\n 353\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5264.npy"",\r\n 944\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4354.npy"",\r\n 800\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5622.npy"",\r\n 217\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5523.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7796.npy"",\r\n 128\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_700.npy"",\r\n 365\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_132.npy"",\r\n 130\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8964.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2214.npy"",\r\n 270\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4903.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4467.npy"",\r\n 500\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1458.npy"",\r\n 540\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1562.npy"",\r\n 864\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6320.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8798.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8638.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4129.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8063.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1093.npy"",\r\n 336\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2793.npy"",\r\n 356\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9742.npy"",\r\n 372\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7261.npy"",\r\n 715\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3573.npy"",\r\n 170\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4677.npy"",\r\n 270\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9597.npy"",\r\n 832\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5444.npy"",\r\n 982\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6608.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_841.npy"",\r\n 405\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3712.npy"",\r\n 483\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9103.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2029.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2022.npy"",\r\n 126\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7423.npy"",\r\n 245\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_20.npy"",\r\n 196\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4449.npy"",\r\n 76\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5496.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6204.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1509.npy"",\r\n 153\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2207.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9407.npy"",\r\n 290\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3183.npy"",\r\n 297\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1154.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5082.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6049.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2335.npy"",\r\n 805\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2672.npy"",\r\n 857\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8556.npy"",\r\n 507\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8044.npy"",\r\n 381\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_460.npy"",\r\n 545\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4601.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6650.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1985.npy"",\r\n 572\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5366.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5006.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3748.npy"",\r\n 833\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7590.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4793.npy"",\r\n 195\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5995.npy"",\r\n 919\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1841.npy"",\r\n 472\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9691.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8433.npy"",\r\n 289\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7879.npy"",\r\n 273\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8788.npy"",\r\n 338\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1484.npy"",\r\n 204\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2018.npy"",\r\n 219\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7579.npy"",\r\n 766\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9164.npy"",\r\n 542\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8983.npy"",\r\n 917\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6840.npy"",\r\n 521\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9272.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6686.npy"",\r\n 246\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6363.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_355.npy"",\r\n 132\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5944.npy"",\r\n 107\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3298.npy"",\r\n 232\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7270.npy"",\r\n 119\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7828.npy"",\r\n 731\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8646.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7953.npy"",\r\n 935\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8644.npy"",\r\n 624\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3675.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9840.npy"",\r\n 130\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5562.npy"",\r\n 237\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2180.npy"",\r\n 437\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8066.npy"",\r\n 801\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4722.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8402.npy"",\r\n 605\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_783.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4651.npy"",\r\n 419\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1434.npy"",\r\n 979\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5026.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8937.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6030.npy"",\r\n 437\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6222.npy"",\r\n 230\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2046.npy"",\r\n 768\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7459.npy"",\r\n 428\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4268.npy"",\r\n 62\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7685.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1153.npy"",\r\n 301\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8357.npy"",\r\n 229\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7058.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8208.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3824.npy"",\r\n 223\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5872.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4852.npy"",\r\n 458\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5925.npy"",\r\n 712\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_966.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2452.npy"",\r\n 398\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7810.npy"",\r\n 792\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7363.npy"",\r\n 224\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7883.npy"",\r\n 260\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_455.npy"",\r\n 137\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5260.npy"",\r\n 355\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3518.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5417.npy"",\r\n 450\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9659.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2624.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4821.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6001.npy"",\r\n 450\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5828.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3373.npy"",\r\n 152\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6646.npy"",\r\n 557\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1454.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7264.npy"",\r\n 519\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5778.npy"",\r\n 424\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_403.npy"",\r\n 158\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7786.npy"",\r\n 547\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1366.npy"",\r\n 120\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_953.npy"",\r\n 191\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7562.npy"",\r\n 499\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9567.npy"",\r\n 467\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_249.npy"",\r\n 188\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5560.npy"",\r\n 882\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7487.npy"",\r\n 235\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6384.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4465.npy"",\r\n 212\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8268.npy"",\r\n 245\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_257.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2175.npy"",\r\n 498\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8643.npy"",\r\n 133\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6604.npy"",\r\n 839\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7901.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2399.npy"",\r\n 468\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4596.npy"",\r\n 570\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6350.npy"",\r\n 287\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4292.npy"",\r\n 968\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3568.npy"",\r\n 245\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4447.npy"",\r\n 268\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1281.npy"",\r\n 537\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4982.npy"",\r\n 615\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6661.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1097.npy"",\r\n 135\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8361.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7564.npy"",\r\n 372\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9403.npy"",\r\n 665\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_937.npy"",\r\n 827\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2891.npy"",\r\n 230\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_373.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4321.npy"",\r\n 485\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7717.npy"",\r\n 855\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9317.npy"",\r\n 576\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5300.npy"",\r\n 54\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_599.npy"",\r\n 415\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8529.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5543.npy"",\r\n 356\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7275.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_417.npy"",\r\n 516\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1766.npy"",\r\n 411\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_673.npy"",\r\n 169\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5356.npy"",\r\n 483\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2498.npy"",\r\n 306\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_337.npy"",\r\n 184\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5614.npy"",\r\n 835\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5608.npy"",\r\n 214\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_241.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6526.npy"",\r\n 503\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8759.npy"",\r\n 438\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5978.npy"",\r\n 240\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7780.npy"",\r\n 505\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6584.npy"",\r\n 496\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9096.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6825.npy"",\r\n 178\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6196.npy"",\r\n 777\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4738.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8960.npy"",\r\n 492\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9625.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4646.npy"",\r\n 331\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5815.npy"",\r\n 580\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_764.npy"",\r\n 591\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8608.npy"",\r\n 315\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5225.npy"",\r\n 676\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4229.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7656.npy"",\r\n 480\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_461.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7284.npy"",\r\n 153\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1854.npy"",\r\n 472\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_817.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2815.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2440.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2592.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8934.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3695.npy"",\r\n 518\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2664.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8948.npy"",\r\n 687\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3825.npy"",\r\n 124\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7445.npy"",\r\n 277\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2771.npy"",\r\n 559\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7960.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3059.npy"",\r\n 365\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5929.npy"",\r\n 308\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3526.npy"",\r\n 213\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6912.npy"",\r\n 399\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5882.npy"",\r\n 916\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4015.npy"",\r\n 200\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1810.npy"",\r\n 534\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4980.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5824.npy"",\r\n 792\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1217.npy"",\r\n 326\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_46.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5230.npy"",\r\n 138\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_218.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_653.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5269.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6039.npy"",\r\n 827\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7907.npy"",\r\n 295\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8849.npy"",\r\n 408\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9801.npy"",\r\n 542\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9390.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9376.npy"",\r\n 509\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8337.npy"",\r\n 252\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3647.npy"",\r\n 418\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_361.npy"",\r\n 307\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4257.npy"",\r\n 874\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8271.npy"",\r\n 406\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4971.npy"",\r\n 844\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1388.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9932.npy"",\r\n 721\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3214.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3632.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5192.npy"",\r\n 263\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7881.npy"",\r\n 392\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9393.npy"",\r\n 203\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4024.npy"",\r\n 452\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7869.npy"",\r\n 506\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2259.npy"",\r\n 209\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5212.npy"",\r\n 427\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2354.npy"",\r\n 302\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1239.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2408.npy"",\r\n 566\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9652.npy"",\r\n 548\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6104.npy"",\r\n 492\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8481.npy"",\r\n 259\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1531.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3131.npy"",\r\n 282\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4957.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_880.npy"",\r\n 322\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7634.npy"",\r\n 164\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3064.npy"",\r\n 751\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4356.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5252.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9913.npy"",\r\n 788\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1383.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4207.npy"",\r\n 440\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3669.npy"",\r\n 157\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1113.npy"",\r\n 823\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5657.npy"",\r\n 198\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3565.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8116.npy"",\r\n 819\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_808.npy"",\r\n 849\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4178.npy"",\r\n 334\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8045.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4823.npy"",\r\n 217\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1762.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1643.npy"",\r\n 989\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6514.npy"",\r\n 370\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5604.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3754.npy"",\r\n 947\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6210.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2915.npy"",\r\n 212\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2888.npy"",\r\n 330\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8492.npy"",\r\n 953\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1628.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5516.npy"",\r\n 373\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6929.npy"",\r\n 80\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8755.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2089.npy"",\r\n 516\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4471.npy"",\r\n 497\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9778.npy"",\r\n 231\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1728.npy"",\r\n 177\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1656.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2217.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7036.npy"",\r\n 308\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6121.npy"",\r\n 479\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7238.npy"",\r\n 574\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4551.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4642.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5811.npy"",\r\n 413\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6399.npy"",\r\n 297\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_872.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5963.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3900.npy"",\r\n 741\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1229.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1764.npy"",\r\n 190\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2034.npy"",\r\n 223\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6902.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3075.npy"",\r\n 692\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6900.npy"",\r\n 55\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_972.npy"",\r\n 217\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4709.npy"",\r\n 747\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5270.npy"",\r\n 198\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7631.npy"",\r\n 68\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1850.npy"",\r\n 523\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5905.npy"",\r\n 348\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3762.npy"",\r\n 731\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8930.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9261.npy"",\r\n 920\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5176.npy"",\r\n 604\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7466.npy"",\r\n 882\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1917.npy"",\r\n 582\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3743.npy"",\r\n 197\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6938.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_819.npy"",\r\n 124\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7031.npy"",\r\n 924\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2892.npy"",\r\n 101\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3771.npy"",\r\n 300\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3862.npy"",\r\n 91\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_443.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3381.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_293.npy"",\r\n 483\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3759.npy"",\r\n 940\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9972.npy"",\r\n 200\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2696.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2095.npy"",\r\n 304\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_225.npy"",\r\n 917\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_213.npy"",\r\n 617\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1886.npy"",\r\n 336\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4390.npy"",\r\n 810\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2959.npy"",\r\n 797\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8862.npy"",\r\n 327\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6243.npy"",\r\n 548\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7200.npy"",\r\n 489\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6572.npy"",\r\n 616\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7332.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5849.npy"",\r\n 995\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9875.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9978.npy"",\r\n 879\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3210.npy"",\r\n 164\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3262.npy"",\r\n 215\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9461.npy"",\r\n 179\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8028.npy"",\r\n 136\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_169.npy"",\r\n 50\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2004.npy"",\r\n 275\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7923.npy"",\r\n 348\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9287.npy"",\r\n 771\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4975.npy"",\r\n 564\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7630.npy"",\r\n 247\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1089.npy"",\r\n 233\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9476.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5419.npy"",\r\n 177\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4009.npy"",\r\n 166\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7557.npy"",\r\n 478\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6245.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5463.npy"",\r\n 533\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5256.npy"",\r\n 143\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7878.npy"",\r\n 159\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7477.npy"",\r\n 173\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7232.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9408.npy"",\r\n 803\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4105.npy"",\r\n 725\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_348.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4453.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5726.npy"",\r\n 622\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5724.npy"",\r\n 142\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4237.npy"",\r\n 936\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6841.npy"",\r\n 564\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8164.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1368.npy"",\r\n 299\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1045.npy"",\r\n 445\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8077.npy"",\r\n 767\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4591.npy"",\r\n 320\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4682.npy"",\r\n 235\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6510.npy"",\r\n 266\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4848.npy"",\r\n 101\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1269.npy"",\r\n 220\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6424.npy"",\r\n 469\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2812.npy"",\r\n 944\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2463.npy"",\r\n 253\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5847.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4336.npy"",\r\n 314\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5883.npy"",\r\n 398\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1890.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3307.npy"",\r\n 288\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7565.npy"",\r\n 324\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6267.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6199.npy"",\r\n 448\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6352.npy"",\r\n 228\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5278.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2828.npy"",\r\n 161\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7255.npy"",\r\n 558\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7422.npy"",\r\n 304\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7170.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8765.npy"",\r\n 111\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_133.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4515.npy"",\r\n 208\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3968.npy"",\r\n 151\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8186.npy"",\r\n 695\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8799.npy"",\r\n 102\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1753.npy"",\r\n 139\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1739.npy"",\r\n 623\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3461.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6216.npy"",\r\n 678\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6148.npy"",\r\n 493\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7981.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2819.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8819.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9426.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4825.npy"",\r\n 605\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5548.npy"",\r\n 363\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2867.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4306.npy"",\r\n 647\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3356.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7996.npy"",\r\n 429\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2999.npy"",\r\n 611\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8847.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1385.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3362.npy"",\r\n 222\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6283.npy"",\r\n 356\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4254.npy"",\r\n 475\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7602.npy"",\r\n 727\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4588.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7582.npy"",\r\n 615\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2322.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7973.npy"",\r\n 364\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2848.npy"",\r\n 282\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4419.npy"",\r\n 621\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5751.npy"",\r\n 215\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_449.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8090.npy"",\r\n 445\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3278.npy"",\r\n 70\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6626.npy"",\r\n 124\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7199.npy"",\r\n 523\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4162.npy"",\r\n 429\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1882.npy"",\r\n 878\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7339.npy"",\r\n 543\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3635.npy"",\r\n 827\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2903.npy"",\r\n 511\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7683.npy"",\r\n 380\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_545.npy"",\r\n 693\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_749.npy"",\r\n 305\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9120.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9516.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1314.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3378.npy"",\r\n 247\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6396.npy"",\r\n 297\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_890.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4608.npy"",\r\n 888\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6247.npy"",\r\n 366\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2368.npy"",\r\n 262\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4717.npy"",\r\n 441\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9034.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1633.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1877.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6253.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7010.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_74.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9285.npy"",\r\n 923\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5911.npy"",\r\n 443\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2676.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4422.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3252.npy"",\r\n 55\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8463.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5628.npy"",\r\n 141\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5415.npy"",\r\n 427\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6882.npy"",\r\n 399\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9221.npy"",\r\n 634\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3818.npy"",\r\n 295\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6241.npy"",\r\n 907\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3196.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_103.npy"",\r\n 319\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_645.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6632.npy"",\r\n 563\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9080.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1184.npy"",\r\n 155\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9735.npy"",\r\n 874\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5793.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6491.npy"",\r\n 199\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_795.npy"",\r\n 645\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9223.npy"",\r\n 555\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2104.npy"",\r\n 476\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9915.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1041.npy"",\r\n 815\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6031.npy"",\r\n 260\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7896.npy"",\r\n 176\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2826.npy"",\r\n 460\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4814.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5134.npy"",\r\n 936\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6125.npy"",\r\n 516\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7313.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5675.npy"",\r\n 66\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8525.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1779.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4933.npy"",\r\n 292\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7007.npy"",\r\n 624\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2830.npy"",\r\n 120\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1532.npy"",\r\n 576\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7351.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4973.npy"",\r\n 242\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2328.npy"",\r\n 105\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5022.npy"",\r\n 515\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9045.npy"",\r\n 227\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2190.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5392.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7107.npy"",\r\n 361\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1906.npy"",\r\n 565\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9092.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4339.npy"",\r\n 656\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7239.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8696.npy"",\r\n 603\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1471.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5727.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6706.npy"",\r\n 61\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9422.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8617.npy"",\r\n 986\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5459.npy"",\r\n 587\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_84.npy"",\r\n 523\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2659.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2863.npy"",\r\n 843\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6371.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9079.npy"",\r\n 374\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2995.npy"",\r\n 175\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3979.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9588.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6070.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5178.npy"",\r\n 285\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2060.npy"",\r\n 451\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_265.npy"",\r\n 368\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3366.npy"",\r\n 524\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7268.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4415.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1831.npy"",\r\n 676\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2543.npy"",\r\n 175\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2562.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5539.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6254.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9602.npy"",\r\n 255\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3360.npy"",\r\n 372\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8134.npy"",\r\n 669\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5132.npy"",\r\n 621\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6903.npy"",\r\n 143\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7169.npy"",\r\n 506\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7331.npy"",\r\n 695\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6301.npy"",\r\n 705\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1310.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1238.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2407.npy"",\r\n 836\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1345.npy"",\r\n 187\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8226.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2432.npy"",\r\n 219\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6281.npy"",\r\n 822\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9167.npy"",\r\n 918\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8669.npy"",\r\n 444\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3163.npy"",\r\n 327\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3620.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3693.npy"",\r\n 960\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8095.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6299.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4674.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8370.npy"",\r\n 319\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1574.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1607.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7858.npy"",\r\n 363\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7457.npy"",\r\n 194\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_752.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3179.npy"",\r\n 83\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2020.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8881.npy"",\r\n 717\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_11.npy"",\r\n 918\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2743.npy"",\r\n 575\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2656.npy"",\r\n 971\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1155.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_908.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7476.npy"",\r\n 54\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6006.npy"",\r\n 399\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9411.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_24.npy"",\r\n 262\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6240.npy"",\r\n 178\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5758.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3277.npy"",\r\n 334\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9613.npy"",\r\n 77\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1545.npy"",\r\n 430\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3753.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7341.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6205.npy"",\r\n 223\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_30.npy"",\r\n 469\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7836.npy"",\r\n 735\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5312.npy"",\r\n 280\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1077.npy"",\r\n 170\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3617.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1768.npy"",\r\n 830\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9384.npy"",\r\n 452\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5936.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3908.npy"",\r\n 274\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1013.npy"",\r\n 242\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2509.npy"",\r\n 113\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2972.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2929.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3875.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4800.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9185.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8986.npy"",\r\n 499\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_423.npy"",\r\n 220\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5239.npy"",\r\n 464\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3286.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9134.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8078.npy"",\r\n 114\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3299.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2583.npy"",\r\n 465\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8331.npy"",\r\n 778\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8681.npy"",\r\n 547\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4913.npy"",\r\n 444\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6859.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_159.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3517.npy"",\r\n 858\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1212.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9854.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5938.npy"",\r\n 89\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4457.npy"",\r\n 476\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3674.npy"",\r\n 263\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1293.npy"",\r\n 766\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5770.npy"",\r\n 888\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4641.npy"",\r\n 775\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5810.npy"",\r\n 666\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1226.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7654.npy"",\r\n 758\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8466.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4792.npy"",\r\n 625\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8835.npy"",\r\n 131\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9671.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9143.npy"",\r\n 570\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3007.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2546.npy"",\r\n 218\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7603.npy"",\r\n 425\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1647.npy"",\r\n 764\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5934.npy"",\r\n 271\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7153.npy"",\r\n 491\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9499.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2584.npy"",\r\n 99\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3000.npy"",\r\n 195\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1234.npy"",\r\n 802\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4259.npy"",\r\n 292\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5478.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2403.npy"",\r\n 169\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3073.npy"",\r\n 565\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1842.npy"",\r\n 421\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4974.npy"",\r\n 330\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7899.npy"",\r\n 605\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7775.npy"",\r\n 138\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9254.npy"",\r\n 461\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4278.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4223.npy"",\r\n 265\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1248.npy"",\r\n 424\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5162.npy"",\r\n 246\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6364.npy"",\r\n 313\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5091.npy"",\r\n 301\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1824.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6260.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_940.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4658.npy"",\r\n 876\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_143.npy"",\r\n 934\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_138.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2121.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6094.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1399.npy"",\r\n 477\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6331.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9944.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3260.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2944.npy"",\r\n 828\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2568.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1236.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3594.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5113.npy"",\r\n 77\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7802.npy"",\r\n 109\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2366.npy"",\r\n 739\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7712.npy"",\r\n 191\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3690.npy"",\r\n 713\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_343.npy"",\r\n 876\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2939.npy"",\r\n 975\r\n",,terminal_output +3166,5004525,"TERMINAL",0,0," ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6590.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1152.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9963.npy"",\r\n 947\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3339.npy"",\r\n 916\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8140.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8474.npy"",\r\n 112\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7111.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5279.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9415.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4017.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6471.npy"",\r\n 129\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9286.npy"",\r\n 336\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1219.npy"",\r\n 652\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6448.npy"",\r\n 209\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7223.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3229.npy"",\r\n 305\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5710.npy"",\r\n 116\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9386.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4985.npy"",\r\n 779\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2246.npy"",\r\n 303\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3903.npy"",\r\n 593\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7204.npy"",\r\n 134\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7260.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2570.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7787.npy"",\r\n 596\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2096.npy"",\r\n 107\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9745.npy"",\r\n 445\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7563.npy"",\r\n 894\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3807.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8975.npy"",\r\n 92\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5202.npy"",\r\n 146\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8942.npy"",\r\n 490\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4235.npy"",\r\n 992\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4159.npy"",\r\n 370\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9968.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9850.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3756.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7496.npy"",\r\n 957\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7551.npy"",\r\n 941\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7885.npy"",\r\n 293\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1103.npy"",\r\n 142\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4946.npy"",\r\n 372\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4088.npy"",\r\n 185\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6023.npy"",\r\n 488\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4919.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_671.npy"",\r\n 731\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7940.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4676.npy"",\r\n 837\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6289.npy"",\r\n 455\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7651.npy"",\r\n 241\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2136.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4016.npy"",\r\n 295\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_248.npy"",\r\n 213\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2019.npy"",\r\n 190\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1949.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3313.npy"",\r\n 167\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4243.npy"",\r\n 539\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4486.npy"",\r\n 90\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_211.npy"",\r\n 247\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6839.npy"",\r\n 365\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7718.npy"",\r\n 162\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5659.npy"",\r\n 261\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7005.npy"",\r\n 215\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2346.npy"",\r\n 218\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2222.npy"",\r\n 259\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3441.npy"",\r\n 53\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9121.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1665.npy"",\r\n 90\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2840.npy"",\r\n 347\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1857.npy"",\r\n 592\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2799.npy"",\r\n 623\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5471.npy"",\r\n 386\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6111.npy"",\r\n 871\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2150.npy"",\r\n 482\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4501.npy"",\r\n 298\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5792.npy"",\r\n 749\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2979.npy"",\r\n 178\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_360.npy"",\r\n 107\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_574.npy"",\r\n 115\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_82.npy"",\r\n 460\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3020.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9227.npy"",\r\n 67\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3402.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4245.npy"",\r\n 751\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6544.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4174.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7079.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_874.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4047.npy"",\r\n 645\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_997.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_608.npy"",\r\n 304\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8628.npy"",\r\n 331\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3411.npy"",\r\n 762\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_145.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6036.npy"",\r\n 164\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_892.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5679.npy"",\r\n 871\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9632.npy"",\r\n 93\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7151.npy"",\r\n 216\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4937.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9039.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6906.npy"",\r\n 193\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6904.npy"",\r\n 923\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6547.npy"",\r\n 156\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_280.npy"",\r\n 299\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6813.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4747.npy"",\r\n 715\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3696.npy"",\r\n 716\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2887.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2147.npy"",\r\n 745\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7429.npy"",\r\n 120\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8968.npy"",\r\n 104\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7431.npy"",\r\n 722\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2100.npy"",\r\n 428\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5142.npy"",\r\n 386\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9967.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_725.npy"",\r\n 941\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6991.npy"",\r\n 205\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1215.npy"",\r\n 665\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5118.npy"",\r\n 406\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5890.npy"",\r\n 363\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2041.npy"",\r\n 142\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4730.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6701.npy"",\r\n 110\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9777.npy"",\r\n 952\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6311.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2947.npy"",\r\n 449\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7933.npy"",\r\n 652\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9961.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_463.npy"",\r\n 531\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6482.npy"",\r\n 785\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3972.npy"",\r\n 103\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4788.npy"",\r\n 786\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1157.npy"",\r\n 236\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3966.npy"",\r\n 371\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9873.npy"",\r\n 232\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3074.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_239.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1014.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7604.npy"",\r\n 234\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3823.npy"",\r\n 754\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2092.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7148.npy"",\r\n 388\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9312.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6629.npy"",\r\n 71\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9551.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2802.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3187.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6011.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5193.npy"",\r\n 338\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8442.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1620.npy"",\r\n 155\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5615.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_189.npy"",\r\n 128\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1178.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2348.npy"",\r\n 587\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4084.npy"",\r\n 71\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3988.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9893.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5650.npy"",\r\n 194\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2945.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_363.npy"",\r\n 478\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_507.npy"",\r\n 121\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5715.npy"",\r\n 190\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3414.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8079.npy"",\r\n 315\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8003.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6809.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1736.npy"",\r\n 200\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1278.npy"",\r\n 991\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5658.npy"",\r\n 521\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_616.npy"",\r\n 466\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4042.npy"",\r\n 143\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6033.npy"",\r\n 168\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1569.npy"",\r\n 856\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3300.npy"",\r\n 477\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_0.npy"",\r\n 264\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2765.npy"",\r\n 667\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7119.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9751.npy"",\r\n 772\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_944.npy"",\r\n 483\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2024.npy"",\r\n 179\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7425.npy"",\r\n 778\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3813.npy"",\r\n 896\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5744.npy"",\r\n 187\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2493.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_106.npy"",\r\n 122\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7795.npy"",\r\n 453\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5790.npy"",\r\n 274\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6757.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_114.npy"",\r\n 320\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4478.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9195.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_253.npy"",\r\n 256\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1240.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_607.npy"",\r\n 145\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3919.npy"",\r\n 251\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3781.npy"",\r\n 293\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_8592.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6741.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6165.npy"",\r\n 591\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3358.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2628.npy"",\r\n 316\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_964.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_999.npy"",\r\n 246\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5409.npy"",\r\n 111\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_596.npy"",\r\n 221\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7418.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_70.npy"",\r\n 846\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6195.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6603.npy"",\r\n 346\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5114.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_6569.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9748.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2786.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3665.npy"",\r\n 183\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5701.npy"",\r\n 354\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9780.npy"",\r\n 369\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2893.npy"",\r\n 1000\r\n ],\r\n [\r\n 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5564.npy"",\r\n 929\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3267.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5839.npy"",\r\n 339\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4351.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1237.npy"",\r\n 146\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_3962.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_4064.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_2179.npy"",\r\n 568\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_9518.npy"",\r\n 611\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_1999.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_34.npy"",\r\n 114\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7408.npy"",\r\n 993\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_5473.npy"",\r\n 1000\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_163.npy"",\r\n 650\r\n ],\r\n [\r\n ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes/episode_7353.npy"",\r\n 1000\r\n ]\r\n]]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +3167,5004532,"TERMINAL",0,0,"1",,terminal_output +3168,5005533,"TERMINAL",0,0,"2",,terminal_output +3169,5006570,"TERMINAL",0,0,"3",,terminal_output +3170,5007722,"input_pipeline/preprocess/npy_to_array_records.py",0,0,"",python,tab +3171,5007904,"TERMINAL",0,0,"4",,terminal_output +3172,5008706,"TERMINAL",0,0,"5",,terminal_output +3173,5009683,"TERMINAL",0,0,"6",,terminal_output +3174,5010763,"input_pipeline/preprocess/npy_to_array_records.py",2392,0,"",python,selection_mouse +3175,5010788,"TERMINAL",0,0,"7",,terminal_output +3176,5010931,"input_pipeline/preprocess/npy_to_array_records.py",2389,9,"meta_data",python,selection_mouse +3177,5011687,"input_pipeline/preprocess/npy_to_array_records.py",2379,0,"",python,selection_mouse +3178,5011777,"TERMINAL",0,0,"8",,terminal_output +3179,5011826,"input_pipeline/preprocess/npy_to_array_records.py",2375,11,"output_path",python,selection_mouse +3180,5012904,"TERMINAL",0,0,"9",,terminal_output +3181,5014238,"input_pipeline/preprocess/npy_to_array_records.py",2440,0,"",python,selection_mouse +3182,5014384,"input_pipeline/preprocess/npy_to_array_records.py",2436,7,"results",python,selection_mouse +3183,5015337,"TERMINAL",0,0,"31",,terminal_output +3184,5016343,"TERMINAL",0,0,"3",,terminal_output +3185,5017398,"TERMINAL",0,0,"4",,terminal_output +3186,5018440,"TERMINAL",0,0,"5",,terminal_output 
+3187,5019480,"TERMINAL",0,0,"6",,terminal_output +3188,5020520,"TERMINAL",0,0,"7",,terminal_output +3189,5021650,"TERMINAL",0,0,"8",,terminal_output +3190,5022597,"TERMINAL",0,0,"9",,terminal_output +3191,5023657,"TERMINAL",0,0,"40",,terminal_output +3192,5024783,"TERMINAL",0,0,"1",,terminal_output +3193,5025733,"TERMINAL",0,0,"2",,terminal_output +3194,5026776,"TERMINAL",0,0,"3",,terminal_output +3195,5027795,"TERMINAL",0,0,"4",,terminal_output +3196,5028882,"TERMINAL",0,0,"6",,terminal_output +3197,5029905,"TERMINAL",0,0,"7",,terminal_output +3198,5030927,"TERMINAL",0,0,"8",,terminal_output +3199,5031742,"generate_dataset.py",0,0,"",python,tab +3200,5031960,"TERMINAL",0,0,"9",,terminal_output +3201,5033001,"TERMINAL",0,0,"50",,terminal_output +3202,5034086,"TERMINAL",0,0,"1",,terminal_output +3203,5035127,"TERMINAL",0,0,"2",,terminal_output +3204,5036151,"TERMINAL",0,0,"3",,terminal_output +3205,5037147,"TERMINAL",0,0,"4",,terminal_output +3206,5038182,"TERMINAL",0,0,"5",,terminal_output +3207,5039326,"TERMINAL",0,0,"6",,terminal_output +3208,5040266,"TERMINAL",0,0,"7",,terminal_output +3209,5041306,"TERMINAL",0,0,"8",,terminal_output +3210,5042347,"TERMINAL",0,0,"9",,terminal_output +3211,5043389,"TERMINAL",0,0,"1:00",,terminal_output +3212,5044431,"TERMINAL",0,0,"1",,terminal_output +3213,5044668,"generate_dataset.py",660,0,"",python,selection_mouse +3214,5044803,"generate_dataset.py",654,8,"metadata",python,selection_mouse +3215,5045472,"TERMINAL",0,0,"2",,terminal_output +3216,5046514,"TERMINAL",0,0,"3",,terminal_output +3217,5047553,"TERMINAL",0,0,"4",,terminal_output +3218,5048644,"TERMINAL",0,0,"5",,terminal_output +3219,5049666,"TERMINAL",0,0,"6",,terminal_output +3220,5050692,"TERMINAL",0,0,"7",,terminal_output +3221,5051711,"TERMINAL",0,0,"8",,terminal_output +3222,5052841,"TERMINAL",0,0,"9",,terminal_output +3223,5053868,"TERMINAL",0,0,"10",,terminal_output +3224,5054890,"TERMINAL",0,0,"2",,terminal_output +3225,5055914,"TERMINAL",0,0,"3",,terminal_output +3226,5056902,"TERMINAL",0,0,"4",,terminal_output +3227,5057963,"TERMINAL",0,0,"5",,terminal_output +3228,5058967,"TERMINAL",0,0,"6",,terminal_output +3229,5060005,"TERMINAL",0,0,"7",,terminal_output +3230,5061252,"TERMINAL",0,0,"8",,terminal_output +3231,5062159,"TERMINAL",0,0,"9",,terminal_output +3232,5063127,"TERMINAL",0,0,"20",,terminal_output +3233,5064209,"TERMINAL",0,0,"1",,terminal_output +3234,5065244,"TERMINAL",0,0,"2",,terminal_output +3235,5066254,"TERMINAL",0,0,"3",,terminal_output +3236,5067283,"TERMINAL",0,0,"4",,terminal_output +3237,5068323,"TERMINAL",0,0,"5",,terminal_output +3238,5069368,"TERMINAL",0,0,"6",,terminal_output +3239,5070396,"TERMINAL",0,0,"7",,terminal_output +3240,5071434,"TERMINAL",0,0,"8",,terminal_output +3241,5072466,"TERMINAL",0,0,"9",,terminal_output +3242,5073504,"TERMINAL",0,0,"30",,terminal_output +3243,5074547,"TERMINAL",0,0,"1",,terminal_output +3244,5075717,"TERMINAL",0,0,"2",,terminal_output +3245,5076620,"TERMINAL",0,0,"3",,terminal_output +3246,5077724,"TERMINAL",0,0,"4",,terminal_output +3247,5078749,"TERMINAL",0,0,"5",,terminal_output +3248,5079772,"TERMINAL",0,0,"6",,terminal_output +3249,5080807,"TERMINAL",0,0,"7",,terminal_output +3250,5081823,"TERMINAL",0,0,"9",,terminal_output +3251,5082906,"TERMINAL",0,0,"40",,terminal_output +3252,5083908,"TERMINAL",0,0,"1",,terminal_output +3253,5084951,"TERMINAL",0,0,"2",,terminal_output +3254,5085996,"TERMINAL",0,0,"3",,terminal_output +3255,5087043,"TERMINAL",0,0,"4",,terminal_output 
+3256,5088169,"TERMINAL",0,0,"5",,terminal_output +3257,5089195,"TERMINAL",0,0,"6",,terminal_output +3258,5090219,"TERMINAL",0,0,"7",,terminal_output +3259,5091202,"TERMINAL",0,0,"8",,terminal_output +3260,5092267,"TERMINAL",0,0,"9",,terminal_output +3261,5093292,"TERMINAL",0,0,"50",,terminal_output +3262,5094324,"TERMINAL",0,0,"1",,terminal_output +3263,5095353,"TERMINAL",0,0,"2",,terminal_output +3264,5096390,"TERMINAL",0,0,"3",,terminal_output +3265,5097424,"TERMINAL",0,0,"4",,terminal_output +3266,5098271,"generate_dataset.py",653,0,"",python,selection_mouse +3267,5098311,"generate_dataset.py",652,0,"",python,selection_command +3268,5098466,"TERMINAL",0,0,"5",,terminal_output +3269,5098761,"generate_dataset.py",667,0,"",python,selection_mouse +3270,5098790,"generate_dataset.py",666,0,"",python,selection_command +3271,5099503,"TERMINAL",0,0,"6",,terminal_output +3272,5100560,"TERMINAL",0,0,"7",,terminal_output +3273,5101697,"TERMINAL",0,0,"8",,terminal_output +3274,5102622,"TERMINAL",0,0,"9",,terminal_output +3275,5103665,"TERMINAL",0,0,"2:00",,terminal_output +3276,5104722,"TERMINAL",0,0,"1",,terminal_output +3277,5105783,"TERMINAL",0,0,"2",,terminal_output +3278,5106815,"TERMINAL",0,0,"3",,terminal_output +3279,5107934,"TERMINAL",0,0,"5",,terminal_output +3280,5108862,"TERMINAL",0,0,"6",,terminal_output +3281,5109902,"TERMINAL",0,0,"7",,terminal_output +3282,5111004,"TERMINAL",0,0,"8",,terminal_output +3283,5111983,"TERMINAL",0,0,"9",,terminal_output +3284,5113361,"TERMINAL",0,0,"10",,terminal_output +3285,5114345,"TERMINAL",0,0,"1",,terminal_output +3286,5115385,"TERMINAL",0,0,"2",,terminal_output +3287,5116426,"TERMINAL",0,0,"3",,terminal_output +3288,5117470,"TERMINAL",0,0,"4",,terminal_output +3289,5118513,"TERMINAL",0,0,"5",,terminal_output +3290,5119552,"TERMINAL",0,0,"6",,terminal_output +3291,5120630,"TERMINAL",0,0,"7",,terminal_output +3292,5121700,"TERMINAL",0,0,"8",,terminal_output +3293,5122687,"TERMINAL",0,0,"9",,terminal_output +3294,5123805,"TERMINAL",0,0,"20",,terminal_output +3295,5124834,"TERMINAL",0,0,"1",,terminal_output +3296,5125854,"TERMINAL",0,0,"27",,terminal_output +3297,5126834,"TERMINAL",0,0,"4",,terminal_output +3298,5127902,"TERMINAL",0,0,"5",,terminal_output +3299,5128929,"TERMINAL",0,0,"6",,terminal_output +3300,5130051,"TERMINAL",0,0,"7",,terminal_output +3301,5130988,"TERMINAL",0,0,"8",,terminal_output +3302,5132102,"TERMINAL",0,0,"9",,terminal_output +3303,5133123,"TERMINAL",0,0,"30",,terminal_output +3304,5134103,"TERMINAL",0,0,"116",,terminal_output +3305,5135141,"TERMINAL",0,0,"2",,terminal_output +3306,5137334,"TERMINAL",0,0,"3",,terminal_output +3307,5138349,"TERMINAL",0,0,"5",,terminal_output +3308,5139375,"TERMINAL",0,0,"6",,terminal_output +3309,5140447,"TERMINAL",0,0,"7",,terminal_output +3310,5141452,"TERMINAL",0,0,"8",,terminal_output +3311,5142486,"TERMINAL",0,0,"9",,terminal_output +3312,5143520,"TERMINAL",0,0,"40",,terminal_output +3313,5144563,"TERMINAL",0,0,"1",,terminal_output +3314,5145617,"TERMINAL",0,0,"2",,terminal_output +3315,5146711,"TERMINAL",0,0,"3",,terminal_output +3316,5147766,"TERMINAL",0,0,"4",,terminal_output +3317,5148791,"TERMINAL",0,0,"5",,terminal_output +3318,5149817,"TERMINAL",0,0,"6",,terminal_output +3319,5150840,"TERMINAL",0,0,"7",,terminal_output +3320,5151842,"TERMINAL",0,0,"9",,terminal_output +3321,5152989,"TERMINAL",0,0,"50",,terminal_output +3322,5153923,"TERMINAL",0,0,"1",,terminal_output +3323,5155039,"TERMINAL",0,0,"2",,terminal_output +3324,5156063,"TERMINAL",0,0,"3",,terminal_output 
+3325,5157087,"TERMINAL",0,0,"4",,terminal_output +3326,5158109,"TERMINAL",0,0,"5",,terminal_output +3327,5159133,"TERMINAL",0,0,"6",,terminal_output +3328,5160259,"TERMINAL",0,0,"7",,terminal_output +3329,5160311,"generate_dataset.py",1155,0,"",python,selection_mouse +3330,5161307,"TERMINAL",0,0,"8",,terminal_output +3331,5162277,"TERMINAL",0,0,"9",,terminal_output +3332,5163331,"TERMINAL",0,0,"3:008",,terminal_output +3333,5164360,"TERMINAL",0,0,"1",,terminal_output +3334,5165100,"input_pipeline/preprocess/npy_to_array_records.py",0,0,"",python,tab +3335,5165429,"TERMINAL",0,0,"2",,terminal_output +3336,5166520,"TERMINAL",0,0,"3",,terminal_output +3337,5167414,"input_pipeline/preprocess/npy_to_array_records.py",2447,0,"",python,selection_mouse +3338,5167420,"input_pipeline/preprocess/npy_to_array_records.py",2446,0,"",python,selection_command +3339,5167476,"TERMINAL",0,0,"4",,terminal_output +3340,5168037,"input_pipeline/preprocess/npy_to_array_records.py",2447,0,"",python,selection_mouse +3341,5168049,"input_pipeline/preprocess/npy_to_array_records.py",2446,0,"",python,selection_command +3342,5168189,"input_pipeline/preprocess/npy_to_array_records.py",2446,1,")",python,selection_mouse +3343,5168192,"input_pipeline/preprocess/npy_to_array_records.py",2447,0,"",python,selection_command +3344,5168279,"input_pipeline/preprocess/npy_to_array_records.py",2446,1,")",python,selection_mouse +3345,5168280,"input_pipeline/preprocess/npy_to_array_records.py",2439,8,"ults, f)",python,selection_mouse +3346,5168311,"input_pipeline/preprocess/npy_to_array_records.py",2436,11,"results, f)",python,selection_mouse +3347,5168339,"input_pipeline/preprocess/npy_to_array_records.py",2430,17,".dump(results, f)",python,selection_mouse +3348,5168368,"input_pipeline/preprocess/npy_to_array_records.py",2427,20,"son.dump(results, f)",python,selection_mouse +3349,5168394,"input_pipeline/preprocess/npy_to_array_records.py",2424,23," json.dump(results, f)",python,selection_mouse +3350,5168426,"input_pipeline/preprocess/npy_to_array_records.py",2421,26," json.dump(results, f)",python,selection_mouse +3351,5168455,"input_pipeline/preprocess/npy_to_array_records.py",2419,28," json.dump(results, f)",python,selection_mouse +3352,5168488,"input_pipeline/preprocess/npy_to_array_records.py",2343,104," with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.dump(results, f)",python,selection_mouse +3353,5168617,"TERMINAL",0,0,"5",,terminal_output +3354,5169529,"TERMINAL",0,0,"6",,terminal_output +3355,5170569,"TERMINAL",0,0,"7",,terminal_output +3356,5171593,"TERMINAL",0,0,"8",,terminal_output +3357,5172650,"TERMINAL",0,0,"9",,terminal_output +3358,5172856,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab +3359,5173675,"TERMINAL",0,0,"10",,terminal_output +3360,5174227,"input_pipeline/preprocess/npy_to_array_records.py",0,0,"",python,tab +3361,5174913,"generate_dataset.py",0,0,"",python,tab +3362,5174977,"TERMINAL",0,0,"1",,terminal_output +3363,5175784,"TERMINAL",0,0,"2",,terminal_output +3364,5176174,"generate_dataset.py",1956,0,"",python,selection_mouse +3365,5176818,"TERMINAL",0,0,"3",,terminal_output +3366,5177465,"generate_dataset.py",1878,0,"",python,selection_mouse +3367,5177509,"generate_dataset.py",1877,0,"",python,selection_command +3368,5177875,"TERMINAL",0,0,"5",,terminal_output +3369,5178484,"generate_dataset.py",1878,0,"\n",python,content +3370,5178861,"generate_dataset.py",1879,0," with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n 
json.dump(results, f)",python,content +3371,5178870,"TERMINAL",0,0,"6",,terminal_output +3372,5179939,"TERMINAL",0,0,"7",,terminal_output +3373,5180067,"generate_dataset.py",1881,0,"",python,selection_mouse +3374,5180174,"generate_dataset.py",1881,1," ",python,selection_mouse +3375,5180265,"generate_dataset.py",1881,2," ",python,selection_mouse +3376,5180266,"generate_dataset.py",1881,4," wi",python,selection_mouse +3377,5180266,"generate_dataset.py",1881,82," with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n j",python,selection_mouse +3378,5180266,"generate_dataset.py",1881,84," with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n jso",python,selection_mouse +3379,5180267,"generate_dataset.py",1881,86," with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.",python,selection_mouse +3380,5180288,"generate_dataset.py",1881,88," with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.du",python,selection_mouse +3381,5180309,"generate_dataset.py",1881,90," with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.dump",python,selection_mouse +3382,5180330,"generate_dataset.py",1881,92," with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.dump(r",python,selection_mouse +3383,5180347,"generate_dataset.py",1881,94," with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.dump(res",python,selection_mouse +3384,5180378,"generate_dataset.py",1881,95," with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.dump(resu",python,selection_mouse +3385,5180438,"generate_dataset.py",1881,96," with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.dump(resul",python,selection_mouse +3386,5180450,"generate_dataset.py",1881,97," with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.dump(result",python,selection_mouse +3387,5180498,"generate_dataset.py",1881,98," with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.dump(results",python,selection_mouse +3388,5180693,"generate_dataset.py",1881,99," with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.dump(results,",python,selection_mouse +3389,5180757,"generate_dataset.py",1881,100," with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.dump(results, ",python,selection_mouse +3390,5180758,"generate_dataset.py",1881,102," with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.dump(results, f)",python,selection_mouse +3391,5180945,"TERMINAL",0,0,"8",,terminal_output +3392,5181412,"generate_dataset.py",1954,8," ",python,content +3393,5181412,"generate_dataset.py",1879,4,"",python,content +3394,5181984,"TERMINAL",0,0,"9",,terminal_output +3395,5183024,"TERMINAL",0,0,"20",,terminal_output +3396,5183854,"input_pipeline/preprocess/npy_to_array_records.py",0,0,"",python,tab +3397,5184082,"TERMINAL",0,0,"1",,terminal_output +3398,5185179,"generate_dataset.py",0,0,"",python,tab +3399,5185363,"TERMINAL",0,0,"2",,terminal_output +3400,5186170,"TERMINAL",0,0,"3",,terminal_output +3401,5187177,"TERMINAL",0,0,"4",,terminal_output +3402,5188249,"TERMINAL",0,0,"5",,terminal_output +3403,5189256,"TERMINAL",0,0,"6",,terminal_output +3404,5190365,"TERMINAL",0,0,"7",,terminal_output +3405,5191391,"TERMINAL",0,0,"8",,terminal_output +3406,5192380,"TERMINAL",0,0,"9",,terminal_output 
+3407,5192638,"generate_dataset.py",1987,0,"",python,selection_mouse +3408,5192844,"generate_dataset.py",1984,10,"output_dir",python,selection_mouse +3409,5193499,"TERMINAL",0,0,"30",,terminal_output +3410,5194465,"TERMINAL",0,0,"1",,terminal_output +3411,5195501,"TERMINAL",0,0,"2",,terminal_output +3412,5196538,"TERMINAL",0,0,"3",,terminal_output +3413,5197578,"TERMINAL",0,0,"4",,terminal_output +3414,5198615,"TERMINAL",0,0,"5",,terminal_output +3415,5199684,"TERMINAL",0,0,"6",,terminal_output +3416,5200708,"TERMINAL",0,0,"7",,terminal_output +3417,5201735,"TERMINAL",0,0,"8",,terminal_output +3418,5202860,"TERMINAL",0,0,"9",,terminal_output +3419,5203888,"TERMINAL",0,0,"40",,terminal_output +3420,5204906,"TERMINAL",0,0,"2",,terminal_output +3421,5205932,"TERMINAL",0,0,"3",,terminal_output +3422,5206934,"TERMINAL",0,0,"4",,terminal_output +3423,5208082,"TERMINAL",0,0,"5",,terminal_output +3424,5208405,"generate_dataset.py",1986,0,"",python,selection_mouse +3425,5209106,"TERMINAL",0,0,"6",,terminal_output +3426,5210130,"TERMINAL",0,0,"7",,terminal_output +3427,5211091,"TERMINAL",0,0,"8",,terminal_output +3428,5212178,"TERMINAL",0,0,"9",,terminal_output +3429,5213228,"TERMINAL",0,0,"50",,terminal_output +3430,5213795,"generate_dataset.py",1890,0,"",python,selection_mouse +3431,5214003,"generate_dataset.py",1890,1,"s",python,selection_mouse +3432,5214004,"generate_dataset.py",1890,5,"s.pat",python,selection_mouse +3433,5214004,"generate_dataset.py",1890,7,"s.path.",python,selection_mouse +3434,5214025,"generate_dataset.py",1890,8,"s.path.j",python,selection_mouse +3435,5214053,"generate_dataset.py",1890,9,"s.path.jo",python,selection_mouse +3436,5214077,"generate_dataset.py",1890,10,"s.path.joi",python,selection_mouse +3437,5214103,"generate_dataset.py",1890,12,"s.path.join(",python,selection_mouse +3438,5214134,"generate_dataset.py",1890,13,"s.path.join(a",python,selection_mouse +3439,5214166,"generate_dataset.py",1890,14,"s.path.join(ar",python,selection_mouse +3440,5214247,"generate_dataset.py",1890,16,"s.path.join(args",python,selection_mouse +3441,5214336,"generate_dataset.py",1890,17,"s.path.join(args.",python,selection_mouse +3442,5214347,"TERMINAL",0,0,"1",,terminal_output +3443,5215170,"generate_dataset.py",1890,17,"",python,content +3444,5215280,"TERMINAL",0,0,"2",,terminal_output +3445,5215992,"generate_dataset.py",1889,1,"",python,content +3446,5216321,"TERMINAL",0,0,"3",,terminal_output +3447,5216848,"generate_dataset.py",1899,1,"",python,content +3448,5216993,"generate_dataset.py",1898,1,"",python,content +3449,5217131,"generate_dataset.py",1897,1,"",python,content +3450,5217335,"TERMINAL",0,0,"4",,terminal_output +3451,5217416,"generate_dataset.py",1896,1,"",python,content +3452,5217553,"generate_dataset.py",1896,0,"d",python,content +3453,5217554,"generate_dataset.py",1897,0,"",python,selection_keyboard +3454,5218387,"generate_dataset.py",1889,8,"output_dir",python,content +3455,5218417,"TERMINAL",0,0,"5",,terminal_output +3456,5219080,"generate_dataset.py",1900,0,"",python,selection_command +3457,5219400,"generate_dataset.py",1899,1,"",python,content +3458,5219429,"TERMINAL",0,0,"6",,terminal_output +3459,5219895,"generate_dataset.py",1899,0," ",python,content +3460,5219897,"generate_dataset.py",1900,0,"",python,selection_keyboard +3461,5220342,"generate_dataset.py",1900,0,"/",python,content +3462,5220343,"generate_dataset.py",1901,0,"",python,selection_keyboard +3463,5220446,"TERMINAL",0,0,"7",,terminal_output +3464,5221489,"TERMINAL",0,0,"8",,terminal_output 
+3465,5222523,"TERMINAL",0,0,"9",,terminal_output +3466,5222621,"generate_dataset.py",1919,0,"",python,selection_command +3467,5223566,"TERMINAL",0,0,"4:00",,terminal_output +3468,5224634,"TERMINAL",0,0,"1",,terminal_output +3469,5225657,"TERMINAL",0,0,"2",,terminal_output +3470,5226710,"TERMINAL",0,0,"3",,terminal_output +3471,5227745,"TERMINAL",0,0,"4",,terminal_output +3472,5227978,"generate_dataset.py",1957,0,"",python,selection_mouse +3473,5228870,"TERMINAL",0,0,"5",,terminal_output +3474,5229027,"generate_dataset.py",1919,0,"",python,selection_mouse +3475,5229326,"generate_dataset.py",1918,1,"",python,content +3476,5229881,"TERMINAL",0,0,"6",,terminal_output +3477,5230916,"TERMINAL",0,0,"8",,terminal_output +3478,5231943,"TERMINAL",0,0,"9",,terminal_output +3479,5232964,"TERMINAL",0,0,"10",,terminal_output +3480,5233983,"TERMINAL",0,0,"1",,terminal_output +3481,5234110,"generate_dataset.py",293,0,"",python,selection_mouse +3482,5235116,"TERMINAL",0,0,"2",,terminal_output +3483,5235215,"generate_dataset.py",305,0,"",python,selection_command +3484,5236089,"TERMINAL",0,0,"3",,terminal_output +3485,5236377,"generate_dataset.py",307,0,"\n",python,content +3486,5237000,"generate_dataset.py",308,0,"i",python,content +3487,5237001,"generate_dataset.py",309,0,"",python,selection_keyboard +3488,5237114,"generate_dataset.py",309,0,"m",python,content +3489,5237115,"generate_dataset.py",310,0,"",python,selection_keyboard +3490,5237208,"TERMINAL",0,0,"4",,terminal_output +3491,5237262,"generate_dataset.py",310,0,"p",python,content +3492,5237264,"generate_dataset.py",311,0,"",python,selection_keyboard +3493,5237344,"generate_dataset.py",311,0,"o",python,content +3494,5237345,"generate_dataset.py",312,0,"",python,selection_keyboard +3495,5237691,"generate_dataset.py",308,4,"import",python,content +3496,5237974,"generate_dataset.py",314,0,"j",python,content +3497,5237977,"generate_dataset.py",315,0,"",python,selection_keyboard +3498,5238150,"TERMINAL",0,0,"5",,terminal_output +3499,5238447,"generate_dataset.py",314,1,"",python,content +3500,5238482,"generate_dataset.py",314,0," ",python,content +3501,5238483,"generate_dataset.py",315,0,"",python,selection_keyboard +3502,5238595,"generate_dataset.py",315,0,"j",python,content +3503,5238596,"generate_dataset.py",316,0,"",python,selection_keyboard +3504,5238730,"generate_dataset.py",316,0,"s",python,content +3505,5238731,"generate_dataset.py",317,0,"",python,selection_keyboard +3506,5238794,"generate_dataset.py",317,0,"o",python,content +3507,5238795,"generate_dataset.py",318,0,"",python,selection_keyboard +3508,5238916,"generate_dataset.py",318,0,"n",python,content +3509,5238917,"generate_dataset.py",319,0,"",python,selection_keyboard +3510,5239055,"generate_dataset.py",318,0,"",python,selection_command +3511,5239231,"TERMINAL",0,0,"6",,terminal_output +3512,5240298,"TERMINAL",0,0,"7",,terminal_output +3513,5241019,"generate_dataset.py",2080,0,"",python,selection_mouse +3514,5241269,"TERMINAL",0,0,"8",,terminal_output +3515,5241574,"generate_dataset.py",1959,0,"",python,selection_mouse +3516,5241744,"generate_dataset.py",1957,7,"results",python,selection_mouse +3517,5242469,"TERMINAL",0,0,"9",,terminal_output +3518,5243411,"TERMINAL",0,0,"20",,terminal_output +3519,5243663,"generate_dataset.py",1957,7,"",python,content +3520,5244158,"generate_dataset.py",1957,0,"m",python,content +3521,5244159,"generate_dataset.py",1958,0,"",python,selection_keyboard +3522,5244225,"generate_dataset.py",1958,0,"e",python,content 
+3523,5244226,"generate_dataset.py",1959,0,"",python,selection_keyboard +3524,5244421,"TERMINAL",0,0,"1",,terminal_output +3525,5245425,"TERMINAL",0,0,"2",,terminal_output +3526,5245708,"generate_dataset.py",1957,2,"metadata",python,content +3527,5246259,"generate_dataset.py",1964,0,"",python,selection_command +3528,5246392,"generate_dataset.py",1991,0,"",python,selection_command +3529,5246478,"TERMINAL",0,0,"3733",,terminal_output +3530,5247496,"TERMINAL",0,0,"4",,terminal_output +3531,5247698,"generate_dataset.py",1970,0,"",python,selection_command +3532,5248557,"generate_dataset.py",1970,0,"#",python,content +3533,5248559,"generate_dataset.py",1971,0,"",python,selection_keyboard +3534,5248575,"TERMINAL",0,0,"5",,terminal_output +3535,5248629,"generate_dataset.py",1971,0," ",python,content +3536,5248630,"generate_dataset.py",1972,0,"",python,selection_keyboard +3537,5249005,"generate_dataset.py",1971,0,"",python,selection_command +3538,5249574,"TERMINAL",0,0,"6",,terminal_output +3539,5250617,"TERMINAL",0,0,"7",,terminal_output +3540,5251129,"generate_dataset.py",1969,0,"\n ",python,content +3541,5251163,"generate_dataset.py",1970,4,"",python,content +3542,5251467,"generate_dataset.py",1943,0,"",python,selection_command +3543,5251662,"TERMINAL",0,0,"8",,terminal_output +3544,5252066,"generate_dataset.py",1970,0,"",python,selection_command +3545,5252698,"TERMINAL",0,0,"9",,terminal_output +3546,5253751,"TERMINAL",0,0,"30",,terminal_output +3547,5254886,"TERMINAL",0,0,"1",,terminal_output +3548,5255838,"TERMINAL",0,0,"2",,terminal_output +3549,5256857,"TERMINAL",0,0,"47",,terminal_output +3550,5259281,"TERMINAL",0,0,"5",,terminal_output +3551,5260305,"TERMINAL",0,0,"7",,terminal_output +3552,5261431,"TERMINAL",0,0,"8",,terminal_output +3553,5261638,"TERMINAL",0,0,"s /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev",,terminal_command +3554,5261696,"TERMINAL",0,0,"]633;E;2025-09-04 11:24:38 ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;Cepisode_0.array_record episode_2.array_record episode_4.array_record episode_6.array_record episode_8.array_record metadata.npy\r\nepisode_1.array_record episode_3.array_record episode_5.array_record episode_7.array_record episode_9.array_record\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +3555,5262373,"TERMINAL",0,0,"9",,terminal_output +3556,5263423,"TERMINAL",0,0,"40",,terminal_output +3557,5264466,"TERMINAL",0,0,"1",,terminal_output +3558,5265498,"TERMINAL",0,0,"2",,terminal_output +3559,5266541,"TERMINAL",0,0,"3",,terminal_output +3560,5267581,"TERMINAL",0,0,"4 9",,terminal_output +3561,5268620,"TERMINAL",0,0,"5",,terminal_output +3562,5269698,"TERMINAL",0,0,"6",,terminal_output +3563,5269726,"TERMINAL",0,0,"rm /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/*",,terminal_command +3564,5269804,"TERMINAL",0,0,"]633;E;2025-09-04 11:24:46 rm /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/*;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +3565,5270710,"TERMINAL",0,0,"7",,terminal_output +3566,5271738,"TERMINAL",0,0,"8",,terminal_output +3567,5272800,"TERMINAL",0,0,"9",,terminal_output +3568,5273305,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/generate_dataset_10m.sh",,terminal_command +3569,5273371,"TERMINAL",0,0,"]633;E;2025-09-04 11:24:50 sh 
slurm/dev/mihir/horeka/generate_dataset_10m.sh;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +3570,5273822,"TERMINAL",0,0,"50",,terminal_output +3571,5274950,"TERMINAL",0,0,"2",,terminal_output +3572,5275292,"TERMINAL",0,0,"Gym has been unmaintained since 2022 and does not support NumPy 2.0 amongst other critical functionality.\r\nPlease upgrade to Gymnasium, the maintained drop-in replacement of Gym, or contact the authors of your software and request that they upgrade.\r\nUsers of this version of Gym should be able to simply replace 'import gym' with 'import gymnasium as gym' in the vast majority of cases.\r\nSee the migration guide at https://gymnasium.farama.org/introduction/migration_guide/ for additional information.\r\n",,terminal_output +3573,5275896,"TERMINAL",0,0,"3",,terminal_output +3574,5276937,"TERMINAL",0,0,"48",,terminal_output +3575,5277979,"TERMINAL",0,0,"5",,terminal_output +3576,5279046,"TERMINAL",0,0,"6",,terminal_output +3577,5280071,"TERMINAL",0,0,"7",,terminal_output +3578,5281198,"TERMINAL",0,0,"8",,terminal_output +3579,5282219,"TERMINAL",0,0,"9",,terminal_output +3580,5282834,"TERMINAL",0,0,"Episode too short (179), resampling...\r\n",,terminal_output +3581,5282891,"TERMINAL",0,0,"Episode too short (168), resampling...\r\n",,terminal_output +3582,5283006,"TERMINAL",0,0,"Episode too short (200), resampling...\r\n",,terminal_output +3583,5283183,"TERMINAL",0,0,"5:00",,terminal_output +3584,5283420,"TERMINAL",0,0,"Episode too short (997), resampling...\r\n",,terminal_output +3585,5283523,"TERMINAL",0,0,"Episode too short (146), resampling...\r\n",,terminal_output +3586,5284166,"TERMINAL",0,0,"Episode 0 completed, length: 1000\r\n",,terminal_output +3587,5284219,"TERMINAL",0,0,"1",,terminal_output +3588,5284612,"TERMINAL",0,0,"Episode too short (931), resampling...\r\n",,terminal_output +3589,5285291,"TERMINAL",0,0,"Episode 1 completed, length: 1000\r\n",,terminal_output +3590,5285310,"TERMINAL",0,0,"2",,terminal_output +3591,5285596,"TERMINAL",0,0,"Episode too short (584), resampling...\r\n",,terminal_output +3592,5286129,"TERMINAL",0,0,"Episode too short (830), resampling...\r\nEpisode too short (54), resampling...\r\n",,terminal_output +3593,5286237,"TERMINAL",0,0,"Episode too short (98), resampling...\r\n",,terminal_output +3594,5286299,"TERMINAL",0,0,"3",,terminal_output +3595,5286921,"TERMINAL",0,0,"Episode 2 completed, length: 1000\r\n",,terminal_output +3596,5287444,"TERMINAL",0,0,"4",,terminal_output +3597,5287550,"TERMINAL",0,0,"Episode 3 completed, length: 1000\r\n",,terminal_output +3598,5287676,"TERMINAL",0,0,"Episode too short (239), resampling...\r\n",,terminal_output +3599,5287954,"TERMINAL",0,0,"Episode too short (476), resampling...\r\n",,terminal_output +3600,5288477,"TERMINAL",0,0,"5",,terminal_output +3601,5288504,"TERMINAL",0,0,"Episode 4 completed, length: 1000\r\n",,terminal_output +3602,5289137,"TERMINAL",0,0,"Episode 5 completed, length: 1000\r\n",,terminal_output +3603,5289297,"TERMINAL",0,0,"Episode too short (285), resampling...\r\n",,terminal_output +3604,5289410,"TERMINAL",0,0,"6",,terminal_output +3605,5289597,"TERMINAL",0,0,"Episode too short (171), resampling...\r\n",,terminal_output +3606,5289901,"TERMINAL",0,0,"Episode too short (250), resampling...\r\n",,terminal_output +3607,5290442,"TERMINAL",0,0,"Episode 6 completed, length: 1000\r\nEpisode too short (143), resampling...\r\n",,terminal_output +3608,5290455,"TERMINAL",0,0,"7",,terminal_output +3609,5291027,"TERMINAL",0,0,"Episode 7 completed, length: 
1000\r\n",,terminal_output +3610,5291438,"TERMINAL",0,0,"Episode too short (861), resampling...\r\n",,terminal_output +3611,5291489,"TERMINAL",0,0,"Episode too short (127), resampling...\r\n",,terminal_output +3612,5291490,"TERMINAL",0,0,"8",,terminal_output +3613,5291714,"TERMINAL",0,0,"Episode too short (517), resampling...\r\n",,terminal_output +3614,5292381,"TERMINAL",0,0,"Episode 8 completed, length: 1000\r\n",,terminal_output +3615,5292502,"TERMINAL",0,0,"Episode too short (478), resampling...\r\n",,terminal_output +3616,5292563,"TERMINAL",0,0,"9",,terminal_output +3617,5293192,"TERMINAL",0,0,"Episode 9 completed, length: 1000\r\nDataset generated with 10 valid episodes\r\n",,terminal_output +3618,5293255,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +3619,5293649,"TERMINAL",0,0,"10",,terminal_output +3620,5294676,"TERMINAL",0,0,"1",,terminal_output +3621,5295738,"TERMINAL",0,0,"2",,terminal_output +3622,5296714,"TERMINAL",0,0,"3",,terminal_output +3623,5297758,"TERMINAL",0,0,"4",,terminal_output +3624,5298810,"TERMINAL",0,0,"5",,terminal_output +3625,5299281,"TERMINAL",0,0,"ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev",,terminal_command +3626,5299833,"TERMINAL",0,0,"7",,terminal_output +3627,5300961,"TERMINAL",0,0,"8",,terminal_output +3628,5301909,"TERMINAL",0,0,"9",,terminal_output +3629,5302941,"TERMINAL",0,0,"20",,terminal_output +3630,5304036,"TERMINAL",0,0,"1",,terminal_output +3631,5305041,"TERMINAL",0,0,"2",,terminal_output +3632,5306072,"TERMINAL",0,0,"3",,terminal_output +3633,5307143,"TERMINAL",0,0,"4",,terminal_output +3634,5307154,"TERMINAL",0,0,"cat ^Ck/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/dev",,terminal_command +3635,5307207,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D",,terminal_output +3636,5308221,"TERMINAL",0,0,"5",,terminal_output +3637,5309196,"TERMINAL",0,0,"6",,terminal_output +3638,5309997,"TERMINAL",0,0,"ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/meta_data.json",,terminal_command +3639,5310011,"TERMINAL",0,0,"]633;E;2025-09-04 11:25:27 ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/meta_data.json ;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/meta_data.json\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +3640,5310229,"TERMINAL",0,0,"7",,terminal_output +3641,5311262,"TERMINAL",0,0,"8",,terminal_output +3642,5312298,"TERMINAL",0,0,"9",,terminal_output +3643,5313336,"TERMINAL",0,0,"30",,terminal_output +3644,5313433,"TERMINAL",0,0,"cat /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/meta_data.json",,terminal_command +3645,5313489,"TERMINAL",0,0,"]633;E;2025-09-04 11:25:30 cat /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/meta_data.json;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C[{""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_0.array_record"", ""length"": 1000}, {""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_1.array_record"", ""length"": 1000}, {""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_2.array_record"", ""length"": 1000}, {""path"": 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_3.array_record"", ""length"": 1000}, {""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_4.array_record"", ""length"": 1000}, {""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_5.array_record"", ""length"": 1000}, {""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_6.array_record"", ""length"": 1000}, {""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_7.array_record"", ""length"": 1000}, {""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_8.array_record"", ""length"": 1000}, {""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_9.array_record"", ""length"": 1000}]]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +3646,5314373,"TERMINAL",0,0,"1",,terminal_output +3647,5315418,"TERMINAL",0,0,"2",,terminal_output +3648,5316461,"TERMINAL",0,0,"3",,terminal_output +3649,5317667,"TERMINAL",0,0,"4",,terminal_output +3650,5318550,"TERMINAL",0,0,"5",,terminal_output +3651,5319583,"TERMINAL",0,0,"6",,terminal_output +3652,5320647,"TERMINAL",0,0,"7",,terminal_output +3653,5321736,"TERMINAL",0,0,"8",,terminal_output +3654,5322727,"TERMINAL",0,0,"9",,terminal_output +3655,5323757,"TERMINAL",0,0,"40",,terminal_output +3656,5324837,"TERMINAL",0,0,"1",,terminal_output +3657,5325853,"TERMINAL",0,0,"3",,terminal_output +3658,5326989,"TERMINAL",0,0,"4",,terminal_output +3659,5328107,"TERMINAL",0,0,"5",,terminal_output +3660,5328979,"TERMINAL",0,0,"6",,terminal_output +3661,5329972,"TERMINAL",0,0,"7",,terminal_output +3662,5331140,"TERMINAL",0,0,"8",,terminal_output +3663,5331551,".venv/lib/python3.10/site-packages/procgen/env.py",0,0,"import os\nimport random\nfrom typing import Sequence, Optional, List\n\nimport gym3\nfrom gym3.libenv import CEnv\nimport numpy as np\nfrom .builder import build\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\n\nMAX_STATE_SIZE = 2 ** 20\n\nENV_NAMES = [\n ""bigfish"",\n ""bossfight"",\n ""caveflyer"",\n ""chaser"",\n ""climber"",\n ""coinrun"",\n ""dodgeball"",\n ""fruitbot"",\n ""heist"",\n ""jumper"",\n ""leaper"",\n ""maze"",\n ""miner"",\n ""ninja"",\n ""plunder"",\n ""starpilot"",\n]\n\nEXPLORATION_LEVEL_SEEDS = {\n ""coinrun"": 1949448038,\n ""caveflyer"": 1259048185,\n ""leaper"": 1318677581,\n ""jumper"": 1434825276,\n ""maze"": 158988835,\n ""heist"": 876640971,\n ""climber"": 1561126160,\n ""ninja"": 1123500215,\n}\n\n# should match DistributionMode in game.h, except for 'exploration' which is handled by Python\nDISTRIBUTION_MODE_DICT = {\n ""easy"": 0,\n ""hard"": 1,\n ""extreme"": 2,\n ""memory"": 10,\n ""exploration"": 20,\n}\n\n\ndef create_random_seed():\n rand_seed = random.SystemRandom().randint(0, 2 ** 31 - 1)\n try:\n # force MPI processes to definitely choose different random seeds\n from mpi4py import MPI\n\n rand_seed = rand_seed - (rand_seed % MPI.COMM_WORLD.size) + MPI.COMM_WORLD.rank\n except ModuleNotFoundError:\n pass\n return rand_seed\n\n\nclass BaseProcgenEnv(CEnv):\n """"""\n Base procedurally generated environment\n """"""\n\n def __init__(\n self,\n num,\n env_name,\n options,\n debug=False,\n rand_seed=None,\n num_levels=0,\n start_level=0,\n use_sequential_levels=False,\n debug_mode=0,\n resource_root=None,\n num_threads=4,\n render_mode=None,\n ):\n if resource_root is None:\n 
resource_root = os.path.join(SCRIPT_DIR, ""data"", ""assets"") + os.sep\n assert os.path.exists(resource_root)\n\n lib_dir = os.path.join(SCRIPT_DIR, ""data"", ""prebuilt"")\n if os.path.exists(lib_dir):\n assert any([os.path.exists(os.path.join(lib_dir, name)) for name in [""libenv.so"", ""libenv.dylib"", ""env.dll""]]), ""package is installed, but the prebuilt environment library is missing""\n assert not debug, ""debug has no effect for pre-compiled library""\n else:\n # only compile if we don't find a pre-built binary\n lib_dir = build(debug=debug)\n \n self.combos = self.get_combos()\n\n if render_mode is None:\n render_human = False\n elif render_mode == ""rgb_array"":\n render_human = True\n else:\n raise Exception(f""invalid render mode {render_mode}"")\n\n if rand_seed is None:\n rand_seed = create_random_seed()\n\n options.update(\n {\n ""env_name"": env_name,\n ""num_levels"": num_levels,\n ""start_level"": start_level,\n ""num_actions"": len(self.combos),\n ""use_sequential_levels"": bool(use_sequential_levels),\n ""debug_mode"": debug_mode,\n ""rand_seed"": rand_seed,\n ""num_threads"": num_threads,\n ""render_human"": render_human,\n # these will only be used the first time an environment is created in a process\n ""resource_root"": resource_root,\n }\n )\n\n self.options = options\n\n super().__init__(\n lib_dir=lib_dir,\n num=num,\n options=options,\n c_func_defs=[\n ""int get_state(libenv_env *, int, char *, int);"",\n ""void set_state(libenv_env *, int, char *, int);"",\n ],\n )\n # don't use the dict space for actions\n self.ac_space = self.ac_space[""action""]\n\n def get_state(self):\n length = MAX_STATE_SIZE\n buf = self._ffi.new(f""char[{length}]"")\n result = []\n for env_idx in range(self.num):\n n = self.call_c_func(""get_state"", env_idx, buf, length)\n result.append(bytes(self._ffi.buffer(buf, n)))\n return result\n\n def set_state(self, states):\n assert len(states) == self.num\n for env_idx in range(self.num):\n state = states[env_idx]\n self.call_c_func(""set_state"", env_idx, state, len(state))\n\n def get_combos(self):\n return [\n (""LEFT"", ""DOWN""),\n (""LEFT"",),\n (""LEFT"", ""UP""),\n (""DOWN"",),\n (),\n (""UP"",),\n (""RIGHT"", ""DOWN""),\n (""RIGHT"",),\n (""RIGHT"", ""UP""),\n (""D"",),\n (""A"",),\n (""W"",),\n (""S"",),\n (""Q"",),\n (""E"",),\n ]\n\n def keys_to_act(self, keys_list: Sequence[Sequence[str]]) -> List[Optional[np.ndarray]]:\n """"""\n Convert list of keys being pressed to actions, used in interactive mode\n """"""\n result = []\n for keys in keys_list:\n action = None\n max_len = -1\n for i, combo in enumerate(self.get_combos()):\n pressed = True\n for key in combo:\n if key not in keys:\n pressed = False\n\n if pressed and (max_len < len(combo)):\n action = i\n max_len = len(combo)\n\n if action is not None:\n action = np.array([action])\n result.append(action)\n return result\n\n def act(self, ac):\n # tensorflow may return int64 actions (https://github.com/openai/gym/blob/master/gym/spaces/discrete.py#L13)\n # so always cast actions to int32\n return super().act({""action"": ac.astype(np.int32)})\n\n\nclass ProcgenGym3Env(BaseProcgenEnv):\n """"""\n gym3 interface for Procgen\n """"""\n def __init__(\n self,\n num,\n env_name,\n center_agent=True,\n use_backgrounds=True,\n use_monochrome_assets=False,\n restrict_themes=False,\n use_generated_assets=False,\n paint_vel_info=False,\n distribution_mode=""hard"",\n **kwargs,\n ):\n assert (\n distribution_mode in DISTRIBUTION_MODE_DICT\n ), f'""{distribution_mode}"" is not a valid 
distribution mode.'\n\n if distribution_mode == ""exploration"":\n assert (\n env_name in EXPLORATION_LEVEL_SEEDS\n ), f""{env_name} does not support exploration mode""\n\n distribution_mode = DISTRIBUTION_MODE_DICT[""hard""]\n assert ""num_levels"" not in kwargs, ""exploration mode overrides num_levels""\n kwargs[""num_levels""] = 1\n assert ""start_level"" not in kwargs, ""exploration mode overrides start_level""\n kwargs[""start_level""] = EXPLORATION_LEVEL_SEEDS[env_name]\n else:\n distribution_mode = DISTRIBUTION_MODE_DICT[distribution_mode]\n\n options = {\n ""center_agent"": bool(center_agent),\n ""use_generated_assets"": bool(use_generated_assets),\n ""use_monochrome_assets"": bool(use_monochrome_assets),\n ""restrict_themes"": bool(restrict_themes),\n ""use_backgrounds"": bool(use_backgrounds),\n ""paint_vel_info"": bool(paint_vel_info),\n ""distribution_mode"": distribution_mode,\n }\n super().__init__(num, env_name, options, **kwargs)\n \n \nclass ToBaselinesVecEnv(gym3.ToBaselinesVecEnv):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second' : 15\n }\n def render(self, mode=""human""):\n info = self.env.get_info()[0]\n _, ob, _ = self.env.observe()\n if mode == ""rgb_array"":\n if ""rgb"" in info:\n return info[""rgb""]\n else:\n return ob['rgb'][0] \n\n\ndef ProcgenEnv(num_envs, env_name, **kwargs):\n return ToBaselinesVecEnv(ProcgenGym3Env(num=num_envs, env_name=env_name, **kwargs))\n",python,tab +3664,5332224,"TERMINAL",0,0,"9",,terminal_output +3665,5333114,"TERMINAL",0,0,"50",,terminal_output +3666,5333518,"input_pipeline/preprocess/npy_to_array_records.py",0,0,"",python,tab +3667,5334173,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab +3668,5334321,"TERMINAL",0,0,"1",,terminal_output +3669,5335130,"input_pipeline/preprocess/npy_to_array_records.py",0,0,"",python,tab +3670,5335338,"TERMINAL",0,0,"2",,terminal_output +3671,5336289,"TERMINAL",0,0,"3",,terminal_output +3672,5337270,"TERMINAL",0,0,"4",,terminal_output +3673,5337835,"generate_dataset.py",0,0,"",python,tab +3674,5338374,"TERMINAL",0,0,"5",,terminal_output +3675,5339293,"generate_dataset.py",669,0,"",python,selection_mouse +3676,5339346,"TERMINAL",0,0,"6",,terminal_output +3677,5339429,"generate_dataset.py",666,8,"metadata",python,selection_mouse +3678,5340399,"TERMINAL",0,0,"7",,terminal_output +3679,5341409,"TERMINAL",0,0,"8",,terminal_output +3680,5342445,"TERMINAL",0,0,"9",,terminal_output +3681,5343491,"TERMINAL",0,0,"6:00",,terminal_output +3682,5344531,"TERMINAL",0,0,"1",,terminal_output +3683,5345567,"TERMINAL",0,0,"2",,terminal_output +3684,5346615,"TERMINAL",0,0,"3",,terminal_output +3685,5347652,"generate_dataset.py",674,0,"",python,selection_command +3686,5347690,"generate_dataset.py",2056,0,"episode_",python,content +3687,5347691,"generate_dataset.py",1957,0,"episode_",python,content +3688,5347691,"generate_dataset.py",1613,0,"episode_",python,content +3689,5347691,"generate_dataset.py",666,0,"episode_",python,content +3690,5347694,"TERMINAL",0,0,"4",,terminal_output +3691,5348701,"TERMINAL",0,0,"5",,terminal_output +3692,5349806,"TERMINAL",0,0,"6",,terminal_output +3693,5350829,"TERMINAL",0,0,"7",,terminal_output +3694,5351794,"TERMINAL",0,0,"8",,terminal_output +3695,5352881,"TERMINAL",0,0,"10",,terminal_output +3696,5353903,"TERMINAL",0,0,"1",,terminal_output +3697,5353914,"generate_dataset.py",1924,0,"",python,selection_mouse +3698,5354501,"generate_dataset.py",1822,0,"",python,selection_mouse +3699,5354915,"TERMINAL",0,0,"2",,terminal_output 
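The generate_dataset_10m.sh run logged above rejects rollouts shorter than 1000 steps ("Episode too short (N), resampling...") until ten full-length episodes exist, and the procgen source opened above shows the ProcgenGym3Env/gym3 interface it runs against. A minimal sketch of that resampling loop; the `rollout` helper and the random-action policy are assumptions, while the gym3 calls and the 1000-step threshold come from the material above:

```python
import numpy as np
from gym3 import types_np
from procgen import ProcgenGym3Env

def rollout(env, max_steps=1000):
    # Collect one episode of rgb frames; gym3 marks episode boundaries
    # via the `first` flag returned by observe().
    frames = []
    while len(frames) < max_steps:
        env.act(types_np.sample(env.ac_space, bshape=(env.num,)))
        _, obs, first = env.observe()
        if first[0] and frames:
            break
        frames.append(obs["rgb"][0])
    return np.stack(frames)

env = ProcgenGym3Env(num=1, env_name="coinrun")
episode = rollout(env)
while len(episode) < 1000:
    print(f"Episode too short ({len(episode)}), resampling...")
    episode = rollout(env)
print(f"Episode completed, length: {len(episode)}")
```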
+3700,5354966,"generate_dataset.py",1925,0,"",python,selection_mouse +3701,5355485,"generate_dataset.py",1906,0,"",python,selection_mouse +3702,5355498,"generate_dataset.py",1905,0,"",python,selection_command +3703,5355957,"TERMINAL",0,0,"3",,terminal_output +3704,5357074,"TERMINAL",0,0,"4",,terminal_output +3705,5358098,"TERMINAL",0,0,"5",,terminal_output +3706,5359229,"TERMINAL",0,0,"6",,terminal_output +3707,5360146,"TERMINAL",0,0,"7",,terminal_output +3708,5361173,"TERMINAL",0,0,"8",,terminal_output +3709,5362196,"TERMINAL",0,0,"9",,terminal_output +3710,5363222,"TERMINAL",0,0,"20",,terminal_output +3711,5364348,"TERMINAL",0,0,"1",,terminal_output +3712,5365369,"TERMINAL",0,0,"2",,terminal_output +3713,5366341,"TERMINAL",0,0,"3",,terminal_output +3714,5367422,"TERMINAL",0,0,"4",,terminal_output +3715,5368442,"TERMINAL",0,0,"5",,terminal_output +3716,5369456,"TERMINAL",0,0,"6",,terminal_output +3717,5370494,"TERMINAL",0,0,"7",,terminal_output +3718,5371527,"TERMINAL",0,0,"8",,terminal_output +3719,5372565,"TERMINAL",0,0,"9",,terminal_output +3720,5373609,"TERMINAL",0,0,"30",,terminal_output +3721,5374646,"TERMINAL",0,0,"1",,terminal_output +3722,5375712,"TERMINAL",0,0,"2",,terminal_output +3723,5376730,"TERMINAL",0,0,"3",,terminal_output +3724,5377299,"generate_dataset.py",1906,0,"\n",python,content +3725,5377774,"TERMINAL",0,0,"4",,terminal_output +3726,5378768,"generate_dataset.py",1907,0,"m",python,content +3727,5378770,"generate_dataset.py",1908,0,"",python,selection_keyboard +3728,5378857,"TERMINAL",0,0,"5",,terminal_output +3729,5378898,"generate_dataset.py",1908,0,"a",python,content +3730,5378899,"generate_dataset.py",1909,0,"",python,selection_keyboard +3731,5379092,"generate_dataset.py",1909,0,"t",python,content +3732,5379093,"generate_dataset.py",1910,0,"",python,selection_keyboard +3733,5379172,"generate_dataset.py",1910,0,"e",python,content +3734,5379173,"generate_dataset.py",1911,0,"",python,selection_keyboard +3735,5379733,"generate_dataset.py",1910,1,"",python,content +3736,5379874,"generate_dataset.py",1909,1,"",python,content +3737,5380144,"generate_dataset.py",1908,1,"",python,content +3738,5380296,"generate_dataset.py",1908,0,"e",python,content +3739,5380297,"generate_dataset.py",1909,0,"",python,selection_keyboard +3740,5380449,"generate_dataset.py",1909,0,"t",python,content +3741,5380450,"generate_dataset.py",1910,0,"",python,selection_keyboard +3742,5380601,"generate_dataset.py",1910,0,"a",python,content +3743,5380602,"generate_dataset.py",1911,0,"",python,selection_keyboard +3744,5380864,"generate_dataset.py",1911,0,"d",python,content +3745,5380865,"generate_dataset.py",1912,0,"",python,selection_keyboard +3746,5381048,"generate_dataset.py",1912,0,"a",python,content +3747,5381049,"generate_dataset.py",1913,0,"",python,selection_keyboard +3748,5381102,"generate_dataset.py",1913,0,"t",python,content +3749,5381105,"generate_dataset.py",1914,0,"",python,selection_keyboard +3750,5381254,"TERMINAL",0,0,"7",,terminal_output +3751,5381408,"generate_dataset.py",1914,0,"a",python,content +3752,5381409,"generate_dataset.py",1915,0,"",python,selection_keyboard +3753,5382248,"generate_dataset.py",1915,0," ",python,content +3754,5382249,"generate_dataset.py",1916,0,"",python,selection_keyboard +3755,5382325,"TERMINAL",0,0,"9",,terminal_output +3756,5382437,"generate_dataset.py",1916,0,"=",python,content +3757,5382439,"generate_dataset.py",1917,0,"",python,selection_keyboard +3758,5382501,"generate_dataset.py",1917,0," ",python,content 
+3759,5382502,"generate_dataset.py",1918,0,"",python,selection_keyboard +3760,5383261,"generate_dataset.py",1918,0,"{}",python,content +3761,5383262,"generate_dataset.py",1919,0,"",python,selection_keyboard +3762,5383349,"TERMINAL",0,0,"40",,terminal_output +3763,5383459,"generate_dataset.py",1919,0,"\n \n",python,content +3764,5384347,"TERMINAL",0,0,"1",,terminal_output +3765,5384550,"generate_dataset.py",1924,0,"""""",python,content +3766,5384553,"generate_dataset.py",1925,0,"",python,selection_keyboard +3767,5385377,"TERMINAL",0,0,"2",,terminal_output +3768,5386288,"generate_dataset.py",1925,0,"e",python,content +3769,5386289,"generate_dataset.py",1926,0,"",python,selection_keyboard +3770,5386433,"TERMINAL",0,0,"333",,terminal_output +3771,5386479,"generate_dataset.py",1926,0,"p",python,content +3772,5386480,"generate_dataset.py",1927,0,"",python,selection_keyboard +3773,5386746,"generate_dataset.py",1927,0,"i",python,content +3774,5386747,"generate_dataset.py",1928,0,"",python,selection_keyboard +3775,5387435,"generate_dataset.py",1928,0,"s",python,content +3776,5387436,"generate_dataset.py",1929,0,"",python,selection_keyboard +3777,5387525,"TERMINAL",0,0,"4",,terminal_output +3778,5387566,"generate_dataset.py",1929,0,"o",python,content +3779,5387567,"generate_dataset.py",1930,0,"",python,selection_keyboard +3780,5388168,"generate_dataset.py",1930,0,"d",python,content +3781,5388169,"generate_dataset.py",1931,0,"",python,selection_keyboard +3782,5388254,"generate_dataset.py",1931,0,"e",python,content +3783,5388255,"generate_dataset.py",1932,0,"",python,selection_keyboard +3784,5388491,"TERMINAL",0,0,"5",,terminal_output +3785,5388520,"generate_dataset.py",1932,0,"_",python,content +3786,5388521,"generate_dataset.py",1933,0,"",python,selection_keyboard +3787,5388975,"generate_dataset.py",1933,0,"m",python,content +3788,5388976,"generate_dataset.py",1934,0,"",python,selection_keyboard +3789,5389249,"generate_dataset.py",1934,0,"e",python,content +3790,5389250,"generate_dataset.py",1935,0,"",python,selection_keyboard +3791,5389484,"generate_dataset.py",1935,0,"t",python,content +3792,5389485,"generate_dataset.py",1936,0,"",python,selection_keyboard +3793,5389569,"generate_dataset.py",1936,0,"a",python,content +3794,5389570,"generate_dataset.py",1937,0,"",python,selection_keyboard +3795,5389601,"TERMINAL",0,0,"6",,terminal_output +3796,5389722,"generate_dataset.py",1937,0,"d",python,content +3797,5389722,"generate_dataset.py",1938,0,"",python,selection_keyboard +3798,5389866,"generate_dataset.py",1938,0,"a",python,content +3799,5389867,"generate_dataset.py",1939,0,"",python,selection_keyboard +3800,5389972,"generate_dataset.py",1939,0,"t",python,content +3801,5389973,"generate_dataset.py",1940,0,"",python,selection_keyboard +3802,5390179,"generate_dataset.py",1940,0,"a",python,content +3803,5390180,"generate_dataset.py",1941,0,"",python,selection_keyboard +3804,5390556,"generate_dataset.py",1942,0,"",python,selection_command +3805,5390644,"TERMINAL",0,0,"7",,terminal_output +3806,5391222,"generate_dataset.py",1942,0," ",python,content +3807,5391223,"generate_dataset.py",1943,0,"",python,selection_keyboard +3808,5391628,"TERMINAL",0,0,"8",,terminal_output +3809,5391738,"generate_dataset.py",1942,1,"",python,content +3810,5392002,"generate_dataset.py",1942,0,":",python,content +3811,5392003,"generate_dataset.py",1943,0,"",python,selection_keyboard +3812,5392198,"generate_dataset.py",1943,0," ",python,content +3813,5392198,"generate_dataset.py",1944,0,"",python,selection_keyboard 
+3814,5392388,"generate_dataset.py",1944,0,"e",python,content +3815,5392389,"generate_dataset.py",1945,0,"",python,selection_keyboard +3816,5392669,"TERMINAL",0,0,"9",,terminal_output +3817,5393206,"generate_dataset.py",1944,1,"episode_metadata",python,content +3818,5393720,"TERMINAL",0,0,"50",,terminal_output +3819,5393923,"generate_dataset.py",1960,0,",",python,content +3820,5393924,"generate_dataset.py",1961,0,"",python,selection_keyboard +3821,5394727,"TERMINAL",0,0,"1",,terminal_output +3822,5395307,"generate_dataset.py",1919,0,"",python,selection_command +3823,5395770,"TERMINAL",0,0,"2",,terminal_output +3824,5395915,"generate_dataset.py",1919,0,"\n ",python,content +3825,5396813,"TERMINAL",0,0,"3",,terminal_output +3826,5396841,"generate_dataset.py",1924,0,"""""",python,content +3827,5396842,"generate_dataset.py",1925,0,"",python,selection_keyboard +3828,5397879,"TERMINAL",0,0,"5",,terminal_output +3829,5398899,"TERMINAL",0,0,"6",,terminal_output +3830,5399676,"generate_dataset.py",1925,0,"e",python,content +3831,5399677,"generate_dataset.py",1926,0,"",python,selection_keyboard +3832,5399809,"generate_dataset.py",1926,0,"n",python,content +3833,5399810,"generate_dataset.py",1927,0,"",python,selection_keyboard +3834,5399907,"generate_dataset.py",1927,0,"v",python,content +3835,5399908,"generate_dataset.py",1928,0,"",python,selection_keyboard +3836,5399986,"TERMINAL",0,0,"7",,terminal_output +3837,5400781,"generate_dataset.py",1929,0,"",python,selection_command +3838,5401014,"TERMINAL",0,0,"8",,terminal_output +3839,5402007,"TERMINAL",0,0,"9",,terminal_output +3840,5402095,"generate_dataset.py",1929,0,"_",python,content +3841,5402096,"generate_dataset.py",1930,0,"",python,selection_keyboard +3842,5402598,"generate_dataset.py",1929,1,"",python,content +3843,5402823,"generate_dataset.py",1929,0,":",python,content +3844,5402824,"generate_dataset.py",1930,0,"",python,selection_keyboard +3845,5403061,"generate_dataset.py",1930,0," ",python,content +3846,5403062,"generate_dataset.py",1931,0,"",python,selection_keyboard +3847,5403062,"TERMINAL",0,0,"7:00",,terminal_output +3848,5404144,"generate_dataset.py",1931,0,"""""",python,content +3849,5404145,"generate_dataset.py",1932,0,"",python,selection_keyboard +3850,5404156,"TERMINAL",0,0,"1",,terminal_output +3851,5404553,"generate_dataset.py",1932,0,"c",python,content +3852,5404554,"generate_dataset.py",1933,0,"",python,selection_keyboard +3853,5404687,"generate_dataset.py",1933,0,"o",python,content +3854,5404688,"generate_dataset.py",1934,0,"",python,selection_keyboard +3855,5404875,"generate_dataset.py",1934,0,"i",python,content +3856,5404876,"generate_dataset.py",1935,0,"",python,selection_keyboard +3857,5404939,"generate_dataset.py",1935,0,"n",python,content +3858,5404940,"generate_dataset.py",1936,0,"",python,selection_keyboard +3859,5405124,"TERMINAL",0,0,"2",,terminal_output +3860,5405499,"generate_dataset.py",1936,0,"r",python,content +3861,5405500,"generate_dataset.py",1937,0,"",python,selection_keyboard +3862,5405560,"generate_dataset.py",1937,0,"u",python,content +3863,5405560,"generate_dataset.py",1938,0,"",python,selection_keyboard +3864,5405618,"generate_dataset.py",1938,0,"n",python,content +3865,5405619,"generate_dataset.py",1939,0,"",python,selection_keyboard +3866,5406191,"TERMINAL",0,0,"3",,terminal_output +3867,5407201,"TERMINAL",0,0,"4",,terminal_output +3868,5408241,"TERMINAL",0,0,"5",,terminal_output +3869,5409299,"TERMINAL",0,0,"6",,terminal_output +3870,5410434,"TERMINAL",0,0,"7",,terminal_output 
+3871,5411451,"TERMINAL",0,0,"8",,terminal_output +3872,5411563,"generate_dataset.py",1940,0,"",python,selection_mouse +3873,5412208,"generate_dataset.py",1940,0,",",python,content +3874,5412210,"generate_dataset.py",1941,0,"",python,selection_keyboard +3875,5412330,"generate_dataset.py",1941,0,"\n ",python,content +3876,5412439,"TERMINAL",0,0,"9",,terminal_output +3877,5412913,"generate_dataset.py",1946,0,"""""",python,content +3878,5412914,"generate_dataset.py",1947,0,"",python,selection_keyboard +3879,5413461,"TERMINAL",0,0,"10",,terminal_output +3880,5415393,"TERMINAL",0,0,"1",,terminal_output +3881,5416367,"TERMINAL",0,0,"3",,terminal_output +3882,5417404,"TERMINAL",0,0,"4",,terminal_output +3883,5418415,"TERMINAL",0,0,"5",,terminal_output +3884,5419687,"TERMINAL",0,0,"6",,terminal_output +3885,5420431,"TERMINAL",0,0,"7",,terminal_output +3886,5420742,"generate_dataset.py",1947,0,"n",python,content +3887,5420743,"generate_dataset.py",1948,0,"",python,selection_keyboard +3888,5420910,"generate_dataset.py",1948,0,"u",python,content +3889,5420910,"generate_dataset.py",1949,0,"",python,selection_keyboard +3890,5421063,"generate_dataset.py",1949,0,"m",python,content +3891,5421064,"generate_dataset.py",1950,0,"",python,selection_keyboard +3892,5421479,"TERMINAL",0,0,"8",,terminal_output +3893,5421556,"generate_dataset.py",1950,0,"_",python,content +3894,5421557,"generate_dataset.py",1951,0,"",python,selection_keyboard +3895,5421769,"generate_dataset.py",1951,0,"e",python,content +3896,5421770,"generate_dataset.py",1952,0,"",python,selection_keyboard +3897,5421870,"generate_dataset.py",1952,0,"p",python,content +3898,5421870,"generate_dataset.py",1953,0,"",python,selection_keyboard +3899,5422100,"generate_dataset.py",1953,0,"i",python,content +3900,5422101,"generate_dataset.py",1954,0,"",python,selection_keyboard +3901,5422441,"generate_dataset.py",1954,0,"s",python,content +3902,5422442,"generate_dataset.py",1955,0,"",python,selection_keyboard +3903,5422517,"TERMINAL",0,0,"9",,terminal_output +3904,5422606,"generate_dataset.py",1955,0,"o",python,content +3905,5422607,"generate_dataset.py",1956,0,"",python,selection_keyboard +3906,5422694,"generate_dataset.py",1956,0,"d",python,content +3907,5422695,"generate_dataset.py",1957,0,"",python,selection_keyboard +3908,5422804,"generate_dataset.py",1957,0,"e",python,content +3909,5422804,"generate_dataset.py",1958,0,"",python,selection_keyboard +3910,5422967,"generate_dataset.py",1958,0,"s",python,content +3911,5422967,"generate_dataset.py",1959,0,"",python,selection_keyboard +3912,5423132,"generate_dataset.py",1960,0,"",python,selection_command +3913,5423550,"TERMINAL",0,0,"20",,terminal_output +3914,5423782,"generate_dataset.py",1960,0,":",python,content +3915,5423783,"generate_dataset.py",1961,0,"",python,selection_keyboard +3916,5424610,"TERMINAL",0,0,"1",,terminal_output +3917,5424733,"generate_dataset.py",1960,1,"",python,content +3918,5425296,"generate_dataset.py",1960,0,":",python,content +3919,5425297,"generate_dataset.py",1961,0,"",python,selection_keyboard +3920,5425640,"TERMINAL",0,0,"27",,terminal_output +3921,5425929,"generate_dataset.py",1961,0," ",python,content +3922,5425930,"generate_dataset.py",1962,0,"",python,selection_keyboard +3923,5426694,"TERMINAL",0,0,"3",,terminal_output +3924,5426980,"generate_dataset.py",1962,0,"a",python,content +3925,5426981,"generate_dataset.py",1963,0,"",python,selection_keyboard +3926,5427682,"generate_dataset.py",1963,0,"r",python,content 
+3927,5427683,"generate_dataset.py",1964,0,"",python,selection_keyboard +3928,5427745,"TERMINAL",0,0,"4",,terminal_output +3929,5427805,"generate_dataset.py",1964,0,"g",python,content +3930,5427808,"generate_dataset.py",1965,0,"",python,selection_keyboard +3931,5427976,"generate_dataset.py",1965,0,"s",python,content +3932,5427977,"generate_dataset.py",1966,0,"",python,selection_keyboard +3933,5428174,"generate_dataset.py",1966,0,".",python,content +3934,5428175,"generate_dataset.py",1967,0,"",python,selection_keyboard +3935,5428562,"generate_dataset.py",1967,0,"n",python,content +3936,5428564,"generate_dataset.py",1968,0,"",python,selection_keyboard +3937,5428707,"generate_dataset.py",1968,0,"u",python,content +3938,5428708,"generate_dataset.py",1969,0,"",python,selection_keyboard +3939,5428808,"TERMINAL",0,0,"5",,terminal_output +3940,5428887,"generate_dataset.py",1969,0,"m",python,content +3941,5428888,"generate_dataset.py",1970,0,"",python,selection_keyboard +3942,5429258,"generate_dataset.py",1967,3,"num_episodes",python,content +3943,5429795,"TERMINAL",0,0,"6",,terminal_output +3944,5429925,"generate_dataset.py",1979,0,",",python,content +3945,5429926,"generate_dataset.py",1980,0,"",python,selection_keyboard +3946,5430161,"generate_dataset.py",1980,0,"\n ",python,content +3947,5430906,"TERMINAL",0,0,"8",,terminal_output +3948,5431929,"TERMINAL",0,0,"9",,terminal_output +3949,5432954,"TERMINAL",0,0,"30",,terminal_output +3950,5433965,"TERMINAL",0,0,"1",,terminal_output +3951,5435212,"TERMINAL",0,0,"2",,terminal_output +3952,5436070,"TERMINAL",0,0,"3",,terminal_output +3953,5437152,"TERMINAL",0,0,"4",,terminal_output +3954,5437235,"generate_dataset.py",1985,0,"""""",python,content +3955,5437237,"generate_dataset.py",1986,0,"",python,selection_keyboard +3956,5437445,"generate_dataset.py",1986,0,"a",python,content +3957,5437446,"generate_dataset.py",1987,0,"",python,selection_keyboard +3958,5437669,"generate_dataset.py",1987,0,"v",python,content +3959,5437670,"generate_dataset.py",1988,0,"",python,selection_keyboard +3960,5437897,"generate_dataset.py",1988,0,"g",python,content +3961,5437898,"generate_dataset.py",1989,0,"",python,selection_keyboard +3962,5438122,"TERMINAL",0,0,"5",,terminal_output +3963,5438307,"generate_dataset.py",1989,0,"_",python,content +3964,5438308,"generate_dataset.py",1990,0,"",python,selection_keyboard +3965,5438839,"generate_dataset.py",1990,0,"e",python,content +3966,5438840,"generate_dataset.py",1991,0,"",python,selection_keyboard +3967,5438922,"generate_dataset.py",1991,0,"p",python,content +3968,5438923,"generate_dataset.py",1992,0,"",python,selection_keyboard +3969,5439082,"generate_dataset.py",1992,0,"i",python,content +3970,5439083,"generate_dataset.py",1993,0,"",python,selection_keyboard +3971,5439170,"TERMINAL",0,0,"6",,terminal_output +3972,5439264,"generate_dataset.py",1993,0,"s",python,content +3973,5439265,"generate_dataset.py",1994,0,"",python,selection_keyboard +3974,5439346,"generate_dataset.py",1994,0,"o",python,content +3975,5439347,"generate_dataset.py",1995,0,"",python,selection_keyboard +3976,5439460,"generate_dataset.py",1995,0,"d",python,content +3977,5439461,"generate_dataset.py",1996,0,"",python,selection_keyboard +3978,5439566,"generate_dataset.py",1996,0,"e",python,content +3979,5439567,"generate_dataset.py",1997,0,"",python,selection_keyboard +3980,5439731,"generate_dataset.py",1997,0,"_",python,content +3981,5439732,"generate_dataset.py",1998,0,"",python,selection_keyboard +3982,5439989,"generate_dataset.py",1998,0,"l",python,content 
+3983,5439990,"generate_dataset.py",1999,0,"",python,selection_keyboard +3984,5440097,"generate_dataset.py",1999,0,"e",python,content +3985,5440098,"generate_dataset.py",2000,0,"",python,selection_keyboard +3986,5440214,"generate_dataset.py",2000,0,"n",python,content +3987,5440215,"generate_dataset.py",2001,0,"",python,selection_keyboard +3988,5440215,"TERMINAL",0,0,"7",,terminal_output +3989,5440743,"generate_dataset.py",2002,0,"",python,selection_command +3990,5441271,"generate_dataset.py",2002,0,":",python,content +3991,5441272,"generate_dataset.py",2003,0,"",python,selection_keyboard +3992,5441273,"TERMINAL",0,0,"8",,terminal_output +3993,5441413,"generate_dataset.py",2003,0," ",python,content +3994,5441413,"generate_dataset.py",2004,0,"",python,selection_keyboard +3995,5442268,"TERMINAL",0,0,"9",,terminal_output +3996,5443258,"generate_dataset.py",2004,0,"n",python,content +3997,5443259,"generate_dataset.py",2005,0,"",python,selection_keyboard +3998,5443315,"TERMINAL",0,0,"40",,terminal_output +3999,5443443,"generate_dataset.py",2005,0,"p",python,content +4000,5443444,"generate_dataset.py",2006,0,"",python,selection_keyboard +4001,5443661,"generate_dataset.py",2006,0,".",python,content +4002,5443662,"generate_dataset.py",2007,0,"",python,selection_keyboard +4003,5443899,"generate_dataset.py",2007,0,"m",python,content +4004,5443900,"generate_dataset.py",2008,0,"",python,selection_keyboard +4005,5444004,"generate_dataset.py",2008,0,"e",python,content +4006,5444005,"generate_dataset.py",2009,0,"",python,selection_keyboard +4007,5444196,"generate_dataset.py",2009,0,"a",python,content +4008,5444197,"generate_dataset.py",2010,0,"",python,selection_keyboard +4009,5444285,"generate_dataset.py",2010,0,"n",python,content +4010,5444286,"generate_dataset.py",2011,0,"",python,selection_keyboard +4011,5444347,"TERMINAL",0,0,"1",,terminal_output +4012,5445178,"generate_dataset.py",2011,0,"()",python,content +4013,5445179,"generate_dataset.py",2012,0,"",python,selection_keyboard +4014,5445483,"TERMINAL",0,0,"2",,terminal_output +4015,5446450,"TERMINAL",0,0,"3",,terminal_output +4016,5447508,"TERMINAL",0,0,"4",,terminal_output +4017,5448519,"TERMINAL",0,0,"5",,terminal_output +4018,5449571,"TERMINAL",0,0,"6",,terminal_output +4019,5450601,"TERMINAL",0,0,"7",,terminal_output +4020,5451639,"TERMINAL",0,0,"8",,terminal_output +4021,5452680,"TERMINAL",0,0,"9",,terminal_output +4022,5453743,"TERMINAL",0,0,"50",,terminal_output +4023,5454871,"TERMINAL",0,0,"1",,terminal_output +4024,5455476,"generate_dataset.py",1900,0,"",python,selection_mouse +4025,5455811,"TERMINAL",0,0,"2",,terminal_output +4026,5456877,"TERMINAL",0,0,"4",,terminal_output +4027,5457267,"generate_dataset.py",2012,0,"",python,selection_mouse +4028,5457939,"TERMINAL",0,0,"5",,terminal_output +4029,5458920,"TERMINAL",0,0,"6",,terminal_output +4030,5459988,"TERMINAL",0,0,"7",,terminal_output +4031,5461013,"TERMINAL",0,0,"8",,terminal_output +4032,5462140,"TERMINAL",0,0,"9",,terminal_output +4033,5463163,"TERMINAL",0,0,"8:00",,terminal_output +4034,5464186,"TERMINAL",0,0,"1",,terminal_output +4035,5465153,"TERMINAL",0,0,"2",,terminal_output +4036,5466191,"TERMINAL",0,0,"3",,terminal_output +4037,5467259,"TERMINAL",0,0,"4",,terminal_output +4038,5468276,"TERMINAL",0,0,"5",,terminal_output +4039,5468926,"generate_dataset.py",675,0,"",python,selection_mouse +4040,5469077,"generate_dataset.py",666,16,"episode_metadata",python,selection_mouse +4041,5469307,"TERMINAL",0,0,"6",,terminal_output +4042,5470432,"TERMINAL",0,0,"7",,terminal_output 
+4043,5471458,"TERMINAL",0,0,"8",,terminal_output +4044,5472484,"TERMINAL",0,0,"9",,terminal_output +4045,5473508,"TERMINAL",0,0,"10",,terminal_output +4046,5474518,"TERMINAL",0,0,"1",,terminal_output +4047,5475551,"generate_dataset.py",2012,0,"",python,selection_mouse +4048,5475617,"TERMINAL",0,0,"2",,terminal_output +4049,5476605,"TERMINAL",0,0,"3",,terminal_output +4050,5477632,"TERMINAL",0,0,"4",,terminal_output +4051,5478656,"TERMINAL",0,0,"5",,terminal_output +4052,5479378,"generate_dataset.py",2012,0,"[]",python,content +4053,5479379,"generate_dataset.py",2013,0,"",python,selection_keyboard +4054,5479732,"TERMINAL",0,0,"6",,terminal_output +4055,5480735,"TERMINAL",0,0,"7",,terminal_output +4056,5481776,"TERMINAL",0,0,"8",,terminal_output +4057,5482825,"TERMINAL",0,0,"9",,terminal_output +4058,5483227,"generate_dataset.py",2013,0,"e",python,content +4059,5483229,"generate_dataset.py",2014,0,"",python,selection_keyboard +4060,5483307,"generate_dataset.py",2014,0,"p",python,content +4061,5483309,"generate_dataset.py",2015,0,"",python,selection_keyboard +4062,5483877,"TERMINAL",0,0,"21",,terminal_output +4063,5484036,"generate_dataset.py",2015,0,"[]",python,content +4064,5484037,"generate_dataset.py",2016,0,"",python,selection_keyboard +4065,5484525,"generate_dataset.py",2016,0,"""""",python,content +4066,5484526,"generate_dataset.py",2017,0,"",python,selection_keyboard +4067,5484887,"TERMINAL",0,0,"2",,terminal_output +4068,5484919,"generate_dataset.py",2017,0,"l",python,content +4069,5484920,"generate_dataset.py",2018,0,"",python,selection_keyboard +4070,5485069,"generate_dataset.py",2018,0,"e",python,content +4071,5485070,"generate_dataset.py",2019,0,"",python,selection_keyboard +4072,5485184,"generate_dataset.py",2019,0,"n",python,content +4073,5485185,"generate_dataset.py",2020,0,"",python,selection_keyboard +4074,5485784,"generate_dataset.py",2020,0,"g",python,content +4075,5485784,"generate_dataset.py",2021,0,"",python,selection_keyboard +4076,5485957,"TERMINAL",0,0,"3",,terminal_output +4077,5486190,"generate_dataset.py",2020,1,"",python,content +4078,5486301,"generate_dataset.py",2019,1,"",python,content +4079,5486438,"generate_dataset.py",2018,1,"",python,content +4080,5486591,"generate_dataset.py",2017,1,"",python,content +4081,5486720,"generate_dataset.py",2017,0,"s",python,content +4082,5486721,"generate_dataset.py",2018,0,"",python,selection_keyboard +4083,5486967,"generate_dataset.py",2018,0,"e",python,content +4084,5486969,"generate_dataset.py",2019,0,"",python,selection_keyboard +4085,5486979,"TERMINAL",0,0,"4",,terminal_output +4086,5487128,"generate_dataset.py",2019,0,"q",python,content +4087,5487129,"generate_dataset.py",2020,0,"",python,selection_keyboard +4088,5487293,"generate_dataset.py",2020,0,"u",python,content +4089,5487294,"generate_dataset.py",2021,0,"",python,selection_keyboard +4090,5487519,"generate_dataset.py",2021,0,"n",python,content +4091,5487520,"generate_dataset.py",2022,0,"",python,selection_keyboard +4092,5487900,"generate_dataset.py",2021,1,"",python,content +4093,5488028,"generate_dataset.py",2021,0,"e",python,content +4094,5488029,"generate_dataset.py",2022,0,"",python,selection_keyboard +4095,5488066,"TERMINAL",0,0,"5",,terminal_output +4096,5488125,"generate_dataset.py",2022,0,"n",python,content +4097,5488126,"generate_dataset.py",2023,0,"",python,selection_keyboard +4098,5488529,"generate_dataset.py",2023,0,"c",python,content +4099,5488530,"generate_dataset.py",2024,0,"",python,selection_keyboard 
+4100,5488708,"generate_dataset.py",2024,0,"e",python,content +4101,5488709,"generate_dataset.py",2025,0,"",python,selection_keyboard +4102,5489080,"generate_dataset.py",2025,0,"_",python,content +4103,5489081,"generate_dataset.py",2026,0,"",python,selection_keyboard +4104,5489121,"TERMINAL",0,0,"68",,terminal_output +4105,5489345,"generate_dataset.py",2026,0,"l",python,content +4106,5489346,"generate_dataset.py",2027,0,"",python,selection_keyboard +4107,5489514,"generate_dataset.py",2027,0,"e",python,content +4108,5489515,"generate_dataset.py",2028,0,"",python,selection_keyboard +4109,5489683,"generate_dataset.py",2028,0,"n",python,content +4110,5489684,"generate_dataset.py",2029,0,"",python,selection_keyboard +4111,5489979,"generate_dataset.py",2029,0,"g",python,content +4112,5489980,"generate_dataset.py",2030,0,"",python,selection_keyboard +4113,5490089,"TERMINAL",0,0,"7",,terminal_output +4114,5490327,"generate_dataset.py",2030,0,"t",python,content +4115,5490328,"generate_dataset.py",2031,0,"",python,selection_keyboard +4116,5490460,"generate_dataset.py",2031,0,"h",python,content +4117,5490461,"generate_dataset.py",2032,0,"",python,selection_keyboard +4118,5491080,"generate_dataset.py",2033,0,"",python,selection_command +4119,5491147,"TERMINAL",0,0,"8",,terminal_output +4120,5491372,"generate_dataset.py",2034,0,"",python,selection_command +4121,5491962,"generate_dataset.py",2034,0," ",python,content +4122,5491964,"generate_dataset.py",2035,0,"",python,selection_keyboard +4123,5492083,"generate_dataset.py",2035,0,"f",python,content +4124,5492084,"generate_dataset.py",2036,0,"",python,selection_keyboard +4125,5492170,"generate_dataset.py",2036,0,"o",python,content +4126,5492171,"generate_dataset.py",2037,0,"",python,selection_keyboard +4127,5492171,"TERMINAL",0,0,"9",,terminal_output +4128,5492278,"generate_dataset.py",2037,0,"r",python,content +4129,5492279,"generate_dataset.py",2038,0,"",python,selection_keyboard +4130,5492347,"generate_dataset.py",2038,0," ",python,content +4131,5492349,"generate_dataset.py",2039,0,"",python,selection_keyboard +4132,5492548,"generate_dataset.py",2039,0,"e",python,content +4133,5492549,"generate_dataset.py",2040,0,"",python,selection_keyboard +4134,5492672,"generate_dataset.py",2040,0,"p",python,content +4135,5492673,"generate_dataset.py",2041,0,"",python,selection_keyboard +4136,5492791,"generate_dataset.py",2041,0," ",python,content +4137,5492792,"generate_dataset.py",2042,0,"",python,selection_keyboard +4138,5492944,"generate_dataset.py",2042,0,"i",python,content +4139,5492945,"generate_dataset.py",2043,0,"",python,selection_keyboard +4140,5493026,"generate_dataset.py",2043,0,"n",python,content +4141,5493026,"generate_dataset.py",2044,0,"",python,selection_keyboard +4142,5493153,"generate_dataset.py",2044,0," ",python,content +4143,5493154,"generate_dataset.py",2045,0,"",python,selection_keyboard +4144,5493237,"TERMINAL",0,0,"30",,terminal_output +4145,5493922,"generate_dataset.py",2045,0,"e",python,content +4146,5493922,"generate_dataset.py",2046,0,"",python,selection_keyboard +4147,5493998,"generate_dataset.py",2046,0,"p",python,content +4148,5493999,"generate_dataset.py",2047,0,"",python,selection_keyboard +4149,5494304,"TERMINAL",0,0,"1",,terminal_output +4150,5494703,"generate_dataset.py",2045,2,"episode_metadata",python,content +4151,5495278,"TERMINAL",0,0,"2",,terminal_output +4152,5496353,"TERMINAL",0,0,"3",,terminal_output +4153,5496811,"generate_dataset.py",2063,0,"",python,selection_mouse 
+4154,5497468,"generate_dataset.py",2063,0,",",python,content +4155,5497470,"generate_dataset.py",2064,0,"",python,selection_keyboard +4156,5497481,"TERMINAL",0,0,"4",,terminal_output +4157,5498401,"TERMINAL",0,0,"5",,terminal_output +4158,5498916,"generate_dataset.py",1994,0,"",python,selection_mouse +4159,5499518,"TERMINAL",0,0,"6",,terminal_output +4160,5499595,"generate_dataset.py",1999,0,"",python,selection_mouse +4161,5500541,"TERMINAL",0,0,"7",,terminal_output +4162,5501664,"generate_dataset.py",1990,0,"",python,selection_mouse +4163,5501889,"generate_dataset.py",1990,1,"e",python,selection_mouse +4164,5501890,"generate_dataset.py",1990,89,"episode_len"": np.mean([ep[""sequence_length""] for ep in episode_metadata]),\n ""episode_m",python,selection_mouse +4165,5501891,"generate_dataset.py",1990,90,"episode_len"": np.mean([ep[""sequence_length""] for ep in episode_metadata]),\n ""episode_me",python,selection_mouse +4166,5502087,"generate_dataset.py",1990,91,"episode_len"": np.mean([ep[""sequence_length""] for ep in episode_metadata]),\n ""episode_met",python,selection_mouse +4167,5502196,"generate_dataset.py",1990,7,"episode",python,selection_mouse +4168,5503118,"TERMINAL",0,0,"8",,terminal_output +4169,5503759,"generate_dataset.py",1990,7,"",python,content +4170,5504165,"TERMINAL",0,0,"41",,terminal_output +4171,5504530,"generate_dataset.py",1990,0,"s",python,content +4172,5504531,"generate_dataset.py",1991,0,"",python,selection_keyboard +4173,5504720,"generate_dataset.py",1991,0,"e",python,content +4174,5504721,"generate_dataset.py",1992,0,"",python,selection_keyboard +4175,5504860,"generate_dataset.py",1992,0,"q",python,content +4176,5504861,"generate_dataset.py",1993,0,"",python,selection_keyboard +4177,5504975,"generate_dataset.py",1993,0,"u",python,content +4178,5504976,"generate_dataset.py",1994,0,"",python,selection_keyboard +4179,5505175,"TERMINAL",0,0,"2",,terminal_output +4180,5505387,"generate_dataset.py",1994,0,"e",python,content +4181,5505388,"generate_dataset.py",1995,0,"",python,selection_keyboard +4182,5505602,"generate_dataset.py",1995,0,"n",python,content +4183,5505603,"generate_dataset.py",1996,0,"",python,selection_keyboard +4184,5505719,"generate_dataset.py",1996,0,"c",python,content +4185,5505721,"generate_dataset.py",1997,0,"",python,selection_keyboard +4186,5505764,"generate_dataset.py",1997,0,"e",python,content +4187,5505765,"generate_dataset.py",1998,0,"",python,selection_keyboard +4188,5506236,"TERMINAL",0,0,"3",,terminal_output +4189,5507255,"TERMINAL",0,0,"4",,terminal_output +4190,5508299,"TERMINAL",0,0,"5",,terminal_output +4191,5508805,"generate_dataset.py",1990,0,"",python,selection_mouse +4192,5509068,"generate_dataset.py",1990,1,"s",python,selection_mouse +4193,5509069,"generate_dataset.py",1990,3,"seq",python,selection_mouse +4194,5509069,"generate_dataset.py",1990,4,"sequ",python,selection_mouse +4195,5509070,"generate_dataset.py",1990,5,"seque",python,selection_mouse +4196,5509070,"generate_dataset.py",1990,6,"sequen",python,selection_mouse +4197,5509176,"generate_dataset.py",1990,7,"sequenc",python,selection_mouse +4198,5509329,"TERMINAL",0,0,"6",,terminal_output +4199,5509378,"generate_dataset.py",1990,8,"sequence",python,selection_mouse +4200,5510253,"generate_dataset.py",1990,8,"e",python,content +4201,5510254,"generate_dataset.py",1991,0,"",python,selection_keyboard +4202,5510345,"generate_dataset.py",1991,0,"p",python,content +4203,5510346,"generate_dataset.py",1992,0,"",python,selection_keyboard 
+4204,5510389,"TERMINAL",0,0,"7",,terminal_output +4205,5510478,"generate_dataset.py",1992,0,"i",python,content +4206,5510479,"generate_dataset.py",1993,0,"",python,selection_keyboard +4207,5510571,"generate_dataset.py",1993,0,"s",python,content +4208,5510572,"generate_dataset.py",1994,0,"",python,selection_keyboard +4209,5510667,"generate_dataset.py",1994,0,"o",python,content +4210,5510668,"generate_dataset.py",1995,0,"",python,selection_keyboard +4211,5510782,"generate_dataset.py",1995,0,"d",python,content +4212,5510783,"generate_dataset.py",1996,0,"",python,selection_keyboard +4213,5510863,"generate_dataset.py",1996,0,"e",python,content +4214,5510864,"generate_dataset.py",1997,0,"",python,selection_keyboard +4215,5511408,"TERMINAL",0,0,"8",,terminal_output +4216,5511926,"generate_dataset.py",1980,0,"",python,selection_mouse +4217,5512452,"TERMINAL",0,0,"9",,terminal_output +4218,5513496,"TERMINAL",0,0,"50",,terminal_output +4219,5514528,"TERMINAL",0,0,"1",,terminal_output +4220,5515566,"TERMINAL",0,0,"2",,terminal_output +4221,5516616,"TERMINAL",0,0,"3",,terminal_output +4222,5517642,"TERMINAL",0,0,"4",,terminal_output +4223,5518686,"TERMINAL",0,0,"5",,terminal_output +4224,5519791,"TERMINAL",0,0,"6",,terminal_output +4225,5520815,"TERMINAL",0,0,"7",,terminal_output +4226,5521813,"TERMINAL",0,0,"8",,terminal_output +4227,5522894,"TERMINAL",0,0,"9:00",,terminal_output +4228,5523904,"TERMINAL",0,0,"1",,terminal_output +4229,5525014,"TERMINAL",0,0,"2",,terminal_output +4230,5526047,"TERMINAL",0,0,"3",,terminal_output +4231,5527008,"TERMINAL",0,0,"4",,terminal_output +4232,5528084,"TERMINAL",0,0,"5",,terminal_output +4233,5529110,"TERMINAL",0,0,"6",,terminal_output +4234,5530137,"TERMINAL",0,0,"7",,terminal_output +4235,5531264,"TERMINAL",0,0,"8",,terminal_output +4236,5532128,"generate_dataset.py",2187,0,"",python,selection_mouse +4237,5532211,"TERMINAL",0,0,"9",,terminal_output +4238,5532228,"generate_dataset.py",2175,16,"episode_metadata",python,selection_mouse +4239,5532808,"generate_dataset.py",2175,16,"m",python,content +4240,5532809,"generate_dataset.py",2176,0,"",python,selection_keyboard +4241,5532895,"generate_dataset.py",2176,0,"e",python,content +4242,5532896,"generate_dataset.py",2177,0,"",python,selection_keyboard +4243,5533025,"generate_dataset.py",2177,0,"t",python,content +4244,5533026,"generate_dataset.py",2178,0,"",python,selection_keyboard +4245,5533125,"generate_dataset.py",2178,0,"a",python,content +4246,5533126,"generate_dataset.py",2179,0,"",python,selection_keyboard +4247,5533285,"TERMINAL",0,0,"10",,terminal_output +4248,5533321,"generate_dataset.py",2179,0,"d",python,content +4249,5533322,"generate_dataset.py",2180,0,"",python,selection_keyboard +4250,5533485,"generate_dataset.py",2180,0,"a",python,content +4251,5533486,"generate_dataset.py",2181,0,"",python,selection_keyboard +4252,5533592,"generate_dataset.py",2181,0,"t",python,content +4253,5533593,"generate_dataset.py",2182,0,"",python,selection_keyboard +4254,5533789,"generate_dataset.py",2182,0,"a",python,content +4255,5533790,"generate_dataset.py",2183,0,"",python,selection_keyboard +4256,5534216,"generate_dataset.py",2175,8,"metadata",python,content +4257,5534306,"TERMINAL",0,0,"1",,terminal_output +4258,5535342,"TERMINAL",0,0,"2",,terminal_output +4259,5536482,"TERMINAL",0,0,"3",,terminal_output +4260,5537169,"generate_dataset.py",2128,0,"",python,selection_mouse +4261,5537427,"TERMINAL",0,0,"4",,terminal_output +4262,5538530,"TERMINAL",0,0,"5",,terminal_output 
+4263,5538985,"generate_dataset.py",2108,0,"",python,selection_mouse +4264,5539556,"TERMINAL",0,0,"6",,terminal_output +4265,5540579,"TERMINAL",0,0,"7",,terminal_output +4266,5541754,"TERMINAL",0,0,"8",,terminal_output +4267,5542613,"TERMINAL",0,0,"9",,terminal_output +4268,5543651,"TERMINAL",0,0,"20",,terminal_output +4269,5544689,"TERMINAL",0,0,"1",,terminal_output +4270,5545799,"TERMINAL",0,0,"2",,terminal_output +4271,5546774,"TERMINAL",0,0,"3",,terminal_output +4272,5547849,"TERMINAL",0,0,"4",,terminal_output +4273,5548668,"TERMINAL",0,0,"rm /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/*",,terminal_command +4274,5548699,"TERMINAL",0,0,"]633;E;2025-09-04 11:29:25 rm /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/*;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +4275,5548856,"TERMINAL",0,0,"6",,terminal_output +4276,5549889,"TERMINAL",0,0,"7",,terminal_output +4277,5550979,"TERMINAL",0,0,"8",,terminal_output +4278,5551975,"TERMINAL",0,0,"9",,terminal_output +4279,5552384,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/generate_dataset_10m.sh",,terminal_command +4280,5552424,"TERMINAL",0,0,"]633;E;2025-09-04 11:29:29 sh slurm/dev/mihir/horeka/generate_dataset_10m.sh;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +4281,5553039,"TERMINAL",0,0,"30",,terminal_output +4282,5554099,"TERMINAL",0,0,"1",,terminal_output +4283,5555074,"TERMINAL",0,0,"Gym has been unmaintained since 2022 and does not support NumPy 2.0 amongst other critical functionality.\r\nPlease upgrade to Gymnasium, the maintained drop-in replacement of Gym, or contact the authors of your software and request that they upgrade.\r\nUsers of this version of Gym should be able to simply replace 'import gym' with 'import gymnasium as gym' in the vast majority of cases.\r\nSee the migration guide at https://gymnasium.farama.org/introduction/migration_guide/ for additional information.\r\n",,terminal_output +4284,5555101,"TERMINAL",0,0,"2",,terminal_output +4285,5556246,"TERMINAL",0,0,"3",,terminal_output +4286,5557270,"TERMINAL",0,0,"4",,terminal_output +4287,5558295,"TERMINAL",0,0,"5",,terminal_output +4288,5559322,"TERMINAL",0,0,"6",,terminal_output +4289,5560300,"TERMINAL",0,0,"7",,terminal_output +4290,5561365,"TERMINAL",0,0,"8",,terminal_output +4291,5562196,"TERMINAL",0,0,"watch",,terminal_focus +4292,5562382,"TERMINAL",0,0,"9",,terminal_output +4293,5562601,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +4294,5563431,"TERMINAL",0,0,"Episode too short (688), resampling...\r\n",,terminal_output +4295,5563519,"TERMINAL",0,0,"queue",,terminal_command +4296,5563574,"TERMINAL",0,0,"]633;E;2025-09-04 11:29:40 queue;98e58a9e-4278-4ee6-ae9b-7614f41efecb]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Thu Sep 4 11:29:40 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3465195 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)3465285 accelerat train_la tum_cte0 PD\t0:00\t 1 (Priority)3465286 accelerat train_to tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output +4297,5564028,"TERMINAL",0,0,"Episode 0 completed, length: 1000\r\n",,terminal_output +4298,5564593,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +4299,5565412,"TERMINAL",0,0,"Episode 1 completed, length: 1000\r\n",,terminal_output +4300,5565992,"TERMINAL",0,0,"Episode 2 completed, length: 1000\r\n",,terminal_output 
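The "Episode too short (N), resampling..." lines suggest the generator rejects rollouts below a minimum length and retries until it has args.num_episodes valid ones (every accepted episode reports length 1000, so rollouts are presumably capped at 1000 frames). A hypothetical sketch of that loop, consistent with the log output; collect_episode, save_episode, and min_episode_len are assumed names, not the actual code:

    episode_metadata = []
    idx = 0
    while idx < args.num_episodes:
        frames = collect_episode(env)          # one rollout (assumed helper)
        if len(frames) < min_episode_len:      # reject short rollouts and retry
            print(f"Episode too short ({len(frames)}), resampling...")
            continue
        path = save_episode(frames, idx)       # writes episode_{idx}.array_record (assumed helper)
        episode_metadata.append({"path": path, "length": len(frames)})
        print(f"Episode {idx} completed, length: {len(frames)}")
        idx += 1
    print(f"Dataset generated with {len(episode_metadata)} valid episodes")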
+4301,5566511,"TERMINAL",0,0,"idle",,terminal_command +4302,5566523,"TERMINAL",0,0,"]633;E;2025-09-04 11:29:43 idle;98e58a9e-4278-4ee6-ae9b-7614f41efecb]633;CPartition dev_cpuonly : 8 nodes idle\r\nPartition cpuonly : 17 nodes idle\r\nPartition dev_accelerated : 0 nodes idle\r\nPartition accelerated : 33 nodes idle\r\nPartition dev_accelerated-h100 : 0 nodes idle\r\nPartition accelerated-h100 : 0 nodes idle\r\nPartition large : 6 nodes idle\r\nPartition accelerated-h200 : 0 nodes idle\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +4303,5566689,"TERMINAL",0,0,"Episode 3 completed, length: 1000\r\n",,terminal_output +4304,5567249,"TERMINAL",0,0,"Episode 4 completed, length: 1000\r\n",,terminal_output +4305,5567318,"TERMINAL",0,0,"Episode too short (163), resampling...\r\n",,terminal_output +4306,5567992,"TERMINAL",0,0,"Episode 5 completed, length: 1000\r\n",,terminal_output +4307,5568127,"TERMINAL",0,0,"Episode too short (327), resampling...\r\n",,terminal_output +4308,5568255,"TERMINAL",0,0,"Episode too short (253), resampling...\r\n",,terminal_output +4309,5568429,"TERMINAL",0,0,"Episode too short (254), resampling...\r\n",,terminal_output +4310,5569030,"TERMINAL",0,0,"Episode 6 completed, length: 1000\r\n",,terminal_output +4311,5569173,"TERMINAL",0,0,"Episode too short (278), resampling...\r\n",,terminal_output +4312,5569536,"TERMINAL",0,0,"Episode too short (594), resampling...\r\n",,terminal_output +4313,5570378,"TERMINAL",0,0,"Episode 7 completed, length: 1000\r\n",,terminal_output +4314,5571031,"TERMINAL",0,0,"Episode 8 completed, length: 1000\r\nEpisode too short (121), resampling...\r\n",,terminal_output +4315,5571089,"TERMINAL",0,0,"Episode too short (272), resampling...\r\n",,terminal_output +4316,5571573,"TERMINAL",0,0,"Episode 9 completed, length: 1000\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/generate_dataset.py"", line 68, in \r\n ""avg_episode_len"": np.mean([ep[""sequence_length""] for ep in episode_metadata]),\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/generate_dataset.py"", line 68, in \r\n ""avg_episode_len"": np.mean([ep[""sequence_length""] for ep in episode_metadata]),\r\nKeyError: 'sequence_length'\r\n",,terminal_output +4317,5571697,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;1",,terminal_output +4318,5590874,"TERMINAL",0,0,"bash",,terminal_focus +4319,5596344,"generate_dataset.py",0,0,"",python,tab +4320,5598420,"generate_dataset.py",1517,0,"",python,selection_mouse +4321,5598612,"generate_dataset.py",1505,15,"sequence_length",python,selection_mouse +4322,5602576,"generate_dataset.py",1676,0,"",python,selection_mouse +4323,5602717,"generate_dataset.py",1674,6,"length",python,selection_mouse +4324,5609057,"generate_dataset.py",2029,0,"",python,selection_mouse +4325,5609178,"generate_dataset.py",2017,15,"sequence_length",python,selection_mouse +4326,5609869,"generate_dataset.py",2017,15,"",python,content +4327,5610167,"generate_dataset.py",2017,0,"length",python,content +4328,5613972,"TERMINAL",0,0,"bash",,terminal_focus +4329,5616937,"TERMINAL",0,0,"bash",,terminal_focus +4330,5618330,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/generate_dataset_10m.sh",,terminal_command +4331,5618372,"TERMINAL",0,0,"]633;E;2025-09-04 11:30:35 sh slurm/dev/mihir/horeka/generate_dataset_10m.sh;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +4332,5618818,"TERMINAL",0,0,"Gym has been unmaintained since 2022 and does not 
support NumPy 2.0 amongst other critical functionality.\r\nPlease upgrade to Gymnasium, the maintained drop-in replacement of Gym, or contact the authors of your software and request that they upgrade.\r\nUsers of this version of Gym should be able to simply replace 'import gym' with 'import gymnasium as gym' in the vast majority of cases.\r\nSee the migration guide at https://gymnasium.farama.org/introduction/migration_guide/ for additional information.\r\n",,terminal_output +4333,5621169,"TERMINAL",0,0,"Episode too short (194), resampling...\r\n",,terminal_output +4334,5621275,"TERMINAL",0,0,"Episode too short (219), resampling...\r\n",,terminal_output +4335,5621923,"TERMINAL",0,0,"Episode 0 completed, length: 1000\r\n",,terminal_output +4336,5622540,"TERMINAL",0,0,"Episode 1 completed, length: 1000\r\n",,terminal_output +4337,5622644,"TERMINAL",0,0,"Episode too short (83), resampling...\r\n",,terminal_output +4338,5622843,"TERMINAL",0,0,"Episode too short (307), resampling...\r\n",,terminal_output +4339,5623554,"TERMINAL",0,0,"Episode too short (695), resampling...\r\n",,terminal_output +4340,5624137,"TERMINAL",0,0,"Episode 2 completed, length: 1000\r\n",,terminal_output +4341,5624757,"TERMINAL",0,0,"Episode 3 completed, length: 1000\r\n",,terminal_output +4342,5625195,"TERMINAL",0,0,"Episode too short (681), resampling...\r\nEpisode too short (124), resampling...\r\n",,terminal_output +4343,5625301,"TERMINAL",0,0,"Episode too short (196), resampling...\r\n",,terminal_output +4344,5625779,"TERMINAL",0,0,"Episode too short (727), resampling...\r\n",,terminal_output +4345,5626369,"TERMINAL",0,0,"Episode 4 completed, length: 1000\r\n",,terminal_output +4346,5626650,"TERMINAL",0,0,"Episode too short (570), resampling...\r\n",,terminal_output +4347,5627210,"TERMINAL",0,0,"Episode 5 completed, length: 1000\r\n",,terminal_output +4348,5627667,"TERMINAL",0,0,"Episode 6 completed, length: 1000\r\n",,terminal_output +4349,5628132,"TERMINAL",0,0,"Episode too short (858), resampling...\r\n",,terminal_output +4350,5628659,"TERMINAL",0,0,"Episode 7 completed, length: 1000\r\n",,terminal_output +4351,5629402,"TERMINAL",0,0,"Episode 8 completed, length: 1000\r\n",,terminal_output +4352,5630297,"TERMINAL",0,0,"Episode 9 completed, length: 1000\r\nDataset generated with 10 valid episodes\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +4353,5636135,"TERMINAL",0,0,"cat /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/meta_data.json",,terminal_command +4354,5636168,"TERMINAL",0,0,"]633;E;2025-09-04 11:30:53 cat /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/meta_data.json;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C{""env"": ""coinrun"", ""num_episodes"": 10, ""avg_episode_len"": 1000.0, ""episode_metadata"": [{""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_0.array_record"", ""length"": 1000}, {""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_1.array_record"", ""length"": 1000}, {""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_2.array_record"", ""length"": 1000}, {""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_3.array_record"", ""length"": 1000}, {""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_4.array_record"", ""length"": 1000}, {""path"": 
""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_5.array_record"", ""length"": 1000}, {""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_6.array_record"", ""length"": 1000}, {""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_7.array_record"", ""length"": 1000}, {""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_8.array_record"", ""length"": 1000}, {""path"": ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/episode_9.array_record"", ""length"": 1000}]}]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +4355,5753584,"TERMINAL",0,0,"clear",,terminal_command +4356,5753597,"TERMINAL",0,0,"]633;E;2025-09-04 11:32:50 clear;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +4357,5759200,"generate_dataset.py",0,0,"",python,tab +4358,5772593,"generate_dataset.py",2201,0,"",python,selection_mouse +4359,5773519,"generate_dataset.py",2200,0,"",python,selection_command +4360,5774036,"generate_dataset.py",2180,49,"",python,content +4361,5785361,"input_pipeline/preprocess/npy_to_array_records.py",0,0,"",python,tab +4362,5807519,"generate_dataset.py",0,0,"",python,tab +4363,5809559,"input_pipeline/preprocess/npy_to_tfrecord.py",0,0,"",python,tab +4364,5811533,"generate_dataset.py",0,0,"",python,tab +4365,5815843,"input_pipeline/preprocess/video_to_npy.py",0,0,"import ffmpeg\nimport numpy as np\nimport os\nimport tyro\nimport multiprocessing as mp\nfrom dataclasses import dataclass\nimport json\n\n\n@dataclass\nclass Args:\n target_width, target_height = 160, 90\n target_fps = 10\n input_path: str = ""data/minecraft_videos""\n output_path: str = ""data/minecraft_npy""\n\n\ndef preprocess_video(\n idx, in_filename, output_path, target_width, target_height, target_fps\n):\n print(f""Processing video {idx}, Filename: {in_filename}"")\n try:\n out, _ = (\n ffmpeg.input(in_filename)\n .filter(""fps"", fps=target_fps, round=""up"")\n .filter(""scale"", target_width, target_height)\n .output(""pipe:"", format=""rawvideo"", pix_fmt=""rgb24"")\n .run(capture_stdout=True, quiet=True)\n )\n\n frame_size = target_height * target_width * 3\n n_frames = len(out) // frame_size\n\n frames = np.frombuffer(out, np.uint8).reshape(\n n_frames, target_height, target_width, 3\n )\n\n output_file = os.path.join(\n output_path, os.path.splitext(os.path.basename(in_filename))[0] + "".npy""\n )\n\n if not os.path.exists(os.path.dirname(output_file)):\n os.makedirs(os.path.dirname(output_file))\n\n np.save(output_file, frames)\n print(f""Saved {n_frames} frames to {output_file} with shape {frames.shape}"")\n return in_filename, True\n except Exception as e:\n print(f""Error processing video {idx} ({in_filename}): {e}"")\n return in_filename, False\n\n\ndef get_meta_data(filename, directory):\n filepath = os.path.join(directory, filename)\n arr = np.load(filepath, mmap_mode=""r"")\n return filepath, arr.shape[0]\n\n\ndef main():\n args = tyro.cli(Args)\n\n output_path = os.path.join(\n args.output_path,\n f""{args.target_fps}fps_{args.target_width}x{args.target_height}"",\n )\n print(f""Output path: {output_path}"")\n\n num_processes = mp.cpu_count()\n print(f""Number of processes: {num_processes}"")\n\n print(""Converting mp4 to npy files..."")\n pool_args = [\n (\n idx,\n os.path.join(args.input_path, in_filename),\n output_path,\n args.target_width,\n 
args.target_height,\n args.target_fps,\n )\n for idx, in_filename in enumerate(os.listdir(args.input_path))\n if in_filename.endswith("".mp4"") or in_filename.endswith("".webm"")\n ]\n\n results = []\n with mp.Pool(processes=num_processes) as pool:\n for result in pool.starmap(preprocess_video, pool_args):\n results.append(result)\n print(""Done converting mp4 to npy files"")\n\n # count the number of failed videos\n failed_videos = [result for result in results if not result[1]]\n print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of successful videos: {len(results) - len(failed_videos)}"")\n print(f""Number of total videos: {len(results)}"")\n\n with open(os.path.join(output_path, ""failed_videos.json""), ""w"") as f:\n json.dump(failed_videos, f)\n\n print(""Creating metadata file..."")\n metadata = []\n filenames = [\n filename\n for filename in os.listdir(output_path)\n if filename.endswith("".npy"") and filename != ""metadata.npy""\n ]\n pool_args = [(filename, output_path) for filename in filenames]\n\n with mp.Pool(processes=num_processes) as pool:\n results = list(pool.starmap(get_meta_data, pool_args))\n metadata = [{""path"": path, ""length"": length} for path, length in results]\n np.save(os.path.join(output_path, ""metadata.npy""), metadata)\n print(f""Saved {len(metadata)} videos to {output_path}"")\n\n\nif __name__ == ""__main__"":\n main()\n",python,tab +4366,5824432,"input_pipeline/preprocess/video_to_array_records.py",0,0,"import ffmpeg\nimport numpy as np\nimport os\nimport tyro\nimport multiprocessing as mp\nfrom dataclasses import dataclass\nimport json\nimport pickle\nfrom array_record.python.array_record_module import ArrayRecordWriter\n\n\n@dataclass\nclass Args:\n target_width, target_height = 160, 90\n target_fps = 10\n input_path: str = ""data/minecraft_videos""\n output_path: str = ""data/minecraft_arrayrecords""\n\n\ndef preprocess_video(\n idx, in_filename, output_path, target_width, target_height, target_fps\n):\n print(f""Processing video {idx}, Filename: {in_filename}"")\n try:\n out, _ = (\n ffmpeg.input(in_filename)\n .filter(""fps"", fps=target_fps, round=""up"")\n .filter(""scale"", target_width, target_height)\n .output(""pipe:"", format=""rawvideo"", pix_fmt=""rgb24"")\n .run(capture_stdout=True, quiet=True)\n )\n\n output_path = os.path.join(\n output_path,\n os.path.splitext(os.path.basename(in_filename))[0] + "".array_record"",\n )\n\n writer = ArrayRecordWriter(str(output_path), ""group_size:1"")\n\n frame_size = target_height * target_width * 3\n n_frames = len(out) // frame_size\n frames = np.frombuffer(out, np.uint8).reshape(\n n_frames, target_height, target_width, 3\n )\n\n print(f""Saving video {idx} to {output_path}"")\n record = {""raw_video"": frames.tobytes(), ""sequence_length"": n_frames}\n writer.write(pickle.dumps(record))\n writer.close()\n\n return in_filename, n_frames\n except Exception as e:\n print(f""Error processing video {idx} ({in_filename}): {e}"")\n return in_filename, 0\n\n\ndef main():\n args = tyro.cli(Args)\n\n os.makedirs(args.output_path, exist_ok=True)\n print(f""Output path: {args.output_path}"")\n\n num_processes = mp.cpu_count()\n print(f""Number of processes: {num_processes}"")\n\n print(""Converting video to array_record files..."")\n pool_args = [\n (\n idx,\n os.path.join(args.input_path, in_filename),\n args.output_path,\n args.target_width,\n args.target_height,\n args.target_fps,\n )\n for idx, in_filename in enumerate(os.listdir(args.input_path))\n if 
in_filename.endswith("".mp4"") or in_filename.endswith("".webm"")\n ]\n\n results = []\n with mp.Pool(processes=num_processes) as pool:\n for result in pool.starmap(preprocess_video, pool_args):\n results.append(result)\n print(""Done converting video to array_record files"")\n\n # count the number of failed videos\n failed_videos = [result for result in results if result[1] == 0]\n short_episodes = [result for result in results if result[1] < 1600]\n print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short episodes: {len(short_episodes)}"")\n print(\n f""Number of successful videos: {len(results) - len(failed_videos) - len(short_episodes)}""\n )\n print(f""Number of total videos: {len(results)}"")\n\n with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.dump(results, f)\n\n\nif __name__ == ""__main__"":\n main()\n",python,tab +4367,5842237,"input_pipeline/preprocess/video_to_npy.py",0,0,"",python,tab +4368,5844616,"generate_dataset.py",0,0,"",python,tab +4369,5846130,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +4370,5847802,"input_pipeline/preprocess/video_to_array_records.py",364,0,"",python,selection_mouse +4371,5848078,"input_pipeline/preprocess/video_to_array_records.py",364,1,"s",python,selection_mouse +4372,5848078,"input_pipeline/preprocess/video_to_array_records.py",364,2,"st",python,selection_mouse +4373,5848129,"input_pipeline/preprocess/video_to_array_records.py",364,4,"str ",python,selection_mouse +4374,5848210,"input_pipeline/preprocess/video_to_array_records.py",364,5,"str =",python,selection_mouse +4375,5848211,"input_pipeline/preprocess/video_to_array_records.py",364,7,"str = """,python,selection_mouse +4376,5848211,"input_pipeline/preprocess/video_to_array_records.py",364,9,"str = ""da",python,selection_mouse +4377,5848245,"input_pipeline/preprocess/video_to_array_records.py",364,11,"str = ""data",python,selection_mouse +4378,5848283,"input_pipeline/preprocess/video_to_array_records.py",364,14,"str = ""data/mi",python,selection_mouse +4379,5848316,"input_pipeline/preprocess/video_to_array_records.py",364,16,"str = ""data/mine",python,selection_mouse +4380,5848350,"input_pipeline/preprocess/video_to_array_records.py",364,18,"str = ""data/minecr",python,selection_mouse +4381,5848386,"input_pipeline/preprocess/video_to_array_records.py",364,20,"str = ""data/minecraf",python,selection_mouse +4382,5848416,"input_pipeline/preprocess/video_to_array_records.py",364,21,"str = ""data/minecraft",python,selection_mouse +4383,5848456,"input_pipeline/preprocess/video_to_array_records.py",364,22,"str = ""data/minecraft_",python,selection_mouse +4384,5848486,"input_pipeline/preprocess/video_to_array_records.py",364,23,"str = ""data/minecraft_a",python,selection_mouse +4385,5848515,"input_pipeline/preprocess/video_to_array_records.py",364,24,"str = ""data/minecraft_ar",python,selection_mouse +4386,5848543,"input_pipeline/preprocess/video_to_array_records.py",364,25,"str = ""data/minecraft_arr",python,selection_mouse +4387,5848573,"input_pipeline/preprocess/video_to_array_records.py",364,26,"str = ""data/minecraft_arra",python,selection_mouse +4388,5848602,"input_pipeline/preprocess/video_to_array_records.py",364,27,"str = ""data/minecraft_array",python,selection_mouse +4389,5848633,"input_pipeline/preprocess/video_to_array_records.py",364,29,"str = ""data/minecraft_arrayre",python,selection_mouse +4390,5848669,"input_pipeline/preprocess/video_to_array_records.py",364,32,"str = 
""data/minecraft_arrayrecor",python,selection_mouse +4391,5848702,"input_pipeline/preprocess/video_to_array_records.py",364,35,"str = ""data/minecraft_arrayrecords""",python,selection_mouse +4392,5849266,"input_pipeline/preprocess/video_to_array_records.py",399,0,"",python,selection_mouse +4393,5849289,"input_pipeline/preprocess/video_to_array_records.py",398,0,"",python,selection_command +4394,5849858,"input_pipeline/preprocess/video_to_array_records.py",399,0,"",python,selection_mouse +4395,5849877,"input_pipeline/preprocess/video_to_array_records.py",398,0,"",python,selection_command +4396,5850077,"input_pipeline/preprocess/video_to_array_records.py",398,1,"""",python,selection_mouse +4397,5850078,"input_pipeline/preprocess/video_to_array_records.py",394,4,"ords",python,selection_mouse +4398,5850078,"input_pipeline/preprocess/video_to_array_records.py",389,9,"ayrecords",python,selection_mouse +4399,5850105,"input_pipeline/preprocess/video_to_array_records.py",399,0,"",python,selection_command +4400,5850106,"input_pipeline/preprocess/video_to_array_records.py",399,1,"\n",python,selection_mouse +4401,5850284,"input_pipeline/preprocess/video_to_array_records.py",371,28,"data/minecraft_arrayrecords""",python,selection_mouse +4402,5850285,"input_pipeline/preprocess/video_to_array_records.py",370,29,"""data/minecraft_arrayrecords""",python,selection_mouse +4403,5850394,"input_pipeline/preprocess/video_to_array_records.py",369,30," ""data/minecraft_arrayrecords""",python,selection_mouse +4404,5850394,"input_pipeline/preprocess/video_to_array_records.py",368,31,"= ""data/minecraft_arrayrecords""",python,selection_mouse +4405,5850394,"input_pipeline/preprocess/video_to_array_records.py",367,32," = ""data/minecraft_arrayrecords""",python,selection_mouse +4406,5850417,"input_pipeline/preprocess/video_to_array_records.py",366,33,"r = ""data/minecraft_arrayrecords""",python,selection_mouse +4407,5850678,"input_pipeline/preprocess/video_to_array_records.py",365,34,"tr = ""data/minecraft_arrayrecords""",python,selection_mouse +4408,5850731,"input_pipeline/preprocess/video_to_array_records.py",364,35,"str = ""data/minecraft_arrayrecords""",python,selection_mouse +4409,5851186,"input_pipeline/preprocess/video_to_array_records.py",364,0,"",python,selection_mouse +4410,5852449,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"import os\nimport numpy as np\nfrom PIL import Image\nimport tyro\nfrom dataclasses import dataclass\nimport pickle\nimport json\nimport multiprocessing as mp\nfrom array_record.python.array_record_module import ArrayRecordWriter\n\n\n@dataclass\nclass Args:\n input_path: str = ""data/rl_pngs""\n output_path: str = ""data/rl_arrayrecords""\n original_fps: int = 60\n target_fps: int = 10\n target_width: int = 64 \n\ndef preprocess_pngs(input_dir, output_path, original_fps, target_fps, target_width=None):\n print(f""Processing PNGs in {input_dir}"")\n try:\n png_files = sorted([\n f for f in os.listdir(input_dir)\n if f.lower().endswith('.png')\n ], key=lambda x: int(os.path.splitext(x)[0]))\n\n if not png_files:\n print(f""No PNG files found in {input_dir}"")\n return input_dir, 0\n\n # Downsample indices\n n_total = len(png_files)\n if original_fps == target_fps:\n selected_indices = np.arange(n_total)\n else:\n n_target = int(np.floor(n_total * target_fps / original_fps))\n selected_indices = np.linspace(0, n_total-1, n_target, dtype=int)\n\n selected_files = [png_files[i] for i in selected_indices]\n\n # Load images\n frames = []\n for fname in selected_files:\n img = 
Image.open(os.path.join(input_dir, fname)).convert(""RGB"")\n if target_width is not None:\n w, h = img.size # PIL gives (width, height)\n if w != target_width:\n target_height = int(round(h * (target_width / float(w))))\n resample_filter = Image.LANCZOS\n img = img.resize((target_width, target_height), resample=resample_filter)\n frames.append(np.array(img))\n\n frames = np.stack(frames, axis=0) # (n_frames, H, W, 3)\n environment = os.path.basename(os.path.dirname(input_dir)) \n episode_id = os.path.basename(input_dir)\n # Write to array_record\n os.makedirs(output_path, exist_ok=True)\n out_file = os.path.join(\n output_path,\n f""{environment}_{episode_id}.array_record""\n )\n writer = ArrayRecordWriter(str(out_file), ""group_size:1"")\n record = {""raw_video"": frames.tobytes(), \n ""environment"": environment,\n ""sequence_length"": frames.shape[0]}\n writer.write(pickle.dumps(record))\n writer.close()\n print(f""Saved {frames.shape[0]} frames to {out_file}"")\n return input_dir, frames.shape[0]\n except Exception as e:\n print(f""Error processing {input_dir}: {e}"")\n return input_dir, 0\n\ndef main():\n args = tyro.cli(Args)\n os.makedirs(args.output_path, exist_ok=True)\n print(f""Output path: {args.output_path}"")\n\n games = [\n os.path.join(args.input_path, d)\n for d in os.listdir(args.input_path)\n if os.path.isdir(os.path.join(args.input_path, d))\n ]\n episodes = [\n os.path.join(game, d)\n for game in games\n for d in os.listdir(game)\n ]\n\n results = []\n num_processes = mp.cpu_count()\n print(f""Number of processes: {num_processes}"")\n pool_args = [\n (episode, args.output_path, args.original_fps, args.target_fps, args.target_width)\n for episode in episodes\n ]\n with mp.Pool(processes=num_processes) as pool:\n for result in pool.starmap(preprocess_pngs, pool_args):\n results.append(result)\n\n print(""Done converting png to array_record files"")\n\n # count the number of failed videos\n failed_videos = [result for result in results if result[1] == 0]\n short_episodes = [result for result in results if result[1] < 1600]\n print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short episodes: {len(short_episodes)}"")\n print(\n f""Number of successful videos: {len(results) - len(failed_videos) - len(short_episodes)}""\n )\n print(f""Number of total videos: {len(results)}"")\n\n with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.dump(results, f)\n print(""Done."")\n\nif __name__ == ""__main__"":\n main()",python,tab +4411,5858187,"input_pipeline/preprocess/pngs_to_array_records.py",535,0,"",python,selection_mouse +4412,5859213,"input_pipeline/preprocess/pngs_to_array_records.py",410,0,"",python,selection_mouse +4413,5859733,"input_pipeline/preprocess/pngs_to_array_records.py",409,0,"",python,selection_mouse +4414,5859749,"input_pipeline/preprocess/pngs_to_array_records.py",408,0,"",python,selection_command +4415,5860605,"input_pipeline/preprocess/pngs_to_array_records.py",381,0,"",python,selection_mouse +4416,5860625,"input_pipeline/preprocess/pngs_to_array_records.py",380,0,"",python,selection_command +4417,5861165,"input_pipeline/preprocess/pngs_to_array_records.py",356,0,"",python,selection_mouse +4418,5861166,"input_pipeline/preprocess/pngs_to_array_records.py",355,0,"",python,selection_command +4419,5861981,"input_pipeline/preprocess/pngs_to_array_records.py",409,0,"",python,selection_mouse +4420,5861983,"input_pipeline/preprocess/pngs_to_array_records.py",408,0,"",python,selection_command 
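The FPS downsampling in pngs_to_array_records.py above keeps every frame when the source and target rates match, and otherwise picks evenly spaced frame indices with np.linspace. A worked example of that selection with an illustrative frame count:

    import numpy as np

    original_fps, target_fps = 60, 10
    n_total = 600                      # e.g. a 10 s episode recorded at 60 fps
    if original_fps == target_fps:
        selected_indices = np.arange(n_total)
    else:
        n_target = int(np.floor(n_total * target_fps / original_fps))  # 100
        selected_indices = np.linspace(0, n_total - 1, n_target, dtype=int)
    # 600 source frames -> 100 selected indices, i.e. roughly every 6th frame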
+4421,5862779,"input_pipeline/preprocess/pngs_to_array_records.py",409,0,"",python,selection_mouse +4422,5862787,"input_pipeline/preprocess/pngs_to_array_records.py",408,0,"",python,selection_command +4423,5863426,"input_pipeline/preprocess/pngs_to_array_records.py",381,0,"",python,selection_mouse +4424,5863429,"input_pipeline/preprocess/pngs_to_array_records.py",380,0,"",python,selection_command +4425,5876616,"input_pipeline/preprocess/pngs_to_array_records.py",304,0,"",python,selection_mouse +4426,5876825,"input_pipeline/preprocess/pngs_to_array_records.py",304,3," = ",python,selection_mouse +4427,5876825,"input_pipeline/preprocess/pngs_to_array_records.py",304,7," = ""dat",python,selection_mouse +4428,5876826,"input_pipeline/preprocess/pngs_to_array_records.py",304,9," = ""data/",python,selection_mouse +4429,5876826,"input_pipeline/preprocess/pngs_to_array_records.py",304,12," = ""data/rl_",python,selection_mouse +4430,5876952,"input_pipeline/preprocess/pngs_to_array_records.py",304,14," = ""data/rl_ar",python,selection_mouse +4431,5876952,"input_pipeline/preprocess/pngs_to_array_records.py",304,16," = ""data/rl_arra",python,selection_mouse +4432,5876953,"input_pipeline/preprocess/pngs_to_array_records.py",304,17," = ""data/rl_array",python,selection_mouse +4433,5876953,"input_pipeline/preprocess/pngs_to_array_records.py",304,19," = ""data/rl_arrayre",python,selection_mouse +4434,5876953,"input_pipeline/preprocess/pngs_to_array_records.py",304,20," = ""data/rl_arrayrec",python,selection_mouse +4435,5876953,"input_pipeline/preprocess/pngs_to_array_records.py",304,21," = ""data/rl_arrayreco",python,selection_mouse +4436,5876993,"input_pipeline/preprocess/pngs_to_array_records.py",304,23," = ""data/rl_arrayrecord",python,selection_mouse +4437,5877025,"input_pipeline/preprocess/pngs_to_array_records.py",304,25," = ""data/rl_arrayrecords""",python,selection_mouse +4438,5877871,"input_pipeline/preprocess/pngs_to_array_records.py",304,25,"",python,content +4439,5877927,"input_pipeline/preprocess/pngs_to_array_records.py",303,0,"",python,selection_command +4440,5878136,"input_pipeline/preprocess/pngs_to_array_records.py",267,0,"",python,selection_command +4441,5878603,"input_pipeline/preprocess/pngs_to_array_records.py",266,0,"",python,selection_command +4442,5879890,"input_pipeline/preprocess/pngs_to_array_records.py",266,17,"",python,content +4443,5879926,"input_pipeline/preprocess/pngs_to_array_records.py",265,0,"",python,selection_command +4444,5902001,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +4445,5903627,"input_pipeline/preprocess/video_to_array_records.py",298,0,"",python,selection_mouse +4446,5904323,"input_pipeline/preprocess/video_to_array_records.py",318,0,"",python,selection_mouse +4447,5905085,"input_pipeline/preprocess/video_to_array_records.py",301,99,"",python,content +4448,5905239,"input_pipeline/preprocess/video_to_array_records.py",298,0,"",python,selection_command +4449,5905384,"input_pipeline/preprocess/video_to_array_records.py",256,0,"",python,selection_command +4450,5905524,"input_pipeline/preprocess/video_to_array_records.py",237,0,"",python,selection_command +4451,5905746,"input_pipeline/preprocess/video_to_array_records.py",238,0,"\n input_path: str = ""data/minecraft_videos""\n output_path: str = ""data/minecraft_arrayrecords""",python,content +4452,5905823,"input_pipeline/preprocess/video_to_array_records.py",243,0,"",python,selection_command 
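Both video preprocessors shown above (video_to_npy.py and video_to_array_records.py) share one decode path: ffmpeg resamples and rescales the clip, emits raw RGB24 bytes on stdout, and the byte buffer reshapes directly into a (frames, height, width, 3) uint8 array. Condensed from that code; "clip.mp4" is an illustrative filename:

    import ffmpeg
    import numpy as np

    target_width, target_height, target_fps = 160, 90, 10
    out, _ = (
        ffmpeg.input("clip.mp4")
        .filter("fps", fps=target_fps, round="up")
        .filter("scale", target_width, target_height)
        .output("pipe:", format="rawvideo", pix_fmt="rgb24")
        .run(capture_stdout=True, quiet=True)
    )
    frame_size = target_height * target_width * 3    # bytes per one RGB24 frame
    n_frames = len(out) // frame_size
    frames = np.frombuffer(out, np.uint8).reshape(
        n_frames, target_height, target_width, 3
    )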
+4453,5907060,"input_pipeline/preprocess/video_to_array_records.py",284,0,"",python,selection_command +4454,5907552,"input_pipeline/preprocess/video_to_array_records.py",283,1,"",python,content +4455,5907720,"input_pipeline/preprocess/video_to_array_records.py",267,16,"",python,content +4456,5907864,"input_pipeline/preprocess/video_to_array_records.py",266,1,"",python,content +4457,5908091,"input_pipeline/preprocess/video_to_array_records.py",262,4,"",python,content +4458,5908468,"input_pipeline/preprocess/video_to_array_records.py",261,1,"",python,content +4459,5908999,"input_pipeline/preprocess/video_to_array_records.py",259,2,"",python,content +4460,5909391,"input_pipeline/preprocess/video_to_array_records.py",258,1,"",python,content +4461,5909960,"input_pipeline/preprocess/video_to_array_records.py",257,0,"",python,selection_command +4462,5910386,"input_pipeline/preprocess/video_to_array_records.py",277,0,"",python,selection_command +4463,5911327,"input_pipeline/preprocess/video_to_array_records.py",278,0,"",python,selection_command +4464,5911479,"input_pipeline/preprocess/video_to_array_records.py",279,0,"",python,selection_command +4465,5911648,"input_pipeline/preprocess/video_to_array_records.py",280,0,"",python,selection_command +4466,5912411,"input_pipeline/preprocess/video_to_array_records.py",279,0,"",python,selection_command +4467,5913597,"input_pipeline/preprocess/video_to_array_records.py",279,32,"",python,content +4468,5913634,"input_pipeline/preprocess/video_to_array_records.py",278,0,"",python,selection_command +4469,5928263,"generate_dataset.py",0,0,"",python,tab +4470,5933514,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_type: str = ""maskgit"" # supported options: maskgit, causal\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = 
jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(\n model: Genie, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n """"""Compute masked dynamics loss""""""\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@nnx.jit\ndef train_step(\n model: Genie, optimizer: nnx.Optimizer, inputs: dict\n) -> tuple[jax.Array, jax.Array, dict]:\n """"""Update state and compute metrics""""""\n\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n return dynamics_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(model)\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n 
latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n decode=False,\n rngs=rngs,\n )\n\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n optimizer = nnx.Optimizer(genie, tx)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = 
ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n # NOTE: We have to remove the (unused) tokenizer vq dropout due flax.nnx lazily initializing modules.\n # Specifically, the first dynamics model checkpoint will contain the vq dropout module,\n # but the first full restore will fail due to nnx not initializing the module when\n # dropout is set to 0.0.\n del optimizer.model.tokenizer.vq.drop\n\n # --- TRAIN LOOP ---\n dataloader = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in grain_iterator\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n 
dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +4471,6031091,"TERMINAL",0,0,"bash",,terminal_focus +4472,6079863,"train_dynamics.py",834,0,"",python,selection_mouse +4473,6079901,"train_dynamics.py",833,0,"",python,selection_command +4474,6081190,"TERMINAL",0,0,"bash",,terminal_focus +4475,6084950,"TERMINAL",0,0,"bash",,terminal_focus +4476,6105971,"TERMINAL",0,0,"bash",,terminal_focus +4477,6108254,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab +4478,6113515,"TERMINAL",0,0,"bash",,terminal_focus +4479,6257697,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",195,0,"",shellscript,selection_mouse +4480,6257748,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",194,0,"",shellscript,selection_command +4481,6259012,"TERMINAL",0,0,"bash",,terminal_focus +4482,6295489,"TERMINAL",0,0,"bash",,terminal_focus +4483,6296998,"TERMINAL",0,0,"bash",,terminal_focus +4484,6298095,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab +4485,6299597,"TERMINAL",0,0,"bash",,terminal_focus +4486,6470157,"TERMINAL",0,0,"git status",,terminal_command +4487,6470198,"TERMINAL",0,0,"]633;E;2025-09-04 11:44:47 git status;e3f3d151-a063-4c85-891d-0bfb917c5617]633;C",,terminal_output +4488,6470338,"TERMINAL",0,0,"On branch input_pipeline/add-npy2array_record\r\nYour branch is up to date with 'origin/input_pipeline/add-npy2array_record'.\r\n\r\nChanges not staged for commit:\r\n (use ""git add/rm ..."" to update what will be committed)\r\n (use ""git restore ..."" to discard changes in working directory)\r\n\tmodified: generate_dataset.py\r\n\tdeleted: input_pipeline/preprocess/npy_to_array_records.py\r\n\tdeleted: input_pipeline/preprocess/npy_to_tfrecord.py\r\n\tmodified: input_pipeline/preprocess/pngs_to_array_records.py\r\n\tmodified: input_pipeline/preprocess/video_to_array_records.py\r\n\tdeleted: input_pipeline/preprocess/video_to_npy.py\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tdata/\r\n\tdata_atari/\r\n\tkiller.sh\r\n\tkiller_partition.sh\r\n\tlog.log\r\n\tlogs/\r\n\toverfit_dir.zip\r\n\tread_tf_record.py\r\n\trequirements-franz.txt\r\n\tsamples/\r\n\tscripts_cremers/\r\n\tslurm/\r\n\tutils/visualizer.py\r\n\r\nno changes added to commit (use ""git add"" and/or ""git commit -a"")\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +4489,6477655,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +4490,6483144,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +4491,6487728,"input_pipeline/preprocess/video_to_array_records.py",297,0,"",python,selection_mouse +4492,6489428,"input_pipeline/preprocess/video_to_array_records.py",296,1,"",python,content +4493,6489621,"input_pipeline/preprocess/video_to_array_records.py",296,0,"\n ",python,content +4494,6489672,"input_pipeline/preprocess/video_to_array_records.py",301,1,"",python,content +4495,6491386,"input_pipeline/preprocess/video_to_array_records.py",296,0,"",python,selection_mouse +4496,6492022,"input_pipeline/preprocess/video_to_array_records.py",296,0," ",python,content +4497,6492023,"input_pipeline/preprocess/video_to_array_records.py",297,0,"",python,selection_keyboard +4498,6492195,"input_pipeline/preprocess/video_to_array_records.py",297,0,"=",python,content 
+4499,6492196,"input_pipeline/preprocess/video_to_array_records.py",298,0,"",python,selection_keyboard +4500,6492265,"input_pipeline/preprocess/video_to_array_records.py",298,0," ",python,content +4501,6492265,"input_pipeline/preprocess/video_to_array_records.py",299,0,"",python,selection_keyboard +4502,6493465,"input_pipeline/preprocess/video_to_array_records.py",299,0,"1",python,content +4503,6493468,"input_pipeline/preprocess/video_to_array_records.py",300,0,"",python,selection_keyboard +4504,6494392,"input_pipeline/preprocess/video_to_array_records.py",300,0,"6",python,content +4505,6494393,"input_pipeline/preprocess/video_to_array_records.py",301,0,"",python,selection_keyboard +4506,6494447,"input_pipeline/preprocess/video_to_array_records.py",301,0,"0",python,content +4507,6494448,"input_pipeline/preprocess/video_to_array_records.py",302,0,"",python,selection_keyboard +4508,6496002,"input_pipeline/preprocess/video_to_array_records.py",296,0,"",python,selection_mouse +4509,6496898,"input_pipeline/preprocess/video_to_array_records.py",296,0,"_",python,content +4510,6496899,"input_pipeline/preprocess/video_to_array_records.py",297,0,"",python,selection_keyboard +4511,6497229,"input_pipeline/preprocess/video_to_array_records.py",296,1,"",python,content +4512,6497497,"input_pipeline/preprocess/video_to_array_records.py",296,0,":",python,content +4513,6497498,"input_pipeline/preprocess/video_to_array_records.py",297,0,"",python,selection_keyboard +4514,6498359,"input_pipeline/preprocess/video_to_array_records.py",297,0," ",python,content +4515,6498360,"input_pipeline/preprocess/video_to_array_records.py",298,0,"",python,selection_keyboard +4516,6498500,"input_pipeline/preprocess/video_to_array_records.py",298,0,"i",python,content +4517,6498501,"input_pipeline/preprocess/video_to_array_records.py",299,0,"",python,selection_keyboard +4518,6498633,"input_pipeline/preprocess/video_to_array_records.py",299,0,"n",python,content +4519,6498634,"input_pipeline/preprocess/video_to_array_records.py",300,0,"",python,selection_keyboard +4520,6498699,"input_pipeline/preprocess/video_to_array_records.py",300,0,"t",python,content +4521,6498700,"input_pipeline/preprocess/video_to_array_records.py",301,0,"",python,selection_keyboard +4522,6499237,"input_pipeline/preprocess/video_to_array_records.py",300,0,"",python,selection_command +4523,6499402,"input_pipeline/preprocess/video_to_array_records.py",328,0,"",python,selection_command +4524,6499744,"input_pipeline/preprocess/video_to_array_records.py",327,0,"",python,selection_command +4525,6499955,"input_pipeline/preprocess/video_to_array_records.py",326,0,"",python,selection_command +4526,6500054,"input_pipeline/preprocess/video_to_array_records.py",325,0,"",python,selection_command +4527,6501483,"input_pipeline/preprocess/video_to_array_records.py",325,0,":",python,content +4528,6501484,"input_pipeline/preprocess/video_to_array_records.py",326,0,"",python,selection_keyboard +4529,6501728,"input_pipeline/preprocess/video_to_array_records.py",326,0,"i",python,content +4530,6501729,"input_pipeline/preprocess/video_to_array_records.py",327,0,"",python,selection_keyboard +4531,6501816,"input_pipeline/preprocess/video_to_array_records.py",327,0,"n",python,content +4532,6501817,"input_pipeline/preprocess/video_to_array_records.py",328,0,"",python,selection_keyboard +4533,6502024,"input_pipeline/preprocess/video_to_array_records.py",328,0,"t",python,content +4534,6502025,"input_pipeline/preprocess/video_to_array_records.py",329,0,"",python,selection_keyboard 
+4535,6502059,"input_pipeline/preprocess/video_to_array_records.py",329,0," ",python,content +4536,6502060,"input_pipeline/preprocess/video_to_array_records.py",330,0,"",python,selection_keyboard +4537,6502695,"input_pipeline/preprocess/video_to_array_records.py",329,1,"",python,content +4538,6504552,"input_pipeline/preprocess/video_to_array_records.py",331,4,"",python,content +4539,6504948,"input_pipeline/preprocess/video_to_array_records.py",331,1,"",python,content +4540,6505426,"input_pipeline/preprocess/video_to_array_records.py",330,0,"",python,selection_command +4541,6506228,"input_pipeline/preprocess/video_to_array_records.py",327,0,"",python,selection_mouse +4542,6507044,"input_pipeline/preprocess/video_to_array_records.py",326,0,"",python,selection_command +4543,6507353,"input_pipeline/preprocess/video_to_array_records.py",326,0," ",python,content +4544,6507354,"input_pipeline/preprocess/video_to_array_records.py",327,0,"",python,selection_keyboard +4545,6507645,"input_pipeline/preprocess/video_to_array_records.py",326,0,"",python,selection_command +4546,6508951,"input_pipeline/preprocess/video_to_array_records.py",350,0,"",python,selection_mouse +4547,6509987,"input_pipeline/preprocess/video_to_array_records.py",350,0,":",python,content +4548,6509988,"input_pipeline/preprocess/video_to_array_records.py",351,0,"",python,selection_keyboard +4549,6510269,"input_pipeline/preprocess/video_to_array_records.py",351,0,"i",python,content +4550,6510270,"input_pipeline/preprocess/video_to_array_records.py",352,0,"",python,selection_keyboard +4551,6510407,"input_pipeline/preprocess/video_to_array_records.py",352,0,"n",python,content +4552,6510407,"input_pipeline/preprocess/video_to_array_records.py",353,0,"",python,selection_keyboard +4553,6510520,"input_pipeline/preprocess/video_to_array_records.py",353,0,"t",python,content +4554,6510521,"input_pipeline/preprocess/video_to_array_records.py",354,0,"",python,selection_keyboard +4555,6510590,"input_pipeline/preprocess/video_to_array_records.py",354,0," ",python,content +4556,6510591,"input_pipeline/preprocess/video_to_array_records.py",355,0,"",python,selection_keyboard +4557,6511052,"input_pipeline/preprocess/video_to_array_records.py",354,1,"",python,content +4558,6511440,"input_pipeline/preprocess/video_to_array_records.py",353,0,"",python,selection_command +4559,6511621,"input_pipeline/preprocess/video_to_array_records.py",352,0,"",python,selection_command +4560,6511762,"input_pipeline/preprocess/video_to_array_records.py",351,0,"",python,selection_command +4561,6512025,"input_pipeline/preprocess/video_to_array_records.py",351,0," ",python,content +4562,6512026,"input_pipeline/preprocess/video_to_array_records.py",352,0,"",python,selection_keyboard +4563,6512332,"input_pipeline/preprocess/video_to_array_records.py",351,0,"",python,selection_command +4564,6514521,"generate_dataset.py",0,0,"",python,tab +4565,6515124,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +4566,6519121,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +4567,6521086,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +4568,6522283,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +4569,6522719,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +4570,6523459,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +4571,6524301,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab 
+4572,6526019,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +4573,6565250,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +4574,6566558,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +4575,6567614,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +4576,6626240,"TERMINAL",0,0,"git commit -am ""modify coinrun data generation to output array-records, remove unused files, code refactors""",,terminal_command +4577,6626283,"TERMINAL",0,0,"]633;E;2025-09-04 11:47:23 git commit -am ""modify coinrun data generation to output array-records, remove unused files, code refactors"";e3f3d151-a063-4c85-891d-0bfb917c5617]633;C",,terminal_output +4578,6626500,"TERMINAL",0,0,"[input_pipeline/add-npy2array_record b3148a5] modify coinrun data generation to output array-records, remove unused files, code refactors\r\n 6 files changed, 39 insertions(+), 344 deletions(-)\r\n delete mode 100644 input_pipeline/preprocess/npy_to_array_records.py\r\n delete mode 100644 input_pipeline/preprocess/npy_to_tfrecord.py\r\n delete mode 100644 input_pipeline/preprocess/video_to_npy.py\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +4579,6632175,"TERMINAL",0,0,"git git status",,terminal_command +4580,6632236,"TERMINAL",0,0,"]633;E;2025-09-04 11:47:29 git git status;e3f3d151-a063-4c85-891d-0bfb917c5617]633;Cgit: 'git' is not a git command. See 'git --help'.\r\n\r\nThe most similar command is\r\n\tinit\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;1",,terminal_output +4581,6635097,"TERMINAL",0,0,"git status",,terminal_command +4582,6635148,"TERMINAL",0,0,"]633;E;2025-09-04 11:47:32 git status;e3f3d151-a063-4c85-891d-0bfb917c5617]633;COn branch input_pipeline/add-npy2array_record\r\nYour branch is ahead of 'origin/input_pipeline/add-npy2array_record' by 1 commit.\r\n (use ""git push"" to publish your local commits)\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tdata/\r\n\tdata_atari/\r\n\tkiller.sh\r\n\tkiller_partition.sh\r\n\tlog.log\r\n\tlogs/\r\n\toverfit_dir.zip\r\n\tread_tf_record.py\r\n\trequirements-franz.txt\r\n\tsamples/\r\n\tscripts_cremers/\r\n\tslurm/\r\n\tutils/visualizer.py\r\n\r\nnothing added to commit but untracked files present (use ""git add"" to track)\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +4583,6648042,"TERMINAL",0,0,"git push",,terminal_command +4584,6648084,"TERMINAL",0,0,"]633;E;2025-09-04 11:47:45 git push;e3f3d151-a063-4c85-891d-0bfb917c5617]633;C",,terminal_output +4585,6649497,"TERMINAL",0,0,"Enumerating objects: 12, done.\r\nCounting objects: 8% (1/12)\rCounting objects: 16% (2/12)\rCounting objects: 25% (3/12)\rCounting objects: 33% (4/12)\rCounting objects: 41% (5/12)\rCounting objects: 50% (6/12)\rCounting objects: 58% (7/12)\rCounting objects: 66% (8/12)\rCounting objects: 75% (9/12)\rCounting objects: 83% (10/12)\rCounting objects: 91% (11/12)\rCounting objects: 100% (12/12)\rCounting objects: 100% (12/12), done.\r\nDelta compression using up to 152 threads\r\nCompressing objects: 14% (1/7)\rCompressing objects: 28% (2/7)\rCompressing objects: 42% (3/7)\rCompressing objects: 57% (4/7)\rCompressing objects: 71% (5/7)\rCompressing objects: 85% (6/7)\rCompressing objects: 100% (7/7)\rCompressing objects: 100% (7/7), done.\r\nWriting objects: 14% (1/7)\rWriting objects: 28% (2/7)\rWriting objects: 42% (3/7)\rWriting objects: 57% (4/7)\rWriting objects: 71% (5/7)\rWriting objects: 85% (6/7)\rWriting 
objects: 100% (7/7)\rWriting objects: 100% (7/7), 1.82 KiB | 622.00 KiB/s, done.\r\nTotal 7 (delta 4), reused 0 (delta 0), pack-reused 0\r\n",,terminal_output +4586,6649593,"TERMINAL",0,0,"remote: Resolving deltas: 0% (0/4)\rremote: Resolving deltas: 25% (1/4)\rremote: Resolving deltas: 50% (2/4)\rremote: Resolving deltas: 75% (3/4)\rremote: Resolving deltas: 100% (4/4)\rremote: Resolving deltas: 100% (4/4), completed with 3 local objects.\r\n",,terminal_output +4587,6649741,"TERMINAL",0,0,"To github.com:p-doom/jasmine.git\r\n eeb24a7..b3148a5 input_pipeline/add-npy2array_record -> input_pipeline/add-npy2array_record\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +4588,6704842,"generate_dataset.py",0,0,"",python,tab +4589,6707317,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +4590,6710053,"generate_dataset.py",0,0,"",python,tab +4591,6713816,"generate_dataset.py",912,0,"",python,selection_mouse +4592,6713988,"generate_dataset.py",912,1," ",python,selection_mouse +4593,6713989,"generate_dataset.py",912,3," ac",python,selection_mouse +4594,6714196,"generate_dataset.py",912,6," actio",python,selection_mouse +4595,6714196,"generate_dataset.py",912,10," action = ",python,selection_mouse +4596,6714197,"generate_dataset.py",912,14," action = type",python,selection_mouse +4597,6714197,"generate_dataset.py",912,17," action = types_n",python,selection_mouse +4598,6714197,"generate_dataset.py",912,19," action = types_np.",python,selection_mouse +4599,6714200,"generate_dataset.py",912,22," action = types_np.sam",python,selection_mouse +4600,6714221,"generate_dataset.py",912,24," action = types_np.sampl",python,selection_mouse +4601,6714344,"generate_dataset.py",912,82," action = types_np.sample(env.ac_space, bshape=(env.num,))\n env.act(action)",python,selection_mouse +4602,6715437,"generate_dataset.py",912,82,"",python,content +4603,6715482,"generate_dataset.py",911,0,"",python,selection_command +4604,6716227,"generate_dataset.py",905,7," ",python,content +4605,6716736,"generate_dataset.py",913,0,"env.act(types_np.sample(env.ac_space, bshape=(env.num,)))",python,content +4606,6764551,"TERMINAL",0,0,"git commit -am ""revert refactoring change""",,terminal_command +4607,6764601,"TERMINAL",0,0,"]633;E;2025-09-04 11:49:41 git commit -am ""revert refactoring change"";e3f3d151-a063-4c85-891d-0bfb917c5617]633;C",,terminal_output +4608,6764687,"TERMINAL",0,0,"[input_pipeline/add-npy2array_record 43bdbba] revert refactoring change\r\n 1 file changed, 1 insertion(+), 2 deletions(-)\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +4609,6765826,"TERMINAL",0,0,"git push",,terminal_command +4610,6765897,"TERMINAL",0,0,"]633;E;2025-09-04 11:49:43 git push;e3f3d151-a063-4c85-891d-0bfb917c5617]633;C",,terminal_output +4611,6767365,"TERMINAL",0,0,"Enumerating objects: 5, done.\r\nCounting objects: 20% (1/5)\rCounting objects: 40% (2/5)\rCounting objects: 60% (3/5)\rCounting objects: 80% (4/5)\rCounting objects: 100% (5/5)\rCounting objects: 100% (5/5), done.\r\nDelta compression using up to 152 threads\r\nCompressing objects: 33% (1/3)\rCompressing objects: 66% (2/3)\rCompressing objects: 100% (3/3)\rCompressing objects: 100% (3/3), done.\r\nWriting objects: 33% (1/3)\rWriting objects: 66% (2/3)\rWriting objects: 100% (3/3)\rWriting objects: 100% (3/3), 316 bytes | 316.00 KiB/s, done.\r\nTotal 3 (delta 2), reused 0 (delta 0), pack-reused 0\r\nremote: Resolving deltas: 0% (0/2)\rremote: Resolving deltas: 50% (1/2)\rremote: Resolving deltas: 100% 
(2/2)\rremote: Resolving deltas: 100% (2/2), completed with 2 local objects.\r\n",,terminal_output +4612,6767547,"TERMINAL",0,0,"To github.com:p-doom/jasmine.git\r\n b3148a5..43bdbba input_pipeline/add-npy2array_record -> input_pipeline/add-npy2array_record\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +4613,6825075,"generate_dataset.py",0,0,"",python,tab +4614,6828141,"generate_dataset.py",472,0,"",python,selection_mouse +4615,6828270,"generate_dataset.py",470,4,"data",python,selection_mouse +4616,6828465,"generate_dataset.py",470,21,"data/coinrun_episodes",python,selection_mouse +4617,6829057,"generate_dataset.py",488,0,"",python,selection_mouse +4618,6829057,"generate_dataset.py",475,16,"coinrun_episodes",python,selection_mouse +4619,6829394,"generate_dataset.py",474,17,"/coinrun_episodes",python,selection_mouse +4620,6829742,"generate_dataset.py",474,0,"",python,selection_mouse +4621,6830561,"generate_dataset.py",470,4,"data",python,selection_mouse +4622,6830785,"generate_dataset.py",470,5,"data/",python,selection_mouse +4623,6830802,"generate_dataset.py",470,21,"data/coinrun_episodes",python,selection_mouse +4624,6831234,"generate_dataset.py",479,0,"",python,selection_mouse +4625,6831732,"generate_dataset.py",478,1,"n",python,selection_mouse +4626,6831766,"generate_dataset.py",477,2,"in",python,selection_mouse +4627,6831820,"generate_dataset.py",476,3,"oin",python,selection_mouse +4628,6831839,"generate_dataset.py",475,4,"coin",python,selection_mouse +4629,6831904,"generate_dataset.py",444,35,"00\n output_dir: str = ""data/coin",python,selection_mouse +4630,6831960,"generate_dataset.py",443,36,"000\n output_dir: str = ""data/coin",python,selection_mouse +4631,6831982,"generate_dataset.py",442,37,"0000\n output_dir: str = ""data/coin",python,selection_mouse +4632,6832040,"generate_dataset.py",441,38,"10000\n output_dir: str = ""data/coin",python,selection_mouse +4633,6832041,"generate_dataset.py",440,39," 10000\n output_dir: str = ""data/coin",python,selection_mouse +4634,6832104,"generate_dataset.py",439,40,"= 10000\n output_dir: str = ""data/coin",python,selection_mouse +4635,6832129,"generate_dataset.py",438,41," = 10000\n output_dir: str = ""data/coin",python,selection_mouse +4636,6832299,"generate_dataset.py",439,40,"= 10000\n output_dir: str = ""data/coin",python,selection_mouse +4637,6832315,"generate_dataset.py",469,10,"""data/coin",python,selection_mouse +4638,6832370,"generate_dataset.py",470,9,"data/coin",python,selection_mouse +4639,6832498,"generate_dataset.py",471,8,"ata/coin",python,selection_mouse +4640,6832950,"generate_dataset.py",471,0,"",python,selection_mouse +4641,6833056,"generate_dataset.py",470,4,"data",python,selection_mouse +4642,6833268,"generate_dataset.py",470,5,"data/",python,selection_mouse +4643,6833283,"generate_dataset.py",470,21,"data/coinrun_episodes",python,selection_mouse +4644,6833715,"generate_dataset.py",482,0,"",python,selection_mouse +4645,6836998,"TERMINAL",0,0,"bash",,terminal_focus +4646,6839794,"TERMINAL",0,0,"git branch",,terminal_command +4647,6839817,"TERMINAL",0,0,"]633;E;2025-09-04 11:50:56 git branch;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C[?1h=\r add-wandb-name-and-tags\r\n before-nnx\r\n causal-mem-reduce\r\n causal-spatiotemporal-kv-cache\r\n causal-st-transformer\r\n causal-transformer-dynamics-model\r\n causal-transformer-nnx-no-kv-cache\r\n coinrun-data-generation\r\n convert-to-jax-array-in-iter\r\n correct-batched-sampling\r\n dev\r\n dont-let-tf-see-gpu\r\n feat/darkness-filter\r\n 
feat/explicit-image-dims\r\n fix-action-padding-lam-future-information-access\r\n fix-sampling\r\n fix-transformer-forwardpass\r\n fix/spatiotemporal-pe-once-in-STTransformer\r\n grad-norm-log-and-clip\r\n grain-dataloader\r\n* input_pipeline/add-npy2array_record\r\n logging-variants\r\n lr-schedules\r\n main\r\n maskgit-different-maskprob-per-sample\r\n maskgit-sampling-iterative-unmasking-fix\r\n metrics-logging-for-dynamics-model\r\n monkey-patch\r\n new-arch-sampling\r\n preprocess_video\r\n refactor-tmp\r\n revised-dataloader\r\n runner\r\n runner-grain\r\n sample-ali-branch\r\n sample-from-different-topologies\r\n sampling-startframe-indexing-fix\r\n speedup-tfrecord-preprocessing\r\n tmp\r\n\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +4648,6851118,"TERMINAL",0,0,"git branch -d coinrun-data-generation",,terminal_command +4649,6851172,"TERMINAL",0,0,"]633;E;2025-09-04 11:51:08 git branch -d coinrun-data-generation;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;Cerror: the branch 'coinrun-data-generation' is not fully merged.\r\nIf you are sure you want to delete it, run 'git branch -D coinrun-data-generation'\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;1",,terminal_output +4650,6856775,"TERMINAL",0,0,"git branch -D coinrun-data-generation",,terminal_command +4651,6856825,"TERMINAL",0,0,"]633;E;2025-09-04 11:51:14 git branch -D coinrun-data-generation;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +4652,6856910,"TERMINAL",0,0,"Deleted branch coinrun-data-generation (was bbef694).\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +4653,6890843,"TERMINAL",0,0,"git checkout -b ""coinrun-gt-actions""",,terminal_command +4654,6890962,"",0,0,"Switched from branch 'input_pipeline/add-npy2array_record' to 'coinrun-gt-actions'",,git_branch_checkout +4655,6890966,"TERMINAL",0,0,"]633;E;2025-09-04 11:51:48 git checkout -b ""coinrun-gt-actions"";86057cda-56ec-4e2d-8be5-07b868da4eb8]633;CSwitched to a new branch 'coinrun-gt-actions'\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +4656,6909206,"generate_dataset.py",0,0,"""""""\nGenerates a dataset of random-action CoinRun episodes.\nEpisodes are saved individually as memory-mapped files for efficient loading.\n""""""\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nfrom gym3 import types_np\nimport numpy as np\nfrom procgen import ProcgenGym3Env\nimport tyro\nimport pickle\nimport json\nfrom array_record.python.array_record_module import ArrayRecordWriter \n\n\n\n@dataclass\nclass Args:\n num_episodes: int = 10000\n output_dir: str = ""data/coinrun_episodes""\n min_episode_length: int = 50\n\n\nargs = tyro.cli(Args)\noutput_dir = Path(args.output_dir)\noutput_dir.mkdir(parents=True, exist_ok=True)\n\n# --- Generate episodes ---\ni = 0\nepisode_metadata = []\nwhile i < args.num_episodes:\n seed = np.random.randint(0, 10000)\n env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed)\n observations_seq = []\n\n # --- Run episode ---\n for j in range(1000):\n env.act(types_np.sample(env.ac_space, bshape=(env.num,)))\n rew, obs, first = env.observe()\n observations_seq.append(obs[""rgb""])\n if first:\n break\n\n # --- Save episode ---\n if len(observations_seq) >= args.min_episode_length:\n observations_data = np.concatenate(observations_seq, axis=0)\n episode_path = output_dir / f""episode_{i}.array_record"" \n\n # --- Save as ArrayRecord ---\n writer = ArrayRecordWriter(str(episode_path), ""group_size:1"")\n record = {""raw_video"": 
observations_data.tobytes(), ""sequence_length"": len(observations_seq)}\n writer.write(pickle.dumps(record))\n writer.close()\n\n episode_metadata.append({""path"": str(episode_path), ""length"": len(observations_seq)})\n print(f""Episode {i} completed, length: {len(observations_seq)}"")\n i += 1\n else:\n print(f""Episode too short ({len(observations_seq)}), resampling..."")\n\n# --- Save metadata ---\nmetadata = {\n ""env"": ""coinrun"",\n ""num_episodes"": args.num_episodes,\n ""avg_episode_len"": np.mean([ep[""length""] for ep in episode_metadata]),\n ""episode_metadata"": episode_metadata,\n}\nwith open(output_dir / ""meta_data.json"", ""w"") as f:\n json.dump(metadata, f)\n\nprint(f""Dataset generated with {len(episode_metadata)} valid episodes"")\n",python,tab +4657,6921078,"generate_dataset.py",925,0,"",python,selection_mouse +4658,6922271,"generate_dataset.py",924,0,"",python,selection_command +4659,6922437,"generate_dataset.py",923,0,"",python,selection_command +4660,6922586,"generate_dataset.py",922,0,"",python,selection_command +4661,6922730,"generate_dataset.py",921,0,"",python,selection_command +4662,6924858,"generate_dataset.py",904,0,"",python,selection_mouse +4663,6925129,"generate_dataset.py",904,0,"\n ",python,content +4664,6925351,"generate_dataset.py",913,0,"a",python,content +4665,6925352,"generate_dataset.py",914,0,"",python,selection_keyboard +4666,6925470,"generate_dataset.py",914,0,"c",python,content +4667,6925472,"generate_dataset.py",915,0,"",python,selection_keyboard +4668,6925773,"generate_dataset.py",915,0,"t",python,content +4669,6925776,"generate_dataset.py",916,0,"",python,selection_keyboard +4670,6925951,"generate_dataset.py",916,0,"i",python,content +4671,6925952,"generate_dataset.py",917,0,"",python,selection_keyboard +4672,6926199,"generate_dataset.py",917,0,"n",python,content +4673,6926200,"generate_dataset.py",918,0,"",python,selection_keyboard +4674,6926571,"generate_dataset.py",917,1,"",python,content +4675,6926773,"generate_dataset.py",917,0,"o",python,content +4676,6926774,"generate_dataset.py",918,0,"",python,selection_keyboard +4677,6926888,"generate_dataset.py",918,0,"n",python,content +4678,6926889,"generate_dataset.py",919,0,"",python,selection_keyboard +4679,6927350,"generate_dataset.py",919,0," ",python,content +4680,6927351,"generate_dataset.py",920,0,"",python,selection_keyboard +4681,6927518,"generate_dataset.py",920,0,"=",python,content +4682,6927519,"generate_dataset.py",921,0,"",python,selection_keyboard +4683,6927603,"generate_dataset.py",921,0," ",python,content +4684,6927604,"generate_dataset.py",922,0,"",python,selection_keyboard +4685,6927889,"generate_dataset.py",921,0,"",python,selection_command +4686,6928032,"generate_dataset.py",939,0,"",python,selection_command +4687,6929671,"generate_dataset.py",939,49,"",python,content +4688,6929727,"generate_dataset.py",938,0,"",python,selection_command +4689,6930070,"generate_dataset.py",921,0,"",python,selection_command +4690,6930495,"generate_dataset.py",922,0,"types_np.sample(env.ac_space, bshape=(env.num,)))",python,content +4691,6930528,"generate_dataset.py",970,0,"",python,selection_command +4692,6931634,"generate_dataset.py",971,0,"",python,selection_command +4693,6931757,"generate_dataset.py",970,1,"",python,content +4694,6931997,"generate_dataset.py",969,0,"",python,selection_command +4695,6932380,"generate_dataset.py",986,0,"",python,selection_command +4696,6933217,"generate_dataset.py",987,0,"",python,selection_command 
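Since generate_dataset.py parses Args with tyro.cli, the defaults shown above can be overridden on the command line; tyro exposes dataclass fields as kebab-case flags. An illustrative invocation:

    python generate_dataset.py --num-episodes 100 --output-dir data/coinrun_debug --min-episode-length 50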
+4697,6933695,"generate_dataset.py",987,0,"a",python,content +4698,6933696,"generate_dataset.py",988,0,"",python,selection_keyboard +4699,6933950,"generate_dataset.py",988,0,"c",python,content +4700,6933951,"generate_dataset.py",989,0,"",python,selection_keyboard +4701,6935168,"generate_dataset.py",987,2,"action",python,content +4702,6935989,"generate_dataset.py",993,0,")",python,content +4703,6935990,"generate_dataset.py",994,0,"",python,selection_keyboard +4704,6942417,"generate_dataset.py",936,0,"",python,selection_mouse +4705,6942949,"generate_dataset.py",941,0,"",python,selection_mouse +4706,6943115,"generate_dataset.py",938,3,"env",python,selection_mouse +4707,6943269,"generate_dataset.py",938,4,"env.",python,selection_mouse +4708,6943288,"generate_dataset.py",938,12,"env.ac_space",python,selection_mouse +4709,6943743,"generate_dataset.py",947,0,"",python,selection_mouse +4710,6943744,"generate_dataset.py",942,8,"ac_space",python,selection_mouse +4711,6944034,"generate_dataset.py",941,9,".ac_space",python,selection_mouse +4712,6944492,"generate_dataset.py",941,0,"",python,selection_mouse +4713,6944493,"generate_dataset.py",938,3,"env",python,selection_mouse +4714,6945133,"generate_dataset.py",945,0,"",python,selection_mouse +4715,6945265,"generate_dataset.py",942,8,"ac_space",python,selection_mouse +4716,6945447,"generate_dataset.py",941,9,".ac_space",python,selection_mouse +4717,6945507,"generate_dataset.py",938,12,"env.ac_space",python,selection_mouse +4718,6947684,"generate_dataset.py",1941,0,"",python,selection_mouse +4719,6959383,"generate_dataset.py",832,0,"",python,selection_mouse +4720,6959948,"generate_dataset.py",762,0,"",python,selection_mouse +4721,6960082,"generate_dataset.py",760,3,"env",python,selection_mouse +4722,6960341,"generate_dataset.py",760,4,"env ",python,selection_mouse +4723,6960341,"generate_dataset.py",760,5,"env =",python,selection_mouse +4724,6960342,"generate_dataset.py",760,20,"env = ProcgenGym3Env",python,selection_mouse +4725,6960576,"generate_dataset.py",760,21,"env = ProcgenGym3Env(",python,selection_mouse +4726,6960603,"generate_dataset.py",760,24,"env = ProcgenGym3Env(num",python,selection_mouse +4727,6960681,"generate_dataset.py",760,26,"env = ProcgenGym3Env(num=1",python,selection_mouse +4728,6960778,"generate_dataset.py",760,27,"env = ProcgenGym3Env(num=1,",python,selection_mouse +4729,6960779,"generate_dataset.py",760,36,"env = ProcgenGym3Env(num=1, env_name",python,selection_mouse +4730,6960780,"generate_dataset.py",754,9,")\n env",python,selection_mouse +4731,6960781,"generate_dataset.py",755,8,"\n env",python,selection_mouse +4732,6960874,"generate_dataset.py",760,59,"env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level",python,selection_mouse +4733,6960875,"generate_dataset.py",760,64,"env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed",python,selection_mouse +4734,6960891,"generate_dataset.py",760,65,"env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed)",python,selection_mouse +4735,6960957,"generate_dataset.py",760,91,"env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed)\n observations_seq = []",python,selection_mouse +4736,6961326,"generate_dataset.py",760,65,"env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed)",python,selection_mouse +4737,6968877,"generate_dataset.py",1941,0,"",python,selection_mouse +4738,6970586,"generate_dataset.py",1906,0,"",python,selection_mouse +4739,6971568,"generate_dataset.py",1906,0,"\n",python,content 
+4740,6972653,"generate_dataset.py",1907,0,"env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed)",python,content +4741,6974325,"generate_dataset.py",1908,0,"",python,selection_mouse +4742,6976289,"generate_dataset.py",1907,0,"",python,selection_mouse +4743,6978124,"generate_dataset.py",1904,0,"",python,selection_mouse +4744,6978584,"generate_dataset.py",1985,0,"",python,selection_mouse +4745,6980817,"generate_dataset.py",1932,0,"",python,selection_mouse +4746,6981157,"generate_dataset.py",1931,0,"",python,selection_command +4747,6981732,"generate_dataset.py",1907,66,"",python,content +4748,6982144,"generate_dataset.py",1883,0,"",python,selection_command +4749,6982347,"generate_dataset.py",1907,0,"",python,selection_command +4750,6982548,"generate_dataset.py",1920,0,"",python,selection_command +4751,6982690,"generate_dataset.py",1942,0,"",python,selection_command +4752,6983106,"generate_dataset.py",1920,0,"",python,selection_command +4753,6983295,"generate_dataset.py",1942,0,"",python,selection_command +4754,6983855,"generate_dataset.py",1920,0,"",python,selection_command +4755,6984160,"generate_dataset.py",1941,0,"\n ",python,content +4756,6985028,"generate_dataset.py",1946,0,"""""",python,content +4757,6985031,"generate_dataset.py",1947,0,"",python,selection_keyboard +4758,6985263,"generate_dataset.py",1947,0,"a",python,content +4759,6985264,"generate_dataset.py",1948,0,"",python,selection_keyboard +4760,6985405,"generate_dataset.py",1948,0,"c",python,content +4761,6985406,"generate_dataset.py",1949,0,"",python,selection_keyboard +4762,6985613,"generate_dataset.py",1949,0,"t",python,content +4763,6985614,"generate_dataset.py",1950,0,"",python,selection_keyboard +4764,6985716,"generate_dataset.py",1950,0,"i",python,content +4765,6985717,"generate_dataset.py",1951,0,"",python,selection_keyboard +4766,6985828,"generate_dataset.py",1951,0,"o",python,content +4767,6985828,"generate_dataset.py",1952,0,"",python,selection_keyboard +4768,6985955,"generate_dataset.py",1952,0,"n",python,content +4769,6985956,"generate_dataset.py",1953,0,"",python,selection_keyboard +4770,6986440,"generate_dataset.py",1953,0,"_",python,content +4771,6986441,"generate_dataset.py",1954,0,"",python,selection_keyboard +4772,6986696,"generate_dataset.py",1954,0,"s",python,content +4773,6986697,"generate_dataset.py",1955,0,"",python,selection_keyboard +4774,6986794,"generate_dataset.py",1955,0,"p",python,content +4775,6986795,"generate_dataset.py",1956,0,"",python,selection_keyboard +4776,6986943,"generate_dataset.py",1956,0,"a",python,content +4777,6986944,"generate_dataset.py",1957,0,"",python,selection_keyboard +4778,6987019,"generate_dataset.py",1957,0,"c",python,content +4779,6987020,"generate_dataset.py",1958,0,"",python,selection_keyboard +4780,6987183,"generate_dataset.py",1958,0,"e",python,content +4781,6987184,"generate_dataset.py",1959,0,"",python,selection_keyboard +4782,6987566,"generate_dataset.py",1960,0,"",python,selection_command +4783,6987968,"generate_dataset.py",1960,0,":",python,content +4784,6987969,"generate_dataset.py",1961,0,"",python,selection_keyboard +4785,6988211,"generate_dataset.py",1961,0," ",python,content +4786,6988212,"generate_dataset.py",1962,0,"",python,selection_keyboard +4787,6988675,"generate_dataset.py",1962,0,"e",python,content +4788,6988676,"generate_dataset.py",1963,0,"",python,selection_keyboard +4789,6988780,"generate_dataset.py",1963,0,"n",python,content +4790,6988781,"generate_dataset.py",1964,0,"",python,selection_keyboard 
+4791,6988902,"generate_dataset.py",1964,0,"v",python,content +4792,6988903,"generate_dataset.py",1965,0,"",python,selection_keyboard +4793,6989048,"generate_dataset.py",1965,0,".",python,content +4794,6989049,"generate_dataset.py",1966,0,"",python,selection_keyboard +4795,6990389,"generate_dataset.py",1966,0,"ac_space",python,content +4796,6991137,"generate_dataset.py",1974,0,",",python,content +4797,6991138,"generate_dataset.py",1975,0,"",python,selection_keyboard +4798,6991513,"generate_dataset.py",1974,0,"",python,selection_command +4799,6995286,"generate_dataset.py",761,0,"",python,selection_mouse +4800,6995455,"generate_dataset.py",760,3,"env",python,selection_mouse +4801,7022338,"generate_dataset.py",1285,0,"",python,selection_mouse +4802,7023272,"generate_dataset.py",1115,0,"",python,selection_mouse +4803,7024025,"generate_dataset.py",986,0,"",python,selection_mouse +4804,7024492,"generate_dataset.py",989,0,"",python,selection_mouse +4805,7024630,"generate_dataset.py",987,6,"action",python,selection_mouse +4806,7025575,"generate_dataset.py",1052,0,"",python,selection_mouse +4807,7025738,"generate_dataset.py",1043,16,"observations_seq",python,selection_mouse +4808,7026494,"generate_dataset.py",842,0,"",python,selection_mouse +4809,7027468,"generate_dataset.py",687,0,"",python,selection_mouse +4810,7027470,"generate_dataset.py",686,0,"",python,selection_command +4811,7029398,"generate_dataset.py",708,0,"",python,selection_command +4812,7029559,"generate_dataset.py",737,0,"",python,selection_command +4813,7029726,"generate_dataset.py",776,0,"",python,selection_command +4814,7029881,"generate_dataset.py",846,0,"",python,selection_command +4815,7030155,"generate_dataset.py",851,0,"\n ",python,content +4816,7030608,"generate_dataset.py",856,0,"a",python,content +4817,7030609,"generate_dataset.py",857,0,"",python,selection_keyboard +4818,7031390,"generate_dataset.py",857,0,"c",python,content +4819,7031391,"generate_dataset.py",858,0,"",python,selection_keyboard +4820,7031866,"generate_dataset.py",856,2,"action",python,content +4821,7032468,"generate_dataset.py",862,0,"_",python,content +4822,7032469,"generate_dataset.py",863,0,"",python,selection_keyboard +4823,7033544,"generate_dataset.py",863,0,"s",python,content +4824,7033545,"generate_dataset.py",864,0,"",python,selection_keyboard +4825,7033937,"generate_dataset.py",864,0,"e",python,content +4826,7033938,"generate_dataset.py",865,0,"",python,selection_keyboard +4827,7035163,"generate_dataset.py",865,0,"q",python,content +4828,7035164,"generate_dataset.py",866,0,"",python,selection_keyboard +4829,7035402,"generate_dataset.py",866,0," ",python,content +4830,7035403,"generate_dataset.py",867,0,"",python,selection_keyboard +4831,7035600,"generate_dataset.py",867,0,"=",python,content +4832,7035600,"generate_dataset.py",868,0,"",python,selection_keyboard +4833,7035776,"generate_dataset.py",868,0," ",python,content +4834,7035777,"generate_dataset.py",869,0,"",python,selection_keyboard +4835,7036040,"generate_dataset.py",869,0,"[]",python,content +4836,7036040,"generate_dataset.py",870,0,"",python,selection_keyboard +4837,7037061,"generate_dataset.py",869,0,"",python,selection_command +4838,7037215,"generate_dataset.py",868,0,"",python,selection_command +4839,7037377,"generate_dataset.py",867,0,"",python,selection_command +4840,7037531,"generate_dataset.py",866,0,"",python,selection_command +4841,7037672,"generate_dataset.py",865,0,"",python,selection_command +4842,7037822,"generate_dataset.py",864,0,"",python,selection_command 
+4843,7037966,"generate_dataset.py",863,0,"",python,selection_command +4844,7038104,"generate_dataset.py",862,0,"",python,selection_command +4845,7038261,"generate_dataset.py",862,0,"s",python,content +4846,7038263,"generate_dataset.py",863,0,"",python,selection_keyboard +4847,7039368,"generate_dataset.py",873,0,"",python,selection_mouse +4848,7039818,"generate_dataset.py",863,0,"",python,selection_mouse +4849,7040793,"generate_dataset.py",862,0,"",python,selection_command +4850,7041707,"generate_dataset.py",873,0,"",python,selection_command +4851,7041811,"generate_dataset.py",884,0,"",python,selection_command +4852,7041942,"generate_dataset.py",910,0,"",python,selection_command +4853,7042106,"generate_dataset.py",936,0,"",python,selection_command +4854,7042984,"generate_dataset.py",1002,0,"",python,selection_command +4855,7043142,"generate_dataset.py",1026,0,"",python,selection_command +4856,7043329,"generate_dataset.py",1066,0,"",python,selection_command +4857,7043647,"generate_dataset.py",1099,0,"\n ",python,content +4858,7044859,"generate_dataset.py",1108,0,"a",python,content +4859,7044860,"generate_dataset.py",1109,0,"",python,selection_keyboard +4860,7045051,"generate_dataset.py",1109,0,"c",python,content +4861,7045052,"generate_dataset.py",1110,0,"",python,selection_keyboard +4862,7045955,"generate_dataset.py",1108,2,"actions_seq",python,content +4863,7046581,"generate_dataset.py",1119,0,".",python,content +4864,7046582,"generate_dataset.py",1120,0,"",python,selection_keyboard +4865,7046960,"generate_dataset.py",1120,0,"a",python,content +4866,7046961,"generate_dataset.py",1121,0,"",python,selection_keyboard +4867,7047046,"generate_dataset.py",1121,0,"p",python,content +4868,7047046,"generate_dataset.py",1122,0,"",python,selection_keyboard +4869,7047444,"generate_dataset.py",1120,2,"append",python,content +4870,7048384,"generate_dataset.py",1126,0,"()",python,content +4871,7048385,"generate_dataset.py",1127,0,"",python,selection_keyboard +4872,7049414,"generate_dataset.py",1127,0,"a",python,content +4873,7049415,"generate_dataset.py",1128,0,"",python,selection_keyboard +4874,7049593,"generate_dataset.py",1128,0,"c",python,content +4875,7049594,"generate_dataset.py",1129,0,"",python,selection_keyboard +4876,7050439,"generate_dataset.py",1127,2,"action",python,content +4877,7051926,"generate_dataset.py",1128,0,"",python,selection_mouse +4878,7052479,"generate_dataset.py",1134,0,"",python,selection_mouse +4879,7056871,"generate_dataset.py",1131,0,"",python,selection_mouse +4880,7057481,"generate_dataset.py",1084,0,"",python,selection_mouse +4881,7057632,"generate_dataset.py",1081,6,"append",python,selection_mouse +4882,7058174,"generate_dataset.py",1129,0,"",python,selection_mouse +4883,7058333,"generate_dataset.py",1127,6,"action",python,selection_mouse +4884,7125817,"generate_dataset.py",1152,0,"",python,selection_mouse +4885,7126363,"generate_dataset.py",1171,0,"",python,selection_mouse +4886,7127425,"generate_dataset.py",1172,0,"",python,selection_command +4887,7127610,"generate_dataset.py",1199,0,"",python,selection_command +4888,7127904,"generate_dataset.py",1256,0,"",python,selection_command +4889,7128062,"generate_dataset.py",1325,0,"",python,selection_command +4890,7128466,"generate_dataset.py",1256,0,"",python,selection_command +4891,7128637,"generate_dataset.py",1199,0,"",python,selection_command +4892,7133376,"generate_dataset.py",1256,0,"",python,selection_command +4893,7133575,"generate_dataset.py",1325,0,"",python,selection_command 
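Records 4663-4876 thread the sampled action through the episode loop so it can be saved alongside the frames: the sampling is bound to a name (action = types_np.sample(...)), an actions_seq = [] buffer is added next to observations_seq, each step appends action, and the metadata dict gains an "action_space": env.ac_space entry. Reconstructed from those insertions, the loop now plausibly reads:

    observations_seq = []
    actions_seq = []

    # --- Run episode ---
    for j in range(1000):
        action = types_np.sample(env.ac_space, bshape=(env.num,))
        env.act(action)
        rew, obs, first = env.observe()
        observations_seq.append(obs["rgb"])
        actions_seq.append(action)
        if first:
            break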
+4894,7133760,"generate_dataset.py",1391,0,"",python,selection_command +4895,7133885,"generate_dataset.py",1325,0,"",python,selection_command +4896,7134062,"generate_dataset.py",1256,0,"",python,selection_command +4897,7134442,"generate_dataset.py",1199,0,"",python,selection_command +4898,7134677,"generate_dataset.py",1256,0,"",python,selection_command +4899,7134868,"generate_dataset.py",1325,0,"",python,selection_command +4900,7135119,"generate_dataset.py",1256,0,"",python,selection_command +4901,7135302,"generate_dataset.py",1199,0,"",python,selection_command +4902,7135454,"generate_dataset.py",1256,0,"",python,selection_command +4903,7135639,"generate_dataset.py",1325,0,"",python,selection_command +4904,7136171,"generate_dataset.py",1256,0,"",python,selection_command +4905,7136303,"generate_dataset.py",1325,0,"",python,selection_command +4906,7136415,"generate_dataset.py",1256,0,"",python,selection_command +4907,7136505,"generate_dataset.py",1325,0,"",python,selection_command +4908,7136667,"generate_dataset.py",1391,0,"",python,selection_command +4909,7136756,"generate_dataset.py",1325,0,"",python,selection_command +4910,7136872,"generate_dataset.py",1391,0,"",python,selection_command +4911,7136983,"generate_dataset.py",1325,0,"",python,selection_command +4912,7137062,"generate_dataset.py",1391,0,"",python,selection_command +4913,7137256,"generate_dataset.py",1325,0,"",python,selection_command +4914,7137256,"generate_dataset.py",1391,0,"",python,selection_command +4915,7137413,"generate_dataset.py",1325,0,"",python,selection_command +4916,7137475,"generate_dataset.py",1391,0,"",python,selection_command +4917,7137610,"generate_dataset.py",1325,0,"",python,selection_command +4918,7137712,"generate_dataset.py",1391,0,"",python,selection_command +4919,7137811,"generate_dataset.py",1325,0,"",python,selection_command +4920,7137909,"generate_dataset.py",1391,0,"",python,selection_command +4921,7138020,"generate_dataset.py",1325,0,"",python,selection_command +4922,7138106,"generate_dataset.py",1391,0,"",python,selection_command +4923,7138210,"generate_dataset.py",1325,0,"",python,selection_command +4924,7138297,"generate_dataset.py",1391,0,"",python,selection_command +4925,7138403,"generate_dataset.py",1325,0,"",python,selection_command +4926,7138481,"generate_dataset.py",1391,0,"",python,selection_command +4927,7138585,"generate_dataset.py",1325,0,"",python,selection_command +4928,7138683,"generate_dataset.py",1391,0,"",python,selection_command +4929,7138795,"generate_dataset.py",1325,0,"",python,selection_command +4930,7138876,"generate_dataset.py",1391,0,"",python,selection_command +4931,7139004,"generate_dataset.py",1325,0,"",python,selection_command +4932,7139107,"generate_dataset.py",1391,0,"",python,selection_command +4933,7139195,"generate_dataset.py",1325,0,"",python,selection_command +4934,7139293,"generate_dataset.py",1391,0,"",python,selection_command +4935,7139405,"generate_dataset.py",1325,0,"",python,selection_command +4936,7139504,"generate_dataset.py",1391,0,"",python,selection_command +4937,7139603,"generate_dataset.py",1325,0,"",python,selection_command +4938,7139676,"generate_dataset.py",1391,0,"",python,selection_command +4939,7139814,"generate_dataset.py",1325,0,"",python,selection_command +4940,7139906,"generate_dataset.py",1391,0,"",python,selection_command +4941,7140004,"generate_dataset.py",1325,0,"",python,selection_command +4942,7140101,"generate_dataset.py",1391,0,"",python,selection_command +4943,7140199,"generate_dataset.py",1325,0,"",python,selection_command 
+4944,7140293,"generate_dataset.py",1391,0,"",python,selection_command +4945,7140406,"generate_dataset.py",1325,0,"",python,selection_command +4946,7140507,"generate_dataset.py",1391,0,"",python,selection_command +4947,7140593,"generate_dataset.py",1325,0,"",python,selection_command +4948,7140786,"generate_dataset.py",1256,0,"",python,selection_command +4949,7141022,"generate_dataset.py",1199,0,"",python,selection_command +4950,7141146,"generate_dataset.py",1256,0,"",python,selection_command +4951,7141281,"generate_dataset.py",1199,0,"",python,selection_command +4952,7141370,"generate_dataset.py",1256,0,"",python,selection_command +4953,7141531,"generate_dataset.py",1325,0,"",python,selection_command +4954,7141849,"generate_dataset.py",1256,0,"",python,selection_command +4955,7142055,"generate_dataset.py",1199,0,"",python,selection_command +4956,7142189,"generate_dataset.py",1256,0,"",python,selection_command +4957,7142305,"generate_dataset.py",1199,0,"",python,selection_command +4958,7142416,"generate_dataset.py",1256,0,"",python,selection_command +4959,7142631,"generate_dataset.py",1325,0,"",python,selection_command +4960,7142821,"generate_dataset.py",1256,0,"",python,selection_command +4961,7142931,"generate_dataset.py",1325,0,"",python,selection_command +4962,7143056,"generate_dataset.py",1256,0,"",python,selection_command +4963,7143153,"generate_dataset.py",1325,0,"",python,selection_command +4964,7143281,"generate_dataset.py",1256,0,"",python,selection_command +4965,7143349,"generate_dataset.py",1325,0,"",python,selection_command +4966,7143524,"generate_dataset.py",1256,0,"",python,selection_command +4967,7143581,"generate_dataset.py",1325,0,"",python,selection_command +4968,7143698,"generate_dataset.py",1256,0,"",python,selection_command +4969,7143797,"generate_dataset.py",1325,0,"",python,selection_command +4970,7143919,"generate_dataset.py",1256,0,"",python,selection_command +4971,7144041,"generate_dataset.py",1325,0,"",python,selection_command +4972,7144142,"generate_dataset.py",1256,0,"",python,selection_command +4973,7144239,"generate_dataset.py",1325,0,"",python,selection_command +4974,7144369,"generate_dataset.py",1256,0,"",python,selection_command +4975,7144498,"generate_dataset.py",1325,0,"",python,selection_command +4976,7144607,"generate_dataset.py",1256,0,"",python,selection_command +4977,7144712,"generate_dataset.py",1325,0,"",python,selection_command +4978,7144815,"generate_dataset.py",1256,0,"",python,selection_command +4979,7144935,"generate_dataset.py",1325,0,"",python,selection_command +4980,7145046,"generate_dataset.py",1256,0,"",python,selection_command +4981,7145159,"generate_dataset.py",1325,0,"",python,selection_command +4982,7145288,"generate_dataset.py",1256,0,"",python,selection_command +4983,7145369,"generate_dataset.py",1325,0,"",python,selection_command +4984,7145487,"generate_dataset.py",1256,0,"",python,selection_command +4985,7145590,"generate_dataset.py",1325,0,"",python,selection_command +4986,7145721,"generate_dataset.py",1256,0,"",python,selection_command +4987,7145838,"generate_dataset.py",1325,0,"",python,selection_command +4988,7145935,"generate_dataset.py",1256,0,"",python,selection_command +4989,7146041,"generate_dataset.py",1325,0,"",python,selection_command +4990,7146146,"generate_dataset.py",1256,0,"",python,selection_command +4991,7146242,"generate_dataset.py",1325,0,"",python,selection_command +4992,7146364,"generate_dataset.py",1256,0,"",python,selection_command +4993,7146468,"generate_dataset.py",1325,0,"",python,selection_command 
+4994,7146619,"generate_dataset.py",1256,0,"",python,selection_command +4995,7146840,"generate_dataset.py",1325,0,"",python,selection_command +4996,7147290,"generate_dataset.py",1256,0,"",python,selection_command +4997,7147387,"generate_dataset.py",1325,0,"",python,selection_command +4998,7148835,"generate_dataset.py",1007,0,"",python,selection_mouse +4999,7149303,"generate_dataset.py",1012,0,"",python,selection_mouse +5000,7149457,"generate_dataset.py",1008,6,"action",python,selection_mouse +5001,7150513,"generate_dataset.py",1122,0,"",python,selection_mouse +5002,7150655,"generate_dataset.py",1120,6,"append",python,selection_mouse +5003,7151222,"generate_dataset.py",1115,0,"",python,selection_mouse +5004,7151357,"generate_dataset.py",1108,11,"actions_seq",python,selection_mouse +5005,7152257,"generate_dataset.py",1073,0,"",python,selection_mouse +5006,7152387,"generate_dataset.py",1064,16,"observations_seq",python,selection_mouse +5007,7160713,"generate_dataset.py",1303,0,"",python,selection_mouse +5008,7161945,"generate_dataset.py",1324,0,"\n ",python,content +5009,7162223,"generate_dataset.py",1333,0,"a",python,content +5010,7162224,"generate_dataset.py",1334,0,"",python,selection_keyboard +5011,7162611,"generate_dataset.py",1334,0,"c",python,content +5012,7162612,"generate_dataset.py",1335,0,"",python,selection_keyboard +5013,7163093,"generate_dataset.py",1335,0,"t",python,content +5014,7163094,"generate_dataset.py",1336,0,"",python,selection_keyboard +5015,7163204,"generate_dataset.py",1336,0,"i",python,content +5016,7163205,"generate_dataset.py",1337,0,"",python,selection_keyboard +5017,7163312,"generate_dataset.py",1337,0,"o",python,content +5018,7163313,"generate_dataset.py",1338,0,"",python,selection_keyboard +5019,7163448,"generate_dataset.py",1338,0,"n",python,content +5020,7163449,"generate_dataset.py",1339,0,"",python,selection_keyboard +5021,7163823,"generate_dataset.py",1339,0,"_",python,content +5022,7163824,"generate_dataset.py",1340,0,"",python,selection_keyboard +5023,7164209,"generate_dataset.py",1340,0,"d",python,content +5024,7164210,"generate_dataset.py",1341,0,"",python,selection_keyboard +5025,7164400,"generate_dataset.py",1341,0,"a",python,content +5026,7164401,"generate_dataset.py",1342,0,"",python,selection_keyboard +5027,7164607,"generate_dataset.py",1342,0,"t",python,content +5028,7164608,"generate_dataset.py",1343,0,"",python,selection_keyboard +5029,7164817,"generate_dataset.py",1343,0,"a",python,content +5030,7164818,"generate_dataset.py",1344,0,"",python,selection_keyboard +5031,7165262,"generate_dataset.py",1344,0," ",python,content +5032,7165263,"generate_dataset.py",1345,0,"",python,selection_keyboard +5033,7165421,"generate_dataset.py",1345,0,"=",python,content +5034,7165422,"generate_dataset.py",1346,0,"",python,selection_keyboard +5035,7165533,"generate_dataset.py",1346,0," ",python,content +5036,7165534,"generate_dataset.py",1347,0,"",python,selection_keyboard +5037,7165778,"generate_dataset.py",1347,0,"n",python,content +5038,7165779,"generate_dataset.py",1348,0,"",python,selection_keyboard +5039,7165936,"generate_dataset.py",1348,0,"p",python,content +5040,7165937,"generate_dataset.py",1349,0,"",python,selection_keyboard +5041,7166142,"generate_dataset.py",1349,0,".",python,content +5042,7166143,"generate_dataset.py",1350,0,"",python,selection_keyboard +5043,7166718,"generate_dataset.py",1350,0,"c",python,content +5044,7166720,"generate_dataset.py",1351,0,"",python,selection_keyboard +5045,7166904,"generate_dataset.py",1351,0,"o",python,content 
+5046,7166905,"generate_dataset.py",1352,0,"",python,selection_keyboard +5047,7167619,"generate_dataset.py",1352,0,"n",python,content +5048,7167620,"generate_dataset.py",1353,0,"",python,selection_keyboard +5049,7167905,"generate_dataset.py",1353,0,"c",python,content +5050,7167906,"generate_dataset.py",1354,0,"",python,selection_keyboard +5051,7168416,"generate_dataset.py",1350,4,"concatenate",python,content +5052,7169311,"generate_dataset.py",1361,0,"()",python,content +5053,7169312,"generate_dataset.py",1362,0,"",python,selection_keyboard +5054,7169702,"generate_dataset.py",1362,0,"e",python,content +5055,7169702,"generate_dataset.py",1363,0,"",python,selection_keyboard +5056,7169996,"generate_dataset.py",1362,1,"",python,content +5057,7170101,"generate_dataset.py",1362,0,"a",python,content +5058,7170102,"generate_dataset.py",1363,0,"",python,selection_keyboard +5059,7170180,"generate_dataset.py",1363,0,"c",python,content +5060,7170181,"generate_dataset.py",1364,0,"",python,selection_keyboard +5061,7170419,"generate_dataset.py",1364,0,"t",python,content +5062,7170420,"generate_dataset.py",1365,0,"",python,selection_keyboard +5063,7170501,"generate_dataset.py",1365,0,"i",python,content +5064,7170502,"generate_dataset.py",1366,0,"",python,selection_keyboard +5065,7170606,"generate_dataset.py",1366,0,"o",python,content +5066,7170607,"generate_dataset.py",1367,0,"",python,selection_keyboard +5067,7170726,"generate_dataset.py",1367,0,"n",python,content +5068,7170727,"generate_dataset.py",1368,0,"",python,selection_keyboard +5069,7170993,"generate_dataset.py",1362,6,"action",python,content +5070,7172518,"generate_dataset.py",1368,0,"s",python,content +5071,7172519,"generate_dataset.py",1369,0,"",python,selection_keyboard +5072,7173164,"generate_dataset.py",1362,7,"actions_seq",python,content +5073,7174108,"generate_dataset.py",1373,0,",",python,content +5074,7174109,"generate_dataset.py",1374,0,"",python,selection_keyboard +5075,7174650,"generate_dataset.py",1374,0,"a",python,content +5076,7174651,"generate_dataset.py",1375,0,"",python,selection_keyboard +5077,7175512,"generate_dataset.py",1374,1,"",python,content +5078,7175905,"generate_dataset.py",1374,0," ",python,content +5079,7175906,"generate_dataset.py",1375,0,"",python,selection_keyboard +5080,7176109,"generate_dataset.py",1375,0,"a",python,content +5081,7176110,"generate_dataset.py",1376,0,"",python,selection_keyboard +5082,7176292,"generate_dataset.py",1376,0,"c",python,content +5083,7176293,"generate_dataset.py",1377,0,"",python,selection_keyboard +5084,7177545,"generate_dataset.py",1376,1,"",python,content +5085,7177695,"generate_dataset.py",1376,0,"x",python,content +5086,7177695,"generate_dataset.py",1377,0,"",python,selection_keyboard +5087,7178185,"generate_dataset.py",1375,2,"axis=",python,content +5088,7178620,"generate_dataset.py",1380,0,"0",python,content +5089,7178620,"generate_dataset.py",1381,0,"",python,selection_keyboard +5090,7178988,"generate_dataset.py",1380,0,"",python,selection_command +5091,7181802,"generate_dataset.py",1397,0,"",python,selection_mouse +5092,7181927,"generate_dataset.py",1391,12,"episode_path",python,selection_mouse +5093,7183763,"generate_dataset.py",1449,0,"",python,selection_mouse +5094,7184378,"generate_dataset.py",1411,0,"",python,selection_mouse +5095,7185211,"generate_dataset.py",1413,0,"",python,selection_mouse +5096,7185373,"generate_dataset.py",1406,10,"output_dir",python,selection_mouse +5097,7185975,"generate_dataset.py",1424,0,"",python,selection_mouse 
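The edit completed here builds `action_data = np.concatenate(actions_seq, axis=0)`, flattening the per-step list into one array for saving. A small runnable illustration of that step, with assumed shapes:

```python
import numpy as np

# Per-step action arrays of shape (1,), as collected in the loop (assumed shapes).
actions_seq = [np.random.randint(15, size=(1,), dtype=np.int64) for _ in range(10)]

# The edit in the recording: flatten the list into one array for saving.
action_data = np.concatenate(actions_seq, axis=0)
print(action_data.shape, action_data.dtype)  # (10,) int64
```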
+5098,7186125,"generate_dataset.py",1421,8,"episode_",python,selection_mouse +5099,7190488,"generate_dataset.py",1400,0,"",python,selection_mouse +5100,7190630,"generate_dataset.py",1391,12,"episode_path",python,selection_mouse +5101,7191329,"generate_dataset.py",1400,0,"",python,selection_mouse +5102,7191329,"generate_dataset.py",1391,12,"episode_path",python,selection_mouse +5103,7191868,"generate_dataset.py",1342,0,"",python,selection_mouse +5104,7192010,"generate_dataset.py",1333,11,"action_data",python,selection_mouse +5105,7193527,"generate_dataset.py",1602,0,"",python,selection_mouse +5106,7193665,"generate_dataset.py",1589,17,"observations_data",python,selection_mouse +5107,7194212,"generate_dataset.py",1702,0,"",python,selection_mouse +5108,7194231,"generate_dataset.py",1701,0,"",python,selection_command +5109,7194385,"generate_dataset.py",1702,0,"",python,selection_mouse +5110,7194397,"generate_dataset.py",1701,0,"",python,selection_command +5111,7194908,"generate_dataset.py",1614,0,"",python,selection_mouse +5112,7195073,"generate_dataset.py",1607,7,"tobytes",python,selection_mouse +5113,7195546,"generate_dataset.py",1617,0,"",python,selection_mouse +5114,7220173,"generate_dataset.py",1617,0," ",python,content +5115,7220176,"generate_dataset.py",1618,0,"",python,selection_keyboard +5116,7220427,"generate_dataset.py",1618,0,"a",python,content +5117,7220428,"generate_dataset.py",1619,0,"",python,selection_keyboard +5118,7220557,"generate_dataset.py",1619,0,"c",python,content +5119,7220559,"generate_dataset.py",1620,0,"",python,selection_keyboard +5120,7220992,"generate_dataset.py",1619,1,"",python,content +5121,7221131,"generate_dataset.py",1618,1,"",python,content +5122,7221422,"generate_dataset.py",1618,0,"""""",python,content +5123,7221423,"generate_dataset.py",1619,0,"",python,selection_keyboard +5124,7221733,"generate_dataset.py",1619,0,"a",python,content +5125,7221734,"generate_dataset.py",1620,0,"",python,selection_keyboard +5126,7221883,"generate_dataset.py",1620,0,"c",python,content +5127,7221884,"generate_dataset.py",1621,0,"",python,selection_keyboard +5128,7222120,"generate_dataset.py",1621,0,"t",python,content +5129,7222121,"generate_dataset.py",1622,0,"",python,selection_keyboard +5130,7222262,"generate_dataset.py",1622,0,"i",python,content +5131,7222263,"generate_dataset.py",1623,0,"",python,selection_keyboard +5132,7222356,"generate_dataset.py",1623,0,"o",python,content +5133,7222358,"generate_dataset.py",1624,0,"",python,selection_keyboard +5134,7222493,"generate_dataset.py",1624,0,"n",python,content +5135,7222494,"generate_dataset.py",1625,0,"",python,selection_keyboard +5136,7222535,"generate_dataset.py",1625,0,"s",python,content +5137,7222536,"generate_dataset.py",1626,0,"",python,selection_keyboard +5138,7223148,"generate_dataset.py",1626,0,"_",python,content +5139,7223149,"generate_dataset.py",1627,0,"",python,selection_keyboard +5140,7223694,"generate_dataset.py",1626,1,"",python,content +5141,7223968,"generate_dataset.py",1627,0,"",python,selection_command +5142,7224771,"generate_dataset.py",1626,0,"",python,selection_command +5143,7226563,"generate_dataset.py",1627,0,"",python,selection_command +5144,7227088,"generate_dataset.py",1627,0,":",python,content +5145,7227088,"generate_dataset.py",1628,0,"",python,selection_keyboard +5146,7227540,"generate_dataset.py",1628,0," ",python,content +5147,7227541,"generate_dataset.py",1629,0,"",python,selection_keyboard +5148,7243422,"generate_dataset.py",1629,0,"a",python,content 
+5149,7243423,"generate_dataset.py",1630,0,"",python,selection_keyboard +5150,7243588,"generate_dataset.py",1630,0,"c",python,content +5151,7243589,"generate_dataset.py",1631,0,"",python,selection_keyboard +5152,7245261,"generate_dataset.py",1629,2,"action_data",python,content +5153,7246396,"generate_dataset.py",1640,0,",",python,content +5154,7246397,"generate_dataset.py",1641,0,"",python,selection_keyboard +5155,7246823,"generate_dataset.py",1640,0,"",python,selection_command +5156,7275582,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"#!/usr/bin/env bash\n\npython generate_dataset.py \\n --num_episodes 10 \\n --output_dir /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev \\n --min_episode_length 1000",shellscript,tab +5157,7283837,"TERMINAL",0,0,"rm /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/*",,terminal_command +5158,7283888,"TERMINAL",0,0,"]633;E;2025-09-04 11:58:21 rm /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev/*;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +5159,7284032,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +5160,7289775,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/generate_dataset_10m.sh",,terminal_command +5161,7289825,"TERMINAL",0,0,"]633;E;2025-09-04 11:58:26 sh slurm/dev/mihir/horeka/generate_dataset_10m.sh;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +5162,7294096,"TERMINAL",0,0,"Gym has been unmaintained since 2022 and does not support NumPy 2.0 amongst other critical functionality.\r\nPlease upgrade to Gymnasium, the maintained drop-in replacement of Gym, or contact the authors of your software and request that they upgrade.\r\nUsers of this version of Gym should be able to simply replace 'import gym' with 'import gymnasium as gym' in the vast majority of cases.\r\nSee the migration guide at https://gymnasium.farama.org/introduction/migration_guide/ for additional information.\r\n",,terminal_output +5163,7305891,"TERMINAL",0,0,"Episode 0 completed, length: 1000\r\nEpisode too short (57), resampling...\r\n",,terminal_output +5164,7306392,"TERMINAL",0,0,"Episode too short (887), resampling...\r\n",,terminal_output +5165,7306712,"TERMINAL",0,0,"Episode too short (376), resampling...\r\n",,terminal_output +5166,7306926,"TERMINAL",0,0,"Episode too short (346), resampling...\r\n",,terminal_output +5167,7307612,"TERMINAL",0,0,"Episode 1 completed, length: 1000\r\n",,terminal_output +5168,7308021,"TERMINAL",0,0,"Episode too short (660), resampling...\r\n",,terminal_output +5169,7308678,"TERMINAL",0,0,"Episode 2 completed, length: 1000\r\n",,terminal_output +5170,7309148,"TERMINAL",0,0,"Episode 3 completed, length: 1000\r\n",,terminal_output +5171,7309256,"TERMINAL",0,0,"Episode too short (229), resampling...\r\n",,terminal_output +5172,7309844,"TERMINAL",0,0,"Episode 4 completed, length: 1000\r\n",,terminal_output +5173,7310232,"TERMINAL",0,0,"Episode too short (834), resampling...\r\n",,terminal_output +5174,7310346,"TERMINAL",0,0,"Episode too short (161), resampling...\r\n",,terminal_output +5175,7310962,"TERMINAL",0,0,"Episode 5 completed, length: 1000\r\n",,terminal_output +5176,7311434,"TERMINAL",0,0,"Episode 6 completed, length: 1000\r\n",,terminal_output +5177,7312222,"TERMINAL",0,0,"Episode too short (868), resampling...\r\n",,terminal_output +5178,7312631,"TERMINAL",0,0,"Episode too short (622), resampling...\r\n",,terminal_output +5179,7312939,"TERMINAL",0,0,"Episode too short (622), 
resampling...\r\n",,terminal_output +5180,7313143,"TERMINAL",0,0,"Episode too short (405), resampling...\r\n",,terminal_output +5181,7313774,"TERMINAL",0,0,"Episode too short (860), resampling...\r\nEpisode too short (79), resampling...\r\n",,terminal_output +5182,7314406,"TERMINAL",0,0,"Episode 7 completed, length: 1000\r\n",,terminal_output +5183,7314780,"TERMINAL",0,0,"Episode too short (602), resampling...\r\n",,terminal_output +5184,7315402,"TERMINAL",0,0,"Episode 8 completed, length: 1000\r\n",,terminal_output +5185,7315936,"TERMINAL",0,0,"Episode 9 completed, length: 1000\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/generate_dataset.py"", line 76, in \r\n json.dump(metadata, f)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/json/__init__.py"", line 179, in dump\r\n for chunk in iterable:\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/json/encoder.py"", line 431, in _iterencode\r\n yield from _iterencode_dict(o, _current_indent_level)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/json/encoder.py"", line 405, in _iterencode_dict\r\n yield from chunks\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/json/encoder.py"", line 438, in _iterencode\r\n o = _default(o)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/json/encoder.py"", line 179, in default\r\n raise TypeError(f'Object of type {o.__class__.__name__} '\r\nTypeError: Object of type TensorType is not JSON serializable\r\n",,terminal_output +5186,7316028,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;1",,terminal_output +5187,7387114,"TERMINAL",0,0,"bash",,terminal_focus +5188,7388440,"generate_dataset.py",0,0,"",python,tab +5189,7389965,"generate_dataset.py",2105,0,"",python,selection_mouse +5190,7391133,"generate_dataset.py",2039,0,"",python,selection_mouse +5191,7394949,"TERMINAL",0,0,"bash",,terminal_focus +5192,7395422,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab +5193,7395899,"TERMINAL",0,0,"bash",,terminal_focus +5194,7396362,"generate_dataset.py",0,0,"",python,tab +5195,7398469,"TERMINAL",0,0,"bash",,terminal_focus +5196,7402504,"generate_dataset.py",0,0,"",python,tab +5197,7404725,"generate_dataset.py",2044,0,"\n",python,content +5198,7405275,"generate_dataset.py",2045,0,"b",python,content +5199,7405276,"generate_dataset.py",2046,0,"",python,selection_keyboard +5200,7405370,"generate_dataset.py",2046,0,"r",python,content +5201,7405371,"generate_dataset.py",2047,0,"",python,selection_keyboard +5202,7405559,"generate_dataset.py",2047,0,"e",python,content +5203,7405560,"generate_dataset.py",2048,0,"",python,selection_keyboard +5204,7406543,"generate_dataset.py",2045,3,"breakpoint",python,content +5205,7407219,"generate_dataset.py",2055,0,"()",python,content +5206,7407222,"generate_dataset.py",2056,0,"",python,selection_keyboard +5207,7407252,"generate_dataset.py",2056,1,")",python,content +5208,7407253,"generate_dataset.py",2057,0,"",python,selection_keyboard +5209,7407326,"generate_dataset.py",2056,0,"",python,selection_command +5210,7409291,"TERMINAL",0,0,"sh slurm/dev/mihir/horeka/generate_dataset_10m.sh",,terminal_command 
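After the save call gains an `"actions": action_data` entry next to the existing `.tobytes()` payload, the first full run dies in `json.dump` with `TypeError: Object of type TensorType is not JSON serializable`: the metadata dict stores `env.ac_space` (a gym3 `TensorType`) directly, and `json.dump` only accepts plain Python types. A minimal reproduction with a stand-in class (not the real gym3 type), plus the shape of the eventual fix:

```python
import json

class TensorTypeLike:
    """Stand-in for gym3.types.TensorType, not the real class."""
    def __init__(self, n):
        self.n = n

# json.dump only handles dict/list/str/int/float/bool/None, so the rich
# object stored under "action_space" raises exactly this TypeError.
metadata = {"env": "coinrun", "action_space": TensorTypeLike(15)}
try:
    json.dumps(metadata)
except TypeError as e:
    print(e)  # Object of type TensorTypeLike is not JSON serializable

# The eventual fix later in the session: keep only a plain integer.
metadata["num_actions"] = metadata.pop("action_space").n
print(json.dumps(metadata))
```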
+5211,7409333,"TERMINAL",0,0,"]633;E;2025-09-04 12:00:26 sh slurm/dev/mihir/horeka/generate_dataset_10m.sh;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +5212,7409942,"TERMINAL",0,0,"Gym has been unmaintained since 2022 and does not support NumPy 2.0 amongst other critical functionality.\r\nPlease upgrade to Gymnasium, the maintained drop-in replacement of Gym, or contact the authors of your software and request that they upgrade.\r\nUsers of this version of Gym should be able to simply replace 'import gym' with 'import gymnasium as gym' in the vast majority of cases.\r\nSee the migration guide at https://gymnasium.farama.org/introduction/migration_guide/ for additional information.\r\n",,terminal_output +5213,7413495,"TERMINAL",0,0,"Episode too short (302), resampling...\r\n",,terminal_output +5214,7413816,"TERMINAL",0,0,"Episode too short (662), resampling...\r\n",,terminal_output +5215,7414006,"TERMINAL",0,0,"Episode too short (329), resampling...\r\n",,terminal_output +5216,7414282,"TERMINAL",0,0,"Episode too short (447), resampling...\r\n",,terminal_output +5217,7414695,"TERMINAL",0,0,"Episode too short (719), resampling...\r\n",,terminal_output +5218,7414918,"TERMINAL",0,0,"Episode too short (425), resampling...\r\n",,terminal_output +5219,7415240,"TERMINAL",0,0,"Episode too short (378), resampling...\r\n",,terminal_output +5220,7415477,"TERMINAL",0,0,"Episode too short (497), resampling...\r\n",,terminal_output +5221,7416012,"TERMINAL",0,0,"Episode 0 completed, length: 1000\r\nEpisode too short (57), resampling...\r\n",,terminal_output +5222,7416307,"TERMINAL",0,0,"Episode too short (597), resampling...\r\n",,terminal_output +5223,7416513,"TERMINAL",0,0,"Episode too short (399), resampling...\r\n",,terminal_output +5224,7416619,"TERMINAL",0,0,"Episode too short (114), resampling...\r\n",,terminal_output +5225,7417016,"TERMINAL",0,0,"Episode too short (806), resampling...\r\n",,terminal_output +5226,7417408,"TERMINAL",0,0,"Episode too short (964), resampling...\r\n",,terminal_output +5227,7417898,"TERMINAL",0,0,"Episode too short (971), resampling...\r\n",,terminal_output +5228,7417951,"TERMINAL",0,0,"Episode too short (89), resampling...\r\n",,terminal_output +5229,7418149,"TERMINAL",0,0,"Episode too short (315), resampling...\r\n",,terminal_output +5230,7418921,"TERMINAL",0,0,"Episode 1 completed, length: 1000\r\n",,terminal_output +5231,7419230,"TERMINAL",0,0,"Episode too short (589), resampling...\r\n",,terminal_output +5232,7419844,"TERMINAL",0,0,"Episode 2 completed, length: 1000\r\n",,terminal_output +5233,7420354,"TERMINAL",0,0,"Episode too short (841), resampling...\r\n",,terminal_output +5234,7420467,"TERMINAL",0,0,"Episode too short (233), resampling...\r\n",,terminal_output +5235,7420976,"TERMINAL",0,0,"Episode too short (730), resampling...\r\nEpisode too short (51), resampling...\r\n",,terminal_output +5236,7421304,"TERMINAL",0,0,"Episode too short (626), resampling...\r\n",,terminal_output +5237,7421730,"TERMINAL",0,0,"Episode too short (689), resampling...\r\n",,terminal_output +5238,7422093,"TERMINAL",0,0,"Episode too short (582), resampling...\r\n",,terminal_output +5239,7422303,"TERMINAL",0,0,"Episode too short (219), resampling...\r\n",,terminal_output +5240,7422470,"TERMINAL",0,0,"Episode too short (176), resampling...\r\n",,terminal_output +5241,7423018,"TERMINAL",0,0,"Episode too short (730), resampling...\r\n",,terminal_output +5242,7423430,"TERMINAL",0,0,"Episode too short (676), resampling...\r\n",,terminal_output 
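The rerun shows the same `--min_episode_length 1000` filtering as before: episodes that terminate early are discarded and resampled. A guess at the loop behind those terminal messages, as a self-contained sketch with a stand-in rollout:

```python
import numpy as np

def collect_episode(rng, max_len=1000):
    """Stand-in rollout: episodes end early at random, capped at max_len steps."""
    return min(int(rng.integers(30, 1400)), max_len)

# Assumed logic behind the output: episodes shorter than --min_episode_length
# are discarded and resampled until one reaches the cap.
rng = np.random.default_rng(0)
min_episode_length = 1000
for episode in range(10):
    while True:
        length = collect_episode(rng)
        if length >= min_episode_length:
            break
        print(f"Episode too short ({length}), resampling...")
    print(f"Episode {episode} completed, length: {length}")
```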
+5243,7423883,"TERMINAL",0,0,"Episode 3 completed, length: 1000\r\n",,terminal_output +5244,7424511,"TERMINAL",0,0,"Episode 4 completed, length: 1000\r\n",,terminal_output +5245,7424909,"TERMINAL",0,0,"bash",,terminal_focus +5246,7425144,"TERMINAL",0,0,"Episode 5 completed, length: 1000\r\n",,terminal_output +5247,7425483,"TERMINAL",0,0,"Episode too short (663), resampling...\r\n",,terminal_output +5248,7425778,"TERMINAL",0,0,"Episode too short (507), resampling...\r\n",,terminal_output +5249,7426144,"generate_dataset.py",0,0,"",python,tab +5250,7426384,"TERMINAL",0,0,"Episode 6 completed, length: 1000\r\n",,terminal_output +5251,7426531,"TERMINAL",0,0,"Episode too short (296), resampling...\r\n",,terminal_output +5252,7427155,"TERMINAL",0,0,"Episode 7 completed, length: 1000\r\n",,terminal_output +5253,7427732,"TERMINAL",0,0,"Episode 8 completed, length: 1000\r\nEpisode too short (129), resampling...\r\n",,terminal_output +5254,7428036,"TERMINAL",0,0,"Episode too short (443), resampling...\r\n",,terminal_output +5255,7428566,"TERMINAL",0,0,"Episode 9 completed, length: 1000\r\n",,terminal_output +5256,7428686,"TERMINAL",0,0,"> /hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/generate_dataset.py(70)()\r\n-> ""env"": ""coinrun"",\r\n(Pdb) ",,terminal_output +5257,7446856,"TERMINAL",0,0,"sh",,terminal_focus +5258,7448689,"TERMINAL",0,0,"l",,terminal_output +5259,7448865,"TERMINAL",0,0,"\r\n 65 \t print(f""Episode too short ({len(observations_seq)}), resampling..."")\r\n 66 \t\r\n 67 \t# --- Save metadata ---\r\n 68 \tbreakpoint()\r\n 69 \tmetadata = {\r\n 70 ->\t ""env"": ""coinrun"",\r\n 71 \t ""action_space"": env.ac_space,\r\n 72 \t ""num_episodes"": args.num_episodes,\r\n 73 \t ""avg_episode_len"": np.mean([ep[""length""] for ep in episode_metadata]),\r\n 74 \t ""episode_metadata"": episode_metadata,\r\n 75 \t}\r\n(Pdb) ",,terminal_output +5260,7451256,"TERMINAL",0,0,"e",,terminal_output +5261,7451364,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +5262,7451488,"TERMINAL",0,0,"[?25lv[?25h",,terminal_output +5263,7451602,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +5264,7452133,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +5265,7452318,"TERMINAL",0,0,"[?25lx[?25h",,terminal_output +5266,7452900,"TERMINAL",0,0,"[?25lx\r[?25h",,terminal_output +5267,7453036,"TERMINAL",0,0,"[?25lx[?25h",,terminal_output +5268,7453660,"TERMINAL",0,0,"[?25lx\r[?25h",,terminal_output +5269,7454262,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +5270,7454599,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +5271,7454846,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +5272,7454952,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +5273,7455100,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +5274,7455243,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +5275,7455305,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +5276,7455462,"TERMINAL",0,0,"\r\n\r\n(Pdb) ",,terminal_output +5277,7457277,"TERMINAL",0,0,"n",,terminal_output +5278,7457550,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +5279,7458028,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +5280,7460333,"TERMINAL",0,0,"\r",,terminal_output +5281,7466392,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +5282,7466478,"TERMINAL",0,0,"[?25ly[?25h",,terminal_output +5283,7466706,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +5284,7467042,"TERMINAL",0,0,"[?25le\r[?25h",,terminal_output +5285,7467237,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +5286,7467346,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +5287,7468075,"TERMINAL",0,0,"[?25l([?25h",,terminal_output 
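With `breakpoint()` placed just above the metadata dict, the run drops into pdb after episode 9, and `l` confirms the offending entry is `"action_space": env.ac_space`. A reconstruction of that debugging step (assumed context; the pdb commands in the comment are the ones typed in the session):

```python
# Assumed context: this sits just above the metadata dict in generate_dataset.py.
# Once all episodes finish, execution drops into pdb, where the session runs:
#   (Pdb) l                      # list the source around the metadata dict
#   (Pdb) type(env.ac_space)     # identify the non-serializable object
breakpoint()  # temporary; removed again once the culprit field is found
metadata = {
    "env": "coinrun",
    # "action_space": env.ac_space,  # <- the entry under inspection
}
```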
+5288,7469178,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +5289,7469445,"TERMINAL",0,0,"[?25ln[?25h[?25lv[?25h",,terminal_output +5290,7469660,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +5291,7470219,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +5292,7470687,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +5293,7471208,"TERMINAL",0,0,"[?25l_[?25h",,terminal_output +5294,7471588,"TERMINAL",0,0,"[?25ls[?25h",,terminal_output +5295,7471784,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +5296,7472231,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +5297,7472364,"TERMINAL",0,0,"[?25lc[?25h",,terminal_output +5298,7472498,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +5299,7473048,"TERMINAL",0,0,"[?25l)[?25h",,terminal_output +5300,7473156,"TERMINAL",0,0,"\r\n\r\n(Pdb) ",,terminal_output +5301,7478794,"generate_dataset.py",0,0,"",python,tab +5302,7482885,"generate_dataset.py",222,0,"",python,selection_mouse +5303,7483127,".venv/lib/python3.10/site-packages/gym3/types_np.py",0,0,"from functools import partial\nfrom typing import Any, Optional, Sequence, Tuple\n\nimport numpy as np\n\nfrom gym3.types import Discrete, Real, TensorType, ValType, multimap\n\n\ndef concat(xs: Sequence[Any], axis: int = 0) -> Any:\n """"""\n Concatenate the (leaf) arrays from xs\n\n :param xs: list of trees with the same shape, where the leaf values are numpy arrays\n :param axis: axis to concatenate along\n """"""\n return multimap(lambda *xs: np.concatenate(xs, axis=axis), *xs)\n\n\ndef stack(xs: Sequence[Any], axis: int = 0) -> Any:\n """"""\n Stack the (leaf) arrays from xs\n\n :param xs: list of trees with the same shape, where the leaf values are numpy arrays\n :param axis: axis to stack along\n """"""\n return multimap(lambda *xs: np.stack(xs, axis=axis), *xs)\n\n\ndef split(x: Any, sections: Sequence[int]) -> Sequence[Any]:\n """"""\n Split the (leaf) arrays from the tree x\n\n Examples:\n\n split([1,2,3,4], [1,2,3,4]) => [[1], [2], [3], [4]]\n split([1,2,3,4], [1,3,4]) => [[1], [2, 3], [4]]\n\n :param x: a tree where the leaf values are numpy arrays\n :param sections: list of indices to split at (not sizes of each split)\n\n :returns: list of trees with length `len(sections)` with the same shape as x\n where each leaf is the corresponding section of the leaf in x\n """"""\n result = []\n start = 0\n for end in sections:\n select_tree = multimap(lambda arr: arr[start:end], x)\n start = end\n result.append(select_tree)\n return result\n\n\ndef dtype(tt: TensorType) -> np.dtype:\n """"""\n :param tt: TensorType to get dtype for\n\n :returns: numpy.dtype to use for tt\n """"""\n assert isinstance(tt, TensorType)\n return np.dtype(tt.eltype.dtype_name)\n\n\ndef zeros(vt: ValType, bshape: Tuple) -> Any:\n """"""\n :param vt: ValType to create zeros for\n :param bshape: batch shape to prepend to the shape of each numpy array created by this function\n\n :returns: tree of numpy arrays matching vt\n """"""\n return multimap(\n lambda subdt: np.zeros(bshape + subdt.shape, dtype=dtype(subdt)), vt\n )\n\n\ndef _sample_tensor(\n tt: TensorType, bshape: Tuple, rng: Optional[np.random.RandomState] = None\n) -> np.ndarray:\n """"""\n :param tt: TensorType to create sample for\n :param bshape: batch shape to prepend to the shape of each numpy array created by this function\n :param rng: np.random.RandomState to use for sampling\n\n :returns: numpy array matching tt\n """"""\n if rng is None:\n rng = np.random\n assert isinstance(tt, TensorType)\n eltype = tt.eltype\n shape = bshape + tt.shape\n if isinstance(eltype, 
Discrete):\n return rng.randint(eltype.n, size=shape, dtype=dtype(tt))\n elif isinstance(eltype, Real):\n return rng.randn(*shape).astype(dtype(tt))\n else:\n raise ValueError(f""Expected ScalarType, got {type(eltype)}"")\n\n\ndef sample(\n vt: ValType, bshape: Tuple, rng: Optional[np.random.RandomState] = None\n) -> Any:\n """"""\n :param vt: ValType to create sample for\n :param bshape: batch shape to prepend to the shape of each numpy array created by this function\n :param rng: np.random.RandomState to use for sampling\n\n :returns: tree of numpy arrays matching vt\n """"""\n return multimap(partial(_sample_tensor, bshape=bshape, rng=rng), vt)\n",python,tab +5304,7486155,".venv/lib/python3.10/site-packages/gym3/types_np.py",716,0,"",python,selection_mouse +5305,7486155,".venv/lib/python3.10/site-packages/gym3/types_np.py",715,0,"",python,selection_command +5306,7486610,".venv/lib/python3.10/site-packages/gym3/types_np.py",678,0,"",python,selection_command +5307,7540565,".venv/lib/python3.10/site-packages/gym3/types_np.py",146,0,"",python,selection_mouse +5308,7541775,".venv/lib/python3.10/site-packages/gym3/types_np.py",144,0,"",python,selection_mouse +5309,7542191,".venv/lib/python3.10/site-packages/gym3/types.py",0,0,"from typing import Any, Callable, Tuple\n\nfrom gym3.internal import misc\n\nINTEGER_DTYPE_NAMES = set(\n [""int8"", ""int16"", ""int32"", ""int64"", ""uint8"", ""uint16"", ""uint32"", ""uint64""]\n)\nFLOAT_DTYPE_NAMES = set([""float32"", ""float64""])\nDTYPE_NAME_TO_MAX_VALUE = {}\nDTYPE_NAME_TO_BIT_WIDTH = {}\nfor signed in [True, False]:\n for bit_width in (8, 16, 32, 64):\n if signed:\n max_value = 2 ** (bit_width - 1) - 1\n else:\n max_value = 2 ** bit_width\n DTYPE_NAME_TO_MAX_VALUE[("""" if signed else ""u"") + f""int{bit_width}""] = max_value\n DTYPE_NAME_TO_BIT_WIDTH[("""" if signed else ""u"") + f""int{bit_width}""] = bit_width\n\n\ndef pod_equals(x, y):\n """"""\n equality for plain-old-data types\n """"""\n return type(x) == type(y) and x.__dict__ == y.__dict__\n\n\nclass ScalarType:\n """"""\n Type of a scalar, used as the base class for the element type of TensorTypes\n """"""\n\n def __init__(self):\n self.dtype_name = None\n\n __eq__ = pod_equals\n\n\nclass Real(ScalarType):\n """"""\n A scalar that can represent continuous values\n """"""\n\n def __init__(self, dtype_name: str = ""float32""):\n self.dtype_name = dtype_name\n\n def __repr__(self):\n return """"\n\n def __str__(self):\n return ""R""\n\n\nclass Discrete(ScalarType):\n """"""\n A scalar that can represent discrete (integer) values\n\n :param n: number of discrete values that this scalar can assume, which are required to be in the range [0, n)\n :param bit_width: number of bits for the integer dtype that will be used to store values\n :param signed: Set to False to use an unsigned integer type\n """"""\n\n def __init__(self, n: int, dtype_name: str = ""int64"") -> None:\n self.n = int(n)\n self.dtype_name = dtype_name\n assert self.dtype_name in INTEGER_DTYPE_NAMES\n max_value = DTYPE_NAME_TO_MAX_VALUE[self.dtype_name]\n assert (\n n <= max_value + 1\n ), f""{n} cannot be greater than {max_value + 1} for dtype_name={dtype_name}""\n\n def __repr__(self):\n return f""""\n\n def __str__(self):\n return f""D{self.n}""\n\n\nclass ValType:\n """"""\n Tensor or combination of tensors\n """"""\n\n __eq__ = pod_equals\n\n\nclass TensorType(ValType):\n """"""\n A tensor value type\n\n :param eltype: instance of ScalarType subclass that represents the types of values in this tensor\n :param shape: 
shape of the tensor as a tuple of ints\n """"""\n\n def __init__(self, eltype: ScalarType, shape: Tuple) -> None:\n assert isinstance(shape, tuple)\n self.eltype = eltype\n self.shape = shape\n\n @property\n def ndim(self):\n """"""\n Number of dimensions of the tensor\n """"""\n return len(self.shape)\n\n @property\n def size(self):\n """"""\n Number of elements of the tensor\n """"""\n return int(misc.intprod(self.shape))\n\n def __repr__(self):\n return f""""\n\n def __str__(self):\n shape_str = "","".join(map(str, self.shape))\n return f""{self.eltype}[{shape_str}]""\n\n\ndef discrete_scalar(n: int) -> TensorType:\n """"""\n Convenience method for definiting a Discrete TensorType\n """"""\n return TensorType(shape=(), eltype=Discrete(n))\n\n\nclass DictType(ValType):\n """"""\n A value type representing a (possibly nested) dictionary of strings to value types\n """"""\n\n def __init__(self, **name2type: ValType) -> None:\n self._n2t = name2type\n\n def __repr__(self):\n return f""""\n\n def __str__(self):\n elems_str = "", "".join([f""{k}={v}"" for (k, v) in self._n2t.items()])\n return f""Dict({elems_str})""\n\n def __len__(self):\n return len(self._n2t)\n\n def keys(self):\n return self._n2t.keys()\n\n def values(self):\n return self._n2t.values()\n\n def items(self):\n return self._n2t.items()\n\n def __getitem__(self, key):\n return self._n2t[key]\n\n def __contains__(self, key):\n return key in self._n2t\n\n\ndef multimap(f: Callable, *xs: Any) -> Any:\n """"""\n Apply f at each leaf of the list of trees\n\n A tree is:\n * a (possibly nested) dict\n * a (possibly nested) DictType\n * any other object (a leaf value)\n\n `{""a"": 1}`, `{""a"": {""b"": 2}}`, and `3` are all valid trees, where the leaf values\n are the integers\n\n :param f: function to call at each leaf, must take len(xs) arguments\n :param xs: a list of trees, all with the same structure\n\n :returns: A tree of the same structure, where each leaf contains f's return value.\n """"""\n first = xs[0]\n if isinstance(first, dict) or isinstance(first, DictType):\n assert all(isinstance(x, dict) or isinstance(x, DictType) for x in xs)\n assert all(sorted(x.keys()) == sorted(first.keys()) for x in xs)\n return {k: multimap(f, *(x[k] for x in xs)) for k in sorted(first.keys())}\n else:\n return f(*xs)\n",python,tab +5310,7548936,".venv/lib/python3.10/site-packages/gym3/types.py",2919,0,"",python,selection_mouse +5311,7548949,".venv/lib/python3.10/site-packages/gym3/types.py",2918,0,"",python,selection_command +5312,7549784,".venv/lib/python3.10/site-packages/gym3/types.py",2959,0,"",python,selection_mouse +5313,7551024,".venv/lib/python3.10/site-packages/gym3/types.py",2944,0,"",python,selection_mouse +5314,7559551,".venv/lib/python3.10/site-packages/gym3/types.py",2310,0,"",python,selection_mouse +5315,7559713,".venv/lib/python3.10/site-packages/gym3/types.py",2306,8,"instance",python,selection_mouse +5316,7560342,".venv/lib/python3.10/site-packages/gym3/types.py",2320,0,"",python,selection_mouse +5317,7560487,".venv/lib/python3.10/site-packages/gym3/types.py",2318,10,"ScalarType",python,selection_mouse +5318,7561436,".venv/lib/python3.10/site-packages/gym3/types.py",2331,0,"",python,selection_mouse +5319,7561577,".venv/lib/python3.10/site-packages/gym3/types.py",2329,8,"subclass",python,selection_mouse +5320,7562281,".venv/lib/python3.10/site-packages/gym3/types.py",2702,0,"",python,selection_mouse +5321,7570084,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab 
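The two gym3 files opened above (`types_np.py` and `types.py`) show the structure being inspected: coinrun's `ac_space` is a `TensorType` with `shape=()` and a `Discrete` eltype, so the integer action count lives at `eltype.n`. Assuming gym3 is installed (its types.py is reproduced above), the pdb findings that follow can be reproduced directly:

```python
# coinrun's action space is a scalar TensorType whose eltype is Discrete,
# matching the pdb probes below: ndim 0, size 1, shape (), eltype.n == 15.
from gym3.types import discrete_scalar

ac_space = discrete_scalar(15)        # TensorType(shape=(), eltype=Discrete(15))
print(ac_space.ndim, ac_space.size)   # 0 1
print(ac_space.shape)                 # ()
print(ac_space.eltype.n)              # 15
```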
+5322,7571576,"TERMINAL",0,0,"bash",,terminal_focus +5323,7573210,"TERMINAL",0,0,"sh",,terminal_focus +5324,7574199,"TERMINAL",0,0,"",,terminal_output +5325,7575076,"TERMINAL",0,0,"\rtype(env.ac_space)",,terminal_output +5326,7576179,"TERMINAL",0,0,"\r",,terminal_output +5327,7578792,"TERMINAL",0,0,".",,terminal_output +5328,7579343,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +5329,7579536,"TERMINAL",0,0,"[?25ld[?25h",,terminal_output +5330,7579665,"TERMINAL",0,0,"[?25lo[?25h",,terminal_output +5331,7579730,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +5332,7580159,"TERMINAL",0,0,"\r\n*** AttributeError: 'TensorType' object has no attribute 'ndom'\r\n(Pdb) ",,terminal_output +5333,7581058,"TERMINAL",0,0,"\renv.ac_space.ndom",,terminal_output +5334,7581776,"TERMINAL",0,0,"[?25lo\rm[?25h",,terminal_output +5335,7582176,"TERMINAL",0,0,"\rim",,terminal_output +5336,7582296,"TERMINAL",0,0,"\r\n0\r\n(Pdb) ",,terminal_output +5337,7583725,"TERMINAL",0,0,"\renv.ac_space.ndim",,terminal_output +5338,7584989,"TERMINAL",0,0,"\r",,terminal_output +5339,7586174,"generate_dataset.py",0,0,"",python,tab +5340,7588643,".venv/lib/python3.10/site-packages/gym3/types_np.py",0,0,"",python,tab +5341,7589154,".venv/lib/python3.10/site-packages/gym3/types.py",0,0,"",python,tab +5342,7591663,".venv/lib/python3.10/site-packages/gym3/types.py",2849,0,"",python,selection_mouse +5343,7591722,".venv/lib/python3.10/site-packages/gym3/types.py",2848,0,"",python,selection_command +5344,7592223,".venv/lib/python3.10/site-packages/gym3/types.py",2849,0,"",python,selection_mouse +5345,7592235,".venv/lib/python3.10/site-packages/gym3/types.py",2848,0,"",python,selection_command +5346,7592912,".venv/lib/python3.10/site-packages/gym3/types.py",2808,0,"",python,selection_mouse +5347,7593057,".venv/lib/python3.10/site-packages/gym3/types.py",2805,6,"Number",python,selection_mouse +5348,7595569,"TERMINAL",0,0,"\renv.ac_space.ndim",,terminal_output +5349,7595822,"TERMINAL",0,0,"[?25lm\r[?25h",,terminal_output +5350,7595949,"TERMINAL",0,0,"[?25li\r[?25h",,terminal_output +5351,7596058,"TERMINAL",0,0,"[?25ld\r[?25h",,terminal_output +5352,7596416,"TERMINAL",0,0,"[?25ln\r[?25h",,terminal_output +5353,7596696,"TERMINAL",0,0,"s",,terminal_output +5354,7596759,"TERMINAL",0,0,"[?25li[?25h",,terminal_output +5355,7596905,"TERMINAL",0,0,"[?25lz[?25h",,terminal_output +5356,7597002,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +5357,7597122,"TERMINAL",0,0,"\r\n1\r\n(Pdb) ",,terminal_output +5358,7599304,".venv/lib/python3.10/site-packages/gym3/types.py",0,0,"",python,tab +5359,7604028,".venv/lib/python3.10/site-packages/gym3/types.py",2953,0,"",python,selection_mouse +5360,7604180,".venv/lib/python3.10/site-packages/gym3/types.py",2951,4,"self",python,selection_mouse +5361,7612055,".venv/lib/python3.10/site-packages/gym3/types.py",3131,0,"",python,selection_mouse +5362,7612080,".venv/lib/python3.10/site-packages/gym3/types.py",3130,0,"",python,selection_command +5363,7612837,".venv/lib/python3.10/site-packages/gym3/types.py",3024,0,"",python,selection_mouse +5364,7613054,".venv/lib/python3.10/site-packages/gym3/types.py",3021,4,"self",python,selection_mouse +5365,7613490,".venv/lib/python3.10/site-packages/gym3/types.py",3030,0,"",python,selection_mouse +5366,7613625,".venv/lib/python3.10/site-packages/gym3/types.py",3026,5,"shape",python,selection_mouse +5367,7613821,".venv/lib/python3.10/site-packages/gym3/types.py",3025,6,".shape",python,selection_mouse 
+5368,7613867,".venv/lib/python3.10/site-packages/gym3/types.py",3021,10,"self.shape",python,selection_mouse +5369,7614323,".venv/lib/python3.10/site-packages/gym3/types.py",3024,0,"",python,selection_mouse +5370,7614487,".venv/lib/python3.10/site-packages/gym3/types.py",3021,4,"self",python,selection_mouse +5371,7614621,".venv/lib/python3.10/site-packages/gym3/types.py",3021,5,"self.",python,selection_mouse +5372,7614649,".venv/lib/python3.10/site-packages/gym3/types.py",3021,10,"self.shape",python,selection_mouse +5373,7622393,"TERMINAL",0,0,"\renv.ac_space.size",,terminal_output +5374,7622640,"TERMINAL",0,0,"[?25le\r[?25h",,terminal_output +5375,7622749,"TERMINAL",0,0,"[?25lz\r[?25h",,terminal_output +5376,7622903,"TERMINAL",0,0,"[?25li\r[?25h",,terminal_output +5377,7623445,"TERMINAL",0,0,"h",,terminal_output +5378,7623532,"TERMINAL",0,0,"[?25la[?25h",,terminal_output +5379,7623670,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +5380,7623776,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +5381,7623861,"TERMINAL",0,0,"\r\n()\r\n(Pdb) ",,terminal_output +5382,7626414,"TERMINAL",0,0,"\renv.ac_space.shape",,terminal_output +5383,7626695,"TERMINAL",0,0,"[?25le\r[?25h",,terminal_output +5384,7626803,"TERMINAL",0,0,"[?25lp\r[?25h",,terminal_output +5385,7626953,"TERMINAL",0,0,"[?25la\r[?25h",,terminal_output +5386,7627072,"TERMINAL",0,0,"[?25lh\r[?25h",,terminal_output +5387,7627221,"TERMINAL",0,0,"[?25ls\r[?25h",,terminal_output +5388,7627359,"TERMINAL",0,0,"[?25l.\r[?25h",,terminal_output +5389,7627545,"TERMINAL",0,0,"\r\n\r\n(Pdb) ",,terminal_output +5390,7629476,"TERMINAL",0,0,"\renv.ac_space",,terminal_output +5391,7630883,"TERMINAL",0,0,"\r.shape",,terminal_output +5392,7632245,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab +5393,7633159,"TERMINAL",0,0,"bash",,terminal_focus +5394,7634749,"generate_dataset.py",0,0,"",python,tab +5395,7637991,".venv/lib/python3.10/site-packages/gym3/types_np.py",0,0,"",python,tab +5396,7639072,".venv/lib/python3.10/site-packages/gym3/types.py",0,0,"",python,tab +5397,7640797,".venv/lib/python3.10/site-packages/gym3/types.py",3061,0,"",python,selection_mouse +5398,7640932,".venv/lib/python3.10/site-packages/gym3/types.py",3057,6,"eltype",python,selection_mouse +5399,7641604,".venv/lib/python3.10/site-packages/gym3/types.py",3054,0,"",python,selection_mouse +5400,7641742,".venv/lib/python3.10/site-packages/gym3/types.py",3052,4,"self",python,selection_mouse +5401,7641938,".venv/lib/python3.10/site-packages/gym3/types.py",3052,5,"self.",python,selection_mouse +5402,7641950,".venv/lib/python3.10/site-packages/gym3/types.py",3052,11,"self.eltype",python,selection_mouse +5403,7680621,"TERMINAL",0,0,"sh",,terminal_focus +5404,7683453,"TERMINAL",0,0,"\r\n()\r\n(Pdb) ",,terminal_output +5405,7684207,"TERMINAL",0,0,"\renv.ac_space.shape",,terminal_output +5406,7684649,"TERMINAL",0,0,"[?25le\r[?25h",,terminal_output +5407,7684793,"TERMINAL",0,0,"[?25lp\r[?25h",,terminal_output +5408,7684913,"TERMINAL",0,0,"[?25la\r[?25h",,terminal_output +5409,7685076,"TERMINAL",0,0,"[?25lh\r[?25h",,terminal_output +5410,7685440,"TERMINAL",0,0,"[?25ls\r[?25h",,terminal_output +5411,7686045,"TERMINAL",0,0,"self.eltype",,terminal_output +5412,7687277,"TERMINAL",0,0,"[?25l[?25h",,terminal_output +5413,7687990,"TERMINAL",0,0,"[?25l[?25h",,terminal_output +5414,7689643,"TERMINAL",0,0,"[?25l.\re[?25h",,terminal_output +5415,7689784,"TERMINAL",0,0,"[?25lf\re[?25h",,terminal_output +5416,7689930,"TERMINAL",0,0,"[?25ll\re[?25h",,terminal_output 
+5417,7690102,"TERMINAL",0,0,"[?25le\rl[?25h",,terminal_output +5418,7690480,"TERMINAL",0,0,"[?25ls\re[?25h",,terminal_output +5419,7690851,"TERMINAL",0,0,"\r\n\r\n(Pdb) ",,terminal_output +5420,7903435,"TERMINAL",0,0,"bash",,terminal_focus +5421,7904301,"TERMINAL",0,0,"sh",,terminal_focus +5422,7911629,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab +5423,7914114,"TERMINAL",0,0,"bash",,terminal_focus +5424,7915394,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab +5425,7916047,"TERMINAL",0,0,"sh",,terminal_focus +5426,7918845,"generate_dataset.py",0,0,"",python,tab +5427,7921950,"generate_dataset.py",949,0,"",python,selection_mouse +5428,7923185,"generate_dataset.py",991,0,"\n ",python,content +5429,7923541,"generate_dataset.py",1000,0,"p",python,content +5430,7923542,"generate_dataset.py",1001,0,"",python,selection_keyboard +5431,7923731,"generate_dataset.py",1001,0,"r",python,content +5432,7923732,"generate_dataset.py",1002,0,"",python,selection_keyboard +5433,7923830,"generate_dataset.py",1002,0,"i",python,content +5434,7923832,"generate_dataset.py",1003,0,"",python,selection_keyboard +5435,7923881,"generate_dataset.py",1003,0,"n",python,content +5436,7923882,"generate_dataset.py",1004,0,"",python,selection_keyboard +5437,7923970,"generate_dataset.py",1004,0,"t",python,content +5438,7923971,"generate_dataset.py",1005,0,"",python,selection_keyboard +5439,7924446,"generate_dataset.py",1000,5,"print",python,content +5440,7925340,"generate_dataset.py",1005,0,"()",python,content +5441,7925341,"generate_dataset.py",1006,0,"",python,selection_keyboard +5442,7925469,"generate_dataset.py",1006,0,"a",python,content +5443,7925470,"generate_dataset.py",1007,0,"",python,selection_keyboard +5444,7925587,"generate_dataset.py",1007,0,"c",python,content +5445,7925588,"generate_dataset.py",1008,0,"",python,selection_keyboard +5446,7925677,"generate_dataset.py",1008,0,"t",python,content +5447,7925678,"generate_dataset.py",1009,0,"",python,selection_keyboard +5448,7925914,"generate_dataset.py",1009,0,"i",python,content +5449,7925915,"generate_dataset.py",1010,0,"",python,selection_keyboard +5450,7926046,"generate_dataset.py",1010,0,"o",python,content +5451,7926047,"generate_dataset.py",1011,0,"",python,selection_keyboard +5452,7926242,"generate_dataset.py",1011,0,"n",python,content +5453,7926243,"generate_dataset.py",1012,0,"",python,selection_keyboard +5454,7926504,"generate_dataset.py",1006,6,"action",python,content +5455,7928953,"TERMINAL",0,0,"\renv.ac_space.eltype",,terminal_output +5456,7929457,"TERMINAL",0,0,"\r",,terminal_output +5457,7930612,"TERMINAL",0,0,"bash",,terminal_focus +5458,7931636,"generate_dataset.py",0,0,"",python,tab +5459,7935928,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab +5460,7937533,".venv/lib/python3.10/site-packages/gym3/types_np.py",0,0,"",python,tab +5461,7963651,".venv/lib/python3.10/site-packages/gym3/types.py",0,0,"",python,tab +5462,7973378,".venv/lib/python3.10/site-packages/gym3/types.py",2612,0,"",python,selection_mouse +5463,7973742,".venv/lib/python3.10/site-packages/gym3/types.py",2587,0,"",python,selection_mouse +5464,7974078,".venv/lib/python3.10/site-packages/gym3/types.py",2583,6,"eltype",python,selection_mouse +5465,7974700,".venv/lib/python3.10/site-packages/gym3/types.py",2491,0,"",python,selection_mouse +5466,7974874,".venv/lib/python3.10/site-packages/gym3/types.py",2486,10,"ScalarType",python,selection_mouse 
+5467,7975549,".venv/lib/python3.10/site-packages/gym3/types.py",2492,0,"",python,selection_mouse +5468,7982595,".venv/lib/python3.10/site-packages/gym3/types.py",2493,0,"",python,selection_mouse +5469,8035527,".venv/lib/python3.10/site-packages/gym3/types.py",2126,0,"",python,selection_mouse +5470,8035950,".venv/lib/python3.10/site-packages/gym3/types.py",2120,0,"",python,selection_mouse +5471,8036342,".venv/lib/python3.10/site-packages/gym3/types.py",2122,0,"",python,selection_mouse +5472,8036483,".venv/lib/python3.10/site-packages/gym3/types.py",2121,4,"self",python,selection_mouse +5473,8037120,".venv/lib/python3.10/site-packages/gym3/types.py",2128,0,"",python,selection_mouse +5474,8037280,".venv/lib/python3.10/site-packages/gym3/types.py",2127,2,"}""",python,selection_mouse +5475,8056413,".venv/lib/python3.10/site-packages/gym3/types.py",2124,0,"",python,selection_mouse +5476,8086731,".venv/lib/python3.10/site-packages/gym3/types.py",2101,0,"",python,selection_mouse +5477,8086731,".venv/lib/python3.10/site-packages/gym3/types.py",2100,0,"",python,selection_command +5478,8087263,".venv/lib/python3.10/site-packages/gym3/types.py",2129,0,"",python,selection_mouse +5479,8087276,".venv/lib/python3.10/site-packages/gym3/types.py",2128,0,"",python,selection_command +5480,8087863,".venv/lib/python3.10/site-packages/gym3/types.py",2101,0,"",python,selection_mouse +5481,8087876,".venv/lib/python3.10/site-packages/gym3/types.py",2100,0,"",python,selection_command +5482,8088362,".venv/lib/python3.10/site-packages/gym3/types.py",2101,0,"",python,selection_mouse +5483,8088369,".venv/lib/python3.10/site-packages/gym3/types.py",2100,0,"",python,selection_command +5484,8089812,"TERMINAL",0,0,"sh",,terminal_focus +5485,8090744,"TERMINAL",0,0,"\renv.ac_space.eltype",,terminal_output +5486,8091182,"TERMINAL",0,0,"[?25le\r[?25h",,terminal_output +5487,8091355,"TERMINAL",0,0,"[?25lp\r[?25h",,terminal_output +5488,8091496,"TERMINAL",0,0,"[?25ly\r[?25h",,terminal_output +5489,8091633,"TERMINAL",0,0,"[?25lt\r[?25h",,terminal_output +5490,8091713,"TERMINAL",0,0,"[?25ll\r[?25h",,terminal_output +5491,8091883,"TERMINAL",0,0,"[?25le\r[?25h",,terminal_output +5492,8092561,"TERMINAL",0,0,"n",,terminal_output +5493,8092778,"TERMINAL",0,0,"\r\n*** AttributeError: 'TensorType' object has no attribute 'n'\r\n(Pdb) ",,terminal_output +5494,8093794,"TERMINAL",0,0,"\renv.ac_space.n",,terminal_output +5495,8095188,"TERMINAL",0,0,"[?25ln\r[?25h",,terminal_output +5496,8095459,"TERMINAL",0,0,"e",,terminal_output +5497,8095522,"TERMINAL",0,0,"[?25ll[?25h",,terminal_output +5498,8095898,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +5499,8096265,"TERMINAL",0,0,"[?25ly[?25h",,terminal_output +5500,8096375,"TERMINAL",0,0,"[?25lp[?25h",,terminal_output +5501,8096573,"TERMINAL",0,0,"[?25le[?25h",,terminal_output +5502,8097128,"TERMINAL",0,0,"[?25l.[?25h",,terminal_output +5503,8097669,"TERMINAL",0,0,"[?25l.\r[?25h",,terminal_output +5504,8098099,"TERMINAL",0,0,"\r\n\r\n(Pdb) ",,terminal_output +5505,8099068,"TERMINAL",0,0,"\renv.ac_space.eltype",,terminal_output +5506,8100138,"TERMINAL",0,0,".",,terminal_output +5507,8100363,"TERMINAL",0,0,"[?25ln[?25h",,terminal_output +5508,8100498,"TERMINAL",0,0,"\r\n15\r\n(Pdb) ",,terminal_output +5509,8108892,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab +5510,8109881,"TERMINAL",0,0,"bash",,terminal_focus +5511,8110482,"generate_dataset.py",0,0,"",python,tab +5512,8112525,"generate_dataset.py",2136,0,"",python,selection_mouse 
+5513,8112646,"generate_dataset.py",2135,3,"env",python,selection_mouse +5514,8112780,"generate_dataset.py",2135,12,"env.ac_space",python,selection_mouse +5515,8113360,"generate_dataset.py",2135,12,"",python,content +5516,8113710,"generate_dataset.py",2135,0,"env.ac_space.eltype.n",python,content +5517,8114797,"generate_dataset.py",2234,0,"",python,selection_mouse +5518,8115302,"generate_dataset.py",2139,0,"",python,selection_mouse +5519,8115909,"generate_dataset.py",2137,0,"",python,selection_mouse +5520,8116057,"generate_dataset.py",2135,3,"env",python,selection_mouse +5521,8116230,"generate_dataset.py",2135,12,"env.ac_space",python,selection_mouse +5522,8116326,"generate_dataset.py",2135,13,"env.ac_space.",python,selection_mouse +5523,8116326,"generate_dataset.py",2135,19,"env.ac_space.eltype",python,selection_mouse +5524,8116822,"generate_dataset.py",2153,0,"",python,selection_mouse +5525,8117242,"generate_dataset.py",2145,0,"",python,selection_mouse +5526,8117402,"generate_dataset.py",2139,8,"ac_space",python,selection_mouse +5527,8117686,"generate_dataset.py",2138,9,".ac_space",python,selection_mouse +5528,8118137,"generate_dataset.py",2138,0,"",python,selection_mouse +5529,8118138,"generate_dataset.py",2135,3,"env",python,selection_mouse +5530,8118778,"generate_dataset.py",2191,0,"",python,selection_mouse +5531,8118941,"generate_dataset.py",2183,12,"num_episodes",python,selection_mouse +5532,8119177,"generate_dataset.py",2148,47,"eltype.n,\n ""num_episodes"": args.num_episodes",python,selection_mouse +5533,8119240,"generate_dataset.py",2155,40,"n,\n ""num_episodes"": args.num_episodes",python,selection_mouse +5534,8119607,"generate_dataset.py",2155,0,"",python,selection_mouse +5535,8119607,"generate_dataset.py",2155,1,"n",python,selection_mouse +5536,8119787,"generate_dataset.py",2154,2,".n",python,selection_mouse +5537,8119788,"generate_dataset.py",2148,8,"eltype.n",python,selection_mouse +5538,8119788,"generate_dataset.py",2147,9,".eltype.n",python,selection_mouse +5539,8119801,"generate_dataset.py",2139,17,"ac_space.eltype.n",python,selection_mouse +5540,8119860,"generate_dataset.py",2138,18,".ac_space.eltype.n",python,selection_mouse +5541,8119918,"generate_dataset.py",2135,21,"env.ac_space.eltype.n",python,selection_mouse +5542,8120360,"generate_dataset.py",2137,0,"",python,selection_mouse +5543,8120446,"generate_dataset.py",2135,3,"env",python,selection_mouse +5544,8120640,"generate_dataset.py",2135,4,"env.",python,selection_mouse +5545,8120641,"generate_dataset.py",2135,12,"env.ac_space",python,selection_mouse +5546,8120708,"generate_dataset.py",2135,19,"env.ac_space.eltype",python,selection_mouse +5547,8120891,"generate_dataset.py",2135,20,"env.ac_space.eltype.",python,selection_mouse +5548,8120947,"generate_dataset.py",2135,21,"env.ac_space.eltype.n",python,selection_mouse +5549,8121316,"generate_dataset.py",2156,0,"",python,selection_mouse +5550,8122115,"generate_dataset.py",2125,0,"",python,selection_mouse +5551,8122278,"generate_dataset.py",2120,12,"action_space",python,selection_mouse +5552,8123134,"generate_dataset.py",2120,12,"n",python,content +5553,8123135,"generate_dataset.py",2121,0,"",python,selection_keyboard +5554,8123264,"generate_dataset.py",2121,0,"u",python,content +5555,8123265,"generate_dataset.py",2122,0,"",python,selection_keyboard +5556,8123450,"generate_dataset.py",2122,0,"m",python,content +5557,8123451,"generate_dataset.py",2123,0,"",python,selection_keyboard +5558,8123709,"generate_dataset.py",2123,0,"_",python,content 
+5559,8123710,"generate_dataset.py",2124,0,"",python,selection_keyboard +5560,8123892,"generate_dataset.py",2124,0,"a",python,content +5561,8123892,"generate_dataset.py",2125,0,"",python,selection_keyboard +5562,8124015,"generate_dataset.py",2125,0,"c",python,content +5563,8124016,"generate_dataset.py",2126,0,"",python,selection_keyboard +5564,8124180,"generate_dataset.py",2126,0,"t",python,content +5565,8124182,"generate_dataset.py",2127,0,"",python,selection_keyboard +5566,8124292,"generate_dataset.py",2127,0,"i",python,content +5567,8124293,"generate_dataset.py",2128,0,"",python,selection_keyboard +5568,8124407,"generate_dataset.py",2128,0,"o",python,content +5569,8124408,"generate_dataset.py",2129,0,"",python,selection_keyboard +5570,8124514,"generate_dataset.py",2129,0,"n",python,content +5571,8124515,"generate_dataset.py",2130,0,"",python,selection_keyboard +5572,8124549,"generate_dataset.py",2130,0,"s",python,content +5573,8124550,"generate_dataset.py",2131,0,"",python,selection_keyboard +5574,8162094,"generate_dataset.py",2169,0,"",python,selection_mouse +5575,8162690,"generate_dataset.py",2127,0,"",python,selection_mouse +5576,8162846,"generate_dataset.py",2120,11,"num_actions",python,selection_mouse +5577,8819470,"generate_dataset.py",2195,0,"",python,selection_mouse +5578,8820088,"generate_dataset.py",2314,0,"",python,selection_mouse +5579,8820478,"generate_dataset.py",2313,0,"",python,selection_command +5580,8823746,"TERMINAL",0,0,"sh",,terminal_focus +5581,8827203,"TERMINAL",0,0,"^D\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/generate_dataset.py"", line 70, in \r\n metadata = {\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/generate_dataset.py"", line 70, in \r\n metadata = {\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 90, in trace_dispatch\r\n return self.dispatch_line(frame)\r\n File ""/home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/lib/python3.10/bdb.py"", line 115, in dispatch_line\r\n if self.quitting: raise BdbQuit\r\nbdb.BdbQuit\r\n",,terminal_output +5582,8827305,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;1",,terminal_output +5583,8829376,"generate_dataset.py",0,0,"",python,tab +5584,8830594,"generate_dataset.py",2075,0,"",python,selection_mouse +5585,8831475,"generate_dataset.py",2067,13,"",python,content +5586,8831846,"generate_dataset.py",2043,0,"",python,selection_command +5587,8832059,"generate_dataset.py",2042,0,"",python,selection_command +5588,8832210,"generate_dataset.py",1965,0,"",python,selection_command +5589,8832299,"generate_dataset.py",2042,0,"",python,selection_command +5590,8832462,"generate_dataset.py",2043,0,"",python,selection_command +5591,8833085,"TERMINAL",0,0,"bash",,terminal_focus +5592,8834433,"TERMINAL",0,0,"bash",,terminal_focus +5593,8837090,"TERMINAL",0,0,"git status",,terminal_command +5594,8837141,"TERMINAL",0,0,"]633;E;2025-09-04 12:24:14 git status;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;COn branch coinrun-gt-actions\r\nChanges not staged for commit:\r\n (use ""git add ..."" to update what will be committed)\r\n (use ""git restore ..."" to discard changes in working directory)\r\n\tmodified: generate_dataset.py\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be 
committed)\r\n\tdata/\r\n\tdata_atari/\r\n\tkiller.sh\r\n\tkiller_partition.sh\r\n\tlog.log\r\n\tlogs/\r\n\toverfit_dir.zip\r\n\tread_tf_record.py\r\n\trequirements-franz.txt\r\n\tsamples/\r\n\tscripts_cremers/\r\n\tslurm/\r\n\tutils/visualizer.py\r\n\r\nno changes added to commit (use ""git add"" and/or ""git commit -a"")\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +5595,8840086,"TERMINAL",0,0,"git add generate_dataset.py",,terminal_command +5596,8840120,"TERMINAL",0,0,"]633;E;2025-09-04 12:24:17 git add generate_dataset.py ;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +5597,8952955,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_type: str = ""maskgit"" # supported options: maskgit, causal\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(\n model: Genie, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n """"""Compute masked dynamics loss""""""\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = 
optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@nnx.jit\ndef train_step(\n model: Genie, optimizer: nnx.Optimizer, inputs: dict\n) -> tuple[jax.Array, jax.Array, dict]:\n """"""Update state and compute metrics""""""\n\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n return dynamics_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(model)\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n decode=False,\n rngs=rngs,\n )\n\n _, params, _ = 
nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n optimizer = nnx.Optimizer(genie, tx)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n 
abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n # NOTE: We have to remove the (unused) tokenizer vq dropout due flax.nnx lazily initializing modules.\n # Specifically, the first dynamics model checkpoint will contain the vq dropout module,\n # but the first full restore will fail due to nnx not initializing the module when\n # dropout is set to 0.0.\n del optimizer.model.tokenizer.vq.drop\n\n # --- TRAIN LOOP ---\n dataloader = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in grain_iterator\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +5598,8953754,"train_dynamics.py",753,0,"",python,selection_mouse +5599,8958415,"train_dynamics.py",753,0,"m",python,content +5600,8958417,"train_dynamics.py",754,0,"",python,selection_keyboard +5601,8958592,"train_dynamics.py",754,0,"e",python,content +5602,8958594,"train_dynamics.py",755,0,"",python,selection_keyboard +5603,8959019,"train_dynamics.py",754,1,"",python,content +5604,8959143,"train_dynamics.py",753,1,"",python,content +5605,8961699,"train_dynamics.py",762,1,"m",python,selection_command +5606,8961866,"train_dynamics.py",2126,2,"me",python,selection_command +5607,8962071,"train_dynamics.py",3870,3,"met",python,selection_command 
+5608,9079275,"TERMINAL",0,0,"bash",,terminal_focus +5609,9081088,"TERMINAL",0,0,"idling",,terminal_command +5610,9081175,"TERMINAL",0,0,"]633;E;2025-09-04 12:28:18 idling;98e58a9e-4278-4ee6-ae9b-7614f41efecb]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1990.localdomain: Thu Sep 4 12:28:18 2025Partition dev_cpuonly:\t 8 nodes idle\rPartition cpuonly: 22 nodes idle\rPartition dev_accelerated:\t 0 nodes idle\rPartition accelerated:\t 0 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 1 nodes idle\rPartition large:\t 6 nodes idle\rPartition accelerated-h200:\t 0 nodes idle",,terminal_output +5611,9082198,"TERMINAL",0,0,"9\t",,terminal_output +5612,9083235,"TERMINAL",0,0,"209",,terminal_output +5613,9084277,"TERMINAL",0,0,"1\t",,terminal_output +5614,9085308,"TERMINAL",0,0,"2\t",,terminal_output +5615,9086349,"TERMINAL",0,0,"3\t",,terminal_output +5616,9087393,"TERMINAL",0,0,"4\t",,terminal_output +5617,9088419,"TERMINAL",0,0,"5\t",,terminal_output +5618,9089462,"TERMINAL",0,0,"6\t",,terminal_output +5619,9090496,"TERMINAL",0,0,"7\t",,terminal_output +5620,9091534,"TERMINAL",0,0,"8\t",,terminal_output +5621,9092573,"TERMINAL",0,0,"9\t",,terminal_output +5622,9093637,"TERMINAL",0,0,"30\t",,terminal_output +5623,9094669,"TERMINAL",0,0,"1\t",,terminal_output +5624,9095740,"TERMINAL",0,0,"2\t",,terminal_output +5625,9096760,"TERMINAL",0,0,"3\t",,terminal_output +5626,9097830,"TERMINAL",0,0,"4\t",,terminal_output +5627,9098914,"TERMINAL",0,0,"5\t",,terminal_output +5628,9099938,"TERMINAL",0,0,"7\t",,terminal_output +5629,9100893,"TERMINAL",0,0,"8\t",,terminal_output +5630,9101932,"TERMINAL",0,0,"9\t",,terminal_output +5631,9103013,"TERMINAL",0,0,"40\t",,terminal_output +5632,9104006,"TERMINAL",0,0,"1\t",,terminal_output +5633,9105060,"TERMINAL",0,0,"2\t",,terminal_output +5634,9106186,"TERMINAL",0,0,"3\t",,terminal_output +5635,9107118,"TERMINAL",0,0,"4\t",,terminal_output +5636,9108234,"TERMINAL",0,0,"5\t",,terminal_output +5637,9109189,"TERMINAL",0,0,"6\t",,terminal_output +5638,9110281,"TERMINAL",0,0,"7\t",,terminal_output +5639,9111264,"TERMINAL",0,0,"8\t",,terminal_output +5640,9112855,"TERMINAL",0,0,"9\t",,terminal_output +5641,9113486,"TERMINAL",0,0,"50\t",,terminal_output +5642,9114524,"TERMINAL",0,0,"1\t",,terminal_output +5643,9118051,"TERMINAL",0,0,"2\t3\t",,terminal_output +5644,9118102,"TERMINAL",0,0,"4\t",,terminal_output +5645,9118579,"TERMINAL",0,0,"5\t",,terminal_output +5646,9119855,"TERMINAL",0,0,"6\t",,terminal_output +5647,9120716,"TERMINAL",0,0,"7\t",,terminal_output +5648,9121655,"TERMINAL",0,0,"8\t",,terminal_output +5649,9123092,"TERMINAL",0,0,"9\t",,terminal_output +5650,9123715,"TERMINAL",0,0,"9:00\t",,terminal_output +5651,9124755,"TERMINAL",0,0,"1\t",,terminal_output +5652,9125859,"TERMINAL",0,0,"2\t",,terminal_output +5653,9126836,"TERMINAL",0,0,"4\t",,terminal_output +5654,9127893,"TERMINAL",0,0,"5\t",,terminal_output +5655,9128922,"TERMINAL",0,0,"64",,terminal_output +5656,9129945,"TERMINAL",0,0,"7\t",,terminal_output +5657,9130992,"TERMINAL",0,0,"8\t",,terminal_output +5658,9132025,"TERMINAL",0,0,"9\t",,terminal_output +5659,9133079,"TERMINAL",0,0,"10\t",,terminal_output +5660,9134105,"TERMINAL",0,0,"12",,terminal_output +5661,9135150,"TERMINAL",0,0,"2\t",,terminal_output +5662,9136207,"TERMINAL",0,0,"3\t",,terminal_output +5663,9137224,"TERMINAL",0,0,"4\t",,terminal_output +5664,9138283,"TERMINAL",0,0,"5\t",,terminal_output 
+5665,9139185,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +5666,9148034,"TERMINAL",0,0,"bash",,terminal_focus +5667,9156553,"TERMINAL",0,0,"git commit -m ""add gt actions to dataset generation""",,terminal_command +5668,9156605,"TERMINAL",0,0,"]633;E;2025-09-04 12:29:33 git commit -m ""add gt actions to dataset generation"";86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +5669,9156733,"TERMINAL",0,0,"[coinrun-gt-actions 96abc4f] add gt actions to dataset generation\r\n 1 file changed, 8 insertions(+), 2 deletions(-)\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +5670,9159728,"TERMINAL",0,0,"git branch",,terminal_command +5671,9159821,"TERMINAL",0,0,"]633;E;2025-09-04 12:29:36 git branch;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C[?1h=\r add-wandb-name-and-tags\r\n before-nnx\r\n causal-mem-reduce\r\n causal-spatiotemporal-kv-cache\r\n causal-st-transformer\r\n causal-transformer-dynamics-model\r\n causal-transformer-nnx-no-kv-cache\r\n* coinrun-gt-actions\r\n convert-to-jax-array-in-iter\r\n correct-batched-sampling\r\n dev\r\n dont-let-tf-see-gpu\r\n feat/darkness-filter\r\n feat/explicit-image-dims\r\n fix-action-padding-lam-future-information-access\r\n fix-sampling\r\n fix-transformer-forwardpass\r\n fix/spatiotemporal-pe-once-in-STTransformer\r\n grad-norm-log-and-clip\r\n grain-dataloader\r\n input_pipeline/add-npy2array_record\r\n logging-variants\r\n lr-schedules\r\n main\r\n maskgit-different-maskprob-per-sample\r\n maskgit-sampling-iterative-unmasking-fix\r\n metrics-logging-for-dynamics-model\r\n monkey-patch\r\n new-arch-sampling\r\n preprocess_video\r\n refactor-tmp\r\n revised-dataloader\r\n runner\r\n runner-grain\r\n sample-ali-branch\r\n sample-from-different-topologies\r\n sampling-startframe-indexing-fix\r\n speedup-tfrecord-preprocessing\r\n tmp\r\n\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +5672,9174129,"TERMINAL",0,0,"git checkout input_pipeline/add-npy2array_record",,terminal_command +5673,9174166,"TERMINAL",0,0,"]633;E;2025-09-04 12:29:51 git checkout input_pipeline/add-npy2array_record;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;CSwitched to branch 'input_pipeline/add-npy2array_record'\r\nYour branch is up to date with 'origin/input_pipeline/add-npy2array_record'.\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +5674,9176220,"",0,0,"Switched from branch 'coinrun-gt-actions' to 'input_pipeline/add-npy2array_record'",,git_branch_checkout +5675,9180923,"generate_dataset.py",0,0,"""""""\nGenerates a dataset of random-action CoinRun episodes.\nEpisodes are saved individually as memory-mapped files for efficient loading.\n""""""\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nfrom gym3 import types_np\nimport numpy as np\nfrom procgen import ProcgenGym3Env\nimport tyro\nimport pickle\nimport json\nfrom array_record.python.array_record_module import ArrayRecordWriter \n\n\n\n@dataclass\nclass Args:\n num_episodes: int = 10000\n output_dir: str = ""data/coinrun_episodes""\n min_episode_length: int = 50\n\n\nargs = tyro.cli(Args)\noutput_dir = Path(args.output_dir)\noutput_dir.mkdir(parents=True, exist_ok=True)\n\n# --- Generate episodes ---\ni = 0\nepisode_metadata = []\nwhile i < args.num_episodes:\n seed = np.random.randint(0, 10000)\n env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed)\n observations_seq = []\n\n # --- Run episode ---\n for j in range(1000):\n 
env.act(types_np.sample(env.ac_space, bshape=(env.num,)))\n rew, obs, first = env.observe()\n observations_seq.append(obs[""rgb""])\n if first:\n break\n\n # --- Save episode ---\n if len(observations_seq) >= args.min_episode_length:\n observations_data = np.concatenate(observations_seq, axis=0)\n episode_path = output_dir / f""episode_{i}.array_record"" \n\n # --- Save as ArrayRecord ---\n writer = ArrayRecordWriter(str(episode_path), ""group_size:1"")\n record = {""raw_video"": observations_data.tobytes(), ""sequence_length"": len(observations_seq)}\n writer.write(pickle.dumps(record))\n writer.close()\n\n episode_metadata.append({""path"": str(episode_path), ""length"": len(observations_seq)})\n print(f""Episode {i} completed, length: {len(observations_seq)}"")\n i += 1\n else:\n print(f""Episode too short ({len(observations_seq)}), resampling..."")\n\n# --- Save metadata ---\nmetadata = {\n ""env"": ""coinrun"",\n ""num_episodes"": args.num_episodes,\n ""avg_episode_len"": np.mean([ep[""length""] for ep in episode_metadata]),\n ""episode_metadata"": episode_metadata,\n}\nwith open(output_dir / ""meta_data.json"", ""w"") as f:\n json.dump(metadata, f)\n\nprint(f""Dataset generated with {len(episode_metadata)} valid episodes"")\n",python,tab +5676,9182767,"generate_dataset.py",2059,0,"",python,selection_mouse +5677,9183125,"generate_dataset.py",2154,0,"",python,selection_mouse +5678,9183142,"generate_dataset.py",2153,0,"",python,selection_command +5679,9196037,"input_pipeline/preprocess/video_to_array_records.py",0,0,"import ffmpeg\nimport numpy as np\nimport os\nimport tyro\nimport multiprocessing as mp\nfrom dataclasses import dataclass\nimport json\nimport pickle\nfrom array_record.python.array_record_module import ArrayRecordWriter\n\n\n@dataclass\nclass Args:\n input_path: str\n output_path: str\n target_width: int = 160\n target_height: int = 90\n target_fps: int = 10\n\n\ndef preprocess_video(\n idx, in_filename, output_path, target_width, target_height, target_fps\n):\n print(f""Processing video {idx}, Filename: {in_filename}"")\n try:\n out, _ = (\n ffmpeg.input(in_filename)\n .filter(""fps"", fps=target_fps, round=""up"")\n .filter(""scale"", target_width, target_height)\n .output(""pipe:"", format=""rawvideo"", pix_fmt=""rgb24"")\n .run(capture_stdout=True, quiet=True)\n )\n\n output_path = os.path.join(\n output_path,\n os.path.splitext(os.path.basename(in_filename))[0] + "".array_record"",\n )\n\n writer = ArrayRecordWriter(str(output_path), ""group_size:1"")\n\n frame_size = target_height * target_width * 3\n n_frames = len(out) // frame_size\n frames = np.frombuffer(out, np.uint8).reshape(\n n_frames, target_height, target_width, 3\n )\n\n print(f""Saving video {idx} to {output_path}"")\n record = {""raw_video"": frames.tobytes(), ""sequence_length"": n_frames}\n writer.write(pickle.dumps(record))\n writer.close()\n\n return in_filename, n_frames\n except Exception as e:\n print(f""Error processing video {idx} ({in_filename}): {e}"")\n return in_filename, 0\n\n\ndef main():\n args = tyro.cli(Args)\n\n os.makedirs(args.output_path, exist_ok=True)\n print(f""Output path: {args.output_path}"")\n\n num_processes = mp.cpu_count()\n print(f""Number of processes: {num_processes}"")\n\n print(""Converting video to array_record files..."")\n pool_args = [\n (\n idx,\n os.path.join(args.input_path, in_filename),\n args.output_path,\n args.target_width,\n args.target_height,\n args.target_fps,\n )\n for idx, in_filename in enumerate(os.listdir(args.input_path))\n if 
in_filename.endswith("".mp4"") or in_filename.endswith("".webm"")\n ]\n\n results = []\n with mp.Pool(processes=num_processes) as pool:\n for result in pool.starmap(preprocess_video, pool_args):\n results.append(result)\n print(""Done converting video to array_record files"")\n\n # count the number of failed videos\n failed_videos = [result for result in results if result[1] == 0]\n short_episodes = [result for result in results if result[1] < 1600]\n print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short episodes: {len(short_episodes)}"")\n print(\n f""Number of successful videos: {len(results) - len(failed_videos) - len(short_episodes)}""\n )\n print(f""Number of total videos: {len(results)}"")\n\n with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.dump(results, f)\n\n\nif __name__ == ""__main__"":\n main()\n",python,tab +5680,9196868,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"import os\nimport numpy as np\nfrom PIL import Image\nimport tyro\nfrom dataclasses import dataclass\nimport pickle\nimport json\nimport multiprocessing as mp\nfrom array_record.python.array_record_module import ArrayRecordWriter\n\n\n@dataclass\nclass Args:\n input_path: str\n output_path: str\n original_fps: int = 60\n target_fps: int = 10\n target_width: int = 64 \n\ndef preprocess_pngs(input_dir, output_path, original_fps, target_fps, target_width=None):\n print(f""Processing PNGs in {input_dir}"")\n try:\n png_files = sorted([\n f for f in os.listdir(input_dir)\n if f.lower().endswith('.png')\n ], key=lambda x: int(os.path.splitext(x)[0]))\n\n if not png_files:\n print(f""No PNG files found in {input_dir}"")\n return input_dir, 0\n\n # Downsample indices\n n_total = len(png_files)\n if original_fps == target_fps:\n selected_indices = np.arange(n_total)\n else:\n n_target = int(np.floor(n_total * target_fps / original_fps))\n selected_indices = np.linspace(0, n_total-1, n_target, dtype=int)\n\n selected_files = [png_files[i] for i in selected_indices]\n\n # Load images\n frames = []\n for fname in selected_files:\n img = Image.open(os.path.join(input_dir, fname)).convert(""RGB"")\n if target_width is not None:\n w, h = img.size # PIL gives (width, height)\n if w != target_width:\n target_height = int(round(h * (target_width / float(w))))\n resample_filter = Image.LANCZOS\n img = img.resize((target_width, target_height), resample=resample_filter)\n frames.append(np.array(img))\n\n frames = np.stack(frames, axis=0) # (n_frames, H, W, 3)\n environment = os.path.basename(os.path.dirname(input_dir)) \n episode_id = os.path.basename(input_dir)\n # Write to array_record\n os.makedirs(output_path, exist_ok=True)\n out_file = os.path.join(\n output_path,\n f""{environment}_{episode_id}.array_record""\n )\n writer = ArrayRecordWriter(str(out_file), ""group_size:1"")\n record = {""raw_video"": frames.tobytes(), \n ""environment"": environment,\n ""sequence_length"": frames.shape[0]}\n writer.write(pickle.dumps(record))\n writer.close()\n print(f""Saved {frames.shape[0]} frames to {out_file}"")\n return input_dir, frames.shape[0]\n except Exception as e:\n print(f""Error processing {input_dir}: {e}"")\n return input_dir, 0\n\ndef main():\n args = tyro.cli(Args)\n os.makedirs(args.output_path, exist_ok=True)\n print(f""Output path: {args.output_path}"")\n\n games = [\n os.path.join(args.input_path, d)\n for d in os.listdir(args.input_path)\n if os.path.isdir(os.path.join(args.input_path, d))\n ]\n episodes = [\n os.path.join(game, d)\n for game in 
games\n for d in os.listdir(game)\n ]\n\n results = []\n num_processes = mp.cpu_count()\n print(f""Number of processes: {num_processes}"")\n pool_args = [\n (episode, args.output_path, args.original_fps, args.target_fps, args.target_width)\n for episode in episodes\n ]\n with mp.Pool(processes=num_processes) as pool:\n for result in pool.starmap(preprocess_pngs, pool_args):\n results.append(result)\n\n print(""Done converting png to array_record files"")\n\n # count the number of failed videos\n failed_videos = [result for result in results if result[1] == 0]\n short_episodes = [result for result in results if result[1] < 1600]\n print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short episodes: {len(short_episodes)}"")\n print(\n f""Number of successful videos: {len(results) - len(failed_videos) - len(short_episodes)}""\n )\n print(f""Number of total videos: {len(results)}"")\n\n with open(os.path.join(args.output_path, ""meta_data.json""), ""w"") as f:\n json.dump(results, f)\n print(""Done."")\n\nif __name__ == ""__main__"":\n main()",python,tab +5681,9201366,"generate_dataset.py",0,0,"",python,tab +5682,9203579,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +5683,9206021,"input_pipeline/preprocess/pngs_to_array_records.py",4018,0,"",python,selection_mouse +5684,9206187,"input_pipeline/preprocess/pngs_to_array_records.py",4013,9,"meta_data",python,selection_mouse +5685,9215339,"input_pipeline/preprocess/pngs_to_array_records.py",4002,0,"",python,selection_mouse +5686,9215481,"input_pipeline/preprocess/pngs_to_array_records.py",3999,11,"output_path",python,selection_mouse +5687,9216242,"input_pipeline/preprocess/pngs_to_array_records.py",4062,0,"",python,selection_mouse +5688,9216391,"input_pipeline/preprocess/pngs_to_array_records.py",4060,7,"results",python,selection_mouse +5689,9226486,"TERMINAL",0,0,"bash",,terminal_focus +5690,9228255,"TERMINAL",0,0,"bash",,terminal_focus +5691,9229747,"TERMINAL",0,0,"bash",,terminal_focus +5692,9231911,"TERMINAL",0,0,"bash",,terminal_focus +5693,9235333,"TERMINAL",0,0,"cd $ws_dir",,terminal_command +5694,9235361,"TERMINAL",0,0,"]633;E;2025-09-04 12:30:52 cd $ws_dir;e3f3d151-a063-4c85-891d-0bfb917c5617]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared]633;D;0",,terminal_output +5695,9238563,"TERMINAL",0,0,"cd data_atari/",,terminal_command +5696,9238962,"TERMINAL",0,0,"ls",,terminal_command +5697,9239004,"TERMINAL",0,0,"]633;E;2025-09-04 12:30:56 ls;e3f3d151-a063-4c85-891d-0bfb917c5617]633;C",,terminal_output +5698,9239121,"TERMINAL",0,0,"array_records atari_v1 full.tar.gz per-game slurm-3413833.out tmp\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari]633;D;0",,terminal_output +5699,9242468,"TERMINAL",0,0,"cd array_records/",,terminal_command +5700,9242504,"TERMINAL",0,0,"]633;E;2025-09-04 12:30:59 cd array_records/;e3f3d151-a063-4c85-891d-0bfb917c5617]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/array_records]633;D;0",,terminal_output +5701,9247085,"TERMINAL",0,0,"cat *.json",,terminal_command +5702,9247277,"TERMINAL",0,0,"]633;E;2025-09-04 12:31:04 cat *.json;e3f3d151-a063-4c85-891d-0bfb917c5617]633;C[[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/705"", 444], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/630"", 313], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/263"", 136], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/747"", 488], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/672"", 103], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/743"", 145], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/376"", 410], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/789"", 716], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/785"", 434], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/326"", 274], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/251"", 434], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/322"", 169], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/735"", 43], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/368"", 417], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/806"", 610], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/660"", 268], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/731"", 128], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/364"", 743], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/848"", 374], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/360"", 417], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/773"", 575], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/886"", 120], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/310"", 242], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/723"", 911], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/427"", 482], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/352"", 376], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/85"", 201], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/832"", 1327], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/465"", 177], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/878"", 627], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/949"", 838], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/80"", 477], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1034"", 767], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/119"", 361], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/411"", 417], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/824"", 174], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/457"", 184], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/528"", 307], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/382"", 68], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/820"", 720], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/157"", 80], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/866"", 601], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/937"", 852], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/88"", 214], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/862"", 553], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/199"", 309], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/403"", 850], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/107"", 376], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/103"", 341], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/516"", 347], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/149"", 495], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/929"", 85], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/854"", 454], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/925"", 261], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/850"", 333], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/483"", 199], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/187"", 171], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/892"", 486], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/963"", 379], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/596"", 258], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/800"", 97], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/208"", 184], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/500"", 
266], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/913"", 211], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/546"", 170], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/471"", 458], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/542"", 309], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/955"", 576], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/659"", 183], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/584"", 80], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/997"", 1064], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/51"", 287], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/580"", 255], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/993"", 350], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/121"", 416], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/534"", 827], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/605"", 109], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/530"", 307], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/163"", 97], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/647"", 77], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/718"", 889], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/59"", 638], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1017"", 1429], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/643"", 836], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/689"", 151], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/635"", 233], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/268"", 231], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/706"", 94], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/339"", 767], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/631"", 402], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/677"", 191], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/377"", 332], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/214"", 378], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/786"", 254], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/210"", 497], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/623"", 128], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/27"", 568], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/323"", 510], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/736"", 57], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/369"", 253], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/294"", 478], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/732"", 280], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/774"", 184], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/70"", 530], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/315"", 271], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/240"", 345], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/311"", 294], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/357"", 72], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/428"", 245], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/720"", 979], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/353"", 581], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/78"", 133], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/395"", 65], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/466"", 308], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/879"", 333], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/303"", 385], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/391"", 315], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/712"", 291], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/416"", 460], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/829"", 795], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/43"", 348], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/341"", 271], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/412"", 100], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/458"", 
336], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/529"", 589], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/750"", 334], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/383"", 264], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/821"", 645], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/867"", 293], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/938"", 855], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/792"", 290], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/863"", 1089], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/700"", 456], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/404"", 386], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/108"", 351], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1019"", 115], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/813"", 315], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/517"", 538], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/442"", 96], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/926"", 918], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/559"", 373], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1030"", 120], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/780"", 300], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/851"", 354], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/484"", 122], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/555"", 493], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/41"", 386], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/968"", 160], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/893"", 867], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/509"", 305], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/964"", 515], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/505"", 533], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/430"", 275], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/501"", 323], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/914"", 642], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/547"", 785], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1005"", 173], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/956"", 343], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/19"", 389], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/585"", 119], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/998"", 477], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/126"", 671], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/92"", 82], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/122"", 531], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/902"", 259], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/606"", 172], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/460"", 341], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/531"", 330], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/164"", 475], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/235"", 373], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/160"", 107], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/644"", 285], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/277"", 267], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/114"", 429], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/982"", 279], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/686"", 224], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/110"", 347], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/523"", 613], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/227"", 173], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/152"", 376], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/707"", 93], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/65"", 88], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/194"", 380], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/632"", 298], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/678"", 
269], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/674"", 370], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1006"", 184], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/378"", 274], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/215"", 447], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1013"", 310], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/787"", 785], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/211"", 95], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/624"", 376], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/253"", 272], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/737"", 149], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/68"", 536], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/662"", 123], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/203"", 284], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1028"", 504], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/63"", 259], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/612"", 618], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/725"", 143], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/358"", 136], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/429"", 386], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/650"", 327], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/721"", 1240], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/354"", 382], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/5"", 326], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/692"", 125], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/396"", 139], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/2"", 208], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/304"", 451], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/300"", 217], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/713"", 468], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/417"", 417], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1014"", 2240], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/342"", 304], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/755"", 126], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/826"", 727], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/31"", 320], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/380"", 96], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/409"", 344], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/864"", 96], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/701"", 50], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/405"", 312], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/109"", 408], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/330"", 540], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/401"", 162], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/814"", 273], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/447"", 330], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/39"", 340], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/372"", 336], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/810"", 256], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/443"", 97], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/856"", 529], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/489"", 361], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/927"", 1021], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/87"", 222], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/781"", 347], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/852"", 519], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/485"", 349], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1000"", 476], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/898"", 1034], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/969"", 315], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/481"", 288], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/894"", 526], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/802"", 203], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/435"", 140], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/890"", 99], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/506"", 351], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/139"", 191], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/431"", 283], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/844"", 138], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/915"", 435], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/548"", 282], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/840"", 556], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/473"", 1112], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1015"", 1859], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/882"", 349], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/953"", 317], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/586"", 471], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/999"", 376], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/127"", 388], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/995"", 736], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/607"", 420], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/532"", 671], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/165"", 204], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/945"", 109], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/578"", 369], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/649"", 120], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/161"", 73], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/870"", 642], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/941"", 474], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/278"", 1086], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/50"", 490], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/115"", 74], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/983"", 130], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1001"", 474], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/524"", 394], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/520"", 359], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/153"", 64], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/224"", 311], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/933"", 417], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/562"", 471], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/195"", 380], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/633"", 383], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/266"", 313], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/191"", 332], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1016"", 119], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/512"", 181], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/23"", 216], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/141"", 436], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/212"", 89], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/921"", 708], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/625"", 129], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/550"", 150], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/621"", 615], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/254"", 458], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/667"", 219], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/738"", 65], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/663"", 1364], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/296"", 280], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1"", 772], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/367"", 382], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/776"", 119], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/200"", 
359], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/317"", 320], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/242"", 514], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1002"", 146], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/655"", 986], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/359"", 249], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/74"", 366], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/651"", 400], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/284"", 227], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/839"", 611], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/693"", 156], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/397"", 100], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/301"", 148], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/714"", 474], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/347"", 645], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/418"", 622], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/29"", 361], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/710"", 325], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/343"", 215], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/756"", 82], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/827"", 456], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/77"", 350], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/752"", 417], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/385"", 194], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/381"", 76], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/794"", 307], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/498"", 325], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/702"", 530], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/790"", 456], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/406"", 319], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/331"", 275], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/815"", 231], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/448"", 448], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/519"", 811], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/740"", 693], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/444"", 596], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/928"", 1759], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/782"", 297], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/853"", 423], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1010"", 744], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/899"", 859], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/807"", 354], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/895"", 931], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/599"", 190], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/803"", 322], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/436"", 308], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/361"", 719], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/432"", 310], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/478"", 287], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/549"", 65], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/770"", 301], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/841"", 386], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/474"", 260], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1025"", 148], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/887"", 312], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/958"", 398], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/470"", 699], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/883"", 330], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/587"", 425], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/424"", 298], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/128"", 661], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/908"", 186], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/996"", 581], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/420"", 401], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/833"", 1394], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/904"", 338], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/537"", 304], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/48"", 354], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/533"", 192], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/946"", 474], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/579"", 259], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/871"", 469], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/942"", 77], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/575"", 535], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/988"", 459], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/116"", 303], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/13"", 338], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1011"", 350], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/112"", 423], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/158"", 62], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/450"", 83], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/521"", 465], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/154"", 86], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/567"", 327], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/150"", 457], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/930"", 96], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/196"", 245], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/976"", 182], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/99"", 240], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/192"", 440], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/972"", 481], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/676"", 
575], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1026"", 105], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/513"", 158], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/16"", 385], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/142"", 777], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/213"", 256], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/626"", 262], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/259"", 532], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/64"", 745], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/551"", 266], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/622"", 287], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/668"", 371], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/739"", 69], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/180"", 231], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/960"", 301], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/593"", 274], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/664"", 140], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/297"", 214], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/293"", 378], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/130"", 661], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/614"", 586], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/172"", 106], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/610"", 421], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/243"", 305], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/656"", 591], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/727"", 125], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/67"", 416], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/581"", 441], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/652"", 278], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/285"", 330], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/769"", 998], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/990"", 206], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/694"", 1496], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/398"", 166], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/690"", 1124], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/719"", 868], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1027"", 417], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/302"", 370], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/715"", 418], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/348"", 717], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/640"", 648], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/273"", 420], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/757"", 476], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/682"", 479], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/753"", 62], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/799"", 291], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/795"", 149], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/499"", 247], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/336"", 161], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/332"", 732], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/745"", 147], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/449"", 101], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/670"", 714], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/741"", 148], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/374"", 185], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/445"", 293], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/30"", 639], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/858"", 109], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/783"", 468], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/487"", 150], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/808"", 399], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/896"", 771], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/320"", 404], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/804"", 450], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/437"", 118], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/508"", 83], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/38"", 291], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/846"", 185], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/917"", 482], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/86"", 346], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/842"", 472], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/475"", 372], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/888"", 111], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/959"", 234], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/884"", 349], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/81"", 510], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/425"", 350], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/909"", 425], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/421"", 434], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/467"", 435], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/538"", 622], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/830"", 540], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/463"", 104], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/167"", 131], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/876"", 280], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/947"", 498], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/872"", 700], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/576"", 488], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/989"", 511], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/413"", 199], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/985"", 277], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1021"", 99], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/822"", 708], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/159"", 59], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/939"", 1055], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/451"", 245], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/522"", 201], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/155"", 263], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/935"", 389], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/639"", 419], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/860"", 451], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/564"", 330], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/193"", 416], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/514"", 437], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/510"", 314], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/143"", 916], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/556"", 242], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/627"", 1505], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/57"", 90], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/185"", 415], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/965"", 416], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/669"", 680], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/181"", 283], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/961"", 375], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/594"", 301], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/665"", 329], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/298"", 109], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/502"", 454], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/590"", 276], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/206"", 147], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/619"", 288], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/22"", 120], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/911"", 233], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/248"", 170], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/319"", 273], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/540"", 565], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/173"", 369], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/611"", 325], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/244"", 425], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1022"", 114], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/657"", 402], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/582"", 332], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/653"", 861], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/286"", 286], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/699"", 499], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/282"", 222], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/991"", 204], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/695"", 79], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/399"", 109], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/691"", 132], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/307"", 286], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/25"", 357], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/232"", 332], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/645"", 1263], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/641"", 442], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/274"", 257], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/345"", 440], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/20"", 343], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/758"", 856], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/270"", 118], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/683"", 427], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/754"", 101], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/9"", 79], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/796"", 146], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/704"", 536], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/408"", 266], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/28"", 296], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/262"", 254], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/333"", 527], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/746"", 97], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/379"", 120], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/817"", 443], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/671"", 104], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/375"", 504], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1023"", 400], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/788"", 554], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/784"", 273], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/488"", 460], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/71"", 193], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/325"", 308], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/897"", 758], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/250"", 383], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/734"", 75], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/805"", 322], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/438"", 119], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/730"", 109], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/363"", 398], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1038"", 1053], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/847"", 229], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/772"", 75], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/843"", 165], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/889"", 
318], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/885"", 111], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/44"", 49], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/468"", 118], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/906"", 113], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/539"", 403], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/393"", 223], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/831"", 522], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/948"", 790], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/873"", 252], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/414"", 334], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/986"", 72], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/410"", 409], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1031"", 514], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/823"", 1031], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/456"", 366], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/156"", 71], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/865"", 1052], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/936"", 454], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/95"", 373], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/861"", 115], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/565"", 133], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/198"", 410], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/978"", 84], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/402"", 423], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/974"", 425], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/12"", 379], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/148"", 386], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/440"", 468], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/511"", 331], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/924"", 123], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/628"", 156], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/482"", 90], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/920"", 399], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/553"", 333], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/257"", 402], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/182"", 314], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/891"", 481], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/962"", 137], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/595"", 420], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/136"", 111], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/591"", 354], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/207"", 183], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/15"", 355], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/93"", 394], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/545"", 257], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/616"", 964], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/541"", 276], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/174"", 508], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/954"", 433], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/10"", 195], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/658"", 1252], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/729"", 425], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/170"", 203], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/654"", 468], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/8"", 91], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/992"", 256], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/237"", 458], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/18"", 319], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/162"", 144], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/600"", 204], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/233"", 
332], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/646"", 667], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/66"", 384], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/571"", 242], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/642"", 125], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/275"", 328], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/688"", 117], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/684"", 666], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/388"", 416], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/61"", 316], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/797"", 118], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/338"", 670], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/263"", 853], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/334"", 409], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/69"", 633], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/376"", 505], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/34"", 541], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/251"", 695], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/322"", 738], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/368"", 591], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/439"", 564], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/364"", 610], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/360"", 808], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/477"", 1322], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/314"", 606], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/310"", 727], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/427"", 584], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/37"", 703], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/352"", 629], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/423"", 1066], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/469"", 755], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/85"", 737], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/465"", 455], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/390"", 724], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/80"", 1684], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/415"", 68], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/340"", 941], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/411"", 722], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/382"", 307], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/453"", 911], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/88"", 1231], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/495"", 107], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/199"", 729], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/403"", 906], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/491"", 625], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/107"", 1220], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/83"", 712], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/103"", 647], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/149"", 471], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/53"", 844], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/441"", 770], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/483"", 81], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/7"", 318], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/504"", 1147], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/137"", 439], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/500"", 1182], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/133"", 256], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/56"", 306], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/471"", 717], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/246"", 787], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/288"", 501], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/51"", 1437], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/125"", 705], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/21"", 568], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/121"", 272], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/238"", 1221], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/309"", 197], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/234"", 830], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/59"", 1147], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/276"", 789], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/389"", 707], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/226"", 111], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/268"", 725], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/339"", 775], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/264"", 337], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/260"", 614], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/377"", 316], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/210"", 1197], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/327"", 1364], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/27"", 76], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/252"", 639], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/323"", 733], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/369"", 510], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/75"", 393], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/294"", 733], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/365"", 565], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/290"", 712], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/70"", 660], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/315"", 711], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/311"", 777], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/357"", 951], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/428"", 1434], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/353"", 1137], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/78"", 459], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/395"", 1167], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/466"", 701], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/303"", 450], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/391"", 545], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/73"", 605], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/416"", 682], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/43"", 797], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/341"", 772], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/412"", 559], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/454"", 680], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/6"", 417], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/496"", 993], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/3"", 591], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/404"", 762], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/492"", 282], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/108"", 732], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/400"", 491], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/446"", 796], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/46"", 1264], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/371"", 319], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/442"", 863], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/146"", 886], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/484"", 128], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/41"", 920], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/509"", 639], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/505"", 1236], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/209"", 798], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/430"", 530], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/501"", 156], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/49"", 849], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/472"", 643], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/289"", 705], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/126"", 92], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/14"", 908], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/122"", 829], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/239"", 376], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/460"", 587], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/164"", 669], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/235"", 441], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/160"", 1200], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/277"", 423], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/114"", 552], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/110"", 946], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/227"", 543], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/17"", 666], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/152"", 503], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/223"", 761], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/269"", 760], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/65"", 263], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/265"", 109], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/190"", 688], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/378"", 758], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/60"", 685], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/211"", 481], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/328"", 442], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/253"", 1170], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/68"", 1088], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/295"", 1205], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/366"", 384], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/203"", 633], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/291"", 717], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/316"", 1120], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/33"", 726], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/312"", 778], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/358"", 72], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/429"", 77], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/283"", 976], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/5"", 769], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/396"", 270], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/2"", 444], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/304"", 552], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/392"", 1192], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/300"", 643], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/417"", 798], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/36"", 730], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/342"", 463], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/459"", 
763], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/384"", 673], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/455"", 680], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/380"", 792], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/409"", 768], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/405"", 512], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/109"", 1274], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/330"", 704], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/401"", 1413], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/447"", 1110], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/39"", 810], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/372"", 1127], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/443"", 1144], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/87"", 878], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/485"", 270], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/82"", 588], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/435"", 863], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/506"", 723], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/139"", 709], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/431"", 700], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/135"", 751], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/473"", 699], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/177"", 774], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/127"", 759], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/123"", 856], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/55"", 495], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/461"", 623], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/161"", 672], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/278"", 215], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/50"", 1787], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/111"", 672], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/228"", 212], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/153"", 826], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/224"", 571], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/58"", 425], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/195"", 1328], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/266"", 1270], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/191"", 1178], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/512"", 470], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/23"", 136], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/141"", 786], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/212"", 760], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/258"", 497], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/329"", 650], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/183"", 766], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/254"", 171], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/4"", 554], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/296"", 794], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/1"", 183], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/367"", 704], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/204"", 139], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/292"", 1405], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/200"", 1852], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/317"", 757], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/242"", 1091], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/359"", 388], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/74"", 214], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/284"", 395], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/397"", 249], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/305"", 1177], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/230"", 481], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/301"", 604], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/347"", 829], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/418"", 1020], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/272"", 829], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/343"", 768], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/77"", 750], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/385"", 874], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/381"", 438], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/498"", 556], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/72"", 1141], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/335"", 536], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/406"", 1270], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/331"", 1073], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/373"", 270], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/444"", 672], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/436"", 864], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/507"", 1326], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/45"", 141], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/361"", 929], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/432"", 1139], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/478"", 641], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/474"", 517], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/178"", 657], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/40"", 555], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/424"", 333], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/420"", 1317], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/124"", 859], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/48"", 200], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/462"", 608], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/166"", 641], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/96"", 810], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/116"", 1641], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/13"", 844], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/d",,terminal_output +5703,9247494,"TERMINAL",0,0,"ata_atari/per-game/atari_v1/screens/spaceinvaders/112"", 327], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/158"", 733], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/229"", 634], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/450"", 755], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/154"", 643], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/150"", 631], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/196"", 637], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/267"", 799], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/104"", 816], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/513"", 999], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/217"", 595], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/16"", 691], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/94"", 716], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/142"", 691], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/213"", 504], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/259"", 405], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/255"", 1012], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/180"", 609], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/297"", 664], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/205"", 731], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/293"", 561], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/130"", 444], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/318"", 806], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/243"", 560], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/67"", 462], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/285"", 748], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/356"", 694], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/281"", 336], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/398"", 730], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/62"", 91], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/306"", 949], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/32"", 578], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/231"", 698], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/302"", 103], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/419"", 699], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/273"", 584], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/344"", 1112], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/386"", 708], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/499"", 565], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/336"", 803], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/407"", 1171], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/35"", 897], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/332"", 690], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/449"", 758], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/374"", 682], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/445"", 709], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/370"", 558], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/324"", 503], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/437"", 1709], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/508"", 439], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/38"", 907], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/362"", 543], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/433"", 615], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/479"", 827], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/475"", 1544], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/179"", 704], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/81"", 743], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/425"", 1286], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/129"", 747], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/350"", 483], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/421"", 1236], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/467"", 171], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/463"", 706], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/413"", 602], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/117"", 587], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/84"", 761], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/113"", 1146], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/159"", 847], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/54"", 662], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/155"", 626], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/493"", 183], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/105"", 542], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/193"", 1168], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/101"", 694], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/147"", 564], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/218"", 756], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/510"", 416], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/143"", 610], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/57"", 689], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/181"", 808], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/298"", 672], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/52"", 819], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/502"", 488], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/206"", 722], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/22"", 851], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/131"", 661], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/202"", 579], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/248"", 718], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/319"", 145], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/244"", 733], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/286"", 622], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/282"", 705], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/399"", 1080], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/232"", 755], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/349"", 869], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/274"", 663], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/345"", 707], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/20"", 517], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/270"", 589], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/387"", 665], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/9"", 577], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/220"", 559], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/337"", 730], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/408"", 1124], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/333"", 409], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/379"", 519], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/76"", 339], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/71"", 1258], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/325"", 338], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/250"", 756], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/438"", 1017], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/363"", 642], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/434"", 572], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/79"", 420], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/476"", 1439], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/313"", 583], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/426"", 762], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/44"", 394], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/351"", 585], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/422"", 569], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/468"", 112], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/393"", 1253], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/464"", 628], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/168"", 905], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/414"", 622], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/118"", 730], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/410"", 389], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/456"", 870], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/452"", 1468], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/156"", 919], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/95"", 798], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/494"", 176], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/198"", 776], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/42"", 495], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/402"", 1230], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/490"", 798], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/106"", 842], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/12"", 862], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/90"", 455], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/102"", 791], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/148"", 789], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/219"", 1166], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/440"", 675], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/511"", 747], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/482"", 383], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/186"", 53], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/257"", 193], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/182"", 729], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/503"", 129], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/136"", 603], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/207"", 127], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/15"", 119], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/93"", 510], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/132"", 716], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/249"", 883], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/245"", 314], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/10"", 237], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/8"", 782], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/120"", 720], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/237"", 
716], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/308"", 126], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/162"", 751], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/233"", 308], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/66"", 554], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/275"", 876], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/271"", 668], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/388"", 616], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/61"", 682], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/225"", 452], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/spaceinvaders/221"", 482], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/263"", 1623], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/334"", 1001], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/376"", 1015], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/34"", 1011], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/322"", 1004], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/368"", 662], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/364"", 1185], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/314"", 896], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/310"", 1313], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/352"", 988], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/85"", 340], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/394"", 926], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/390"", 139], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/80"", 241], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/340"", 657], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/411"", 64], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/157"", 2029], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/88"", 2218], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/403"", 38], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/187"", 722], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/7"", 403], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/208"", 659], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/175"", 989], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/246"", 188], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/288"", 2268], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/51"", 1413], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/238"", 984], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/309"", 1950], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/163"", 1028], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/234"", 985], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/59"", 1095], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/276"", 762], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/389"", 192], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/222"", 1447], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/268"", 1037], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/264"", 1151], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/377"", 1229], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/214"", 1495], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/210"", 1502], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/369"", 399], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/75"", 1384], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/70"", 1117], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/315"", 936], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/311"", 283], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/78"", 484], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/395"", 682], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/73"", 584], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/43"", 654], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/341"", 500], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/412"", 152], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/6"", 172], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/404"", 370], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/108"", 1139], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/46"", 1898], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/11"", 663], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/209"", 892], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/49"", 1311], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/19"", 432], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/227"", 1114], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/17"", 147], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/223"", 1934], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/269"", 1258], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/65"", 1426], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/194"", 608], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/190"", 809], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/378"", 951], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/60"", 1310], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/215"", 1342], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/211"", 1252], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/68"", 939], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/295"", 790], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/366"", 802], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/203"", 1273], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/63"", 1430], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/316"", 1028], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/33"", 1078], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/241"", 275], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/283"", 945], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/396"", 715], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/2"", 890], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/300"", 977], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/346"", 1508], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/36"", 1318], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/342"", 931], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/380"", 280], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/409"", 59], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/405"", 2200], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/109"", 1484], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/401"", 53], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/87"", 1287], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/139"", 1047], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/127"", 1400], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/123"", 252], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/169"", 1982], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/165"", 1486], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/161"", 774], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/50"", 885], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/228"", 964], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/153"", 1060], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/224"", 440], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/58"", 1310], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/195"", 1235], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/216"", 783], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/212"", 1060], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/258"", 1245], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/296"", 1094], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/1"", 1065], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/367"", 1017], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/204"", 615], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/292"", 611], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/242"", 1717], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/284"", 1515], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/397"", 1049], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/305"", 1666], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/230"", 317], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/347"", 188], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/272"", 1643], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/343"", 120], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/381"", 1071], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/335"", 1555], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/406"", 42], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/45"", 193], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/178"", 1282], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/128"", 1092], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/166"", 0], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/229"", 997], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/196"", 424], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/267"", 1110], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/104"", 1403], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/192"", 736], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/100"", 1445], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/217"", 1039], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/94"", 1361], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/142"", 1105], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/213"", 1636], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/259"", 1637], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/180"", 860], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/293"", 961], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/247"", 581], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/172"", 1579], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/67"", 824], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/285"", 1925], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/356"", 586], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/32"", 1115], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/231"", 1259], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/302"", 1282], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/344"", 1336], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/336"", 486], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/407"", 92], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/35"", 185], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/261"", 1080], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/320"", 1034], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/179"", 855], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/81"", 257], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/350"", 1443], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/89"", 1306], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/54"", 1043], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/155"", 1225], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/105"", 343], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/101"", 753], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/218"", 1311], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/143"", 530], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/57"", 787], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/185"", 1283], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/256"", 764], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/181"", 1255], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/298"", 961], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/131"", 1109], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/248"", 484], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/286"", 533], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/282"", 802], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/232"", 1250], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/274"", 1448], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/345"", 198], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/270"", 93], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/337"", 927], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/408"", 56], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/76"", 1226], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/375"", 1769], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/321"", 1246], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/79"", 1483], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/44"", 1248], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/351"", 1633], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/410"", 106], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/47"", 1240], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/95"", 488], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/42"", 505], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/402"", 316], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/90"", 690], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/102"", 2008], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/219"", 1035], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/144"", 486], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/98"", 1101], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/182"", 1061], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/136"", 413], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/207"", 125], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/132"", 215], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/174"", 775], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/245"", 474], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/237"", 1062], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/308"", 1134], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/18"", 318], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/233"", 1939], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/66"", 1646], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/271"", 1618], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/61"", 2016], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/pinball/225"", 492], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/630"", 930], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/263"", 848], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/334"", 412], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/69"", 569], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/326"", 875], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/368"", 45], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/364"", 813], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/360"", 57], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/477"", 554], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/314"", 1869], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/427"", 899], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/352"", 1294], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/423"", 109], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/85"", 849], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/394"", 764], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/390"", 533], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/80"", 927], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/415"", 305], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/119"", 1390], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/411"", 851], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/457"", 1258], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/382"", 1918], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/453"", 664], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/157"", 48], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/88"", 210], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/403"", 973], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/107"", 1382], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/83"", 788], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/103"", 1162], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/516"", 851], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/149"", 940], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/53"", 46], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/558"", 930], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/483"", 1431], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/554"", 135], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/187"", 494], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/7"", 348], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/596"", 217], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/504"", 320], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/133"", 724], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/546"", 731], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/56"", 1366], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/471"", 1204], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/175"", 1101], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/171"", 759], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/584"", 1000], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/125"", 417], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/580"", 980], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/21"", 549], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/121"", 65], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/534"", 345], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/238"", 238], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/309"", 959], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/572"", 683], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/389"", 885], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/24"", 59], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/151"", 1622], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/635"", 780], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/339"", 46], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/560"", 954], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/631"", 793], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/260"", 616], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/210"", 281], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/623"", 577], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/323"", 161], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/369"", 1256], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/294"", 855], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/365"", 56], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/240"", 519], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/428"", 771], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/353"", 747], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/78"", 586], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/395"", 820], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/466"", 1507], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/303"", 996], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/412"", 474], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/458"", 655], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/496"", 49], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/3"", 54], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/404"", 48], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/400"", 379], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/446"", 451], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/517"", 817], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/559"", 746], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/484"", 579], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/188"", 1068], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/480"", 481], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/597"", 766], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/11"", 523], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/138"", 1000], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/209"", 49], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/430"", 404], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/134"", 494], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/547"", 416], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/176"", 1057], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/589"", 659], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/19"", 839], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/97"", 892], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/585"", 606], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/289"", 50], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/126"", 602], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/14"", 334], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/92"", 1362], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/535"", 367], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/239"", 397], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/531"", 1680], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/573"", 827], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/277"", 49], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/114"", 1124], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/110"", 1189], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/523"", 286], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/152"", 580], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/223"", 514], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/65"", 418], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/561"", 1074], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/190"", 641], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/60"", 992], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/215"", 54], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/140"", 470], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/624"", 48], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/328"", 531], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/620"", 254], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/68"", 789], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/203"", 320], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/291"", 868], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/63"", 1136], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/612"", 725], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/316"", 672], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/33"", 49], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/241"", 1298], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/312"", 77], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/358"", 588], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/429"", 658], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/5"", 53], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/396"", 1061], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/2"", 217], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/304"", 640], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/392"", 600], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/300"", 1187], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/346"", 473], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/36"", 862], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/31"", 745], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/405"", 185], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/330"", 888], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/401"", 1217], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/518"", 1004], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/39"", 705], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/443"", 909], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/87"", 759], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/189"", 90], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/598"", 352], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/82"", 834], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/435"", 621], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/506"", 543], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/139"", 942], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/135"", 105], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/548"", 104], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/544"", 391], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/177"", 352], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/586"", 1483], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/127"", 705], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/123"", 378], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/536"", 732], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/169"", 2427], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/607"", 50], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/461"", 719], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/532"", 1198], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/574"", 573], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/278"", 437], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/115"", 207], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/524"", 129], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/520"", 1562], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/153"", 1558], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/224"", 952], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/637"", 617], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/562"", 477], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/633"", 241], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/216"", 510], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/141"", 1078], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/212"", 1183], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/625"", 625], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/329"", 47], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/550"", 79], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/183"", 1189], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/621"", 703], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/592"", 56], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/1"", 1073], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/367"", 117], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/204"", 800], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/292"", 847], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/613"", 746], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/317"", 851], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/242"", 190], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/359"", 46], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/397"", 151], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/601"", 462], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/347"", 100], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/29"", 504], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/77"", 153], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/498"", 47], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/448"", 769], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/519"", 640], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/444"", 259], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/599"", 554], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/436"", 872], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/507"", 593], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/432"", 535], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/478"", 912], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/474"", 505], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/178"", 1971], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/40"", 1043], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/470"", 783], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/587"", 1110], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/424"", 393], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/420"", 57], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/48"", 449], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/462"", 840], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/533"", 268], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/579"", 1120], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/96"", 104], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/575"", 675], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/116"", 438], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/13"", 834], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/91"", 528], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/112"", 696], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/525"", 534], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/521"", 958], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/567"", 1424], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/638"", 1069], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/150"", 1753], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/563"", 749], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/267"", 1373], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/99"", 423], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/192"", 301], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/100"", 89], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/94"", 514], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/142"", 1527], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/259"", 359], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/64"", 1318], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/184"", 899], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/255"", 1320], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/180"", 1135], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/593"", 129], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/297"", 610], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/172"", 128], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/610"", 588], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/67"", 831], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/581"", 402], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/285"", 527], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/281"", 841], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/398"", 48], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/62"", 851], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/602"", 700], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/32"", 45], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/302"", 1019], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/348"", 254], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/419"", 743], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/640"", 400], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/273"", 776], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/499"", 1804], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/407"", 1366], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/35"", 836], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/261"", 675], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/30"", 402], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/370"", 606], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/324"", 822], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/433"", 232], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/86"", 574], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/179"", 1191], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/588"", 862], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/81"", 1685], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/425"", 1493], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/129"", 755], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/350"", 738], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/421"", 636], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/467"", 1036], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/538"", 500], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/463"", 717], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/167"", 46], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/413"", 540], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/117"", 1043], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/84"", 816], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/113"", 985], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/526"", 885], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/451"", 672], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/522"", 1060], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/568"", 738], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/639"", 1000], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/564"", 366], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/193"", 781], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/514"", 730], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/147"", 1155], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/218"", 470], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/510"", 776], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/552"", 939], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/256"", 897], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/594"", 699], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/52"", 700], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/206"", 1275], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/619"", 1290], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/131"", 94], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/202"", 929], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/615"", 741], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/248"", 872], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/540"", 440], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/173"", 257], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/611"", 46], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/582"", 182], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/282"", 315], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/399"", 739], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/603"", 909], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/236"", 479], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/25"", 988], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/570"", 760], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/641"", 1121], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/274"", 1366], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/270"", 68], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/9"", 326], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/408"", 1421], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/333"", 815], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/76"", 727], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/325"", 842], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/250"", 187], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/321"", 427], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/438"", 45], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/363"", 522], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/434"", 189], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/79"", 642], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/351"", 581], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/422"", 1259], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/393"", 853], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/168"", 955], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/577"", 1019], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/414"", 705], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/410"", 542], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/456"", 491], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/527"", 128], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/47"", 275], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/452"", 754], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/95"", 289], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/565"", 53], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/402"", 899], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/12"", 695], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/90"", 1186], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/102"", 962], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/515"", 836], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/148"", 353], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/219"", 450], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/511"", 533], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/144"", 526], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/557"", 690], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/628"", 731], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/482"", 1111], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/553"", 732], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/186"", 552], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/257"", 477], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/98"", 529], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/182"", 386], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/595"", 659], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/299"", 896], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/136"", 837], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/15"", 879], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/93"", 484], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/132"", 548], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/616"", 1672], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/249"", 55], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/174"", 149], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/245"", 55], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/10"", 254], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/583"", 748], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/287"", 1084], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/8"", 261], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/120"", 572], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/237"", 997], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/18"", 1137], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/162"", 694], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/600"", 566], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/66"", 549], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/271"", 510], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/388"", 575], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/61"", 995], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/mspacman/634"", 1059], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/630"", 365], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1347"", 475], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/263"", 208], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1212"", 959], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/69"", 630], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/672"", 378], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1354"", 489], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1033"", 918], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1080"", 423], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1128"", 896], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/785"", 471], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1453"", 544], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1413"", 239], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/251"", 931], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/322"", 334], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1274"", 143], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/735"", 344], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/368"", 454], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/660"", 371], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1234"", 125], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/731"", 502], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/848"", 508], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1428"", 904], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1475"", 366], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/310"", 373], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1340"", 1217], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1289"", 1298], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/723"", 551], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/427"", 571], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/352"", 506], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/836"", 529], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/469"", 560], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/85"", 328], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1395"", 401], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1260"", 320], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/390"", 787], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1308"", 732], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/874"", 471], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1355"", 540], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1220"", 589], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/80"", 1161], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/711"", 965], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1081"", 643], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1176"", 403], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1041"", 1034], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/824"", 511], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/528"", 346], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1414"", 227], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/820"", 626], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/157"", 801], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/866"", 2069], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1140"", 854], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/862"", 94], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/566"", 696], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1100"", 492], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/979"", 1061], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1282"", 158], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1056"", 848], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/103"", 306], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/812"", 684], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1381"", 1794], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1429"", 170], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/53"", 399], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1476"", 192], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1341"", 233], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/629"", 554], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1301"", 586], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1297"", 362], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/967"", 516], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1162"", 592], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/pe",,terminal_output +5704,9247570,"TERMINAL",0,0,"r-game/atari_v1/screens/revenge/963"", 719], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/596"", 615], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1400"", 177], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/137"", 737], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/208"", 637], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1356"", 1151], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/500"", 273], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1221"", 1165], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/913"", 1111], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/546"", 1606], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1316"", 454], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/542"", 448], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/659"", 328], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/880"", 520], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1415"", 96], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/951"", 369], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/584"", 1224], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/288"", 227], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/125"", 439], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/580"", 1077], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/609"", 787], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1236"", 113], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1283"", 1415], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/605"", 451], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/309"", 904], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/530"", 576], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1057"", 801], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/234"", 1052], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/59"", 800], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1017"", 406], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/572"", 432], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/643"", 538], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1342"", 506], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1437"", 935], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1302"", 1148], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/389"", 527], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1298"", 570], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1163"", 300], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/226"", 1067], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/24"", 315], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/222"", 505], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/635"", 785], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/268"", 531], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/706"", 717], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1317"", 1334], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/673"", 893], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1178"", 861], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1043"", 622], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1138"", 551], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1003"", 1083], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1423"", 95], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1237"", 101], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/323"", 882], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1284"", 1155], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/736"", 350], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/369"", 1153], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1379"", 142], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1244"", 1022], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/365"", 918], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1058"", 363], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1018"", 344], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1478"", 119], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1343"", 438], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/70"", 292], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/315"", 381], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1303"", 530], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1350"", 325], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1299"", 492], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1164"", 582], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1259"", 1007], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/720"", 407], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/353"", 493], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/766"", 193], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/837"", 449], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/303"", 89], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/391"", 407], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1230"", 100], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1179"", 489], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1044"", 360], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/712"", 637], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1091"", 493], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/829"", 891], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1139"", 392], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/825"", 789], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/821"", 711], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/867"", 824], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1285"", 638], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/792"", 419], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/3"", 499], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/404"", 497], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1019"", 606], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/371"", 398], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/442"", 388], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1351"", 432], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1165"", 667], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/926"", 350], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/559"", 1296], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1030"", 849], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/484"", 504], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/968"", 409], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1172"", 448], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/480"", 987], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/597"", 424], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/11"", 381], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1271"", 105], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/209"", 403], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1319"", 1054], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/134"", 610], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/547"", 619], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1092"", 746], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/49"", 547], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1005"", 705], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/543"", 507], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1187"", 687], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/956"", 1040], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/881"", 497], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1425"", 159], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/585"", 946], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/698"", 1024], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/122"", 571], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/902"", 406], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/535"", 1238], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/606"", 1027], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/531"", 631], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/235"", 628], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/944"", 440], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/940"", 1031], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/573"", 514], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1487"", 349], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/277"", 147], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1352"", 244], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1312"", 458], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/686"", 628], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/110"", 863], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1173"", 540], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1411"", 99], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/932"", 631], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/636"", 170], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/269"", 551], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/707"", 375], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/65"", 425], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/561"", 193], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/632"", 396], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1232"", 722], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1093"", 918], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1327"", 87], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1188"", 1088], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1053"", 789], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1013"", 757], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/787"", 633], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/211"", 633], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1473"", 1493], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/624"", 497], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/620"", 527], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/737"", 598], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/68"", 357], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/662"", 486], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1068"", 964], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/779"", 837], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/291"", 90], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1488"", 1010], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1353"", 791], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/612"", 660], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1313"", 1034], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1127"", 798], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/312"", 80], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/358"", 1013], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/429"", 276], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/283"", 2153], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1134"", 601], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/5"", 1043], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/767"", 411], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/838"", 445], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/396"", 345], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/392"", 334], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/300"", 1599], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1189"", 1047], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/713"", 389], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/346"", 619], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1149"", 910], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1474"", 956], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/755"", 464], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/826"", 415], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/384"", 946], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/868"", 96], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1160"", 410], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/793"", 494], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/409"", 1038], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1255"", 483], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1069"", 501], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/405"", 559], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/818"", 570], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1029"", 245], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/401"", 500], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/814"", 610], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/447"", 545], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/518"", 547], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1496"", 523], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1361"", 505], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/856"", 121], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/489"", 950], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/781"", 287], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1000"", 779], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/481"", 378], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/598"", 275], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1420"", 131], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1281"", 764], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/919"", 945], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1329"", 306], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1241"", 96], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/915"", 1135], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/548"", 459], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/544"", 919], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/957"", 868], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1435"", 98], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/953"", 952], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/999"", 395], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/127"", 1247], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/907"", 775], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1209"", 520], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/995"", 708], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1256"", 1623], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1121"", 460], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/607"", 706], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/165"", 391], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/945"", 1021], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/578"", 424], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/574"", 526], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1362"", 452], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/687"", 179], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1001"", 863], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1183"", 886], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/228"", 523], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1421"", 99], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/224"", 361], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/195"", 520], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/633"", 437], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1377"", 185], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/266"", 682], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/679"", 960], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1016"", 418], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1198"", 459], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/216"", 566], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1436"", 940], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/212"", 1060], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1443"", 556], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/183"", 868], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1122"", 621], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/667"", 596], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1217"", 864], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/663"", 496], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1"", 672], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/367"", 394], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/292"", 88], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/776"", 1329], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1498"", 571], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1363"", 338], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1458"", 1317], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1323"", 160], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1002"", 548], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/655"", 218], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/651"", 476], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/284"", 176], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/355"", 547], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/693"", 835], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/397"", 865], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1290"", 584], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1203"", 432], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/230"", 683], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1199"", 1282], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/29"", 409], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/710"", 416], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/343"", 463], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1484"", 518], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/756"", 312], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/827"", 371], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/681"", 393], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1444"", 957], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/798"", 595], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/869"", 92], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1218"", 335], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/498"", 1013], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1130"", 866], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/702"", 314], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/335"", 480], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/406"", 599], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1039"", 361], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/331"", 340], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1499"", 199], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1364"", 671], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/448"", 580], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/519"", 288], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1324"", 1159], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/444"", 1086], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1145"", 278], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1010"", 730], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/899"", 577], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1470"", 1015], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/807"", 802], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/599"", 636], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1430"", 976], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1291"", 680], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1339"", 604], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1251"", 247], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/845"", 406], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/916"", 389], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/549"", 1007], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/841"", 903], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/474"", 791], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/958"", 1478], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/470"", 697], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/883"", 638], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1445"", 584], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/587"", 226], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1492"", 779], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1171"", 586], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/996"", 954], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1266"", 915], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/48"", 576], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/533"", 802], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/166"", 866], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1087"", 1039], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/946"", 697], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/575"", 410], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/988"", 404], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/116"", 292], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1146"", 515], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/525"", 244], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/158"", 1281], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1431"", 2024], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/567"", 439], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/930"", 518], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/563"", 1549], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1026"", 1221], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1073"", 780], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/217"", 840], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/16"", 585], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1311"", 397], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/142"", 805], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/626"", 307], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/259"", 440], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/668"", 730], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/180"", 673], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1227"", 575], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/593"", 372], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/664"", 300], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1326"", 323], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/201"", 425], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/910"", 532], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/614"", 477], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/318"", 297], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1468"", 350], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1333"", 90], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/172"", 609], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/610"", 895], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1147"", 328], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1012"", 363], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1194"", 338], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/67"", 570], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/652"", 238], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/285"", 98], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1432"", 210], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/356"", 463], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/281"", 1792], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/990"", 482], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/694"", 985], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/765"", 697], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/398"", 534], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1388"", 210], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/690"", 857], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/306"", 719], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1348"", 808], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1074"", 539], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/348"", 147], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/640"", 567], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1454"", 607], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1268"", 99], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/799"", 170], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1228"", 633], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/795"", 612], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/499"", 404], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/336"", 783], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/407"", 414], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/261"", 288], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/332"", 768], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1469"", 582], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/670"", 916], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1334"", 735], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1148"", 231], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/445"", 514], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/370"", 327], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/324"", 1139], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1480"", 1148], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/320"", 418], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1440"", 97], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1254"", 817], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/437"", 481], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1214"", 336], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1261"", 724], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/846"", 496], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1075"", 732], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1309"", 545], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/86"", 325], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/475"", 381], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1035"", 1316], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/179"", 1406], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1495"", 245], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1320"", 106], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1181"", 311], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/909"", 303], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1229"", 178], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1141"", 599], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/834"", 801], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/467"", 873], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/830"", 413], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/463"", 983], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/876"", 639], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/576"", 626], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1200"", 796], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/989"", 265], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1382"", 152], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/413"", 766], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1156"", 710], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1021"", 519], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/939"", 511], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/451"", 252], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1441"", 1170], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/155"", 410], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/639"", 275], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1215"", 660], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/931"", 374], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/197"", 394], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1262"", 843], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1076"", 527], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/105"", 118], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1036"", 419], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/218"", 1172], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/510"", 430], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/923"", 461], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1416"", 153], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/552"", 532], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1277"", 848], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/965"", 760], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1142"", 760], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/669"", 847], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/298"", 567], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/502"", 1120], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/22"", 498], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1201"", 1009], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/202"", 386], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/911"", 412], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1383"", 1226], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/615"", 467], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/319"", 1088], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1157"", 894], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1022"", 239], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/657"", 405], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/582"", 962], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1442"", 357], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/699"", 530], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/282"", 169], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1402"", 391], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1216"", 854], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/399"", 780], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1398"", 234], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1263"", 115], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/603"", 426], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/236"", 388], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/691"", 258], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/307"", 746], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1037"", 952], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/232"", 608], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/349"", 616], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/570"", 323], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1457"", 846], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/345"", 1000], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1417"", 125], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/270"", 731], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/683"", 643], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1278"", 1363], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1190"", 756], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/708"", 794], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1238"", 95], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/796"", 962], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/704"", 840], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/408"", 515], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/28"", 459], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1384"", 438], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/746"", 476], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/379"", 624], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1479"", 1241], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/671"", 504], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1023"", 824], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1070"", 560], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/325"", 264], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1490"", 227], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/897"", 248], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/321"", 356], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1399"", 209], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1264"", 117], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/734"", 419], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1224"", 992], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/730"", 476], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1038"", 843], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/434"", 694], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/847"", 671], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1085"", 1149], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/843"", 463], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/476"", 973], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1045"", 644], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/313"", 738], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1279"", 1024], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1286"", 109], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1151"", 845], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/468"", 192], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/906"", 432], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/539"", 893], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/831"", 493], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/464"", 674], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/877"", 335], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1385"", 582], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/948"", 497], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1345"", 549], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/577"", 857], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1210"", 476], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/986"", 802], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1404"", 178], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/865"", 778], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/569"", 324], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/861"", 304], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/978"", 791], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1272"", 943], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1046"", 786], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/811"", 747], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/515"", 640], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/148"", 278], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/219"", 245], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1419"", 171], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/557"", 109], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/628"", 970], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1426"", 96], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/920"", 634], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/553"", 676], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1287"", 87], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/257"", 434], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/966"", 444], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/595"", 429], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1346"", 631], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/912"", 809], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1393"", 642], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/545"", 1251], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/616"", 272], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/249"", 1466], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1072"", 447], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1032"", 840], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/658"", 1284], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/170"", 421], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/583"", 657], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/654"", 876], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1452"", 540], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/8"", 196], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/992"", 909], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1412"", 674], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1226"", 861], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1273"", 171], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/604"", 495], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/237"", 690], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/308"", 318], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/162"", 910], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1047"", 238], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/233"", 712], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/66"", 422], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/571"", 553], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/275"", 1837], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1332"", 99], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1427"", 146], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/271"", 401], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/684"", 691], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/388"", 764], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1288"", 92], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/1153"", 606], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/revenge/634"", 703]]]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/array_records]633;D;0",,terminal_output +5705,9264296,"TERMINAL",0,0,"cat metadata.json | less",,terminal_command +5706,9264347,"TERMINAL",0,0,"]633;E;2025-09-04 12:31:21 cat metadata.json | less;e3f3d151-a063-4c85-891d-0bfb917c5617]633;Ccat: metadata.json[?1049h[?1h=\r: No such file or directory\r\n\r(END)",,terminal_output +5707,9265933,"TERMINAL",0,0,"\r[?1l>[?1049l]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/array_records]633;D;0",,terminal_output +5708,9269153,"TERMINAL",0,0,"cat meta_data.json | less",,terminal_command +5709,9269206,"TERMINAL",0,0,"]633;E;2025-09-04 12:31:26 cat meta_data.json | 
less;e3f3d151-a063-4c85-891d-0bfb917c5617]633;C[?1049h[?1h=\r[[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/705"", 444], [… ~100 further qbert [""<path>"", <length>] pairs elided; in the raw capture, less line-wrapping inserts spurious spaces inside paths and numbers …],
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/677"", 191], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-ga me/atari_v1/screens/qbert/377"", 332], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/214"", 378], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-g :",,terminal_output +5710,9270786,"TERMINAL",0,0,"\r\r:",,terminal_output +5711,9270978,"TERMINAL",0,0,"\r\r:",,terminal_output +5712,9280316,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"#!/usr/bin/env bash\n\npython generate_dataset.py \\n --num_episodes 10 \\n --output_dir /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev \\n --min_episode_length 1000",shellscript,tab +5713,9282404,"generate_dataset.py",0,0,"",python,tab +5714,9287664,"generate_dataset.py",1624,0,"",python,selection_mouse +5715,9287811,"generate_dataset.py",1623,4,"path",python,selection_mouse +5716,9290036,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +5717,9297341,"input_pipeline/preprocess/pngs_to_array_records.py",3063,0,"",python,selection_mouse +5718,9297462,"input_pipeline/preprocess/pngs_to_array_records.py",3059,13,"num_processes",python,selection_mouse +5719,9298887,"input_pipeline/preprocess/pngs_to_array_records.py",3064,0,"",python,selection_mouse +5720,9298887,"input_pipeline/preprocess/pngs_to_array_records.py",3059,13,"num_processes",python,selection_mouse +5721,9299470,"input_pipeline/preprocess/pngs_to_array_records.py",3045,0,"",python,selection_mouse +5722,9299605,"input_pipeline/preprocess/pngs_to_array_records.py",3042,7,"results",python,selection_mouse +5723,9300358,"input_pipeline/preprocess/pngs_to_array_records.py",3303,0,"",python,selection_mouse +5724,9300747,"input_pipeline/preprocess/pngs_to_array_records.py",3354,0,"",python,selection_mouse +5725,9300886,"input_pipeline/preprocess/pngs_to_array_records.py",3351,6,"result",python,selection_mouse +5726,9303408,"input_pipeline/preprocess/pngs_to_array_records.py",3046,0,"",python,selection_mouse +5727,9303512,"input_pipeline/preprocess/pngs_to_array_records.py",3042,7,"results",python,selection_mouse +5728,9304472,"input_pipeline/preprocess/pngs_to_array_records.py",3378,0,"",python,selection_mouse +5729,9304629,"input_pipeline/preprocess/pngs_to_array_records.py",3374,15,"preprocess_pngs",python,selection_mouse +5730,9310428,"input_pipeline/preprocess/pngs_to_array_records.py",2491,0,"",python,selection_mouse +5731,9311354,"input_pipeline/preprocess/pngs_to_array_records.py",2490,0,"",python,selection_command +5732,9312557,"input_pipeline/preprocess/pngs_to_array_records.py",2490,0,"[",python,content +5733,9312559,"input_pipeline/preprocess/pngs_to_array_records.py",2491,0,"",python,selection_keyboard +5734,9313142,"input_pipeline/preprocess/pngs_to_array_records.py",2490,1,"",python,content +5735,9313816,"input_pipeline/preprocess/pngs_to_array_records.py",2490,0,"{",python,content +5736,9313817,"input_pipeline/preprocess/pngs_to_array_records.py",2491,0,"",python,selection_keyboard +5737,9316139,"input_pipeline/preprocess/pngs_to_array_records.py",2517,0,"",python,selection_command +5738,9317033,"input_pipeline/preprocess/pngs_to_array_records.py",2517,0,"}",python,content +5739,9317034,"input_pipeline/preprocess/pngs_to_array_records.py",2518,0,"",python,selection_keyboard 
+5740,9318328,"input_pipeline/preprocess/pngs_to_array_records.py",2491,0,"",python,selection_mouse +5741,9318860,"input_pipeline/preprocess/pngs_to_array_records.py",2491,0,"""",python,content +5742,9318862,"input_pipeline/preprocess/pngs_to_array_records.py",2492,0,"",python,selection_keyboard +5743,9320037,"input_pipeline/preprocess/pngs_to_array_records.py",2492,0,"p",python,content +5744,9320038,"input_pipeline/preprocess/pngs_to_array_records.py",2493,0,"",python,selection_keyboard +5745,9320155,"input_pipeline/preprocess/pngs_to_array_records.py",2493,0,"a",python,content +5746,9320156,"input_pipeline/preprocess/pngs_to_array_records.py",2494,0,"",python,selection_keyboard +5747,9320320,"input_pipeline/preprocess/pngs_to_array_records.py",2494,0,"t",python,content +5748,9320321,"input_pipeline/preprocess/pngs_to_array_records.py",2495,0,"",python,selection_keyboard +5749,9320415,"input_pipeline/preprocess/pngs_to_array_records.py",2495,0,"h",python,content +5750,9320416,"input_pipeline/preprocess/pngs_to_array_records.py",2496,0,"",python,selection_keyboard +5751,9320871,"input_pipeline/preprocess/pngs_to_array_records.py",2496,0,"""",python,content +5752,9320872,"input_pipeline/preprocess/pngs_to_array_records.py",2497,0,"",python,selection_keyboard +5753,9321224,"input_pipeline/preprocess/pngs_to_array_records.py",2497,0,":",python,content +5754,9321225,"input_pipeline/preprocess/pngs_to_array_records.py",2498,0,"",python,selection_keyboard +5755,9321715,"input_pipeline/preprocess/pngs_to_array_records.py",2498,0," ",python,content +5756,9321716,"input_pipeline/preprocess/pngs_to_array_records.py",2499,0,"",python,selection_keyboard +5757,9323148,"input_pipeline/preprocess/pngs_to_array_records.py",2510,0,"",python,selection_mouse +5758,9326629,"generate_dataset.py",0,0,"",python,tab +5759,9328489,"generate_dataset.py",1649,0,"",python,selection_mouse +5760,9328670,"generate_dataset.py",1649,2,"""l",python,selection_mouse +5761,9328705,"generate_dataset.py",1649,4,"""len",python,selection_mouse +5762,9328767,"generate_dataset.py",1649,5,"""leng",python,selection_mouse +5763,9328768,"generate_dataset.py",1649,6,"""lengt",python,selection_mouse +5764,9328831,"generate_dataset.py",1649,7,"""length",python,selection_mouse +5765,9328890,"generate_dataset.py",1649,8,"""length""",python,selection_mouse +5766,9328992,"generate_dataset.py",1649,9,"""length"":",python,selection_mouse +5767,9332077,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +5768,9333466,"input_pipeline/preprocess/pngs_to_array_records.py",2510,0,"""length"":",python,content +5769,9334061,"input_pipeline/preprocess/pngs_to_array_records.py",2519,0," ",python,content +5770,9334062,"input_pipeline/preprocess/pngs_to_array_records.py",2520,0,"",python,selection_keyboard +5771,9336578,"input_pipeline/preprocess/pngs_to_array_records.py",2641,0,"",python,selection_mouse +5772,9337458,"input_pipeline/preprocess/pngs_to_array_records.py",2641,0,"""length"":",python,content +5773,9339225,"input_pipeline/preprocess/pngs_to_array_records.py",2641,0,"",python,selection_mouse +5774,9339372,"input_pipeline/preprocess/pngs_to_array_records.py",2641,0," ",python,content +5775,9339373,"input_pipeline/preprocess/pngs_to_array_records.py",2642,0,"",python,selection_keyboard +5776,9340685,"input_pipeline/preprocess/pngs_to_array_records.py",2490,0,"",python,selection_mouse +5777,9340863,"input_pipeline/preprocess/pngs_to_array_records.py",2490,1,"{",python,selection_mouse 
+5778,9340993,"input_pipeline/preprocess/pngs_to_array_records.py",2490,2,"{""",python,selection_mouse +5779,9340994,"input_pipeline/preprocess/pngs_to_array_records.py",2490,3,"{""p",python,selection_mouse +5780,9340994,"input_pipeline/preprocess/pngs_to_array_records.py",2490,5,"{""pat",python,selection_mouse +5781,9341052,"input_pipeline/preprocess/pngs_to_array_records.py",2490,6,"{""path",python,selection_mouse +5782,9341186,"input_pipeline/preprocess/pngs_to_array_records.py",2490,7,"{""path""",python,selection_mouse +5783,9341805,"input_pipeline/preprocess/pngs_to_array_records.py",2490,8,"{""path"":",python,selection_mouse +5784,9342071,"input_pipeline/preprocess/pngs_to_array_records.py",2490,7,"{""path""",python,selection_mouse +5785,9344705,"input_pipeline/preprocess/pngs_to_array_records.py",2631,0,"",python,selection_mouse +5786,9345143,"input_pipeline/preprocess/pngs_to_array_records.py",2631,0,"{""path""",python,content +5787,9345539,"input_pipeline/preprocess/pngs_to_array_records.py",2638,0," ",python,content +5788,9345540,"input_pipeline/preprocess/pngs_to_array_records.py",2639,0,"",python,selection_keyboard +5789,9346642,"input_pipeline/preprocess/pngs_to_array_records.py",2661,0,"",python,selection_mouse +5790,9347881,"input_pipeline/preprocess/pngs_to_array_records.py",2661,0,"}",python,content +5791,9347882,"input_pipeline/preprocess/pngs_to_array_records.py",2662,0,"",python,selection_keyboard +5792,9350787,"input_pipeline/preprocess/pngs_to_array_records.py",2643,0,"",python,selection_mouse +5793,9354774,"input_pipeline/preprocess/pngs_to_array_records.py",2640,0,"",python,selection_mouse +5794,9355999,"input_pipeline/preprocess/pngs_to_array_records.py",2638,0,"",python,selection_mouse +5795,9356646,"input_pipeline/preprocess/pngs_to_array_records.py",2638,0,":",python,content +5796,9356648,"input_pipeline/preprocess/pngs_to_array_records.py",2639,0,"",python,selection_keyboard +5797,9362151,"input_pipeline/preprocess/pngs_to_array_records.py",4058,0,"",python,selection_mouse +5798,9366885,"input_pipeline/preprocess/pngs_to_array_records.py",4057,1,"",python,content +5799,9368284,"generate_dataset.py",0,0,"",python,tab +5800,9371392,"generate_dataset.py",2105,0,"",python,selection_mouse +5801,9371695,"generate_dataset.py",2104,0,"",python,selection_command +5802,9372921,"generate_dataset.py",2104,1,"",python,content +5803,9376920,"input_pipeline/download/openai/download_actions_files.py",0,0,"import subprocess\nimport json\nimport tyro\nfrom dataclasses import dataclass\nimport os\nfrom multiprocessing import Pool, cpu_count\nfrom tqdm import tqdm\n\n\n@dataclass\nclass Args:\n index_file: str = ""data/open_ai_index_files/all_6xx_Jun_29.json""\n output_dir: str = ""data/open_ai_minecraft_actions_files""\n num_workers: int = -1 # -1 means use all available cores\n\n\ndef flatten_path(relpath):\n """"""Convert nested path to flattened filename with subdirectory as prefix\n e.g. 
data/6.10/filename.mp4 -> 6.10_filename.mp4\n """"""\n\n parts = relpath.split(""/"")\n\n if len(parts) >= 3:\n subdir = parts[1]\n filename = parts[2]\n return f""{subdir}_{filename}""\n else:\n return relpath.replace(""/"", ""_"")\n\n\ndef download_file(args):\n try:\n url, base_dir, output_dir = args\n jsonl_url = url.rsplit(""."", 1)[0] + "".jsonl""\n filename = flatten_path(jsonl_url)\n output_file = os.path.join(output_dir, filename)\n subprocess.run(\n [""wget"", ""-q"", base_dir + jsonl_url, ""-O"", output_file], check=True\n )\n return {""file"": jsonl_url, ""success"": True}\n except subprocess.CalledProcessError as e:\n # delete file if it exists\n if os.path.exists(output_file):\n os.remove(output_file)\n return {""file"": jsonl_url, ""success"": False, ""error"": str(e)}\n\n\ndef download_actions_files(index_file: str, output_dir: str, num_workers: int):\n # load json file\n with open(index_file, ""r"") as f:\n data = json.load(f)\n\n base_dir = data[""basedir""]\n urls = data[""relpaths""]\n\n # Prepare arguments for each process\n args_list = [(url, base_dir, output_dir) for url in urls]\n\n results = []\n with tqdm(total=len(args_list), desc=""Downloading actions files"") as pbar:\n with Pool(processes=num_workers) as pool:\n for result in pool.imap_unordered(download_file, args_list):\n results.append(result)\n pbar.update(1)\n\n # save results to json\n meta_data_file_name = index_file.split(""/"")[-1].split(""."")[0] + ""_metadata.json""\n with open(os.path.join(output_dir, meta_data_file_name), ""w"") as f:\n json.dump(results, f)\n\n # print number of failed downloads\n failed_downloads = [result for result in results if not result[""success""]]\n print(f""Number of failed downloads: {len(failed_downloads)}"")\n\n # print number of successful downloads\n successful_downloads = [result for result in results if result[""success""]]\n print(f""Number of successful downloads: {len(successful_downloads)}"")\n\n\nif __name__ == ""__main__"":\n args = tyro.cli(Args)\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n if args.num_workers == -1:\n args.num_workers = cpu_count()\n\n print(f""Index file: {args.index_file}"")\n print(f""Output directory: {args.output_dir}"")\n print(f""Number of workers: {args.num_workers}"")\n\n download_actions_files(args.index_file, args.output_dir, args.num_workers)\n",python,tab +5804,9387599,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +5805,9388965,"input_pipeline/preprocess/video_to_array_records.py",3041,0,"",python,selection_mouse +5806,9389831,"input_pipeline/preprocess/video_to_array_records.py",3041,1,"",python,content +5807,9391959,"input_pipeline/preprocess/video_to_array_records.py",3134,0,"",python,selection_mouse +5808,9391970,"input_pipeline/preprocess/video_to_array_records.py",3133,0,"",python,selection_command +5809,9392627,"input_pipeline/preprocess/video_to_array_records.py",2990,0,"",python,selection_mouse +5810,9402620,"input_pipeline/preprocess/video_to_array_records.py",1483,0,"",python,selection_mouse +5811,9407668,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +5812,9412224,"input_pipeline/preprocess/pngs_to_array_records.py",2631,0,"",python,selection_mouse +5813,9412425,"input_pipeline/preprocess/pngs_to_array_records.py",2631,1,"{",python,selection_mouse +5814,9412426,"input_pipeline/preprocess/pngs_to_array_records.py",2631,3,"{""p",python,selection_mouse 
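Rows 5732-5798 record a by-hand schema change in input_pipeline/preprocess/pngs_to_array_records.py: the per-episode entries that end up in meta_data.json are wrapped in braces and given explicit keys, turning the bare [path, length] pairs visible in the less dump above into self-describing objects. A minimal before/after sketch (variable names are hypothetical; only the brace wrapping and the "path"/"length" keys are taken from the keystrokes):

    # before: positional pair, as seen in the meta_data.json dump
    episode_entry = [path, length]
    # after: keyed record, as typed in rows 5732-5796
    episode_entry = {"path": path, "length": length}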
+5815,9412426,"input_pipeline/preprocess/pngs_to_array_records.py",2631,4,"{""pa",python,selection_mouse +5816,9412427,"input_pipeline/preprocess/pngs_to_array_records.py",2631,5,"{""pat",python,selection_mouse +5817,9412454,"input_pipeline/preprocess/pngs_to_array_records.py",2631,6,"{""path",python,selection_mouse +5818,9412455,"input_pipeline/preprocess/pngs_to_array_records.py",2631,7,"{""path""",python,selection_mouse +5819,9412509,"input_pipeline/preprocess/pngs_to_array_records.py",2631,8,"{""path"":",python,selection_mouse +5820,9412731,"input_pipeline/preprocess/pngs_to_array_records.py",2631,9,"{""path"": ",python,selection_mouse +5821,9415343,"input_pipeline/download/openai/download_actions_files.py",0,0,"",python,tab +5822,9416968,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +5823,9417583,"generate_dataset.py",0,0,"",python,tab +5824,9418216,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +5825,9420911,"input_pipeline/preprocess/video_to_array_records.py",1483,0,"{""path"": ",python,content +5826,9421611,"input_pipeline/preprocess/video_to_array_records.py",1492,0," ",python,content +5827,9421612,"input_pipeline/preprocess/video_to_array_records.py",1493,0,"",python,selection_keyboard +5828,9422134,"input_pipeline/preprocess/video_to_array_records.py",1492,1,"",python,content +5829,9423070,"input_pipeline/preprocess/video_to_array_records.py",1624,0,"",python,selection_mouse +5830,9423417,"input_pipeline/preprocess/video_to_array_records.py",1624,0,"{""path"": ",python,content +5831,9424785,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +5832,9426526,"input_pipeline/preprocess/pngs_to_array_records.py",2663,0,"",python,selection_mouse +5833,9427742,"input_pipeline/preprocess/pngs_to_array_records.py",2509,0,"",python,selection_mouse +5834,9427940,"input_pipeline/preprocess/pngs_to_array_records.py",2509,1," ",python,selection_mouse +5835,9427941,"input_pipeline/preprocess/pngs_to_array_records.py",2509,2," """,python,selection_mouse +5836,9427941,"input_pipeline/preprocess/pngs_to_array_records.py",2509,4," ""le",python,selection_mouse +5837,9427942,"input_pipeline/preprocess/pngs_to_array_records.py",2509,5," ""len",python,selection_mouse +5838,9427942,"input_pipeline/preprocess/pngs_to_array_records.py",2509,6," ""leng",python,selection_mouse +5839,9427978,"input_pipeline/preprocess/pngs_to_array_records.py",2509,7," ""lengt",python,selection_mouse +5840,9427981,"input_pipeline/preprocess/pngs_to_array_records.py",2509,8," ""length",python,selection_mouse +5841,9428034,"input_pipeline/preprocess/pngs_to_array_records.py",2509,9," ""length""",python,selection_mouse +5842,9428101,"input_pipeline/preprocess/pngs_to_array_records.py",2509,10," ""length"":",python,selection_mouse +5843,9431356,"generate_dataset.py",0,0,"",python,tab +5844,9432145,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +5845,9435379,"input_pipeline/preprocess/video_to_array_records.py",1504,0,"",python,selection_mouse +5846,9436675,"input_pipeline/preprocess/video_to_array_records.py",1505,0,"",python,selection_command +5847,9437256,"input_pipeline/preprocess/video_to_array_records.py",1505,0," ""length"":",python,content +5848,9437878,"input_pipeline/preprocess/video_to_array_records.py",1515,0," ",python,content +5849,9437879,"input_pipeline/preprocess/video_to_array_records.py",1516,0,"",python,selection_keyboard 
+5850,9438734,"input_pipeline/preprocess/video_to_array_records.py",1506,0,"",python,selection_mouse +5851,9439051,"input_pipeline/preprocess/video_to_array_records.py",1505,1,"",python,content +5852,9440119,"input_pipeline/preprocess/video_to_array_records.py",1656,0,"",python,selection_mouse +5853,9440986,"input_pipeline/preprocess/video_to_array_records.py",1655,0,"",python,selection_mouse +5854,9441308,"input_pipeline/preprocess/video_to_array_records.py",1655,0," ""length"":",python,content +5855,9442238,"input_pipeline/preprocess/video_to_array_records.py",1667,0,"",python,selection_mouse +5856,9443257,"input_pipeline/preprocess/video_to_array_records.py",1667,0,"}",python,content +5857,9443258,"input_pipeline/preprocess/video_to_array_records.py",1668,0,"",python,selection_keyboard +5858,9444055,"input_pipeline/preprocess/video_to_array_records.py",1523,0,"",python,selection_mouse +5859,9444767,"input_pipeline/preprocess/video_to_array_records.py",1523,0,"}",python,content +5860,9444768,"input_pipeline/preprocess/video_to_array_records.py",1524,0,"",python,selection_keyboard +5861,9454684,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +5862,9459985,"generate_dataset.py",0,0,"",python,tab +5863,9461042,"generate_dataset.py",0,0,"",python,tab +5864,9485715,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +5865,9499991,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +5866,9513026,"generate_dataset.py",0,0,"",python,tab +5867,9513027,"generate_dataset.py",2075,0,"",python,selection_mouse +5868,9513080,"generate_dataset.py",2074,0,"",python,selection_command +5869,9513256,"generate_dataset.py",2074,1,"}",python,selection_mouse +5870,9513256,"generate_dataset.py",1958,116," ""avg_episode_len"": np.mean([ep[""length""] for ep in episode_metadata]),\n ""episode_metadata"": episode_metadata,\n",python,selection_mouse +5871,9513257,"generate_dataset.py",1918,156," ""num_episodes"": args.num_episodes,\n ""avg_episode_len"": np.mean([ep[""length""] for ep in episode_metadata]),\n ""episode_metadata"": episode_metadata,\n",python,selection_mouse +5872,9513257,"generate_dataset.py",1896,178," ""env"": ""coinrun"",\n ""num_episodes"": args.num_episodes,\n ""avg_episode_len"": np.mean([ep[""length""] for ep in episode_metadata]),\n ""episode_metadata"": episode_metadata,\n",python,selection_mouse +5873,9513278,"generate_dataset.py",2075,0,"",python,selection_command +5874,9513279,"generate_dataset.py",1883,192,"metadata = {\n ""env"": ""coinrun"",\n ""num_episodes"": args.num_episodes,\n ""avg_episode_len"": np.mean([ep[""length""] for ep in episode_metadata]),\n ""episode_metadata"": episode_metadata,\n}",python,selection_mouse +5875,9517671,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +5876,9518671,"input_pipeline/preprocess/video_to_array_records.py",3030,0,"",python,selection_mouse +5877,9520596,"input_pipeline/preprocess/video_to_array_records.py",3030,0,"metadata = {\n ""env"": ""coinrun"",\n ""num_episodes"": args.num_episodes,\n ""avg_episode_len"": np.mean([ep[""length""] for ep in episode_metadata]),\n ""episode_metadata"": episode_metadata,\n}",python,content +5878,9521188,"input_pipeline/preprocess/video_to_array_records.py",3221,0,"",python,selection_command +5879,9521900,"input_pipeline/preprocess/video_to_array_records.py",3221,1,"}",python,selection_command +5880,9522076,"input_pipeline/preprocess/video_to_array_records.py",3179,43," ""episode_metadata"": 
episode_metadata,\n}",python,selection_command +5881,9522258,"input_pipeline/preprocess/video_to_array_records.py",3104,118," ""avg_episode_len"": np.mean([ep[""length""] for ep in episode_metadata]),\n ""episode_metadata"": episode_metadata,\n}",python,selection_command +5882,9522344,"input_pipeline/preprocess/video_to_array_records.py",3065,157," ""num_episodes"": args.num_episodes,\n ""avg_episode_len"": np.mean([ep[""length""] for ep in episode_metadata]),\n ""episode_metadata"": episode_metadata,\n}",python,selection_command +5883,9522458,"input_pipeline/preprocess/video_to_array_records.py",3043,179," ""env"": ""coinrun"",\n ""num_episodes"": args.num_episodes,\n ""avg_episode_len"": np.mean([ep[""length""] for ep in episode_metadata]),\n ""episode_metadata"": episode_metadata,\n}",python,selection_command +5884,9522609,"input_pipeline/preprocess/video_to_array_records.py",3030,192,"metadata = {\n ""env"": ""coinrun"",\n ""num_episodes"": args.num_episodes,\n ""avg_episode_len"": np.mean([ep[""length""] for ep in episode_metadata]),\n ""episode_metadata"": episode_metadata,\n}",python,selection_command +5885,9522963,"input_pipeline/preprocess/video_to_array_records.py",3030,0,"",python,selection_command +5886,9524041,"input_pipeline/preprocess/video_to_array_records.py",3221,0," ",python,content +5887,9524041,"input_pipeline/preprocess/video_to_array_records.py",3183,0," ",python,content +5888,9524041,"input_pipeline/preprocess/video_to_array_records.py",3108,0," ",python,content +5889,9524041,"input_pipeline/preprocess/video_to_array_records.py",3069,0," ",python,content +5890,9524041,"input_pipeline/preprocess/video_to_array_records.py",3047,0," ",python,content +5891,9524041,"input_pipeline/preprocess/video_to_array_records.py",3030,0," ",python,content +5892,9524346,"input_pipeline/preprocess/video_to_array_records.py",3033,0,"",python,selection_command +5893,9525286,"input_pipeline/preprocess/video_to_array_records.py",3029,0,"\n ",python,content +5894,9525782,"input_pipeline/preprocess/video_to_array_records.py",3030,4,"",python,content +5895,9525932,"input_pipeline/preprocess/video_to_array_records.py",3031,0,"",python,selection_command +5896,9526053,"input_pipeline/preprocess/video_to_array_records.py",3048,0,"",python,selection_command +5897,9526198,"input_pipeline/preprocess/video_to_array_records.py",3074,0,"",python,selection_command +5898,9526337,"input_pipeline/preprocess/video_to_array_records.py",3117,0,"",python,selection_command +5899,9526496,"input_pipeline/preprocess/video_to_array_records.py",3196,0,"",python,selection_command +5900,9526656,"input_pipeline/preprocess/video_to_array_records.py",3242,0,"",python,selection_command +5901,9526992,"input_pipeline/preprocess/video_to_array_records.py",3247,0,"\n ",python,content +5902,9527319,"input_pipeline/preprocess/video_to_array_records.py",3248,4,"",python,content +5903,9527524,"input_pipeline/preprocess/video_to_array_records.py",3242,0,"",python,selection_command +5904,9527730,"input_pipeline/preprocess/video_to_array_records.py",3196,0,"",python,selection_command +5905,9527784,"input_pipeline/preprocess/video_to_array_records.py",3117,0,"",python,selection_command +5906,9527940,"input_pipeline/preprocess/video_to_array_records.py",3074,0,"",python,selection_command +5907,9528198,"input_pipeline/preprocess/video_to_array_records.py",3048,0,"",python,selection_command +5908,9529191,"input_pipeline/preprocess/video_to_array_records.py",3066,0,"",python,selection_mouse 
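Rows 5874-5891 copy a metadata block from generate_dataset.py into video_to_array_records.py and re-indent it one level. The selection captured in row 5874 gives the block verbatim; with the \n escapes expanded it reads:

    metadata = {
        "env": "coinrun",
        "num_episodes": args.num_episodes,
        "avg_episode_len": np.mean([ep["length"] for ep in episode_metadata]),
        "episode_metadata": episode_metadata,
    }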
+5909,9529682,"input_pipeline/preprocess/video_to_array_records.py",3064,0,"",python,selection_mouse +5910,9534936,"input_pipeline/preprocess/video_to_array_records.py",360,0,"",python,selection_mouse +5911,9534948,"input_pipeline/preprocess/video_to_array_records.py",359,0,"",python,selection_command +5912,9537155,"input_pipeline/preprocess/video_to_array_records.py",360,0,"\n ",python,content +5913,9539464,"input_pipeline/preprocess/video_to_array_records.py",365,0,"e",python,content +5914,9539465,"input_pipeline/preprocess/video_to_array_records.py",366,0,"",python,selection_keyboard +5915,9539592,"input_pipeline/preprocess/video_to_array_records.py",366,0,"n",python,content +5916,9539593,"input_pipeline/preprocess/video_to_array_records.py",367,0,"",python,selection_keyboard +5917,9540577,"input_pipeline/preprocess/video_to_array_records.py",367,0,"v",python,content +5918,9540578,"input_pipeline/preprocess/video_to_array_records.py",368,0,"",python,selection_keyboard +5919,9540695,"input_pipeline/preprocess/video_to_array_records.py",368,0,"i",python,content +5920,9540696,"input_pipeline/preprocess/video_to_array_records.py",369,0,"",python,selection_keyboard +5921,9540926,"input_pipeline/preprocess/video_to_array_records.py",369,0,"r",python,content +5922,9540927,"input_pipeline/preprocess/video_to_array_records.py",370,0,"",python,selection_keyboard +5923,9542423,"input_pipeline/preprocess/video_to_array_records.py",370,0,"´",python,content +5924,9542424,"input_pipeline/preprocess/video_to_array_records.py",371,0,"",python,selection_keyboard +5925,9542753,"input_pipeline/preprocess/video_to_array_records.py",370,1,"",python,content +5926,9542754,"input_pipeline/preprocess/video_to_array_records.py",370,0,"",python,selection_keyboard +5927,9542895,"input_pipeline/preprocess/video_to_array_records.py",369,1,"",python,content +5928,9543011,"input_pipeline/preprocess/video_to_array_records.py",368,1,"",python,content +5929,9543741,"input_pipeline/preprocess/video_to_array_records.py",368,0,"_",python,content +5930,9543742,"input_pipeline/preprocess/video_to_array_records.py",369,0,"",python,selection_keyboard +5931,9544040,"input_pipeline/preprocess/video_to_array_records.py",369,0,"n",python,content +5932,9544040,"input_pipeline/preprocess/video_to_array_records.py",370,0,"",python,selection_keyboard +5933,9544422,"input_pipeline/preprocess/video_to_array_records.py",370,0,"a",python,content +5934,9544422,"input_pipeline/preprocess/video_to_array_records.py",371,0,"",python,selection_keyboard +5935,9544501,"input_pipeline/preprocess/video_to_array_records.py",371,0,"m",python,content +5936,9544502,"input_pipeline/preprocess/video_to_array_records.py",372,0,"",python,selection_keyboard +5937,9544656,"input_pipeline/preprocess/video_to_array_records.py",372,0,"e",python,content +5938,9544657,"input_pipeline/preprocess/video_to_array_records.py",373,0,"",python,selection_keyboard +5939,9545193,"input_pipeline/preprocess/video_to_array_records.py",373,0,":",python,content +5940,9545194,"input_pipeline/preprocess/video_to_array_records.py",374,0,"",python,selection_keyboard +5941,9546824,"input_pipeline/preprocess/video_to_array_records.py",374,0," ",python,content +5942,9546825,"input_pipeline/preprocess/video_to_array_records.py",375,0,"",python,selection_keyboard +5943,9547180,"input_pipeline/preprocess/video_to_array_records.py",375,0,"""""",python,content +5944,9547181,"input_pipeline/preprocess/video_to_array_records.py",376,0,"",python,selection_keyboard 
+5945,9547338,"input_pipeline/preprocess/video_to_array_records.py",376,0,"m",python,content +5946,9547339,"input_pipeline/preprocess/video_to_array_records.py",377,0,"",python,selection_keyboard +5947,9547528,"input_pipeline/preprocess/video_to_array_records.py",377,0,"i",python,content +5948,9547529,"input_pipeline/preprocess/video_to_array_records.py",378,0,"",python,selection_keyboard +5949,9547622,"input_pipeline/preprocess/video_to_array_records.py",378,0,"n",python,content +5950,9547623,"input_pipeline/preprocess/video_to_array_records.py",379,0,"",python,selection_keyboard +5951,9547661,"input_pipeline/preprocess/video_to_array_records.py",379,0,"e",python,content +5952,9547662,"input_pipeline/preprocess/video_to_array_records.py",380,0,"",python,selection_keyboard +5953,9547892,"input_pipeline/preprocess/video_to_array_records.py",380,0,"c",python,content +5954,9547893,"input_pipeline/preprocess/video_to_array_records.py",381,0,"",python,selection_keyboard +5955,9548117,"input_pipeline/preprocess/video_to_array_records.py",381,0,"r",python,content +5956,9548118,"input_pipeline/preprocess/video_to_array_records.py",382,0,"",python,selection_keyboard +5957,9548294,"input_pipeline/preprocess/video_to_array_records.py",382,0,"a",python,content +5958,9548295,"input_pipeline/preprocess/video_to_array_records.py",383,0,"",python,selection_keyboard +5959,9548441,"input_pipeline/preprocess/video_to_array_records.py",383,0,"f",python,content +5960,9548442,"input_pipeline/preprocess/video_to_array_records.py",384,0,"",python,selection_keyboard +5961,9548578,"input_pipeline/preprocess/video_to_array_records.py",384,0,"t",python,content +5962,9548579,"input_pipeline/preprocess/video_to_array_records.py",385,0,"",python,selection_keyboard +5963,9548982,"input_pipeline/preprocess/video_to_array_records.py",384,0,"",python,selection_command +5964,9550340,"input_pipeline/preprocess/video_to_array_records.py",374,0,"",python,selection_mouse +5965,9551598,"input_pipeline/preprocess/video_to_array_records.py",374,0," ",python,content +5966,9551599,"input_pipeline/preprocess/video_to_array_records.py",375,0,"",python,selection_keyboard +5967,9551765,"input_pipeline/preprocess/video_to_array_records.py",375,0,"s",python,content +5968,9551766,"input_pipeline/preprocess/video_to_array_records.py",376,0,"",python,selection_keyboard +5969,9551903,"input_pipeline/preprocess/video_to_array_records.py",376,0,"t",python,content +5970,9551904,"input_pipeline/preprocess/video_to_array_records.py",377,0,"",python,selection_keyboard +5971,9552047,"input_pipeline/preprocess/video_to_array_records.py",377,0,"r",python,content +5972,9552048,"input_pipeline/preprocess/video_to_array_records.py",378,0,"",python,selection_keyboard +5973,9552490,"input_pipeline/preprocess/video_to_array_records.py",378,0," ",python,content +5974,9552491,"input_pipeline/preprocess/video_to_array_records.py",379,0,"",python,selection_keyboard +5975,9552642,"input_pipeline/preprocess/video_to_array_records.py",379,0,"=",python,content +5976,9552643,"input_pipeline/preprocess/video_to_array_records.py",380,0,"",python,selection_keyboard +5977,9553248,"input_pipeline/preprocess/video_to_array_records.py",379,0,"",python,selection_command +5978,9558748,"input_pipeline/preprocess/video_to_array_records.py",3101,0,"",python,selection_mouse +5979,9558897,"input_pipeline/preprocess/video_to_array_records.py",3096,7,"coinrun",python,selection_mouse +5980,9559675,"input_pipeline/preprocess/video_to_array_records.py",3096,7,"",python,content 
+5981,9560433,"input_pipeline/preprocess/video_to_array_records.py",3095,0,"",python,selection_command +5982,9560649,"input_pipeline/preprocess/video_to_array_records.py",3095,1,"",python,content +5983,9560801,"input_pipeline/preprocess/video_to_array_records.py",3095,1,"",python,content +5984,9561320,"input_pipeline/preprocess/video_to_array_records.py",3095,0,"a",python,content +5985,9561321,"input_pipeline/preprocess/video_to_array_records.py",3096,0,"",python,selection_keyboard +5986,9561540,"input_pipeline/preprocess/video_to_array_records.py",3096,0,"r",python,content +5987,9561541,"input_pipeline/preprocess/video_to_array_records.py",3097,0,"",python,selection_keyboard +5988,9561617,"input_pipeline/preprocess/video_to_array_records.py",3097,0,"g",python,content +5989,9561618,"input_pipeline/preprocess/video_to_array_records.py",3098,0,"",python,selection_keyboard +5990,9561763,"input_pipeline/preprocess/video_to_array_records.py",3098,0,"s",python,content +5991,9561764,"input_pipeline/preprocess/video_to_array_records.py",3099,0,"",python,selection_keyboard +5992,9561887,"input_pipeline/preprocess/video_to_array_records.py",3099,0,".",python,content +5993,9561888,"input_pipeline/preprocess/video_to_array_records.py",3100,0,"",python,selection_keyboard +5994,9562269,"input_pipeline/preprocess/video_to_array_records.py",3100,0,"e",python,content +5995,9562270,"input_pipeline/preprocess/video_to_array_records.py",3101,0,"",python,selection_keyboard +5996,9562369,"input_pipeline/preprocess/video_to_array_records.py",3101,0,"n",python,content +5997,9562370,"input_pipeline/preprocess/video_to_array_records.py",3102,0,"",python,selection_keyboard +5998,9562637,"input_pipeline/preprocess/video_to_array_records.py",3100,2,"env_name",python,content +5999,9563122,"input_pipeline/preprocess/video_to_array_records.py",3107,0,"",python,selection_command +6000,9563193,"input_pipeline/preprocess/video_to_array_records.py",3137,0,"",python,selection_command +6001,9563592,"input_pipeline/preprocess/video_to_array_records.py",3180,0,"",python,selection_command +6002,9564139,"input_pipeline/preprocess/video_to_array_records.py",3181,0,"",python,selection_command +6003,9565343,"input_pipeline/preprocess/video_to_array_records.py",3182,0,"",python,selection_command +6004,9565865,"input_pipeline/preprocess/video_to_array_records.py",3183,0,"",python,selection_command +6005,9565897,"input_pipeline/preprocess/video_to_array_records.py",3184,0,"",python,selection_command +6006,9565923,"input_pipeline/preprocess/video_to_array_records.py",3185,0,"",python,selection_command +6007,9565990,"input_pipeline/preprocess/video_to_array_records.py",3186,0,"",python,selection_command +6008,9565991,"input_pipeline/preprocess/video_to_array_records.py",3187,0,"",python,selection_command +6009,9566027,"input_pipeline/preprocess/video_to_array_records.py",3188,0,"",python,selection_command +6010,9566050,"input_pipeline/preprocess/video_to_array_records.py",3189,0,"",python,selection_command +6011,9566081,"input_pipeline/preprocess/video_to_array_records.py",3190,0,"",python,selection_command +6012,9566103,"input_pipeline/preprocess/video_to_array_records.py",3191,0,"",python,selection_command +6013,9566134,"input_pipeline/preprocess/video_to_array_records.py",3192,0,"",python,selection_command +6014,9566195,"input_pipeline/preprocess/video_to_array_records.py",3193,0,"",python,selection_command +6015,9566226,"input_pipeline/preprocess/video_to_array_records.py",3194,0,"",python,selection_command 
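Rows 5912-5976 insert a new line at offset 360 of video_to_array_records.py, near the top of the file, and rows 5979-5998 then swap the hard-coded "coinrun" for a reference to it. Replaying the keystrokes, including the typo corrections, the added line is:

    env_name: str = "minecraft"

so the metadata entry becomes "env": args.env_name. That this line lands inside an Args dataclass is an assumption from the offset and the args. prefix; the text itself is exactly what was typed.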
+6016,9566242,"input_pipeline/preprocess/video_to_array_records.py",3195,0,"",python,selection_command +6017,9566255,"input_pipeline/preprocess/video_to_array_records.py",3196,0,"",python,selection_command +6018,9566286,"input_pipeline/preprocess/video_to_array_records.py",3197,0,"",python,selection_command +6019,9566307,"input_pipeline/preprocess/video_to_array_records.py",3198,0,"",python,selection_command +6020,9566356,"input_pipeline/preprocess/video_to_array_records.py",3199,0,"",python,selection_command +6021,9566379,"input_pipeline/preprocess/video_to_array_records.py",3200,0,"",python,selection_command +6022,9566407,"input_pipeline/preprocess/video_to_array_records.py",3201,0,"",python,selection_command +6023,9566441,"input_pipeline/preprocess/video_to_array_records.py",3202,0,"",python,selection_command +6024,9566490,"input_pipeline/preprocess/video_to_array_records.py",3203,0,"",python,selection_command +6025,9566498,"input_pipeline/preprocess/video_to_array_records.py",3204,0,"",python,selection_command +6026,9566532,"input_pipeline/preprocess/video_to_array_records.py",3205,0,"",python,selection_command +6027,9566555,"input_pipeline/preprocess/video_to_array_records.py",3206,0,"",python,selection_command +6028,9566595,"input_pipeline/preprocess/video_to_array_records.py",3207,0,"",python,selection_command +6029,9566609,"input_pipeline/preprocess/video_to_array_records.py",3208,0,"",python,selection_command +6030,9566637,"input_pipeline/preprocess/video_to_array_records.py",3209,0,"",python,selection_command +6031,9566711,"input_pipeline/preprocess/video_to_array_records.py",3210,0,"",python,selection_command +6032,9566802,"input_pipeline/preprocess/video_to_array_records.py",3211,0,"",python,selection_command +6033,9566964,"input_pipeline/preprocess/video_to_array_records.py",3212,0,"",python,selection_command +6034,9567114,"input_pipeline/preprocess/video_to_array_records.py",3213,0,"",python,selection_command +6035,9567594,"input_pipeline/preprocess/video_to_array_records.py",3212,0,"",python,selection_command +6036,9568270,"input_pipeline/preprocess/video_to_array_records.py",3212,16,"",python,content +6037,9568633,"input_pipeline/preprocess/video_to_array_records.py",3212,0,"r",python,content +6038,9568633,"input_pipeline/preprocess/video_to_array_records.py",3213,0,"",python,selection_keyboard +6039,9568826,"input_pipeline/preprocess/video_to_array_records.py",3213,0,"e",python,content +6040,9568827,"input_pipeline/preprocess/video_to_array_records.py",3214,0,"",python,selection_keyboard +6041,9569028,"input_pipeline/preprocess/video_to_array_records.py",3214,0,"s",python,content +6042,9569029,"input_pipeline/preprocess/video_to_array_records.py",3215,0,"",python,selection_keyboard +6043,9569120,"input_pipeline/preprocess/video_to_array_records.py",3215,0,"u",python,content +6044,9569121,"input_pipeline/preprocess/video_to_array_records.py",3216,0,"",python,selection_keyboard +6045,9570985,"input_pipeline/preprocess/video_to_array_records.py",3212,4,"results",python,content +6046,9572132,"input_pipeline/preprocess/video_to_array_records.py",3218,0,"",python,selection_command +6047,9572335,"input_pipeline/preprocess/video_to_array_records.py",3267,0,"",python,selection_command +6048,9572717,"input_pipeline/preprocess/video_to_array_records.py",3266,0,"",python,selection_command +6049,9572910,"input_pipeline/preprocess/video_to_array_records.py",3265,0,"",python,selection_command 
+6050,9573056,"input_pipeline/preprocess/video_to_array_records.py",3264,0,"",python,selection_command +6051,9573572,"input_pipeline/preprocess/video_to_array_records.py",3263,0,"",python,selection_command +6052,9573593,"input_pipeline/preprocess/video_to_array_records.py",3262,0,"",python,selection_command +6053,9573634,"input_pipeline/preprocess/video_to_array_records.py",3261,0,"",python,selection_command +6054,9573680,"input_pipeline/preprocess/video_to_array_records.py",3260,0,"",python,selection_command +6055,9573697,"input_pipeline/preprocess/video_to_array_records.py",3259,0,"",python,selection_command +6056,9573723,"input_pipeline/preprocess/video_to_array_records.py",3258,0,"",python,selection_command +6057,9573775,"input_pipeline/preprocess/video_to_array_records.py",3257,0,"",python,selection_command +6058,9573789,"input_pipeline/preprocess/video_to_array_records.py",3256,0,"",python,selection_command +6059,9573917,"input_pipeline/preprocess/video_to_array_records.py",3255,0,"",python,selection_command +6060,9574099,"input_pipeline/preprocess/video_to_array_records.py",3254,0,"",python,selection_command +6061,9574235,"input_pipeline/preprocess/video_to_array_records.py",3253,0,"",python,selection_command +6062,9574369,"input_pipeline/preprocess/video_to_array_records.py",3252,0,"",python,selection_command +6063,9574521,"input_pipeline/preprocess/video_to_array_records.py",3251,0,"",python,selection_command +6064,9574791,"input_pipeline/preprocess/video_to_array_records.py",3251,16,"",python,content +6065,9575236,"input_pipeline/preprocess/video_to_array_records.py",3251,0,"r",python,content +6066,9575237,"input_pipeline/preprocess/video_to_array_records.py",3252,0,"",python,selection_keyboard +6067,9575431,"input_pipeline/preprocess/video_to_array_records.py",3252,0,"e",python,content +6068,9575432,"input_pipeline/preprocess/video_to_array_records.py",3253,0,"",python,selection_keyboard +6069,9575561,"input_pipeline/preprocess/video_to_array_records.py",3253,0,"s",python,content +6070,9575562,"input_pipeline/preprocess/video_to_array_records.py",3254,0,"",python,selection_keyboard +6071,9575949,"input_pipeline/preprocess/video_to_array_records.py",3251,3,"results",python,content +6072,9576263,"input_pipeline/preprocess/video_to_array_records.py",3257,0,"",python,selection_command +6073,9576462,"input_pipeline/preprocess/video_to_array_records.py",3187,0,"",python,selection_command +6074,9576710,"input_pipeline/preprocess/video_to_array_records.py",3144,0,"",python,selection_command +6075,9576862,"input_pipeline/preprocess/video_to_array_records.py",3108,0,"",python,selection_command +6076,9578036,"input_pipeline/preprocess/video_to_array_records.py",3144,0,"",python,selection_command +6077,9579152,"input_pipeline/preprocess/video_to_array_records.py",3146,0,"",python,selection_mouse +6078,9579912,"input_pipeline/preprocess/video_to_array_records.py",3136,0,"",python,selection_mouse +6079,9580872,"input_pipeline/preprocess/video_to_array_records.py",3134,0,"",python,selection_mouse +6080,9581462,"input_pipeline/preprocess/video_to_array_records.py",3123,0,"",python,selection_mouse +6081,9582022,"input_pipeline/preprocess/video_to_array_records.py",3119,0,"",python,selection_mouse +6082,9582777,"input_pipeline/preprocess/video_to_array_records.py",3125,0,"",python,selection_mouse +6083,9583535,"input_pipeline/preprocess/video_to_array_records.py",3120,0,"",python,selection_mouse +6084,9584513,"input_pipeline/preprocess/video_to_array_records.py",3124,0,"",python,selection_mouse 
+6085,9585419,"input_pipeline/preprocess/video_to_array_records.py",3128,0,"",python,selection_mouse +6086,9586797,"input_pipeline/preprocess/video_to_array_records.py",3138,0,"",python,selection_mouse +6087,9587410,"input_pipeline/preprocess/video_to_array_records.py",3133,0,"",python,selection_mouse +6088,9588692,"input_pipeline/preprocess/video_to_array_records.py",3134,0,"",python,selection_command +6089,9589014,"input_pipeline/preprocess/video_to_array_records.py",3134,4,"",python,content +6090,9589673,"input_pipeline/preprocess/video_to_array_records.py",3134,0,"d",python,content +6091,9589673,"input_pipeline/preprocess/video_to_array_records.py",3135,0,"",python,selection_keyboard +6092,9590097,"input_pipeline/preprocess/video_to_array_records.py",3135,1,"",python,content +6093,9590390,"input_pipeline/preprocess/video_to_array_records.py",3135,12,"",python,content +6094,9590757,"input_pipeline/preprocess/video_to_array_records.py",3134,1,"",python,content +6095,9591133,"input_pipeline/preprocess/video_to_array_records.py",3134,0,"l",python,content +6096,9591134,"input_pipeline/preprocess/video_to_array_records.py",3135,0,"",python,selection_keyboard +6097,9591293,"input_pipeline/preprocess/video_to_array_records.py",3135,0,"e",python,content +6098,9591294,"input_pipeline/preprocess/video_to_array_records.py",3136,0,"",python,selection_keyboard +6099,9591388,"input_pipeline/preprocess/video_to_array_records.py",3136,0,"n",python,content +6100,9591389,"input_pipeline/preprocess/video_to_array_records.py",3137,0,"",python,selection_keyboard +6101,9592621,"input_pipeline/preprocess/video_to_array_records.py",3134,3,"len",python,content +6102,9593073,"input_pipeline/preprocess/video_to_array_records.py",3137,0,"#",python,content +6103,9593074,"input_pipeline/preprocess/video_to_array_records.py",3138,0,"",python,selection_keyboard +6104,9594434,"input_pipeline/preprocess/video_to_array_records.py",3137,1,"",python,content +6105,9595080,"input_pipeline/preprocess/video_to_array_records.py",3137,0,"()",python,content +6106,9595081,"input_pipeline/preprocess/video_to_array_records.py",3138,0,"",python,selection_keyboard +6107,9595221,"input_pipeline/preprocess/video_to_array_records.py",3138,0,"e",python,content +6108,9595222,"input_pipeline/preprocess/video_to_array_records.py",3139,0,"",python,selection_keyboard +6109,9595769,"input_pipeline/preprocess/video_to_array_records.py",3138,1,"",python,content +6110,9595925,"input_pipeline/preprocess/video_to_array_records.py",3138,0,"r",python,content +6111,9595926,"input_pipeline/preprocess/video_to_array_records.py",3139,0,"",python,selection_keyboard +6112,9596091,"input_pipeline/preprocess/video_to_array_records.py",3139,0,"e",python,content +6113,9596092,"input_pipeline/preprocess/video_to_array_records.py",3140,0,"",python,selection_keyboard +6114,9597767,"input_pipeline/preprocess/video_to_array_records.py",3138,2,"results",python,content +6115,9599166,"input_pipeline/preprocess/video_to_array_records.py",2869,0,"",python,selection_mouse +6116,9599833,"input_pipeline/preprocess/video_to_array_records.py",2847,0,"",python,selection_mouse +6117,9600337,"input_pipeline/preprocess/video_to_array_records.py",2652,0,"",python,selection_mouse +6118,9602412,"input_pipeline/preprocess/video_to_array_records.py",2713,0,"",python,selection_mouse +6119,9603597,"input_pipeline/preprocess/video_to_array_records.py",2715,0,"",python,selection_mouse +6120,9607063,"input_pipeline/preprocess/video_to_array_records.py",3129,0,"",python,selection_mouse 
+6121,9607745,"input_pipeline/preprocess/video_to_array_records.py",3147,0,"",python,selection_mouse +6122,9608263,"input_pipeline/preprocess/video_to_array_records.py",3147,0,"\n ",python,content +6123,9608978,"input_pipeline/preprocess/video_to_array_records.py",3156,0,"""""",python,content +6124,9608978,"input_pipeline/preprocess/video_to_array_records.py",3157,0,"",python,selection_keyboard +6125,9610148,"input_pipeline/preprocess/video_to_array_records.py",3157,0,"f",python,content +6126,9610150,"input_pipeline/preprocess/video_to_array_records.py",3158,0,"",python,selection_keyboard +6127,9610312,"input_pipeline/preprocess/video_to_array_records.py",3158,0,"a",python,content +6128,9610313,"input_pipeline/preprocess/video_to_array_records.py",3159,0,"",python,selection_keyboard +6129,9610390,"input_pipeline/preprocess/video_to_array_records.py",3159,0,"i",python,content +6130,9610391,"input_pipeline/preprocess/video_to_array_records.py",3160,0,"",python,selection_keyboard +6131,9610537,"input_pipeline/preprocess/video_to_array_records.py",3160,0,"l",python,content +6132,9610538,"input_pipeline/preprocess/video_to_array_records.py",3161,0,"",python,selection_keyboard +6133,9610662,"input_pipeline/preprocess/video_to_array_records.py",3161,0,"e",python,content +6134,9610662,"input_pipeline/preprocess/video_to_array_records.py",3162,0,"",python,selection_keyboard +6135,9610839,"input_pipeline/preprocess/video_to_array_records.py",3162,0,"s",python,content +6136,9610840,"input_pipeline/preprocess/video_to_array_records.py",3163,0,"",python,selection_keyboard +6137,9610999,"input_pipeline/preprocess/video_to_array_records.py",3163,0,"_",python,content +6138,9611000,"input_pipeline/preprocess/video_to_array_records.py",3164,0,"",python,selection_keyboard +6139,9611434,"input_pipeline/preprocess/video_to_array_records.py",3163,1,"",python,content +6140,9611546,"input_pipeline/preprocess/video_to_array_records.py",3162,1,"",python,content +6141,9611650,"input_pipeline/preprocess/video_to_array_records.py",3162,0,"d",python,content +6142,9611651,"input_pipeline/preprocess/video_to_array_records.py",3163,0,"",python,selection_keyboard +6143,9611804,"input_pipeline/preprocess/video_to_array_records.py",3163,0,"_",python,content +6144,9611805,"input_pipeline/preprocess/video_to_array_records.py",3164,0,"",python,selection_keyboard +6145,9611979,"input_pipeline/preprocess/video_to_array_records.py",3164,0,"v",python,content +6146,9611979,"input_pipeline/preprocess/video_to_array_records.py",3165,0,"",python,selection_keyboard +6147,9612128,"input_pipeline/preprocess/video_to_array_records.py",3165,0,"i",python,content +6148,9612129,"input_pipeline/preprocess/video_to_array_records.py",3166,0,"",python,selection_keyboard +6149,9612339,"input_pipeline/preprocess/video_to_array_records.py",3166,0,"d",python,content +6150,9612340,"input_pipeline/preprocess/video_to_array_records.py",3167,0,"",python,selection_keyboard +6151,9612496,"input_pipeline/preprocess/video_to_array_records.py",3167,0,"e",python,content +6152,9612497,"input_pipeline/preprocess/video_to_array_records.py",3168,0,"",python,selection_keyboard +6153,9612607,"input_pipeline/preprocess/video_to_array_records.py",3168,0,"o",python,content +6154,9612608,"input_pipeline/preprocess/video_to_array_records.py",3169,0,"",python,selection_keyboard +6155,9612741,"input_pipeline/preprocess/video_to_array_records.py",3169,0,"s",python,content +6156,9612742,"input_pipeline/preprocess/video_to_array_records.py",3170,0,"",python,selection_keyboard 
+6157,9613684,"input_pipeline/preprocess/video_to_array_records.py",3171,0,"",python,selection_command +6158,9614169,"input_pipeline/preprocess/video_to_array_records.py",3171,0,":",python,content +6159,9614171,"input_pipeline/preprocess/video_to_array_records.py",3172,0,"",python,selection_keyboard +6160,9615045,"input_pipeline/preprocess/video_to_array_records.py",3172,0," ",python,content +6161,9615045,"input_pipeline/preprocess/video_to_array_records.py",3173,0,"",python,selection_keyboard +6162,9617078,"input_pipeline/preprocess/video_to_array_records.py",3173,0,"l",python,content +6163,9617080,"input_pipeline/preprocess/video_to_array_records.py",3174,0,"",python,selection_keyboard +6164,9617186,"input_pipeline/preprocess/video_to_array_records.py",3174,0,"e",python,content +6165,9617188,"input_pipeline/preprocess/video_to_array_records.py",3175,0,"",python,selection_keyboard +6166,9617294,"input_pipeline/preprocess/video_to_array_records.py",3175,0,"n",python,content +6167,9617295,"input_pipeline/preprocess/video_to_array_records.py",3176,0,"",python,selection_keyboard +6168,9618085,"input_pipeline/preprocess/video_to_array_records.py",3176,0,"()",python,content +6169,9618086,"input_pipeline/preprocess/video_to_array_records.py",3177,0,"",python,selection_keyboard +6170,9618253,"input_pipeline/preprocess/video_to_array_records.py",3177,0,"f",python,content +6171,9618254,"input_pipeline/preprocess/video_to_array_records.py",3178,0,"",python,selection_keyboard +6172,9618404,"input_pipeline/preprocess/video_to_array_records.py",3178,0,"a",python,content +6173,9618405,"input_pipeline/preprocess/video_to_array_records.py",3179,0,"",python,selection_keyboard +6174,9619909,"input_pipeline/preprocess/video_to_array_records.py",3179,0,"i",python,content +6175,9619910,"input_pipeline/preprocess/video_to_array_records.py",3180,0,"",python,selection_keyboard +6176,9620011,"input_pipeline/preprocess/video_to_array_records.py",3180,0,"l",python,content +6177,9620013,"input_pipeline/preprocess/video_to_array_records.py",3181,0,"",python,selection_keyboard +6178,9620139,"input_pipeline/preprocess/video_to_array_records.py",3181,0,"e",python,content +6179,9620140,"input_pipeline/preprocess/video_to_array_records.py",3182,0,"",python,selection_keyboard +6180,9622527,"input_pipeline/preprocess/video_to_array_records.py",3177,5,"failed_videos",python,content +6181,9624246,"input_pipeline/preprocess/video_to_array_records.py",3189,0,"",python,selection_command +6182,9624724,"input_pipeline/preprocess/video_to_array_records.py",3191,0,"",python,selection_command +6183,9624930,"input_pipeline/preprocess/video_to_array_records.py",3191,0,",",python,content +6184,9624932,"input_pipeline/preprocess/video_to_array_records.py",3192,0,"",python,selection_keyboard +6185,9625142,"input_pipeline/preprocess/video_to_array_records.py",3192,0,"\n ",python,content +6186,9625954,"input_pipeline/preprocess/video_to_array_records.py",3201,0,"""""",python,content +6187,9625955,"input_pipeline/preprocess/video_to_array_records.py",3202,0,"",python,selection_keyboard +6188,9626790,"input_pipeline/preprocess/video_to_array_records.py",3202,0,"s",python,content +6189,9626791,"input_pipeline/preprocess/video_to_array_records.py",3203,0,"",python,selection_keyboard +6190,9626921,"input_pipeline/preprocess/video_to_array_records.py",3203,0,"h",python,content +6191,9626922,"input_pipeline/preprocess/video_to_array_records.py",3204,0,"",python,selection_keyboard 
+6192,9627087,"input_pipeline/preprocess/video_to_array_records.py",3204,0,"o",python,content +6193,9627088,"input_pipeline/preprocess/video_to_array_records.py",3205,0,"",python,selection_keyboard +6194,9627626,"input_pipeline/preprocess/video_to_array_records.py",3205,0,"r",python,content +6195,9627627,"input_pipeline/preprocess/video_to_array_records.py",3206,0,"",python,selection_keyboard +6196,9627828,"input_pipeline/preprocess/video_to_array_records.py",3206,0,"t",python,content +6197,9627829,"input_pipeline/preprocess/video_to_array_records.py",3207,0,"",python,selection_keyboard +6198,9628253,"input_pipeline/preprocess/video_to_array_records.py",3207,0,"_",python,content +6199,9628254,"input_pipeline/preprocess/video_to_array_records.py",3208,0,"",python,selection_keyboard +6200,9629431,"input_pipeline/preprocess/video_to_array_records.py",3208,0,"e",python,content +6201,9629432,"input_pipeline/preprocess/video_to_array_records.py",3209,0,"",python,selection_keyboard +6202,9629498,"input_pipeline/preprocess/video_to_array_records.py",3209,0,"p",python,content +6203,9629498,"input_pipeline/preprocess/video_to_array_records.py",3210,0,"",python,selection_keyboard +6204,9629662,"input_pipeline/preprocess/video_to_array_records.py",3210,0,"i",python,content +6205,9629663,"input_pipeline/preprocess/video_to_array_records.py",3211,0,"",python,selection_keyboard +6206,9629798,"input_pipeline/preprocess/video_to_array_records.py",3211,0,"s",python,content +6207,9629799,"input_pipeline/preprocess/video_to_array_records.py",3212,0,"",python,selection_keyboard +6208,9629881,"input_pipeline/preprocess/video_to_array_records.py",3212,0,"o",python,content +6209,9629882,"input_pipeline/preprocess/video_to_array_records.py",3213,0,"",python,selection_keyboard +6210,9629990,"input_pipeline/preprocess/video_to_array_records.py",3213,0,"d",python,content +6211,9629991,"input_pipeline/preprocess/video_to_array_records.py",3214,0,"",python,selection_keyboard +6212,9630198,"input_pipeline/preprocess/video_to_array_records.py",3214,0,"e",python,content +6213,9630199,"input_pipeline/preprocess/video_to_array_records.py",3215,0,"",python,selection_keyboard +6214,9630371,"input_pipeline/preprocess/video_to_array_records.py",3215,0,"s",python,content +6215,9630372,"input_pipeline/preprocess/video_to_array_records.py",3216,0,"",python,selection_keyboard +6216,9630608,"input_pipeline/preprocess/video_to_array_records.py",3217,0,"",python,selection_command +6217,9630959,"input_pipeline/preprocess/video_to_array_records.py",3217,0,":",python,content +6218,9630960,"input_pipeline/preprocess/video_to_array_records.py",3218,0,"",python,selection_keyboard +6219,9632196,"input_pipeline/preprocess/video_to_array_records.py",3217,0,"",python,selection_command +6220,9632580,"input_pipeline/preprocess/video_to_array_records.py",3216,0,"",python,selection_command +6221,9632736,"input_pipeline/preprocess/video_to_array_records.py",3215,0,"",python,selection_command +6222,9633021,"input_pipeline/preprocess/video_to_array_records.py",3214,1,"",python,content +6223,9633327,"input_pipeline/preprocess/video_to_array_records.py",3213,1,"",python,content +6224,9633479,"input_pipeline/preprocess/video_to_array_records.py",3212,1,"",python,content +6225,9633621,"input_pipeline/preprocess/video_to_array_records.py",3211,1,"",python,content +6226,9633758,"input_pipeline/preprocess/video_to_array_records.py",3210,1,"",python,content +6227,9633938,"input_pipeline/preprocess/video_to_array_records.py",3209,1,"",python,content 
+6228,9634030,"input_pipeline/preprocess/video_to_array_records.py",3208,1,"",python,content +6229,9634250,"input_pipeline/preprocess/video_to_array_records.py",3208,1,"",python,content +6230,9634713,"input_pipeline/preprocess/video_to_array_records.py",3208,0,"v",python,content +6231,9634715,"input_pipeline/preprocess/video_to_array_records.py",3209,0,"",python,selection_keyboard +6232,9634842,"input_pipeline/preprocess/video_to_array_records.py",3209,0,"i",python,content +6233,9634843,"input_pipeline/preprocess/video_to_array_records.py",3210,0,"",python,selection_keyboard +6234,9635019,"input_pipeline/preprocess/video_to_array_records.py",3210,0,"d",python,content +6235,9635020,"input_pipeline/preprocess/video_to_array_records.py",3211,0,"",python,selection_keyboard +6236,9635145,"input_pipeline/preprocess/video_to_array_records.py",3211,0,"e",python,content +6237,9635146,"input_pipeline/preprocess/video_to_array_records.py",3212,0,"",python,selection_keyboard +6238,9635233,"input_pipeline/preprocess/video_to_array_records.py",3212,0,"o",python,content +6239,9635234,"input_pipeline/preprocess/video_to_array_records.py",3213,0,"",python,selection_keyboard +6240,9635341,"input_pipeline/preprocess/video_to_array_records.py",3213,0,"s",python,content +6241,9635342,"input_pipeline/preprocess/video_to_array_records.py",3214,0,"",python,selection_keyboard +6242,9637517,"input_pipeline/preprocess/video_to_array_records.py",3216,0,"",python,selection_mouse +6243,9637957,"input_pipeline/preprocess/video_to_array_records.py",3216,0," ",python,content +6244,9637958,"input_pipeline/preprocess/video_to_array_records.py",3217,0,"",python,selection_keyboard +6245,9638215,"input_pipeline/preprocess/video_to_array_records.py",3217,0,"s",python,content +6246,9638216,"input_pipeline/preprocess/video_to_array_records.py",3218,0,"",python,selection_keyboard +6247,9638290,"input_pipeline/preprocess/video_to_array_records.py",3218,0,"h",python,content +6248,9638291,"input_pipeline/preprocess/video_to_array_records.py",3219,0,"",python,selection_keyboard +6249,9639071,"input_pipeline/preprocess/video_to_array_records.py",3217,2,"short_episodes",python,content +6250,9639906,"input_pipeline/preprocess/video_to_array_records.py",3231,0,",",python,content +6251,9639907,"input_pipeline/preprocess/video_to_array_records.py",3232,0,"",python,selection_keyboard +6252,9640699,"input_pipeline/preprocess/video_to_array_records.py",3231,0,"",python,selection_command +6253,9641337,"input_pipeline/preprocess/video_to_array_records.py",3231,0,")",python,content +6254,9641338,"input_pipeline/preprocess/video_to_array_records.py",3232,0,"",python,selection_keyboard +6255,9642348,"input_pipeline/preprocess/video_to_array_records.py",3217,0,"e",python,content +6256,9642349,"input_pipeline/preprocess/video_to_array_records.py",3218,0,"",python,selection_keyboard +6257,9642635,"input_pipeline/preprocess/video_to_array_records.py",3217,1,"",python,content +6258,9642836,"input_pipeline/preprocess/video_to_array_records.py",3217,0,"l",python,content +6259,9642837,"input_pipeline/preprocess/video_to_array_records.py",3218,0,"",python,selection_keyboard +6260,9642973,"input_pipeline/preprocess/video_to_array_records.py",3218,0,"e",python,content +6261,9642974,"input_pipeline/preprocess/video_to_array_records.py",3219,0,"",python,selection_keyboard +6262,9643053,"input_pipeline/preprocess/video_to_array_records.py",3219,0,"n",python,content 
+6263,9643054,"input_pipeline/preprocess/video_to_array_records.py",3220,0,"",python,selection_keyboard +6264,9643717,"input_pipeline/preprocess/video_to_array_records.py",3220,0,"(",python,content +6265,9643718,"input_pipeline/preprocess/video_to_array_records.py",3221,0,"",python,selection_keyboard +6266,9644980,"input_pipeline/preprocess/video_to_array_records.py",3157,0,"",python,selection_mouse +6267,9645582,"input_pipeline/preprocess/video_to_array_records.py",3157,0,"n",python,content +6268,9645583,"input_pipeline/preprocess/video_to_array_records.py",3158,0,"",python,selection_keyboard +6269,9645797,"input_pipeline/preprocess/video_to_array_records.py",3158,0,"u",python,content +6270,9645798,"input_pipeline/preprocess/video_to_array_records.py",3159,0,"",python,selection_keyboard +6271,9646040,"input_pipeline/preprocess/video_to_array_records.py",3159,0,"m",python,content +6272,9646040,"input_pipeline/preprocess/video_to_array_records.py",3160,0,"",python,selection_keyboard +6273,9646380,"input_pipeline/preprocess/video_to_array_records.py",3160,0,"_",python,content +6274,9646381,"input_pipeline/preprocess/video_to_array_records.py",3161,0,"",python,selection_keyboard +6275,9647717,"input_pipeline/preprocess/video_to_array_records.py",3206,0,"",python,selection_mouse +6276,9648291,"input_pipeline/preprocess/video_to_array_records.py",3206,0,"n",python,content +6277,9648292,"input_pipeline/preprocess/video_to_array_records.py",3207,0,"",python,selection_keyboard +6278,9648447,"input_pipeline/preprocess/video_to_array_records.py",3207,0,"u",python,content +6279,9648448,"input_pipeline/preprocess/video_to_array_records.py",3208,0,"",python,selection_keyboard +6280,9648576,"input_pipeline/preprocess/video_to_array_records.py",3208,0,"m",python,content +6281,9648577,"input_pipeline/preprocess/video_to_array_records.py",3209,0,"",python,selection_keyboard +6282,9648876,"input_pipeline/preprocess/video_to_array_records.py",3209,0,"_",python,content +6283,9648876,"input_pipeline/preprocess/video_to_array_records.py",3210,0,"",python,selection_keyboard +6284,9649294,"input_pipeline/preprocess/video_to_array_records.py",3209,0,"",python,selection_command +6285,9650892,"input_pipeline/preprocess/video_to_array_records.py",2854,0,"",python,selection_mouse +6286,9651329,"input_pipeline/preprocess/video_to_array_records.py",2927,0,"",python,selection_mouse +6287,9652249,"input_pipeline/preprocess/video_to_array_records.py",2925,10,"successful",python,selection_mouse +6288,9653069,"input_pipeline/preprocess/video_to_array_records.py",3008,0,"",python,selection_mouse +6289,9653087,"input_pipeline/preprocess/video_to_array_records.py",3007,0,"",python,selection_command +6290,9653585,"input_pipeline/preprocess/video_to_array_records.py",2919,0,"",python,selection_mouse +6291,9653753,"input_pipeline/preprocess/video_to_array_records.py",2915,6,"Number",python,selection_mouse +6292,9653922,"input_pipeline/preprocess/video_to_array_records.py",2915,9,"Number of",python,selection_mouse +6293,9653969,"input_pipeline/preprocess/video_to_array_records.py",2915,10,"Number of ",python,selection_mouse +6294,9653970,"input_pipeline/preprocess/video_to_array_records.py",2915,20,"Number of successful",python,selection_mouse +6295,9654195,"input_pipeline/preprocess/video_to_array_records.py",2915,21,"Number of successful ",python,selection_mouse +6296,9654276,"input_pipeline/preprocess/video_to_array_records.py",2915,27,"Number of successful videos",python,selection_mouse 
+6297,9654645,"input_pipeline/preprocess/video_to_array_records.py",2938,0,"",python,selection_mouse +6298,9654646,"input_pipeline/preprocess/video_to_array_records.py",2936,6,"videos",python,selection_mouse +6299,9654812,"input_pipeline/preprocess/video_to_array_records.py",2925,17,"successful videos",python,selection_mouse +6300,9654877,"input_pipeline/preprocess/video_to_array_records.py",2924,18," successful videos",python,selection_mouse +6301,9654908,"input_pipeline/preprocess/video_to_array_records.py",2922,20,"of successful videos",python,selection_mouse +6302,9654908,"input_pipeline/preprocess/video_to_array_records.py",2921,21," of successful videos",python,selection_mouse +6303,9654925,"input_pipeline/preprocess/video_to_array_records.py",2915,27,"Number of successful videos",python,selection_mouse +6304,9655383,"input_pipeline/preprocess/video_to_array_records.py",2917,0,"",python,selection_mouse +6305,9655384,"input_pipeline/preprocess/video_to_array_records.py",2915,6,"Number",python,selection_mouse +6306,9655646,"input_pipeline/preprocess/video_to_array_records.py",2915,9,"Number of",python,selection_mouse +6307,9655653,"input_pipeline/preprocess/video_to_array_records.py",2915,20,"Number of successful",python,selection_mouse +6308,9656080,"input_pipeline/preprocess/video_to_array_records.py",2934,0,"",python,selection_mouse +6309,9656669,"input_pipeline/preprocess/video_to_array_records.py",3119,0,"",python,selection_mouse +6310,9657971,"input_pipeline/preprocess/video_to_array_records.py",3123,0,"",python,selection_mouse +6311,9658901,"input_pipeline/preprocess/video_to_array_records.py",3123,0,"s",python,content +6312,9658903,"input_pipeline/preprocess/video_to_array_records.py",3124,0,"",python,selection_keyboard +6313,9659099,"input_pipeline/preprocess/video_to_array_records.py",3124,0,"u",python,content +6314,9659100,"input_pipeline/preprocess/video_to_array_records.py",3125,0,"",python,selection_keyboard +6315,9659291,"input_pipeline/preprocess/video_to_array_records.py",3125,0,"c",python,content +6316,9659291,"input_pipeline/preprocess/video_to_array_records.py",3126,0,"",python,selection_keyboard +6317,9659419,"input_pipeline/preprocess/video_to_array_records.py",3126,0,"c",python,content +6318,9659420,"input_pipeline/preprocess/video_to_array_records.py",3127,0,"",python,selection_keyboard +6319,9659590,"input_pipeline/preprocess/video_to_array_records.py",3127,0,"e",python,content +6320,9659591,"input_pipeline/preprocess/video_to_array_records.py",3128,0,"",python,selection_keyboard +6321,9659786,"input_pipeline/preprocess/video_to_array_records.py",3128,0,"s",python,content +6322,9659787,"input_pipeline/preprocess/video_to_array_records.py",3129,0,"",python,selection_keyboard +6323,9659944,"input_pipeline/preprocess/video_to_array_records.py",3129,0,"s",python,content +6324,9659945,"input_pipeline/preprocess/video_to_array_records.py",3130,0,"",python,selection_keyboard +6325,9660157,"input_pipeline/preprocess/video_to_array_records.py",3130,0,"f",python,content +6326,9660158,"input_pipeline/preprocess/video_to_array_records.py",3131,0,"",python,selection_keyboard +6327,9660247,"input_pipeline/preprocess/video_to_array_records.py",3131,0,"u",python,content +6328,9660248,"input_pipeline/preprocess/video_to_array_records.py",3132,0,"",python,selection_keyboard +6329,9660467,"input_pipeline/preprocess/video_to_array_records.py",3132,0,"l",python,content +6330,9660468,"input_pipeline/preprocess/video_to_array_records.py",3133,0,"",python,selection_keyboard 
+6331,9660895,"input_pipeline/preprocess/video_to_array_records.py",3133,0,"_",python,content +6332,9660896,"input_pipeline/preprocess/video_to_array_records.py",3134,0,"",python,selection_keyboard +6333,9662130,"input_pipeline/preprocess/video_to_array_records.py",3126,0,"",python,selection_mouse +6334,9663024,"input_pipeline/preprocess/video_to_array_records.py",3125,0,"",python,selection_command +6335,9664288,"input_pipeline/preprocess/video_to_array_records.py",3124,0,"",python,selection_command +6336,9664428,"input_pipeline/preprocess/video_to_array_records.py",3123,0,"",python,selection_command +6337,9667394,"input_pipeline/preprocess/video_to_array_records.py",3123,1,"",python,content +6338,9667611,"input_pipeline/preprocess/video_to_array_records.py",3123,1,"",python,content +6339,9667760,"input_pipeline/preprocess/video_to_array_records.py",3123,1,"",python,content +6340,9667946,"input_pipeline/preprocess/video_to_array_records.py",3123,1,"",python,content +6341,9668174,"input_pipeline/preprocess/video_to_array_records.py",3123,1,"",python,content +6342,9668300,"input_pipeline/preprocess/video_to_array_records.py",3123,1,"",python,content +6343,9668501,"input_pipeline/preprocess/video_to_array_records.py",3123,1,"",python,content +6344,9668681,"input_pipeline/preprocess/video_to_array_records.py",3123,1,"",python,content +6345,9668880,"input_pipeline/preprocess/video_to_array_records.py",3123,1,"",python,content +6346,9669069,"input_pipeline/preprocess/video_to_array_records.py",3123,1,"",python,content +6347,9669557,"input_pipeline/preprocess/video_to_array_records.py",3122,0,"",python,selection_command +6348,9669736,"input_pipeline/preprocess/video_to_array_records.py",3121,0,"",python,selection_command +6349,9670075,"input_pipeline/preprocess/video_to_array_records.py",3120,0,"",python,selection_command +6350,9670260,"input_pipeline/preprocess/video_to_array_records.py",3119,0,"",python,selection_command +6351,9670571,"input_pipeline/preprocess/video_to_array_records.py",3119,1,"",python,content +6352,9670699,"input_pipeline/preprocess/video_to_array_records.py",3119,1,"",python,content +6353,9670863,"input_pipeline/preprocess/video_to_array_records.py",3119,1,"",python,content +6354,9671049,"input_pipeline/preprocess/video_to_array_records.py",3119,1,"",python,content +6355,9671657,"input_pipeline/preprocess/video_to_array_records.py",3119,0,"t",python,content +6356,9671658,"input_pipeline/preprocess/video_to_array_records.py",3120,0,"",python,selection_keyboard +6357,9671783,"input_pipeline/preprocess/video_to_array_records.py",3120,0,"o",python,content +6358,9671784,"input_pipeline/preprocess/video_to_array_records.py",3121,0,"",python,selection_keyboard +6359,9672432,"input_pipeline/preprocess/video_to_array_records.py",3121,0,"t",python,content +6360,9672433,"input_pipeline/preprocess/video_to_array_records.py",3122,0,"",python,selection_keyboard +6361,9672606,"input_pipeline/preprocess/video_to_array_records.py",3122,0,"a",python,content +6362,9672607,"input_pipeline/preprocess/video_to_array_records.py",3123,0,"",python,selection_keyboard +6363,9672646,"input_pipeline/preprocess/video_to_array_records.py",3123,0,"l",python,content +6364,9672647,"input_pipeline/preprocess/video_to_array_records.py",3124,0,"",python,selection_keyboard +6365,9672964,"input_pipeline/preprocess/video_to_array_records.py",3123,0,"",python,selection_command +6366,9673411,"input_pipeline/preprocess/video_to_array_records.py",3124,0,"",python,selection_command 
+6367,9673590,"input_pipeline/preprocess/video_to_array_records.py",3125,0,"",python,selection_command +6368,9674296,"input_pipeline/preprocess/video_to_array_records.py",3125,8,"",python,content +6369,9675644,"input_pipeline/preprocess/video_to_array_records.py",3125,0,"c",python,content +6370,9675645,"input_pipeline/preprocess/video_to_array_records.py",3126,0,"",python,selection_keyboard +6371,9675744,"input_pipeline/preprocess/video_to_array_records.py",3126,0,"i",python,content +6372,9675745,"input_pipeline/preprocess/video_to_array_records.py",3127,0,"",python,selection_keyboard +6373,9675914,"input_pipeline/preprocess/video_to_array_records.py",3127,0,"d",python,content +6374,9675915,"input_pipeline/preprocess/video_to_array_records.py",3128,0,"",python,selection_keyboard +6375,9676326,"input_pipeline/preprocess/video_to_array_records.py",3127,1,"",python,content +6376,9676470,"input_pipeline/preprocess/video_to_array_records.py",3126,1,"",python,content +6377,9676582,"input_pipeline/preprocess/video_to_array_records.py",3125,1,"",python,content +6378,9676713,"input_pipeline/preprocess/video_to_array_records.py",3125,0,"v",python,content +6379,9676714,"input_pipeline/preprocess/video_to_array_records.py",3126,0,"",python,selection_keyboard +6380,9676826,"input_pipeline/preprocess/video_to_array_records.py",3126,0,"i",python,content +6381,9676827,"input_pipeline/preprocess/video_to_array_records.py",3127,0,"",python,selection_keyboard +6382,9676934,"input_pipeline/preprocess/video_to_array_records.py",3127,0,"d",python,content +6383,9676935,"input_pipeline/preprocess/video_to_array_records.py",3128,0,"",python,selection_keyboard +6384,9677123,"input_pipeline/preprocess/video_to_array_records.py",3128,0,"e",python,content +6385,9677123,"input_pipeline/preprocess/video_to_array_records.py",3129,0,"",python,selection_keyboard +6386,9677263,"input_pipeline/preprocess/video_to_array_records.py",3129,0,"o",python,content +6387,9677264,"input_pipeline/preprocess/video_to_array_records.py",3130,0,"",python,selection_keyboard +6388,9677383,"input_pipeline/preprocess/video_to_array_records.py",3130,0,"s",python,content +6389,9677383,"input_pipeline/preprocess/video_to_array_records.py",3131,0,"",python,selection_keyboard +6390,9678183,"input_pipeline/preprocess/video_to_array_records.py",3130,0,"",python,selection_command +6391,9678881,"input_pipeline/preprocess/video_to_array_records.py",3147,0,"\n ""total_videos"": len(results),",python,content +6392,9678913,"input_pipeline/preprocess/video_to_array_records.py",3156,0,"",python,selection_command +6393,9679229,"input_pipeline/preprocess/video_to_array_records.py",3157,0,"",python,selection_command +6394,9679957,"input_pipeline/preprocess/video_to_array_records.py",3157,12,"",python,content +6395,9680298,"input_pipeline/preprocess/video_to_array_records.py",3157,0,"n",python,content +6396,9680299,"input_pipeline/preprocess/video_to_array_records.py",3158,0,"",python,selection_keyboard +6397,9680519,"input_pipeline/preprocess/video_to_array_records.py",3158,0,"u",python,content +6398,9680520,"input_pipeline/preprocess/video_to_array_records.py",3159,0,"",python,selection_keyboard +6399,9681158,"input_pipeline/preprocess/video_to_array_records.py",3159,0,"m",python,content +6400,9681159,"input_pipeline/preprocess/video_to_array_records.py",3160,0,"",python,selection_keyboard +6401,9681625,"input_pipeline/preprocess/video_to_array_records.py",3160,0,"_",python,content 
+6402,9681626,"input_pipeline/preprocess/video_to_array_records.py",3161,0,"",python,selection_keyboard +6403,9682348,"input_pipeline/preprocess/video_to_array_records.py",3161,0,"s",python,content +6404,9682349,"input_pipeline/preprocess/video_to_array_records.py",3162,0,"",python,selection_keyboard +6405,9682471,"input_pipeline/preprocess/video_to_array_records.py",3162,0,"u",python,content +6406,9682472,"input_pipeline/preprocess/video_to_array_records.py",3163,0,"",python,selection_keyboard +6407,9682572,"input_pipeline/preprocess/video_to_array_records.py",3163,0,"c",python,content +6408,9682573,"input_pipeline/preprocess/video_to_array_records.py",3164,0,"",python,selection_keyboard +6409,9682735,"input_pipeline/preprocess/video_to_array_records.py",3164,0,"c",python,content +6410,9682736,"input_pipeline/preprocess/video_to_array_records.py",3165,0,"",python,selection_keyboard +6411,9682907,"input_pipeline/preprocess/video_to_array_records.py",3165,0,"e",python,content +6412,9682908,"input_pipeline/preprocess/video_to_array_records.py",3166,0,"",python,selection_keyboard +6413,9683083,"input_pipeline/preprocess/video_to_array_records.py",3166,0,"s",python,content +6414,9683084,"input_pipeline/preprocess/video_to_array_records.py",3167,0,"",python,selection_keyboard +6415,9683525,"input_pipeline/preprocess/video_to_array_records.py",3167,0,"s",python,content +6416,9683526,"input_pipeline/preprocess/video_to_array_records.py",3168,0,"",python,selection_keyboard +6417,9683726,"input_pipeline/preprocess/video_to_array_records.py",3168,0,"f",python,content +6418,9683727,"input_pipeline/preprocess/video_to_array_records.py",3169,0,"",python,selection_keyboard +6419,9683843,"input_pipeline/preprocess/video_to_array_records.py",3169,0,"u",python,content +6420,9683844,"input_pipeline/preprocess/video_to_array_records.py",3170,0,"",python,selection_keyboard +6421,9684039,"input_pipeline/preprocess/video_to_array_records.py",3170,0,"l",python,content +6422,9684040,"input_pipeline/preprocess/video_to_array_records.py",3171,0,"",python,selection_keyboard +6423,9684221,"input_pipeline/preprocess/video_to_array_records.py",3171,0," ",python,content +6424,9684222,"input_pipeline/preprocess/video_to_array_records.py",3172,0,"",python,selection_keyboard +6425,9684664,"input_pipeline/preprocess/video_to_array_records.py",3171,1,"",python,content +6426,9684948,"input_pipeline/preprocess/video_to_array_records.py",3171,0,"_",python,content +6427,9684949,"input_pipeline/preprocess/video_to_array_records.py",3172,0,"",python,selection_keyboard +6428,9685160,"input_pipeline/preprocess/video_to_array_records.py",3172,0,"v",python,content +6429,9685161,"input_pipeline/preprocess/video_to_array_records.py",3173,0,"",python,selection_keyboard +6430,9685295,"input_pipeline/preprocess/video_to_array_records.py",3173,0,"i",python,content +6431,9685296,"input_pipeline/preprocess/video_to_array_records.py",3174,0,"",python,selection_keyboard +6432,9685453,"input_pipeline/preprocess/video_to_array_records.py",3174,0,"d",python,content +6433,9685454,"input_pipeline/preprocess/video_to_array_records.py",3175,0,"",python,selection_keyboard +6434,9685597,"input_pipeline/preprocess/video_to_array_records.py",3175,0,"e",python,content +6435,9685597,"input_pipeline/preprocess/video_to_array_records.py",3176,0,"",python,selection_keyboard +6436,9685685,"input_pipeline/preprocess/video_to_array_records.py",3176,0,"o",python,content 
+6437,9685686,"input_pipeline/preprocess/video_to_array_records.py",3177,0,"",python,selection_keyboard +6438,9685803,"input_pipeline/preprocess/video_to_array_records.py",3177,0,"s",python,content +6439,9685804,"input_pipeline/preprocess/video_to_array_records.py",3178,0,"",python,selection_keyboard +6440,9686397,"input_pipeline/preprocess/video_to_array_records.py",3179,0,"",python,selection_command +6441,9686497,"input_pipeline/preprocess/video_to_array_records.py",3180,0,"",python,selection_command +6442,9686661,"input_pipeline/preprocess/video_to_array_records.py",3181,0,"",python,selection_command +6443,9688564,"input_pipeline/preprocess/video_to_array_records.py",2945,0,"",python,selection_mouse +6444,9689480,"input_pipeline/preprocess/video_to_array_records.py",2945,1,"l",python,selection_mouse +6445,9689480,"input_pipeline/preprocess/video_to_array_records.py",2945,8,"len(resu",python,selection_mouse +6446,9689481,"input_pipeline/preprocess/video_to_array_records.py",2945,12,"len(results)",python,selection_mouse +6447,9689481,"input_pipeline/preprocess/video_to_array_records.py",2945,21,"len(results) - len(fa",python,selection_mouse +6448,9689481,"input_pipeline/preprocess/video_to_array_records.py",2945,26,"len(results) - len(failed_",python,selection_mouse +6449,9689506,"input_pipeline/preprocess/video_to_array_records.py",2945,32,"len(results) - len(failed_videos",python,selection_mouse +6450,9689525,"input_pipeline/preprocess/video_to_array_records.py",2945,38,"len(results) - len(failed_videos) - le",python,selection_mouse +6451,9689544,"input_pipeline/preprocess/video_to_array_records.py",2945,42,"len(results) - len(failed_videos) - len(sh",python,selection_mouse +6452,9689560,"input_pipeline/preprocess/video_to_array_records.py",2945,44,"len(results) - len(failed_videos) - len(shor",python,selection_mouse +6453,9689580,"input_pipeline/preprocess/video_to_array_records.py",2945,46,"len(results) - len(failed_videos) - len(short_",python,selection_mouse +6454,9689596,"input_pipeline/preprocess/video_to_array_records.py",2945,47,"len(results) - len(failed_videos) - len(short_e",python,selection_mouse +6455,9689627,"input_pipeline/preprocess/video_to_array_records.py",2945,49,"len(results) - len(failed_videos) - len(short_epi",python,selection_mouse +6456,9689644,"input_pipeline/preprocess/video_to_array_records.py",2945,50,"len(results) - len(failed_videos) - len(short_epis",python,selection_mouse +6457,9689660,"input_pipeline/preprocess/video_to_array_records.py",2945,51,"len(results) - len(failed_videos) - len(short_episo",python,selection_mouse +6458,9689688,"input_pipeline/preprocess/video_to_array_records.py",2945,52,"len(results) - len(failed_videos) - len(short_episod",python,selection_mouse +6459,9689721,"input_pipeline/preprocess/video_to_array_records.py",2945,53,"len(results) - len(failed_videos) - len(short_episode",python,selection_mouse +6460,9689734,"input_pipeline/preprocess/video_to_array_records.py",2945,54,"len(results) - len(failed_videos) - len(short_episodes",python,selection_mouse +6461,9690167,"input_pipeline/preprocess/video_to_array_records.py",2945,55,"len(results) - len(failed_videos) - len(short_episodes)",python,selection_mouse +6462,9693169,"input_pipeline/preprocess/video_to_array_records.py",3181,0,"",python,selection_mouse +6463,9693338,"input_pipeline/preprocess/video_to_array_records.py",3181,2,"le",python,selection_mouse +6464,9693339,"input_pipeline/preprocess/video_to_array_records.py",3181,3,"len",python,selection_mouse 
+6465,9693339,"input_pipeline/preprocess/video_to_array_records.py",3181,5,"len(r",python,selection_mouse +6466,9693404,"input_pipeline/preprocess/video_to_array_records.py",3181,6,"len(re",python,selection_mouse +6467,9693476,"input_pipeline/preprocess/video_to_array_records.py",3181,7,"len(res",python,selection_mouse +6468,9693553,"input_pipeline/preprocess/video_to_array_records.py",3181,8,"len(resu",python,selection_mouse +6469,9693553,"input_pipeline/preprocess/video_to_array_records.py",3181,9,"len(resul",python,selection_mouse +6470,9693554,"input_pipeline/preprocess/video_to_array_records.py",3181,10,"len(result",python,selection_mouse +6471,9693610,"input_pipeline/preprocess/video_to_array_records.py",3181,11,"len(results",python,selection_mouse +6472,9693918,"input_pipeline/preprocess/video_to_array_records.py",3181,12,"len(results)",python,selection_mouse +6473,9694818,"input_pipeline/preprocess/video_to_array_records.py",3181,12,"",python,content +6474,9695097,"input_pipeline/preprocess/video_to_array_records.py",3181,0,"len(results) - len(failed_videos) - len(short_episodes)",python,content +6475,9695735,"input_pipeline/preprocess/video_to_array_records.py",3235,0,"",python,selection_command +6476,9696240,"input_pipeline/preprocess/video_to_array_records.py",3320,0,"",python,selection_mouse +6477,9696669,"input_pipeline/preprocess/video_to_array_records.py",3439,0,"",python,selection_mouse +6478,9697138,"input_pipeline/preprocess/video_to_array_records.py",3442,0,"",python,selection_mouse +6479,9697160,"input_pipeline/preprocess/video_to_array_records.py",3441,0,"",python,selection_command +6480,9697714,"input_pipeline/preprocess/video_to_array_records.py",3448,0,"",python,selection_mouse +6481,9697720,"input_pipeline/preprocess/video_to_array_records.py",3447,0,"",python,selection_command +6482,9698187,"input_pipeline/preprocess/video_to_array_records.py",3439,0,"",python,selection_mouse +6483,9698754,"input_pipeline/preprocess/video_to_array_records.py",3359,0,"",python,selection_mouse +6484,9699398,"input_pipeline/preprocess/video_to_array_records.py",3378,0,"",python,selection_mouse +6485,9700868,"input_pipeline/preprocess/video_to_array_records.py",3076,0,"",python,selection_mouse +6486,9707253,"input_pipeline/preprocess/video_to_array_records.py",2692,0,"",python,selection_mouse +6487,9712922,"input_pipeline/preprocess/video_to_array_records.py",1691,0,"",python,selection_mouse +6488,9713056,"input_pipeline/preprocess/video_to_array_records.py",1690,6,"length",python,selection_mouse +6489,9716158,"input_pipeline/preprocess/video_to_array_records.py",2692,0,"",python,selection_mouse +6490,9717299,"input_pipeline/preprocess/video_to_array_records.py",2691,1,"",python,content +6491,9717734,"input_pipeline/preprocess/video_to_array_records.py",2691,0,"""""",python,content +6492,9717735,"input_pipeline/preprocess/video_to_array_records.py",2692,0,"",python,selection_keyboard +6493,9718225,"input_pipeline/preprocess/video_to_array_records.py",2692,0,"length",python,content +6494,9719306,"input_pipeline/preprocess/video_to_array_records.py",2769,0,"",python,selection_mouse +6495,9720267,"input_pipeline/preprocess/video_to_array_records.py",2768,1,"",python,content +6496,9720508,"input_pipeline/preprocess/video_to_array_records.py",2768,0,"length",python,content +6497,9721412,"input_pipeline/preprocess/video_to_array_records.py",2774,0,"""",python,content +6498,9721413,"input_pipeline/preprocess/video_to_array_records.py",2775,0,"",python,selection_keyboard 
+6499,9724132,"input_pipeline/preprocess/video_to_array_records.py",2768,0,"""",python,content +6500,9724133,"input_pipeline/preprocess/video_to_array_records.py",2769,0,"",python,selection_keyboard +6501,9735884,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +6502,9737113,"input_pipeline/preprocess/pngs_to_array_records.py",3636,0,"",python,selection_mouse +6503,9737635,"input_pipeline/preprocess/pngs_to_array_records.py",3635,1,"",python,content +6504,9738516,"input_pipeline/preprocess/pngs_to_array_records.py",3635,0,"""""",python,content +6505,9738517,"input_pipeline/preprocess/pngs_to_array_records.py",3636,0,"",python,selection_keyboard +6506,9739010,"input_pipeline/preprocess/pngs_to_array_records.py",3636,0,"length",python,content +6507,9740312,"input_pipeline/preprocess/pngs_to_array_records.py",3712,0,"",python,selection_mouse +6508,9740959,"input_pipeline/preprocess/pngs_to_array_records.py",3712,1,"",python,content +6509,9741383,"input_pipeline/preprocess/pngs_to_array_records.py",3712,0,"""""",python,content +6510,9741384,"input_pipeline/preprocess/pngs_to_array_records.py",3713,0,"",python,selection_keyboard +6511,9741816,"input_pipeline/preprocess/pngs_to_array_records.py",3713,0,"length",python,content +6512,9743349,"TERMINAL",0,0,"[[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/705"", 444], [""/hkfs/work/workspace/scratch/tum_ind369 5-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/630"", 313], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari _v1/screens/qbert/263"", 136], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/747"", 488], [""/hkfs/work /workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/672"", 103], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_sha red/data_atari/per-game/atari_v1/screens/qbert/743"", 145], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/q bert/376"", 410], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/789"", 716], [""/hkfs/work/workspace/sc ratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/785"", 434], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atar i/per-game/atari_v1/screens/qbert/326"", 274], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/251"", 43 4], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/322"", 169], [""/hkfs/work/workspace/scratch/tum_ind 3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/735"", 43], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/ata ri_v1/screens/qbert/368"", 417], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/806"", 610], [""/hkfs/wo rk/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/660"", 268], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_s hared/data_atari/per-game/atari_v1/screens/qbert/731"", 128], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens /qbert/364"", 743], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/848"", 374], [""/hkfs/work/workspace/ 
scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/360"", 417], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_at ari/per-game/atari_v1/screens/qbert/773"", 575], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/886"", 120], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/310"", 242], [""/hkfs/work/workspace/scratch/tum_i nd3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/723"", 911], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/ atari_v1/screens/qbert/427"", 482], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/352"", 376], [""/hkfs /work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/85"", 201], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws _shared/data_atari/per-game/atari_v1/screens/qbert/832"", 1327], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/scre ens/qbert/465"", 177], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/878"", 627], [""/hkfs/work/workspa ce/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/949"", 838], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data _atari/per-game/atari_v1/screens/qbert/80"", 477], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1034 "", 767], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/119"", 361], [""/hkfs/work/workspace/scratch/tu m_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/411"", 417], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-ga me/atari_v1/screens/qbert/824"", 174], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/457"", 184], [""/h kfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/528"", 307], [""/hkfs/work/workspace/scratch/tum_ind3695-jaf a_ws_shared/data_atari/per-game/atari_v1/screens/qbert/382"", 68], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/sc reens/qbert/820"", 720], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/157"", 80], [""/hkfs/work/worksp ace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/866"", 601], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/dat a_atari/per-game/atari_v1/screens/qbert/937"", 852], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/88 "", 214], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/862"", 553], [""/hkfs/work/workspace/scratch/tu m_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/199"", 309], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-ga me/atari_v1/screens/qbert/403"", 850], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/107"", 376], [""/h kfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/103"", 341], [""/hkfs/work/workspace/scratch/tum_ind3695-jaf a_ws_shared/data_atari/per-game/atari_v1/screens/qbert/516"", 
347], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/s creens/qbert/149"", 495], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/929"", 85], [""/hkfs/work/works pace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/854"", 454], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/da ta_atari/per-game/atari_v1/screens/qbert/925"", 261], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/8 50"", 333], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/483"", 199], [""/hkfs/work/workspace/scratch/ tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/187"", 171], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per- game/atari_v1/screens/qbert/892"", 486], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/963"", 379], ["" /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/596"", 258], [""/hkfs/work/workspace/scratch/tum_ind3695-j afa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/800"", 97], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/ screens/qbert/208"", 184], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/500"", 266], [""/hkfs/work/wor kspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/913"", 211], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/ :",,terminal_output +6513,9743859,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +6514,9745050,"input_pipeline/preprocess/video_to_array_records.py",3463,0,"",python,selection_mouse +6515,9745777,"input_pipeline/preprocess/video_to_array_records.py",3462,0,"",python,selection_mouse +6516,9747623,"input_pipeline/preprocess/video_to_array_records.py",3559,0,"",python,selection_mouse +6517,9748835,"input_pipeline/preprocess/video_to_array_records.py",3559,4,"",python,content +6518,9749330,"input_pipeline/preprocess/video_to_array_records.py",3558,1,"",python,content +6519,9749622,"input_pipeline/preprocess/video_to_array_records.py",3556,2,"",python,content +6520,9750060,"input_pipeline/preprocess/video_to_array_records.py",3556,0,"m",python,content +6521,9750061,"input_pipeline/preprocess/video_to_array_records.py",3557,0,"",python,selection_keyboard +6522,9750180,"input_pipeline/preprocess/video_to_array_records.py",3557,0,"e",python,content +6523,9750181,"input_pipeline/preprocess/video_to_array_records.py",3558,0,"",python,selection_keyboard +6524,9750983,"input_pipeline/preprocess/video_to_array_records.py",3558,0,"t",python,content +6525,9750984,"input_pipeline/preprocess/video_to_array_records.py",3559,0,"",python,selection_keyboard +6526,9751496,"input_pipeline/preprocess/video_to_array_records.py",3556,3,"metadata",python,content +6527,9752528,"input_pipeline/preprocess/video_to_array_records.py",3568,0,"",python,selection_mouse +6528,9752801,"input_pipeline/preprocess/video_to_array_records.py",3490,78,"(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(metadata, f)",python,selection_mouse +6529,9752801,"input_pipeline/preprocess/video_to_array_records.py",3462,106,"\n\n with open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(metadata, f)",python,selection_mouse 
+6530,9752802,"input_pipeline/preprocess/video_to_array_records.py",3426,142," ""episode_metadata"": results,\n }\n\n with open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(metadata, f)",python,selection_mouse +6531,9752802,"input_pipeline/preprocess/video_to_array_records.py",3354,214," ""avg_episode_len"": np.mean([ep[""length""] for ep in results]),\n ""episode_metadata"": results,\n }\n\n with open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(metadata, f)",python,selection_mouse +6532,9752802,"input_pipeline/preprocess/video_to_array_records.py",3304,264," ""num_short_videos"": len(short_episodes),\n ""avg_episode_len"": np.mean([ep[""length""] for ep in results]),\n ""episode_metadata"": results,\n }\n\n with open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(metadata, f)",python,selection_mouse +6533,9752802,"input_pipeline/preprocess/video_to_array_records.py",3254,314," ""num_failed_videos"": len(failed_videos),\n ""num_short_videos"": len(short_episodes),\n ""avg_episode_len"": np.mean([ep[""length""] for ep in results]),\n ""episode_metadata"": results,\n }\n\n with open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(metadata, f)",python,selection_mouse +6534,9752833,"input_pipeline/preprocess/video_to_array_records.py",3164,404," ""num_successful_videos"": len(results) - len(failed_videos) - len(short_episodes),\n ""num_failed_videos"": len(failed_videos),\n ""num_short_videos"": len(short_episodes),\n ""avg_episode_len"": np.mean([ep[""length""] for ep in results]),\n ""episode_metadata"": results,\n }\n\n with open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(metadata, f)",python,selection_mouse +6535,9752863,"input_pipeline/preprocess/video_to_array_records.py",3125,443," ""total_videos"": len(results),\n ""num_successful_videos"": len(results) - len(failed_videos) - len(short_episodes),\n ""num_failed_videos"": len(failed_videos),\n ""num_short_videos"": len(short_episodes),\n ""avg_episode_len"": np.mean([ep[""length""] for ep in results]),\n ""episode_metadata"": results,\n }\n\n with open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(metadata, f)",python,selection_mouse +6536,9752886,"input_pipeline/preprocess/video_to_array_records.py",3094,474," ""env"": args.env_name,\n ""total_videos"": len(results),\n ""num_successful_videos"": len(results) - len(failed_videos) - len(short_episodes),\n ""num_failed_videos"": len(failed_videos),\n ""num_short_videos"": len(short_episodes),\n ""avg_episode_len"": np.mean([ep[""length""] for ep in results]),\n ""episode_metadata"": results,\n }\n\n with open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(metadata, f)",python,selection_mouse +6537,9752959,"input_pipeline/preprocess/video_to_array_records.py",3077,491," metadata = {\n ""env"": args.env_name,\n ""total_videos"": len(results),\n ""num_successful_videos"": len(results) - len(failed_videos) - len(short_episodes),\n ""num_failed_videos"": len(failed_videos),\n ""num_short_videos"": len(short_episodes),\n ""avg_episode_len"": np.mean([ep[""length""] for ep in results]),\n ""episode_metadata"": results,\n }\n\n with open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(metadata, f)",python,selection_mouse +6538,9755580,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab 
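Records 6530–6537 capture, via a widening mouse selection, the complete summary block this session has just assembled in input_pipeline/preprocess/video_to_array_records.py. Reassembled from the recorded text as a readable sketch (the earlier definitions of results, failed_videos, short_episodes, and args, plus the json, os, and numpy-as-np imports, are assumed from context rather than visible in this section):

    # Summary metadata assembled in video_to_array_records.py (records 6530-6537).
    metadata = {
        "env": args.env_name,
        "total_videos": len(results),
        "num_successful_videos": len(results) - len(failed_videos) - len(short_episodes),
        "num_failed_videos": len(failed_videos),
        "num_short_videos": len(short_episodes),
        # np.mean returns np.float64, which the stdlib json encoder rejects;
        # wrapping it in float(...) would be safer for the dump below.
        "avg_episode_len": np.mean([ep["length"] for ep in results]),
        "episode_metadata": results,
    }

    with open(os.path.join(args.output_path, "metadata.json"), "w") as f:
        json.dump(metadata, f)

Record 6558 then pastes this same block into pngs_to_array_records.py, replacing the older json.dump(results, f) call removed in record 6557.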
+6539,9757224,"input_pipeline/preprocess/pngs_to_array_records.py",4124,0,"",python,selection_mouse +6540,9757443,"input_pipeline/preprocess/pngs_to_array_records.py",4123,1,")",python,selection_mouse +6541,9757444,"input_pipeline/preprocess/pngs_to_array_records.py",4121,3," f)",python,selection_mouse +6542,9757444,"input_pipeline/preprocess/pngs_to_array_records.py",4116,8,"ults, f)",python,selection_mouse +6543,9757444,"input_pipeline/preprocess/pngs_to_array_records.py",4113,11,"results, f)",python,selection_mouse +6544,9757444,"input_pipeline/preprocess/pngs_to_array_records.py",4111,13,"p(results, f)",python,selection_mouse +6545,9757480,"input_pipeline/preprocess/pngs_to_array_records.py",4109,15,"ump(results, f)",python,selection_mouse +6546,9757481,"input_pipeline/preprocess/pngs_to_array_records.py",4033,91,"n(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(results, f)",python,selection_mouse +6547,9757497,"input_pipeline/preprocess/pngs_to_array_records.py",4031,93,"pen(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(results, f)",python,selection_mouse +6548,9757555,"input_pipeline/preprocess/pngs_to_array_records.py",4030,94,"open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(results, f)",python,selection_mouse +6549,9757555,"input_pipeline/preprocess/pngs_to_array_records.py",4029,95," open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(results, f)",python,selection_mouse +6550,9757557,"input_pipeline/preprocess/pngs_to_array_records.py",4028,96,"h open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(results, f)",python,selection_mouse +6551,9757575,"input_pipeline/preprocess/pngs_to_array_records.py",4027,97,"th open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(results, f)",python,selection_mouse +6552,9757594,"input_pipeline/preprocess/pngs_to_array_records.py",4026,98,"ith open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(results, f)",python,selection_mouse +6553,9757634,"input_pipeline/preprocess/pngs_to_array_records.py",4024,100," with open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(results, f)",python,selection_mouse +6554,9757676,"input_pipeline/preprocess/pngs_to_array_records.py",4023,101," with open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(results, f)",python,selection_mouse +6555,9757677,"input_pipeline/preprocess/pngs_to_array_records.py",4022,102," with open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(results, f)",python,selection_mouse +6556,9757746,"input_pipeline/preprocess/pngs_to_array_records.py",4021,103," with open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(results, f)",python,selection_mouse +6557,9758333,"input_pipeline/preprocess/pngs_to_array_records.py",4021,103,"",python,content +6558,9758847,"input_pipeline/preprocess/pngs_to_array_records.py",4021,0," metadata = {\n ""env"": args.env_name,\n ""total_videos"": len(results),\n ""num_successful_videos"": len(results) - len(failed_videos) - len(short_episodes),\n ""num_failed_videos"": len(failed_videos),\n ""num_short_videos"": len(short_episodes),\n ""avg_episode_len"": np.mean([ep[""length""] for ep in results]),\n ""episode_metadata"": results,\n }\n\n with open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(metadata, f)",python,content 
+6559,9760242,"input_pipeline/preprocess/pngs_to_array_records.py",4512,0,"\n ",python,content +6560,9760345,"input_pipeline/preprocess/pngs_to_array_records.py",4513,8,"",python,content +6561,9761112,"input_pipeline/preprocess/pngs_to_array_records.py",4407,0,"",python,selection_mouse +6562,9767799,"input_pipeline/preprocess/pngs_to_array_records.py",3747,0,"",python,selection_mouse +6563,9768149,"input_pipeline/preprocess/pngs_to_array_records.py",3742,6,"Number",python,selection_mouse +6564,9768304,"input_pipeline/preprocess/pngs_to_array_records.py",3730,60," print(f""Number of failed videos: {len(failed_videos)}"")\n",python,selection_mouse +6565,9768885,"input_pipeline/preprocess/pngs_to_array_records.py",3665,0,"",python,selection_mouse +6566,9769777,"input_pipeline/preprocess/pngs_to_array_records.py",3729,0,"\n ",python,content +6567,9770198,"input_pipeline/preprocess/pngs_to_array_records.py",3734,0,"s",python,content +6568,9770198,"input_pipeline/preprocess/pngs_to_array_records.py",3735,0,"",python,selection_keyboard +6569,9770295,"input_pipeline/preprocess/pngs_to_array_records.py",3735,0,"u",python,content +6570,9770296,"input_pipeline/preprocess/pngs_to_array_records.py",3736,0,"",python,selection_keyboard +6571,9770555,"input_pipeline/preprocess/pngs_to_array_records.py",3736,0,"c",python,content +6572,9770556,"input_pipeline/preprocess/pngs_to_array_records.py",3737,0,"",python,selection_keyboard +6573,9771505,"input_pipeline/preprocess/pngs_to_array_records.py",3737,0,"c",python,content +6574,9771506,"input_pipeline/preprocess/pngs_to_array_records.py",3738,0,"",python,selection_keyboard +6575,9771697,"input_pipeline/preprocess/pngs_to_array_records.py",3738,0,"e",python,content +6576,9771698,"input_pipeline/preprocess/pngs_to_array_records.py",3739,0,"",python,selection_keyboard +6577,9771849,"input_pipeline/preprocess/pngs_to_array_records.py",3739,0,"s",python,content +6578,9771850,"input_pipeline/preprocess/pngs_to_array_records.py",3740,0,"",python,selection_keyboard +6579,9771987,"input_pipeline/preprocess/pngs_to_array_records.py",3740,0,"s",python,content +6580,9771988,"input_pipeline/preprocess/pngs_to_array_records.py",3741,0,"",python,selection_keyboard +6581,9772165,"input_pipeline/preprocess/pngs_to_array_records.py",3741,0,"f",python,content +6582,9772166,"input_pipeline/preprocess/pngs_to_array_records.py",3742,0,"",python,selection_keyboard +6583,9772523,"input_pipeline/preprocess/pngs_to_array_records.py",3742,0,"u",python,content +6584,9772524,"input_pipeline/preprocess/pngs_to_array_records.py",3743,0,"",python,selection_keyboard +6585,9772914,"input_pipeline/preprocess/pngs_to_array_records.py",3743,0,"l",python,content +6586,9772915,"input_pipeline/preprocess/pngs_to_array_records.py",3744,0,"",python,selection_keyboard +6587,9773235,"input_pipeline/preprocess/pngs_to_array_records.py",3744,0,"_",python,content +6588,9773236,"input_pipeline/preprocess/pngs_to_array_records.py",3745,0,"",python,selection_keyboard +6589,9773595,"input_pipeline/preprocess/pngs_to_array_records.py",3745,0,"i",python,content +6590,9773595,"input_pipeline/preprocess/pngs_to_array_records.py",3746,0,"",python,selection_keyboard +6591,9773964,"input_pipeline/preprocess/pngs_to_array_records.py",3745,1,"",python,content +6592,9774648,"input_pipeline/preprocess/pngs_to_array_records.py",3745,0,"v",python,content +6593,9774649,"input_pipeline/preprocess/pngs_to_array_records.py",3746,0,"",python,selection_keyboard 
+6594,9774712,"input_pipeline/preprocess/pngs_to_array_records.py",3746,0,"i",python,content +6595,9774713,"input_pipeline/preprocess/pngs_to_array_records.py",3747,0,"",python,selection_keyboard +6596,9774858,"input_pipeline/preprocess/pngs_to_array_records.py",3747,0,"d",python,content +6597,9774858,"input_pipeline/preprocess/pngs_to_array_records.py",3748,0,"",python,selection_keyboard +6598,9775033,"input_pipeline/preprocess/pngs_to_array_records.py",3748,0,"e",python,content +6599,9775034,"input_pipeline/preprocess/pngs_to_array_records.py",3749,0,"",python,selection_keyboard +6600,9775159,"input_pipeline/preprocess/pngs_to_array_records.py",3749,0,"o",python,content +6601,9775160,"input_pipeline/preprocess/pngs_to_array_records.py",3750,0,"",python,selection_keyboard +6602,9775457,"input_pipeline/preprocess/pngs_to_array_records.py",3750,0,"s",python,content +6603,9775457,"input_pipeline/preprocess/pngs_to_array_records.py",3751,0,"",python,selection_keyboard +6604,9778973,"input_pipeline/preprocess/pngs_to_array_records.py",3734,0,"",python,selection_mouse +6605,9779609,"input_pipeline/preprocess/pngs_to_array_records.py",3734,0,"n",python,content +6606,9779610,"input_pipeline/preprocess/pngs_to_array_records.py",3735,0,"",python,selection_keyboard +6607,9779848,"input_pipeline/preprocess/pngs_to_array_records.py",3735,0,"u",python,content +6608,9779849,"input_pipeline/preprocess/pngs_to_array_records.py",3736,0,"",python,selection_keyboard +6609,9779947,"input_pipeline/preprocess/pngs_to_array_records.py",3736,0,"m",python,content +6610,9779948,"input_pipeline/preprocess/pngs_to_array_records.py",3737,0,"",python,selection_keyboard +6611,9780192,"input_pipeline/preprocess/pngs_to_array_records.py",3737,0,"_",python,content +6612,9780193,"input_pipeline/preprocess/pngs_to_array_records.py",3738,0,"",python,selection_keyboard +6613,9781216,"input_pipeline/preprocess/pngs_to_array_records.py",3755,0,"",python,selection_mouse +6614,9781396,"input_pipeline/preprocess/pngs_to_array_records.py",3755,0," ",python,content +6615,9781396,"input_pipeline/preprocess/pngs_to_array_records.py",3756,0,"",python,selection_keyboard +6616,9781696,"input_pipeline/preprocess/pngs_to_array_records.py",3756,0,"=",python,content +6617,9781697,"input_pipeline/preprocess/pngs_to_array_records.py",3757,0,"",python,selection_keyboard +6618,9782062,"input_pipeline/preprocess/pngs_to_array_records.py",3757,0," ",python,content +6619,9782062,"input_pipeline/preprocess/pngs_to_array_records.py",3758,0,"",python,selection_keyboard +6620,9783106,"input_pipeline/preprocess/pngs_to_array_records.py",3931,0,"",python,selection_mouse +6621,9783201,"input_pipeline/preprocess/pngs_to_array_records.py",3931,1,"{",python,selection_mouse +6622,9783257,"input_pipeline/preprocess/pngs_to_array_records.py",3931,5,"{len(",python,selection_mouse +6623,9783258,"input_pipeline/preprocess/pngs_to_array_records.py",3931,17,"{len(results) - l",python,selection_mouse +6624,9783258,"input_pipeline/preprocess/pngs_to_array_records.py",3931,26,"{len(results) - len(failed",python,selection_mouse +6625,9783258,"input_pipeline/preprocess/pngs_to_array_records.py",3931,34,"{len(results) - len(failed_videos)",python,selection_mouse +6626,9783275,"input_pipeline/preprocess/pngs_to_array_records.py",3931,40,"{len(results) - len(failed_videos) - len",python,selection_mouse +6627,9783317,"input_pipeline/preprocess/pngs_to_array_records.py",3931,46,"{len(results) - len(failed_videos) - len(short",python,selection_mouse 
+6628,9783333,"input_pipeline/preprocess/pngs_to_array_records.py",3931,64,"{len(results) - len(failed_videos) - len(short_episodes)}""\n )",python,selection_mouse +6629,9783716,"input_pipeline/preprocess/pngs_to_array_records.py",3931,56,"{len(results) - len(failed_videos) - len(short_episodes)",python,selection_mouse +6630,9786117,"input_pipeline/preprocess/pngs_to_array_records.py",3931,56,"",python,content +6631,9786118,"input_pipeline/preprocess/pngs_to_array_records.py",3931,0,"",python,selection_keyboard +6632,9786797,"input_pipeline/preprocess/pngs_to_array_records.py",3758,0,"",python,selection_mouse +6633,9786995,"input_pipeline/preprocess/pngs_to_array_records.py",3758,0,"{len(results) - len(failed_videos) - len(short_episodes)",python,content +6634,9787903,"input_pipeline/preprocess/pngs_to_array_records.py",3987,0,"",python,selection_mouse +6635,9788605,"input_pipeline/preprocess/pngs_to_array_records.py",3987,0,"n",python,content +6636,9788606,"input_pipeline/preprocess/pngs_to_array_records.py",3988,0,"",python,selection_keyboard +6637,9788742,"input_pipeline/preprocess/pngs_to_array_records.py",3988,0,"u",python,content +6638,9788743,"input_pipeline/preprocess/pngs_to_array_records.py",3989,0,"",python,selection_keyboard +6639,9789203,"input_pipeline/preprocess/pngs_to_array_records.py",3989,0,"m",python,content +6640,9789204,"input_pipeline/preprocess/pngs_to_array_records.py",3990,0,"",python,selection_keyboard +6641,9790202,"input_pipeline/preprocess/pngs_to_array_records.py",3990,0,"_",python,content +6642,9790203,"input_pipeline/preprocess/pngs_to_array_records.py",3991,0,"",python,selection_keyboard +6643,9791053,"input_pipeline/preprocess/pngs_to_array_records.py",3990,1,"",python,content +6644,9791356,"input_pipeline/preprocess/pngs_to_array_records.py",3987,3,"",python,content +6645,9792759,"input_pipeline/preprocess/pngs_to_array_records.py",3986,1,"",python,content +6646,9793208,"input_pipeline/preprocess/pngs_to_array_records.py",3987,0,"",python,selection_command +6647,9793758,"input_pipeline/preprocess/pngs_to_array_records.py",3986,0,"",python,selection_command +6648,9794160,"input_pipeline/preprocess/pngs_to_array_records.py",3986,0," ",python,content +6649,9794161,"input_pipeline/preprocess/pngs_to_array_records.py",3987,0,"",python,selection_keyboard +6650,9795170,"input_pipeline/preprocess/pngs_to_array_records.py",3987,0,"{}",python,content +6651,9795171,"input_pipeline/preprocess/pngs_to_array_records.py",3988,0,"",python,selection_keyboard +6652,9795416,"input_pipeline/preprocess/pngs_to_array_records.py",3988,0,"n",python,content +6653,9795417,"input_pipeline/preprocess/pngs_to_array_records.py",3989,0,"",python,selection_keyboard +6654,9795583,"input_pipeline/preprocess/pngs_to_array_records.py",3989,0,"u",python,content +6655,9795584,"input_pipeline/preprocess/pngs_to_array_records.py",3990,0,"",python,selection_keyboard +6656,9795721,"input_pipeline/preprocess/pngs_to_array_records.py",3990,0,"m",python,content +6657,9795721,"input_pipeline/preprocess/pngs_to_array_records.py",3991,0,"",python,selection_keyboard +6658,9796946,"input_pipeline/preprocess/pngs_to_array_records.py",3988,3,"num_successful_videos",python,content +6659,9800496,"input_pipeline/preprocess/pngs_to_array_records.py",4010,0,"",python,selection_mouse +6660,9801504,"input_pipeline/preprocess/pngs_to_array_records.py",4010,1,"",python,content +6661,9805886,"input_pipeline/preprocess/pngs_to_array_records.py",3759,0,"",python,selection_mouse 
+6662,9806455,"input_pipeline/preprocess/pngs_to_array_records.py",3758,1,"",python,content +6663,9811567,"input_pipeline/preprocess/pngs_to_array_records.py",3955,0,"",python,selection_mouse +6664,9812179,"input_pipeline/preprocess/pngs_to_array_records.py",3951,4,"",python,content +6665,9812309,"input_pipeline/preprocess/pngs_to_array_records.py",3947,4,"",python,content +6666,9812683,"input_pipeline/preprocess/pngs_to_array_records.py",3946,1,"",python,content +6667,9813584,"input_pipeline/preprocess/pngs_to_array_records.py",4006,0,"",python,selection_mouse +6668,9813887,"input_pipeline/preprocess/pngs_to_array_records.py",4002,4,"",python,content +6669,9813996,"input_pipeline/preprocess/pngs_to_array_records.py",4001,1,"",python,content +6670,9815379,"input_pipeline/preprocess/pngs_to_array_records.py",4002,0,"",python,selection_mouse +6671,9815543,"input_pipeline/preprocess/pngs_to_array_records.py",3994,8,"ideos}"")",python,selection_mouse +6672,9815560,"input_pipeline/preprocess/pngs_to_array_records.py",3988,14,"sful_videos}"")",python,selection_mouse +6673,9815578,"input_pipeline/preprocess/pngs_to_array_records.py",3980,22,"m_successful_videos}"")",python,selection_mouse +6674,9815595,"input_pipeline/preprocess/pngs_to_array_records.py",3973,29,"os: {num_successful_videos}"")",python,selection_mouse +6675,9815652,"input_pipeline/preprocess/pngs_to_array_records.py",3899,103,"rt episodes: {len(short_episodes)}"")\n print(f""Number of successful videos: {num_successful_videos}"")",python,selection_mouse +6676,9815652,"input_pipeline/preprocess/pngs_to_array_records.py",3892,110," of short episodes: {len(short_episodes)}"")\n print(f""Number of successful videos: {num_successful_videos}"")",python,selection_mouse +6677,9815676,"input_pipeline/preprocess/pngs_to_array_records.py",3890,112,"er of short episodes: {len(short_episodes)}"")\n print(f""Number of successful videos: {num_successful_videos}"")",python,selection_mouse +6678,9815700,"input_pipeline/preprocess/pngs_to_array_records.py",3885,117,"""Number of short episodes: {len(short_episodes)}"")\n print(f""Number of successful videos: {num_successful_videos}"")",python,selection_mouse +6679,9815731,"input_pipeline/preprocess/pngs_to_array_records.py",3882,120,"t(f""Number of short episodes: {len(short_episodes)}"")\n print(f""Number of successful videos: {num_successful_videos}"")",python,selection_mouse +6680,9815774,"input_pipeline/preprocess/pngs_to_array_records.py",3820,182,"int(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short episodes: {len(short_episodes)}"")\n print(f""Number of successful videos: {num_successful_videos}"")",python,selection_mouse +6681,9815808,"input_pipeline/preprocess/pngs_to_array_records.py",3816,186," print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short episodes: {len(short_episodes)}"")\n print(f""Number of successful videos: {num_successful_videos}"")",python,selection_mouse +6682,9815829,"input_pipeline/preprocess/pngs_to_array_records.py",3815,187," print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short episodes: {len(short_episodes)}"")\n print(f""Number of successful videos: {num_successful_videos}"")",python,selection_mouse +6683,9815849,"input_pipeline/preprocess/pngs_to_array_records.py",3814,188," print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short episodes: {len(short_episodes)}"")\n print(f""Number of successful videos: 
{num_successful_videos}"")",python,selection_mouse +6684,9816031,"input_pipeline/preprocess/pngs_to_array_records.py",3730,272," num_successful_videos = len(results) - len(failed_videos) - len(short_episodes)\n print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short episodes: {len(short_episodes)}"")\n print(f""Number of successful videos: {num_successful_videos}"")",python,selection_mouse +6685,9818574,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +6686,9819548,"input_pipeline/preprocess/video_to_array_records.py",3384,0,"",python,selection_mouse +6687,9820844,"input_pipeline/preprocess/video_to_array_records.py",3022,0,"",python,selection_mouse +6688,9821009,"input_pipeline/preprocess/video_to_array_records.py",2928,94,"""Number of successful videos: {len(results) - len(failed_videos) - len(short_episodes)}""\n )",python,selection_mouse +6689,9821024,"input_pipeline/preprocess/video_to_array_records.py",2925,97," f""Number of successful videos: {len(results) - len(failed_videos) - len(short_episodes)}""\n )",python,selection_mouse +6690,9821079,"input_pipeline/preprocess/video_to_array_records.py",2912,110,"print(\n f""Number of successful videos: {len(results) - len(failed_videos) - len(short_episodes)}""\n )",python,selection_mouse +6691,9821080,"input_pipeline/preprocess/video_to_array_records.py",2910,112," print(\n f""Number of successful videos: {len(results) - len(failed_videos) - len(short_episodes)}""\n )",python,selection_mouse +6692,9821080,"input_pipeline/preprocess/video_to_array_records.py",2909,113," print(\n f""Number of successful videos: {len(results) - len(failed_videos) - len(short_episodes)}""\n )",python,selection_mouse +6693,9821106,"input_pipeline/preprocess/video_to_array_records.py",2846,176," print(f""Number of short episodes: {len(short_episodes)}"")\n print(\n f""Number of successful videos: {len(results) - len(failed_videos) - len(short_episodes)}""\n )",python,selection_mouse +6694,9821167,"input_pipeline/preprocess/video_to_array_records.py",2786,236," print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short episodes: {len(short_episodes)}"")\n print(\n f""Number of successful videos: {len(results) - len(failed_videos) - len(short_episodes)}""\n )",python,selection_mouse +6695,9821615,"input_pipeline/preprocess/video_to_array_records.py",2707,315," short_episodes = [result for result in results if result[""length""] < 1600]\n print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short episodes: {len(short_episodes)}"")\n print(\n f""Number of successful videos: {len(results) - len(failed_videos) - len(short_episodes)}""\n )",python,selection_mouse +6696,9822441,"input_pipeline/preprocess/video_to_array_records.py",2786,236," print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short episodes: {len(short_episodes)}"")\n print(\n f""Number of successful videos: {len(results) - len(failed_videos) - len(short_episodes)}""\n )",python,selection_mouse +6697,9822590,"input_pipeline/preprocess/video_to_array_records.py",2846,176," print(f""Number of short episodes: {len(short_episodes)}"")\n print(\n f""Number of successful videos: {len(results) - len(failed_videos) - len(short_episodes)}""\n )",python,selection_mouse +6698,9822836,"input_pipeline/preprocess/video_to_array_records.py",2786,236," print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short episodes: {len(short_episodes)}"")\n print(\n 
f""Number of successful videos: {len(results) - len(failed_videos) - len(short_episodes)}""\n )",python,selection_mouse +6699,9824365,"input_pipeline/preprocess/video_to_array_records.py",2786,236,"",python,content +6700,9824766,"input_pipeline/preprocess/video_to_array_records.py",2786,0," num_successful_videos = len(results) - len(failed_videos) - len(short_episodes)\n print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short episodes: {len(short_episodes)}"")\n print(f""Number of successful videos: {num_successful_videos}"")",python,content +6701,9831231,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +6702,9832282,"input_pipeline/preprocess/pngs_to_array_records.py",3180,0,"",python,selection_mouse +6703,9832776,"input_pipeline/preprocess/pngs_to_array_records.py",4055,0,"",python,selection_mouse +6704,9833208,"input_pipeline/preprocess/pngs_to_array_records.py",3935,0,"",python,selection_mouse +6705,9833702,"input_pipeline/preprocess/pngs_to_array_records.py",4002,0,"",python,selection_mouse +6706,9834181,"input_pipeline/preprocess/pngs_to_array_records.py",4055,0,"",python,selection_mouse +6707,9907525,"input_pipeline/preprocess/pngs_to_array_records.py",4073,0,"",python,selection_mouse +6708,9908145,"input_pipeline/preprocess/pngs_to_array_records.py",4056,0,"",python,selection_mouse +6709,9911571,"input_pipeline/preprocess/pngs_to_array_records.py",4028,0,"",python,selection_mouse +6710,9914581,"input_pipeline/preprocess/pngs_to_array_records.py",4156,0,"",python,selection_mouse +6711,9915101,"input_pipeline/preprocess/pngs_to_array_records.py",4121,0,"",python,selection_mouse +6712,9915915,"input_pipeline/preprocess/pngs_to_array_records.py",4353,0,"",python,selection_mouse +6713,9916595,"input_pipeline/preprocess/pngs_to_array_records.py",4298,0,"",python,selection_mouse +6714,9917180,"input_pipeline/preprocess/pngs_to_array_records.py",4300,0,"",python,selection_mouse +6715,9917748,"input_pipeline/preprocess/pngs_to_array_records.py",4367,0,"",python,selection_mouse +6716,9918716,"input_pipeline/preprocess/pngs_to_array_records.py",3588,0,"",python,selection_mouse +6717,9919257,"input_pipeline/preprocess/pngs_to_array_records.py",3664,0,"",python,selection_mouse +6718,9920516,"input_pipeline/preprocess/pngs_to_array_records.py",3658,0,"",python,selection_mouse +6719,9921996,"input_pipeline/preprocess/pngs_to_array_records.py",3657,0,"",python,selection_mouse +6720,9922433,"input_pipeline/preprocess/pngs_to_array_records.py",3661,0,"",python,selection_mouse +6721,9923090,"input_pipeline/preprocess/pngs_to_array_records.py",3655,14,"short_episodes",python,selection_mouse +6722,9928870,"input_pipeline/preprocess/pngs_to_array_records.py",3669,0,"",python,selection_command +6723,9928885,"input_pipeline/preprocess/pngs_to_array_records.py",4326,0,"o",python,content +6724,9928886,"input_pipeline/preprocess/pngs_to_array_records.py",4322,2,"",python,content +6725,9928886,"input_pipeline/preprocess/pngs_to_array_records.py",4319,2,"v",python,content +6726,9928886,"input_pipeline/preprocess/pngs_to_array_records.py",4228,0,"o",python,content +6727,9928886,"input_pipeline/preprocess/pngs_to_array_records.py",4224,2,"",python,content +6728,9928886,"input_pipeline/preprocess/pngs_to_array_records.py",4221,2,"v",python,content +6729,9928886,"input_pipeline/preprocess/pngs_to_array_records.py",3930,0,"o",python,content +6730,9928886,"input_pipeline/preprocess/pngs_to_array_records.py",3926,2,"",python,content 
+6731,9928886,"input_pipeline/preprocess/pngs_to_array_records.py",3923,2,"v",python,content +6732,9928886,"input_pipeline/preprocess/pngs_to_array_records.py",3811,0,"o",python,content +6733,9928886,"input_pipeline/preprocess/pngs_to_array_records.py",3807,2,"",python,content +6734,9928886,"input_pipeline/preprocess/pngs_to_array_records.py",3804,2,"v",python,content +6735,9928887,"input_pipeline/preprocess/pngs_to_array_records.py",3668,0,"o",python,content +6736,9928887,"input_pipeline/preprocess/pngs_to_array_records.py",3664,2,"",python,content +6737,9928887,"input_pipeline/preprocess/pngs_to_array_records.py",3661,2,"v",python,content +6738,9930373,"input_pipeline/preprocess/pngs_to_array_records.py",3586,0,"",python,selection_mouse +6739,9930497,"input_pipeline/preprocess/pngs_to_array_records.py",3579,13,"failed_videos",python,selection_mouse +6740,9944029,"input_pipeline/preprocess/pngs_to_array_records.py",3741,0,"",python,selection_mouse +6741,9944206,"input_pipeline/preprocess/pngs_to_array_records.py",3732,21,"num_successful_videos",python,selection_mouse +6742,9947371,"input_pipeline/preprocess/pngs_to_array_records.py",3901,0,"",python,selection_mouse +6743,9948829,"input_pipeline/preprocess/pngs_to_array_records.py",3904,1,"e",python,selection_command +6744,9948990,"input_pipeline/preprocess/pngs_to_array_records.py",4333,2,"ep",python,selection_command +6745,9949181,"input_pipeline/preprocess/pngs_to_array_records.py",4333,3,"epi",python,selection_command +6746,9949336,"input_pipeline/preprocess/pngs_to_array_records.py",4333,4,"epis",python,selection_command +6747,9949401,"input_pipeline/preprocess/pngs_to_array_records.py",4333,5,"episo",python,selection_command +6748,9949767,"input_pipeline/preprocess/pngs_to_array_records.py",4333,6,"episod",python,selection_command +6749,9949973,"input_pipeline/preprocess/pngs_to_array_records.py",4333,7,"episode",python,selection_command +6750,9962046,"input_pipeline/preprocess/pngs_to_array_records.py",3901,0,"",python,selection_mouse +6751,9962210,"input_pipeline/preprocess/pngs_to_array_records.py",3898,8,"episodes",python,selection_mouse +6752,9962990,"input_pipeline/preprocess/pngs_to_array_records.py",3898,8,"",python,content +6753,9963649,"input_pipeline/preprocess/pngs_to_array_records.py",3898,0,"v",python,content +6754,9963650,"input_pipeline/preprocess/pngs_to_array_records.py",3899,0,"",python,selection_keyboard +6755,9963763,"input_pipeline/preprocess/pngs_to_array_records.py",3899,0,"i",python,content +6756,9963764,"input_pipeline/preprocess/pngs_to_array_records.py",3900,0,"",python,selection_keyboard +6757,9963886,"input_pipeline/preprocess/pngs_to_array_records.py",3900,0,"d",python,content +6758,9963887,"input_pipeline/preprocess/pngs_to_array_records.py",3901,0,"",python,selection_keyboard +6759,9964044,"input_pipeline/preprocess/pngs_to_array_records.py",3901,0,"e",python,content +6760,9964045,"input_pipeline/preprocess/pngs_to_array_records.py",3902,0,"",python,selection_keyboard +6761,9964168,"input_pipeline/preprocess/pngs_to_array_records.py",3902,0,"o",python,content +6762,9964168,"input_pipeline/preprocess/pngs_to_array_records.py",3903,0,"",python,selection_keyboard +6763,9965113,"input_pipeline/preprocess/pngs_to_array_records.py",3903,0,"s",python,content +6764,9965114,"input_pipeline/preprocess/pngs_to_array_records.py",3904,0,"",python,selection_keyboard +6765,9965760,"input_pipeline/preprocess/pngs_to_array_records.py",3903,0,"",python,selection_command 
+6766,9969787,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +6767,9971220,"input_pipeline/preprocess/video_to_array_records.py",2717,0,"",python,selection_mouse +6768,9971447,"input_pipeline/preprocess/video_to_array_records.py",2717,1,"e",python,selection_mouse +6769,9971448,"input_pipeline/preprocess/video_to_array_records.py",2717,2,"ep",python,selection_mouse +6770,9971448,"input_pipeline/preprocess/video_to_array_records.py",2717,3,"epi",python,selection_mouse +6771,9971449,"input_pipeline/preprocess/video_to_array_records.py",2717,4,"epis",python,selection_mouse +6772,9971486,"input_pipeline/preprocess/video_to_array_records.py",2717,5,"episo",python,selection_mouse +6773,9971542,"input_pipeline/preprocess/video_to_array_records.py",2717,6,"episod",python,selection_mouse +6774,9971748,"input_pipeline/preprocess/video_to_array_records.py",2717,7,"episode",python,selection_mouse +6775,9971946,"input_pipeline/preprocess/video_to_array_records.py",2717,8,"episodes",python,selection_mouse +6776,9972801,"input_pipeline/preprocess/video_to_array_records.py",2717,8,"v",python,content +6777,9972802,"input_pipeline/preprocess/video_to_array_records.py",2718,0,"",python,selection_keyboard +6778,9972909,"input_pipeline/preprocess/video_to_array_records.py",2718,0,"i",python,content +6779,9972910,"input_pipeline/preprocess/video_to_array_records.py",2719,0,"",python,selection_keyboard +6780,9973038,"input_pipeline/preprocess/video_to_array_records.py",2719,0,"d",python,content +6781,9973039,"input_pipeline/preprocess/video_to_array_records.py",2720,0,"",python,selection_keyboard +6782,9973149,"input_pipeline/preprocess/video_to_array_records.py",2720,0,"e",python,content +6783,9973150,"input_pipeline/preprocess/video_to_array_records.py",2721,0,"",python,selection_keyboard +6784,9973236,"input_pipeline/preprocess/video_to_array_records.py",2721,0,"o",python,content +6785,9973237,"input_pipeline/preprocess/video_to_array_records.py",2722,0,"",python,selection_keyboard +6786,9973344,"input_pipeline/preprocess/video_to_array_records.py",2722,0,"s",python,content +6787,9973345,"input_pipeline/preprocess/video_to_array_records.py",2723,0,"",python,selection_keyboard +6788,9974469,"input_pipeline/preprocess/video_to_array_records.py",2958,0,"",python,selection_mouse +6789,9974618,"input_pipeline/preprocess/video_to_array_records.py",2956,8,"episodes",python,selection_mouse +6790,9975697,"input_pipeline/preprocess/video_to_array_records.py",2932,32,"",python,content +6791,9976840,"input_pipeline/preprocess/video_to_array_records.py",2932,0,"print(f""Number of short episodes",python,content +6792,9977480,"input_pipeline/preprocess/video_to_array_records.py",2717,6,"episodes",python,content +6793,9978669,"input_pipeline/preprocess/video_to_array_records.py",2722,0,"",python,selection_mouse +6794,9978772,"input_pipeline/preprocess/video_to_array_records.py",2711,14,"short_episodes",python,selection_mouse +6795,9984273,"input_pipeline/preprocess/video_to_array_records.py",2725,0,"",python,selection_command +6796,9984292,"input_pipeline/preprocess/video_to_array_records.py",3382,0,"o",python,content +6797,9984292,"input_pipeline/preprocess/video_to_array_records.py",3378,2,"",python,content +6798,9984292,"input_pipeline/preprocess/video_to_array_records.py",3375,2,"v",python,content +6799,9984292,"input_pipeline/preprocess/video_to_array_records.py",3284,0,"o",python,content +6800,9984293,"input_pipeline/preprocess/video_to_array_records.py",3280,2,"",python,content 
+6801,9984293,"input_pipeline/preprocess/video_to_array_records.py",3277,2,"v",python,content +6802,9984293,"input_pipeline/preprocess/video_to_array_records.py",2986,0,"o",python,content +6803,9984293,"input_pipeline/preprocess/video_to_array_records.py",2982,2,"",python,content +6804,9984293,"input_pipeline/preprocess/video_to_array_records.py",2979,2,"v",python,content +6805,9984293,"input_pipeline/preprocess/video_to_array_records.py",2867,0,"o",python,content +6806,9984293,"input_pipeline/preprocess/video_to_array_records.py",2863,2,"",python,content +6807,9984293,"input_pipeline/preprocess/video_to_array_records.py",2860,2,"v",python,content +6808,9984293,"input_pipeline/preprocess/video_to_array_records.py",2724,0,"o",python,content +6809,9984293,"input_pipeline/preprocess/video_to_array_records.py",2720,2,"",python,content +6810,9984293,"input_pipeline/preprocess/video_to_array_records.py",2717,2,"v",python,content +6811,9985426,"input_pipeline/preprocess/video_to_array_records.py",2900,0,"",python,selection_mouse +6812,9985586,"input_pipeline/preprocess/video_to_array_records.py",2895,6,"videos",python,selection_mouse +6813,9986066,"input_pipeline/preprocess/video_to_array_records.py",2958,0,"",python,selection_mouse +6814,9986226,"input_pipeline/preprocess/video_to_array_records.py",2954,8,"episodes",python,selection_mouse +6815,9986697,"input_pipeline/preprocess/video_to_array_records.py",2954,8,"v",python,content +6816,9986699,"input_pipeline/preprocess/video_to_array_records.py",2955,0,"",python,selection_keyboard +6817,9986790,"input_pipeline/preprocess/video_to_array_records.py",2955,0,"i",python,content +6818,9986790,"input_pipeline/preprocess/video_to_array_records.py",2956,0,"",python,selection_keyboard +6819,9986956,"input_pipeline/preprocess/video_to_array_records.py",2956,0,"d",python,content +6820,9986957,"input_pipeline/preprocess/video_to_array_records.py",2957,0,"",python,selection_keyboard +6821,9987134,"input_pipeline/preprocess/video_to_array_records.py",2957,0,"e",python,content +6822,9987135,"input_pipeline/preprocess/video_to_array_records.py",2958,0,"",python,selection_keyboard +6823,9987214,"input_pipeline/preprocess/video_to_array_records.py",2958,0,"o",python,content +6824,9987215,"input_pipeline/preprocess/video_to_array_records.py",2959,0,"",python,selection_keyboard +6825,9987325,"input_pipeline/preprocess/video_to_array_records.py",2959,0,"s",python,content +6826,9987326,"input_pipeline/preprocess/video_to_array_records.py",2960,0,"",python,selection_keyboard +6827,10037250,"TERMINAL",0,0,"bash",,terminal_focus +6828,10041643,"TERMINAL",0,0,"bash",,terminal_focus +6829,10044568,"TERMINAL",0,0,"git status",,terminal_command +6830,10044611,"TERMINAL",0,0,"]633;E;2025-09-04 12:44:21 git status;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +6831,10045037,"TERMINAL",0,0,"On branch input_pipeline/add-npy2array_record\r\nYour branch is up to date with 'origin/input_pipeline/add-npy2array_record'.\r\n\r\nChanges not staged for commit:\r\n (use ""git add ..."" to update what will be committed)\r\n (use ""git restore ..."" to discard changes in working directory)\r\n\tmodified: generate_dataset.py\r\n\tmodified: input_pipeline/preprocess/pngs_to_array_records.py\r\n\tmodified: input_pipeline/preprocess/video_to_array_records.py\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be 
committed)\r\n\tdata/\r\n\tdata_atari/\r\n\tkiller.sh\r\n\tkiller_partition.sh\r\n\tlog.log\r\n\tlogs/\r\n\toverfit_dir.zip\r\n\tread_tf_record.py\r\n\trequirements-franz.txt\r\n\tsamples/\r\n\tscripts_cremers/\r\n\tslurm/\r\n\tutils/visualizer.py\r\n\r\nno changes added to commit (use ""git add"" and/or ""git commit -a"")\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +6832,10056068,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +6833,10060559,"input_pipeline/preprocess/pngs_to_array_records.py",366,0,"",python,selection_mouse +6834,10061520,"input_pipeline/preprocess/pngs_to_array_records.py",367,0,"\n ",python,content +6835,10062889,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +6836,10064841,"input_pipeline/preprocess/video_to_array_records.py",372,0,"",python,selection_mouse +6837,10065369,"input_pipeline/preprocess/video_to_array_records.py",371,0,"",python,selection_command +6838,10067419,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +6839,10068140,"input_pipeline/preprocess/pngs_to_array_records.py",346,0,"",python,selection_mouse +6840,10068736,"input_pipeline/preprocess/pngs_to_array_records.py",345,0,"",python,selection_command +6841,10069461,"input_pipeline/preprocess/pngs_to_array_records.py",368,4,"",python,content +6842,10069461,"input_pipeline/preprocess/pngs_to_array_records.py",367,0,"\n env_name: str = ""minecraft""",python,content +6843,10069497,"input_pipeline/preprocess/pngs_to_array_records.py",372,0,"",python,selection_command +6844,10070422,"input_pipeline/preprocess/pngs_to_array_records.py",399,0,"",python,selection_command +6845,10070761,"input_pipeline/preprocess/pngs_to_array_records.py",398,0,"",python,selection_command +6846,10071051,"input_pipeline/preprocess/pngs_to_array_records.py",389,9,"",python,content +6847,10071344,"input_pipeline/preprocess/pngs_to_array_records.py",389,0,"a",python,content +6848,10071345,"input_pipeline/preprocess/pngs_to_array_records.py",390,0,"",python,selection_keyboard +6849,10071652,"input_pipeline/preprocess/pngs_to_array_records.py",390,0,"t",python,content +6850,10071653,"input_pipeline/preprocess/pngs_to_array_records.py",391,0,"",python,selection_keyboard +6851,10071851,"input_pipeline/preprocess/pngs_to_array_records.py",391,0,"a",python,content +6852,10071852,"input_pipeline/preprocess/pngs_to_array_records.py",392,0,"",python,selection_keyboard +6853,10072063,"input_pipeline/preprocess/pngs_to_array_records.py",392,0,"r",python,content +6854,10072064,"input_pipeline/preprocess/pngs_to_array_records.py",393,0,"",python,selection_keyboard +6855,10072141,"input_pipeline/preprocess/pngs_to_array_records.py",393,0,"i",python,content +6856,10072142,"input_pipeline/preprocess/pngs_to_array_records.py",394,0,"",python,selection_keyboard +6857,10072441,"input_pipeline/preprocess/pngs_to_array_records.py",393,0,"",python,selection_command +6858,10073274,"input_pipeline/preprocess/pngs_to_array_records.py",396,0,"",python,selection_mouse +6859,10074587,"input_pipeline/preprocess/pngs_to_array_records.py",396,1,"",python,content +6860,10075594,"input_pipeline/preprocess/pngs_to_array_records.py",386,0,"",python,selection_mouse +6861,10076108,"input_pipeline/preprocess/pngs_to_array_records.py",377,0,"",python,selection_mouse +6862,10076410,"input_pipeline/preprocess/pngs_to_array_records.py",372,8,"env_name",python,selection_mouse +6863,10081123,"TERMINAL",0,0,"bash",,terminal_focus 
+6864,10082783,"TERMINAL",0,0,"bash",,terminal_focus +6865,10124439,"TERMINAL",0,0,"git commit -am ""standardized metadata""",,terminal_command +6866,10124490,"TERMINAL",0,0,"]633;E;2025-09-04 12:45:41 git commit -am ""standardized metadata"";86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +6867,10124836,"TERMINAL",0,0,"[input_pipeline/add-npy2array_record c2533c8] standardized metadata\r\n 3 files changed, 42 insertions(+), 21 deletions(-)\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +6868,10127149,"TERMINAL",0,0,"git push",,terminal_command +6869,10127198,"TERMINAL",0,0,"]633;E;2025-09-04 12:45:44 git push;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +6870,10144016,"TERMINAL",0,0,"bash",,terminal_focus +6871,10146242,"TERMINAL",0,0,"git",,terminal_focus +6872,10146652,"TERMINAL",0,0,"^C\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;130",,terminal_output +6873,10150391,"TERMINAL",0,0,"ping google.com",,terminal_command +6874,10150446,"TERMINAL",0,0,"]633;E;2025-09-04 12:46:07 ping google.com;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +6875,10151635,"TERMINAL",0,0,"^C\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;130",,terminal_output +6876,10152740,"TERMINAL",0,0,"bash",,terminal_focus +6877,10154742,"TERMINAL",0,0,"git push",,terminal_command +6878,10154785,"TERMINAL",0,0,"]633;E;2025-09-04 12:46:11 git push;98e58a9e-4278-4ee6-ae9b-7614f41efecb]633;C",,terminal_output +6879,10155396,"TERMINAL",0,0,"^C\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;130",,terminal_output +6880,10158738,"TERMINAL",0,0,"bash",,terminal_focus +6881,10159971,"TERMINAL",0,0,"bash",,terminal_focus +6882,10162650,"TERMINAL",0,0,"ping google.com",,terminal_command +6883,10162699,"TERMINAL",0,0,"]633;E;2025-09-04 12:46:19 ping google.com;98e58a9e-4278-4ee6-ae9b-7614f41efecb]633;C",,terminal_output +6884,10162872,"TERMINAL",0,0,"PING google.com(fra24s05-in-x0e.1e100.net (2a00:1450:4001:828::200e)) 56 data bytes\r\n",,terminal_output +6885,10163989,"TERMINAL",0,0,"64 bytes from fra24s05-in-x0e.1e100.net (2a00:1450:4001:828::200e): icmp_seq=2 ttl=116 time=5.85 ms\r\n",,terminal_output +6886,10164043,"TERMINAL",0,0,"64 bytes from fra24s05-in-x0e.1e100.net (2a00:1450:4001:828::200e): icmp_seq=1 ttl=116 time=1154 ms\r\n",,terminal_output +6887,10164320,"TERMINAL",0,0,"^C\r\n--- google.com ping statistics ---\r\n2 packets transmitted, 2 received, 0% packet loss, time 1026ms\r\nrtt min/avg/max/mdev = 5.853/579.991/1154.130/574.138 ms, pipe 2\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +6888,10165533,"TERMINAL",0,0,"git push",,terminal_command +6889,10165578,"TERMINAL",0,0,"]633;E;2025-09-04 12:46:22 git push;98e58a9e-4278-4ee6-ae9b-7614f41efecb]633;C",,terminal_output +6890,10168970,"TERMINAL",0,0,"bash",,terminal_focus +6891,10171069,"TERMINAL",0,0,"git",,terminal_focus +6892,10172564,"TERMINAL",0,0,"^C\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;130",,terminal_output +6893,10175539,"TERMINAL",0,0,"ping github.com",,terminal_command +6894,10175587,"TERMINAL",0,0,"]633;E;2025-09-04 12:46:32 ping github.com;98e58a9e-4278-4ee6-ae9b-7614f41efecb]633;CPING github.com (140.82.121.4) 56(84) bytes of data.\r\n",,terminal_output +6895,10179050,"TERMINAL",0,0,"64 bytes from lb-140-82-121-4-fra.github.com (140.82.121.4): icmp_seq=1 ttl=52 time=1105 ms\r\n",,terminal_output +6896,10182311,"TERMINAL",0,0,"bash",,terminal_focus +6897,10183642,"TERMINAL",0,0,"git push",,terminal_command 
+6898,10183692,"TERMINAL",0,0,"]633;E;2025-09-04 12:46:40 git push;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +6899,10184323,"TERMINAL",0,0,"64 bytes from lb-140-82-121-4-fra.github.com (140.82.121.4): icmp_seq=3 ttl=52 time=5.95 ms\r\n64 bytes from lb-140-82-121-4-fra.github.com (140.82.121.4): icmp_seq=4 ttl=52 time=5.80 ms\r\n",,terminal_output +6900,10185097,"TERMINAL",0,0,"Enumerating objects: 12, done.\r\nCounting objects: 8% (1/12)\rCounting objects: 16% (2/12)\rCounting objects: 25% (3/12)\rCounting objects: 33% (4/12)\rCounting objects: 41% (5/12)\rCounting objects: 50% (6/12)\rCounting objects: 58% (7/12)\rCounting objects: 66% (8/12)\rCounting objects: 75% (9/12)\rCounting objects: 83% (10/12)\rCounting objects: 91% (11/12)\rCounting objects: 100% (12/12)\rCounting objects: 100% (12/12), done.\r\nDelta compression using up to 152 threads\r\nCompressing objects: 14% (1/7)\rCompressing objects: 28% (2/7)\rCompressing objects: 42% (3/7)\rCompressing objects: 57% (4/7)\rCompressing objects: 71% (5/7)\rCompressing objects: 85% (6/7)\rCompressing objects: 100% (7/7)\rCompressing objects: 100% (7/7), done.\r\nWriting objects: 14% (1/7)\rWriting objects: 28% (2/7)\rWriting objects: 42% (3/7)\rWriting objects: 57% (4/7)\rWriting objects: 71% (5/7)\rWriting objects: 85% (6/7)\rWriting objects: 100% (7/7)\rWriting objects: 100% (7/7), 1.61 KiB | 550.00 KiB/s, done.\r\nTotal 7 (delta 4), reused 0 (delta 0), pack-reused 0\r\n",,terminal_output +6901,10185202,"TERMINAL",0,0,"remote: Resolving deltas: 0% (0/4)\rremote: Resolving deltas: 25% (1/4)\rremote: Resolving deltas: 50% (2/4)\rremote: Resolving deltas: 75% (3/4)\rremote: Resolving deltas: 100% (4/4)\rremote: Resolving deltas: 100% (4/4), completed with 3 local objects.\r\n",,terminal_output +6902,10185398,"TERMINAL",0,0,"To github.com:p-doom/jasmine.git\r\n 43bdbba..c2533c8 input_pipeline/add-npy2array_record -> input_pipeline/add-npy2array_record\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +6903,10187122,"TERMINAL",0,0,"ping",,terminal_focus +6904,10187398,"TERMINAL",0,0,"64 bytes from lb-140-82-121-4-fra.github.com (140.82.121.4): icmp_seq=5 ttl=52 time=5.83 ms\r\n",,terminal_output +6905,10188389,"TERMINAL",0,0,"^C\r\n--- github.com ping statistics ---\r\n6 packets transmitted, 4 received, 33.3333% packet loss, time 11838ms\r\nrtt min/avg/max/mdev = 5.797/280.712/1105.272/476.059 ms, pipe 2\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +6906,10195019,"generate_dataset.py",0,0,"",python,tab +6907,10195020,"generate_dataset.py",1310,0,"",python,selection_mouse +6908,10195079,"generate_dataset.py",1309,0,"",python,selection_command +6909,10195558,"generate_dataset.py",1882,0,"",python,selection_mouse +6910,10195578,"generate_dataset.py",1881,0,"",python,selection_command +6911,10195986,"generate_dataset.py",2018,0,"",python,selection_mouse +6912,10196471,"generate_dataset.py",2073,0,"",python,selection_mouse +6913,10196476,"generate_dataset.py",2072,0,"",python,selection_command +6914,10199432,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +6915,10212058,"input_pipeline/preprocess/pngs_to_array_records.py",2518,0,"",python,selection_mouse +6916,10212309,"input_pipeline/preprocess/pngs_to_array_records.py",2518,1,"{",python,selection_mouse +6917,10212356,"input_pipeline/preprocess/pngs_to_array_records.py",2518,2,"{""",python,selection_mouse +6918,10215233,"input_pipeline/preprocess/pngs_to_array_records.py",2691,0,"",python,selection_mouse 
+6919,10215251,"input_pipeline/preprocess/pngs_to_array_records.py",2690,0,"",python,selection_command +6920,10215352,"input_pipeline/preprocess/pngs_to_array_records.py",2687,3,": 0",python,selection_mouse +6921,10215392,"input_pipeline/preprocess/pngs_to_array_records.py",2687,4,": 0}",python,selection_command +6922,10215392,"input_pipeline/preprocess/pngs_to_array_records.py",2683,8,"gth"": 0}",python,selection_mouse +6923,10215392,"input_pipeline/preprocess/pngs_to_array_records.py",2676,15,"r, ""length"": 0}",python,selection_mouse +6924,10215452,"input_pipeline/preprocess/pngs_to_array_records.py",2673,18,"_dir, ""length"": 0}",python,selection_mouse +6925,10215452,"input_pipeline/preprocess/pngs_to_array_records.py",2671,20,"ut_dir, ""length"": 0}",python,selection_mouse +6926,10215453,"input_pipeline/preprocess/pngs_to_array_records.py",2691,1,"\n",python,selection_mouse +6927,10215688,"input_pipeline/preprocess/pngs_to_array_records.py",2662,29,"ath"": input_dir, ""length"": 0}",python,selection_mouse +6928,10215714,"input_pipeline/preprocess/pngs_to_array_records.py",2661,30,"path"": input_dir, ""length"": 0}",python,selection_mouse +6929,10215745,"input_pipeline/preprocess/pngs_to_array_records.py",2660,31,"""path"": input_dir, ""length"": 0}",python,selection_mouse +6930,10215863,"input_pipeline/preprocess/pngs_to_array_records.py",2659,32,"{""path"": input_dir, ""length"": 0}",python,selection_mouse +6931,10218964,"input_pipeline/preprocess/pngs_to_array_records.py",825,0,"",python,selection_mouse +6932,10218966,"input_pipeline/preprocess/pngs_to_array_records.py",824,0,"",python,selection_command +6933,10219155,"input_pipeline/preprocess/pngs_to_array_records.py",824,1,"0",python,selection_mouse +6934,10219156,"input_pipeline/preprocess/pngs_to_array_records.py",824,0,"",python,selection_mouse +6935,10219156,"input_pipeline/preprocess/pngs_to_array_records.py",822,2,", ",python,selection_mouse +6936,10219157,"input_pipeline/preprocess/pngs_to_array_records.py",820,4,"ir, ",python,selection_mouse +6937,10219157,"input_pipeline/preprocess/pngs_to_array_records.py",817,7,"t_dir, ",python,selection_mouse +6938,10219157,"input_pipeline/preprocess/pngs_to_array_records.py",816,8,"ut_dir, ",python,selection_mouse +6939,10219157,"input_pipeline/preprocess/pngs_to_array_records.py",825,0,"",python,selection_command +6940,10219209,"input_pipeline/preprocess/pngs_to_array_records.py",815,10,"put_dir, 0",python,selection_mouse +6941,10219282,"input_pipeline/preprocess/pngs_to_array_records.py",814,11,"nput_dir, 0",python,selection_mouse +6942,10219383,"input_pipeline/preprocess/pngs_to_array_records.py",813,12,"input_dir, 0",python,selection_mouse +6943,10219410,"input_pipeline/preprocess/pngs_to_array_records.py",812,13," input_dir, 0",python,selection_mouse +6944,10219474,"input_pipeline/preprocess/pngs_to_array_records.py",811,14,"n input_dir, 0",python,selection_mouse +6945,10219531,"input_pipeline/preprocess/pngs_to_array_records.py",810,15,"rn input_dir, 0",python,selection_mouse +6946,10219878,"input_pipeline/preprocess/pngs_to_array_records.py",811,14,"n input_dir, 0",python,selection_mouse +6947,10219963,"input_pipeline/preprocess/pngs_to_array_records.py",812,13," input_dir, 0",python,selection_mouse +6948,10220232,"input_pipeline/preprocess/pngs_to_array_records.py",813,12,"input_dir, 0",python,selection_mouse +6949,10220821,"input_pipeline/preprocess/pngs_to_array_records.py",813,12,"",python,content 
+6950,10220849,"input_pipeline/preprocess/pngs_to_array_records.py",812,0,"",python,selection_command +6951,10221612,"input_pipeline/preprocess/pngs_to_array_records.py",812,0," ",python,content +6952,10221613,"input_pipeline/preprocess/pngs_to_array_records.py",813,0,"",python,selection_keyboard +6953,10221839,"input_pipeline/preprocess/pngs_to_array_records.py",813,0,"{""path"": input_dir, ""length"": 0}",python,content +6954,10222665,"input_pipeline/preprocess/pngs_to_array_records.py",844,0,"",python,selection_command +6955,10226251,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +6956,10235287,"TERMINAL",0,0,"bash",,terminal_focus +6957,10254935,"TERMINAL",0,0,"git commit -am ""new return format for edge case""",,terminal_command +6958,10254985,"TERMINAL",0,0,"]633;E;2025-09-04 12:47:52 git commit -am ""new return format for edge case"";86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +6959,10255140,"TERMINAL",0,0,"[input_pipeline/add-npy2array_record e44cb26] new return format for edge case\r\n 1 file changed, 1 insertion(+), 1 deletion(-)\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +6960,10256653,"TERMINAL",0,0,"git push",,terminal_command +6961,10256726,"TERMINAL",0,0,"]633;E;2025-09-04 12:47:53 git push;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +6962,10258070,"TERMINAL",0,0,"Enumerating objects: 9, done.\r\nCounting objects: 11% (1/9)\rCounting objects: 22% (2/9)\rCounting objects: 33% (3/9)\rCounting objects: 44% (4/9)\rCounting objects: 55% (5/9)\rCounting objects: 66% (6/9)\rCounting objects: 77% (7/9)\rCounting objects: 88% (8/9)\rCounting objects: 100% (9/9)\rCounting objects: 100% (9/9), done.\r\nDelta compression using up to 152 threads\r\nCompressing objects: 20% (1/5)\rCompressing objects: 40% (2/5)\rCompressing objects: 60% (3/5)\rCompressing objects: 80% (4/5)\rCompressing objects: 100% (5/5)\rCompressing objects: 100% (5/5), done.\r\nWriting objects: 20% (1/5)\rWriting objects: 40% (2/5)\rWriting objects: 60% (3/5)\rWriting objects: 80% (4/5)\rWriting objects: 100% (5/5)\rWriting objects: 100% (5/5), 493 bytes | 246.00 KiB/s, done.\r\nTotal 5 (delta 2), reused 0 (delta 0), pack-reused 0\r\n",,terminal_output +6963,10258174,"TERMINAL",0,0,"remote: Resolving deltas: 0% (0/2)\rremote: Resolving deltas: 50% (1/2)\rremote: Resolving deltas: 100% (2/2)\rremote: Resolving deltas: 100% (2/2), completed with 2 local objects.\r\n",,terminal_output +6964,10258316,"TERMINAL",0,0,"To github.com:p-doom/jasmine.git\r\n c2533c8..e44cb26 input_pipeline/add-npy2array_record -> input_pipeline/add-npy2array_record\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +6965,10307998,"TERMINAL",0,0,"git branch",,terminal_command +6966,10308041,"TERMINAL",0,0,"]633;E;2025-09-04 12:48:45 git branch;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C[?1h=\r add-wandb-name-and-tags\r\n before-nnx\r\n causal-mem-reduce\r\n causal-spatiotemporal-kv-cache\r\n causal-st-transformer\r\n causal-transformer-dynamics-model\r\n causal-transformer-nnx-no-kv-cache\r\n coinrun-gt-actions\r\n convert-to-jax-array-in-iter\r\n correct-batched-sampling\r\n dev\r\n dont-let-tf-see-gpu\r\n feat/darkness-filter\r\n feat/explicit-image-dims\r\n fix-action-padding-lam-future-information-access\r\n fix-sampling\r\n fix-transformer-forwardpass\r\n fix/spatiotemporal-pe-once-in-STTransformer\r\n grad-norm-log-and-clip\r\n grain-dataloader\r\n* input_pipeline/add-npy2array_record\r\n logging-variants\r\n lr-schedules\r\n main\r\n 
maskgit-different-maskprob-per-sample\r\n maskgit-sampling-iterative-unmasking-fix\r\n metrics-logging-for-dynamics-model\r\n monkey-patch\r\n new-arch-sampling\r\n preprocess_video\r\n refactor-tmp\r\n revised-dataloader\r\n runner\r\n runner-grain\r\n sample-ali-branch\r\n sample-from-different-topologies\r\n sampling-startframe-indexing-fix\r\n speedup-tfrecord-preprocessing\r\n tmp\r\n\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +6967,10312643,"TERMINAL",0,0,"git checkout coinrun-gt-actions",,terminal_command +6968,10312681,"TERMINAL",0,0,"]633;E;2025-09-04 12:48:49 git checkout coinrun-gt-actions;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;CSwitched to branch 'coinrun-gt-actions'\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +6969,10314593,"TERMINAL",0,0,"git status",,terminal_command +6970,10314642,"TERMINAL",0,0,"]633;E;2025-09-04 12:48:51 git status;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;COn branch coinrun-gt-actions\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tdata/\r\n\tdata_atari/\r\n\tkiller.sh\r\n\tkiller_partition.sh\r\n\tlog.log\r\n\tlogs/\r\n\toverfit_dir.zip\r\n\tread_tf_record.py\r\n\trequirements-franz.txt\r\n\tsamples/\r\n\tscripts_cremers/\r\n\tslurm/\r\n\tutils/visualizer.py\r\n\r\nnothing added to commit but untracked files present (use ""git add"" to track)\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +6971,10316303,"",0,0,"Switched from branch 'input_pipeline/add-npy2array_record' to 'coinrun-gt-actions'",,git_branch_checkout +6972,10317687,"generate_dataset.py",0,0,"""""""\nGenerates a dataset of random-action CoinRun episodes.\nEpisodes are saved individually as memory-mapped files for efficient loading.\n""""""\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nfrom gym3 import types_np\nimport numpy as np\nfrom procgen import ProcgenGym3Env\nimport tyro\nimport pickle\nimport json\nfrom array_record.python.array_record_module import ArrayRecordWriter \n\n\n\n@dataclass\nclass Args:\n num_episodes: int = 10000\n output_dir: str = ""data/coinrun_episodes""\n min_episode_length: int = 50\n\n\nargs = tyro.cli(Args)\noutput_dir = Path(args.output_dir)\noutput_dir.mkdir(parents=True, exist_ok=True)\n\n# --- Generate episodes ---\ni = 0\nepisode_metadata = []\nwhile i < args.num_episodes:\n seed = np.random.randint(0, 10000)\n env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed)\n observations_seq = []\n actions_seq = []\n\n # --- Run episode ---\n for j in range(1000):\n action = types_np.sample(env.ac_space, bshape=(env.num,))\n print(action)\n env.act(action)\n rew, obs, first = env.observe()\n observations_seq.append(obs[""rgb""])\n actions_seq.append(action)\n if first:\n break\n\n # --- Save episode ---\n if len(observations_seq) >= args.min_episode_length:\n observations_data = np.concatenate(observations_seq, axis=0)\n action_data = np.concatenate(actions_seq, axis=0)\n episode_path = output_dir / f""episode_{i}.array_record"" \n\n # --- Save as ArrayRecord ---\n writer = ArrayRecordWriter(str(episode_path), ""group_size:1"")\n record = {""raw_video"": observations_data.tobytes(), ""actions"": action_data, ""sequence_length"": len(observations_seq)}\n writer.write(pickle.dumps(record))\n writer.close()\n\n episode_metadata.append({""path"": str(episode_path), ""length"": len(observations_seq)})\n print(f""Episode {i} completed, length: {len(observations_seq)}"")\n i += 1\n else:\n print(f""Episode too short 
({len(observations_seq)}), resampling..."")\n\n# --- Save metadata ---\nmetadata = {\n ""env"": ""coinrun"",\n ""num_actions"": env.ac_space.eltype.n,\n ""num_episodes"": args.num_episodes,\n ""avg_episode_len"": np.mean([ep[""length""] for ep in episode_metadata]),\n ""episode_metadata"": episode_metadata,\n}\nwith open(output_dir / ""meta_data.json"", ""w"") as f:\n json.dump(metadata, f)\n\nprint(f""Dataset generated with {len(episode_metadata)} valid episodes"")\n",python,tab +6973,10317689,"generate_dataset.py",1509,0,"",python,selection_mouse +6974,10317720,"generate_dataset.py",1508,0,"",python,selection_command +6975,10318321,"generate_dataset.py",2042,0,"",python,selection_mouse +6976,10318969,"generate_dataset.py",1923,0,"",python,selection_mouse +6977,10319631,"generate_dataset.py",1964,0,"",python,selection_mouse +6978,10319643,"generate_dataset.py",1963,0,"",python,selection_command +6979,10320392,"generate_dataset.py",1954,0,"",python,selection_mouse +6980,10320404,"generate_dataset.py",1953,0,"",python,selection_command +6981,10329005,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_type: str = ""maskgit"" # supported options: maskgit, causal\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(\n model: Genie, inputs: dict\n) -> tuple[jax.Array, 
tuple[jax.Array, dict]]:\n """"""Compute masked dynamics loss""""""\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@nnx.jit\ndef train_step(\n model: Genie, optimizer: nnx.Optimizer, inputs: dict\n) -> tuple[jax.Array, jax.Array, dict]:\n """"""Update state and compute metrics""""""\n\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n return dynamics_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(model)\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_type=args.dyna_type,\n 
dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n decode=False,\n rngs=rngs,\n )\n\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n optimizer = nnx.Optimizer(genie, tx)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n 
args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n # NOTE: We have to remove the (unused) tokenizer vq dropout due flax.nnx lazily initializing modules.\n # Specifically, the first dynamics model checkpoint will contain the vq dropout module,\n # but the first full restore will fail due to nnx not initializing the module when\n # dropout is set to 0.0.\n del optimizer.model.tokenizer.vq.drop\n\n # --- TRAIN LOOP ---\n dataloader = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in grain_iterator\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab +6982,10331633,"generate_dataset.py",0,0,"",python,tab +6983,10332557,"generate_dataset.py",0,0,"",python,tab +6984,10333805,"train_dynamics.py",0,0,"",python,tab +6985,10365319,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"#!/usr/bin/env bash\n\npython generate_dataset.py \\n --num_episodes 10 \\n --output_dir 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/dev \\n --min_episode_length 1000",shellscript,tab +6986,10369911,"TERMINAL",0,0,"bash",,terminal_focus +6987,10373236,"TERMINAL",0,0,"runner-2",,terminal_command +6988,10381352,"TERMINAL",0,0,"sync-runner-2",,terminal_command +6989,10381435,"TERMINAL",0,0,"]633;E;2025-09-04 12:49:58 sync-runner-2;98e58a9e-4278-4ee6-ae9b-7614f41efecb]633;Csending incremental file list\r\n",,terminal_output +6990,10383705,"TERMINAL",0,0,"bash",,terminal_focus +6991,10386520,"TERMINAL",0,0,"runner-2",,terminal_command +6992,10386552,"TERMINAL",0,0,"]633;E;2025-09-04 12:50:03 runner-2;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs_2]633;D;0",,terminal_output +6993,10387496,"TERMINAL",0,0,"./\r\nREADME.md\r\ngenerate_dataset.py\r\ngenie.py\r\nkiller.sh\r\nkiller_partition.sh\r\nlog.log\r\nrequirements.txt\r\nsample.py\r\ntrain_dynamics.py\r\ntrain_lam.py\r\ntrain_tokenizer.py\r\n",,terminal_output +6994,10388517,"TERMINAL",0,0,"input_pipeline/download/\r\ninput_pipeline/download/download_array_records.sh\r\ninput_pipeline/download/openai/\r\ninput_pipeline/download/openai/download_actions_files.py\r\ninput_pipeline/preprocess/\r\ninput_pipeline/preprocess/pngs_to_array_records.py\r\ninput_pipeline/preprocess/video_to_array_records.py\r\nmodels/\r\nmodels/dynamics.py\r\nmodels/lam.py\r\nmodels/tokenizer.py\r\noverfit_dir/\r\noverfit_dir/oai_sample_69-1_8repl.npy\r\n",,terminal_output +6995,10388981,"TERMINAL",0,0,"slurm/dev/alfred/\r\nslurm/dev/alfred/berlin/\r\nslurm/dev/alfred/berlin/gt_actions/\r\nslurm/dev/alfred/berlin/gt_actions/sample_causal_32gpus.sbatch\r\nslurm/dev/alfred/berlin/gt_actions/sample_darkness_filter.sbatch\r\nslurm/dev/alfred/berlin/gt_actions/train_dynacmis_on_3nodes_2gpu.sbatch\r\nslurm/dev/alfred/berlin/gt_actions/train_dynacmis_on_8gpu.sbatch\r\nslurm/dev/alfred/berlin/gt_actions/train_dynacmis_on_more_than4.sbatch\r\nslurm/dev/alfred/berlin/gt_actions/train_dynacmis_overfit.sbatch\r\nslurm/dev/alfred/berlin/gt_actions/train_dynacmis_overfit_2gpus.sbatch\r\nslurm/dev/alfred/berlin/gt_actions/train_dynacmis_overfit_4gpu.sbatch\r\nslurm/dev/alfred/berlin/gt_actions/train_dynacmis_overfit_4gpu_to_8gpu.sbatch\r\nslurm/dev/alfred/berlin/gt_actions/train_tok_topology_one_gpu 
copy.sbatch\r\nslurm/dev/alfred/berlin/gt_actions/train_tok_topology_one_gpu.sbatch\r\nslurm/dev/alfred/berlin/gt_actions/train_tok_topology_restore_to_on_gpu.sbatch\r\nslurm/dev/alfred/berlin/gt_actions/train_tok_topology_two_gpus.sbatch\r\nslurm/dev/alfred/berlin/gt_sampling/\r\nslurm/dev/alfred/berlin/gt_sampling/sampling_mock.sbatch\r\nslurm/dev/alfred/berlin/job_requeueing/\r\nslurm/dev/alfred/berlin/restore_ckpt/\r\nslurm/dev/alfred/berlin/restore_ckpt/restore_ckpt_single.sbatch\r\nslurm/dev/alfred/berlin/restore_ckpt/restore_multi.sbatch\r\nslurm/dev/alfred/berlin/restore_dynamics_non_cotraining/\r\nslurm/dev/alfred/berlin/restore_dynamics_non_cotraining/coinrun_dynamics_batch_size_144_3e-5_invest_nan_restore_40k.sbatch\r\nslurm/dev/alfred/berlin/test_franz_pr/\r\nslurm/dev/alfred/berlin/test_franz_pr/train_dynacmis_overfit.sbatch\r\nslurm/dev/alfred/berlin/test_franz_pr/train_lam_overfit.sbatch\r\nslurm/dev/alfred/berlin/test_franz_pr/train_tokenizer_overfit.sbatch\r\nslurm/dev/alfred/berlin/topology/\r\nslurm/dev/alfred/berlin/topology/sample_overfit_single_gpu.sbatch\r\nslurm/dev/alfred/berlin/topology/train_dynacmis_overfit_1.sbatch\r\nslurm/dev/alfred/berlin/topology/train_dynacmis_overfit_1_gt_actions.sbatch\r\nslurm/dev/alfred/berlin/topology/train_dynacmis_overfit_1_gt_actions_noise.sbatch\r\nslurm/dev/alfred/berlin/topology/train_dynacmis_overfit_1_noise.sbatch\r\nslurm/dev/alfred/berlin/topology/train_dynacmis_overfit_2_nodes_2_gpu.sbatch\r\nslurm/dev/alfred/berlin/topology/train_dynacmis_overfit_2_nodes_4_gpu.sbatch\r\nslurm/dev/alfred/berlin/topology/train_dynacmis_overfit_4gpu.sbatch\r\nslurm/dev/alfred/berlin/topology/train_lam_overfit.sbatch\r\nslurm/dev/alfred/berlin/topology/train_tokenizer_overfit_1gpu.sbatch\r\nslurm/dev/alfred/berlin/topology/train_tokenizer_overfit_2_gpu.sbatch\r\nslurm/dev/alfred/berlin/topology/train_tokenizer_overfit_4_gpu.sbatch\r\nslurm/dev/alfred/berlin/topology/train_tokenizer_overfit_single_record.sbatch\r\nslurm/dev/alfred/berlin/topology/train_tokenizer_overfit_single_record_requeue.sbatch\r\nslurm/dev/alfred/berlin/topology/train_tokenizer_restore_1gpu_to_1gpu.sbatch\r\nslurm/dev/alfred/berlin/topology/train_tokenizer_restore_1gpu_to_2gpu.sbatch\r\nslurm/dev/alfred/berlin/topology/train_tokenizer_restore_2gpu_to_1gpu.sbatch\r\nslurm/dev/alfred/berlin/topology/train_tokenizer_restore_2gpu_to_2gpu.sbatch\r\nslurm/dev/alfred/berlin/topology/nnx/\r\nslurm/dev/alfred/berlin/topology/nnx/train_tokenizer_overfit_1.sbatch\r\nslurm/dev/alfred/berlin/topology/nnx/train_tokenizer_overfit_2_gpu.sbatch\r\nslurm/dev/alfred/berlin/topology/nnx/train_tokenizer_restore_2gpu_to_1gpu.sbatch\r\nslurm/dev/alfred/berlin/topology/nnx/train_tokenizer_restore_2gpu_to_2gpu.sbatch\r\nslurm/dev/alfred/berlin/topology/prennx/\r\nslurm/dev/alfred/berlin/topology/prennx/train_tokenizer_overfit_2_gpu.sbatch\r\nslurm/dev/alfred/berlin/topology/prennx/train_tokenizer_restore_2gpu_to_1gpu.sbatch\r\nslurm/dev/alfred/berlin/topology/prennx/train_tokenizer_restore_2gpu_to_2gpu.sbatch\r\nslurm/dev/alfred/berlin/topology_restore_fix/\r\nslurm/dev/alfred/berlin/topology_restore_fix/train_tokenizer_overfit_4_gpu.sbatch\r\nslurm/dev/alfred/berlin/topology_restore_fix/train_tokenizer_overfit_4_to_1_gpu.sbatch\r\nslurm/dev/alfred/berlin/train_dyn_dev/\r\nslurm/dev/alfred/berlin/train_dyn_dev/train_dynacmis_overfit.sbatch\r\nslurm/dev/alfred/berlin/train_lam_dev/\r\nslurm/dev/alfred/berlin/train_lam_dev/train_lam.sbatch\r\nslurm/dev/alfred/berlin/train_tok_dev/\r\nslurm/dev/alfred
/berlin/train_tok_dev/train_tok.sbatch\r\nslurm/dev/alfred/berlin/train_tok_dev/train_tok_overfit.sbatch\r\nslurm/dev/alfred/helmholtz_cluster/\r\nslurm/dev/alfred/helmholtz_cluster/train_tok_dev/\r\nslurm/dev/alfred/helmholtz_cluster/train_tok_dev/train_tok.sbatch\r\nslurm/dev/alfred/helmholtz_cluster/train_tok_dev/train_tok_overfit.sbatch\r\nslurm/dev/alfred/horeka/\r\nslurm/dev/alfred/horeka/input_pipeline_ws/\r\nslurm/dev/alfred/horeka/input_pipeline_ws/actions/\r\nslurm/dev/alfred/horeka/input_pipeline_ws/actions/download_actions.sbatch\r\nslurm/dev/alfred/horeka/input_pipeline_ws/actions/download_actions_all.sh\r\nslurm/dev/alfred/horeka/jobs_cur/\r\nslurm/dev/alfred/horeka/jobs_cur/atari/\r\nslurm/dev/alfred/horeka/jobs_cur/atari/sample_causal.sbatch\r\nslurm/dev/alfred/horeka/jobs_cur/atari/sample_maskgit.sbatch\r\nslurm/dev/alfred/horeka/jobs_cur/atari/train_dynamics_causal.sbatch\r\nslurm/dev/alfred/horeka/jobs_cur/atari/train_dynamics_maskgit.sbatch\r\nslurm/dev/alfred/horeka/jobs_cur/atari/train_tokenizer_lr_1e-4.sbatch\r\nslurm/dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/\r\nslurm/dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dev_dyn_gt_actions.sbatch\r\nslurm/dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dev_dyn_gt_actions_dev_single.sbatch\r\nslurm/dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions.sh\r\nslurm/dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation.sbatch\r\nslurm/dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation_baseline.sbatch\r\nslurm/dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation_one_node.sbatch\r\nslurm/dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation_single_gpu 
copy.sbatch\r\nslurm/dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation_single_gpu.sbatch\r\nslurm/dev/alfred/horeka/jobs_cur/dyn_gt_actions_ablation_prepend/dyn_gt_actions_maskgit_causal.sh\r\nslurm/dev/alfred/horeka/jobs_cur/tokenizer/\r\nslurm/dev/alfred/horeka/jobs_cur/tokenizer/train_tokenizer_37M_single_gpu.sbatch\r\nslurm/dev/alfred/horeka/jobs_cur/tokenizer/train_tokenizer_37M_single_node.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/\r\nslurm/dev/alfred/horeka/jobs_old/allocate/\r\nslurm/dev/alfred/horeka/jobs_old/allocate/cpu.sh\r\nslurm/dev/alfred/horeka/jobs_old/allocate/multigpu_gpu.sh\r\nslurm/dev/alfred/horeka/jobs_old/allocate/single_gpu.sh\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_16_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_1_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_1_nodes.sh\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_2_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_32_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_4_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_4_nodes.sh\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_4_nodes_frequent_chkpt.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_64_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/adjusted_lr/train_tokenizer_8_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/const_lr/\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/const_lr/train_tokenizer_16_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/const_lr/train_tokenizer_1_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/const_lr/train_tokenizer_2_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/const_lr/train_tokenizer_32_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/const_lr/train_tokenizer_4_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/const_lr/train_tokenizer_8_nodes.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/oai_subset/\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/oai_subset/train_tokenizer_1_nodes.sh\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/oai_subset/train_tokenizer_2_nodes.sh\r\nslurm/dev/alfred/horeka/jobs_old/batchsize_scaling/oai_subset/train_tokenizer_2_nodes_samples_500.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/checkpoint_fix/\r\nslurm/dev/alfred/horeka/jobs_old/checkpoint_fix/train_tokenizer.sh\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/train_tokenizer_coinrun.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/base/\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/base/train_dynamics_coinrun.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/base/train_lam_coinrun.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/base/train_tokenizer_coinrun.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_dynamics_coinrun.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_dynamics
_coinrun.sh\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_lam_12.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_lam_24.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_lam_48.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_lam_6.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_lam_6.sh\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_tokenizer_coinrun.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/coinrun/latent_action_ablation/train_tokenizer_coinrun.sh\r\nslurm/dev/alfred/horeka/jobs_old/dyn_gt_actions_ablation/\r\nslurm/dev/alfred/horeka/jobs_old/dyn_gt_actions_ablation/dyn_gt_actions_ablation.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/dyn_gt_actions_ablation/dyn_gt_actions_ablation_baseline.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/dyn_gt_actions_ablation/dyn_gt_actions_ablation_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/dyn_gt_actions_ablation/masked_lim_yolo.sh\r\nslurm/dev/alfred/horeka/jobs_old/generate_single_samples/\r\nslurm/dev/alfred/horeka/jobs_old/generate_single_samples/generate_samples_50k.sh\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/download_10xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/download_6xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/download_7xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/download_8xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/download_9xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/download_index_json.sh\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/mp4_to_array_record_open_ai_6xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/mp4_to_array_record_open_ai_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/mp4_to_npy_open_ai_10xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/mp4_to_npy_open_ai_6xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/mp4_to_npy_open_ai_7xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/mp4_to_npy_open_ai_8xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_local/mp4_to_npy_open_ai_9xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/actions_download/\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/actions_download/download_actions.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/actions_download/download_actions_all.sh\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/mp4_to_arrayrecords_w_actions/\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/mp4_to_arrayrecords_w_actions/preproc_w_actions.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/download_10xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/download_6xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/download_7xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/download_8xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/download_9xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/download_index_json.sh\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/mp4_to_array_record_open_ai.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/inpu
t_pipeline_ws/videos/mp4_to_array_record_open_ai_chunked.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/mp4_to_array_record_open_ai_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/mp4_to_array_record_open_ai_to_fast.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/input_pipeline_ws/videos/mp4_to_npy_open_ai.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/job_chaining/\r\nslurm/dev/alfred/horeka/jobs_old/job_chaining/chain_example.sh\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/train_lam_chain_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/train_lam_requeue_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/example_tokenizer_lr_tuning/\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/example_tokenizer_lr_tuning/lr_tuning_tokenizer.sh\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/example_tokenizer_lr_tuning/train_tokenizer_lr_general.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/lr_tuning/\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/lr_tuning/tokenizer_lr_tuning.py\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/lr_tuning/tokenizer/\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/lr_tuning/tokenizer/lr_tuning_tokenizer.sh\r\nslurm/dev/alfred/horeka/jobs_old/job_requeueing/lr_tuning/tokenizer/train_tokenizer_lr_general.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/masked_lim/\r\nslurm/dev/alfred/horeka/jobs_old/masked_lim/masked_lim_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/masked_lim/masked_lim_yolo.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/masked_lim/masked_lim_yolo.sh\r\nslurm/dev/alfred/horeka/jobs_old/masked_lim_noise/\r\nslurm/dev/alfred/horeka/jobs_old/masked_lim_noise/masked_lim_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/masked_lim_noise/masked_lim_yolo.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/masked_lim_noise/masked_lim_yolo.sh\r\nslurm/dev/alfred/horeka/jobs_old/overfit_minecraft_single_sample/\r\nslurm/dev/alfred/horeka/jobs_old/overfit_minecraft_single_sample/train_dynamics_overfit_sample.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_minecraft_single_sample/train_dynamics_overfit_sample.sh\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_dev.sh\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_init.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_samples_12.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_samples_12288.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_samples_1536.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_samples_24576.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_samples_3072.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_samples_384.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_samples_49152.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_samples_6144.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/train_lam_samples_96.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/from_ckpt/\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/from_ckpt/train_lam_dev.sh\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/lam/from_ckpt/train_lam_samples_12.sbatch\r\nslurm/dev/alfred/horeka/jobs_
old/overfit_run_ds_oai/tokenizer/\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_dev.sh\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_12.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_12288.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_1536.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_24576.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_3072.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_384.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_49152.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_6144.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/train_tokenizer_samples_96.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/from_ckpt/\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/from_ckpt/train_tokenizer_samples_12.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_run_ds_oai/tokenizer/from_ckpt/train_tokenizer_samples_12.sh\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_dynamics_overfit_sample.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_lam_overfit_sample.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_tokenizer_overfit_sample.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_tokenizer_overfit_sample_size_0.6_mio.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_tokenizer_overfit_sample_size_0_5.sh\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_tokenizer_overfit_sample_size_21_mio.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_tokenizer_overfit_sample_size_2_mio.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_tokenizer_overfit_sample_size_9_mio.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/overfit_sample/train_tokenizer_overfit_sample_size_small_mio.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/mp4_to_npy_10xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/mp4_to_npy_6xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/mp4_to_npy_7xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/mp4_to_npy_8xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/mp4_to_npy_9xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/mp4_to_npy_open_ai.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/mp4_to_npy_test.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/npy_to_tfrecord.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/npy_to_tfrecord_10xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/npy_to_tfrecord_6xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/npy_to_tfrecord_7xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/npy_to_tfrecord_8xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/npy_to_tfrecord_9xx.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/preprocess_video_splitter.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/preprocess_video_to_npy.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/preprocess/preprocess_video_to_npy_test.sh\r\nslurm/dev/alfred/horeka/jobs_old/procgen/\r\nslurm/dev/alfred/horeka/jobs_old/procgen/cp_script.sh\r\nslu
rm/dev/alfred/horeka/jobs_old/procgen/data_gen_gym_acrobot.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/procgen/data_gen_gym_carracing.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/procgen/data_gen_gym_coinrun.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/procgen/data_gen_gym_coinrun.sh\r\nslurm/dev/alfred/horeka/jobs_old/procgen/data_gen_gym_mountaincar copy.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/procgen/data_gen_gym_mountaincar.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/procgen/data_gen_gym_multi.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/procgen/data_gen_gym_pendulum.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/rsync/\r\nslurm/dev/alfred/horeka/jobs_old/rsync/rsync.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/rsync/rsync_tf_records.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/sample_jafar/\r\nslurm/dev/alfred/horeka/jobs_old/sample_jafar/sample_coinrun.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/sampling/\r\nslurm/dev/alfred/horeka/jobs_old/sampling/sample_coinrun.sh\r\nslurm/dev/alfred/horeka/jobs_old/sampling/sample_knoms.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/sampling/sample_knoms.sh\r\nslurm/dev/alfred/horeka/jobs_old/sampling/sample_knoms_mihir.sh\r\nslurm/dev/alfred/horeka/jobs_old/train_dyn/\r\nslurm/dev/alfred/horeka/jobs_old/train_dyn/train_dyn_knoms_full.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_dyn_dev/\r\nslurm/dev/alfred/horeka/jobs_old/train_dyn_dev/train_dyn.sh\r\nslurm/dev/alfred/horeka/jobs_old/train_dyn_dev/train_dyn_checkpt_loading_test_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_dyn_dev/train_dyn_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_dyn_dev/train_dyn_single_batch.sh\r\nslurm/dev/alfred/horeka/jobs_old/train_lam/\r\nslurm/dev/alfred/horeka/jobs_old/train_lam/train_lam_full.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_lam_dev/\r\nslurm/dev/alfred/horeka/jobs_old/train_lam_dev/train_lam.sh\r\nslurm/dev/alfred/horeka/jobs_old/train_lam_dev/train_lam_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_lam_dev/train_lam_full_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_lam_dev/train_lam_single_batch.sh\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer/\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer/train_lam_oai_dev 
copy.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer/train_lam_oai_dev.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer/train_tokenizer_knoms_overfit_single_batch.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer/train_tokenizer_knoms_overfit_single_sample.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer/train_tokenizer_knoms_overfit_tfrecord_10.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer/train_tokenizer_knoms_overfit_tfrecord_full.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer_dev/\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer_dev/train_tokenizer.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer_dev/train_tokenizer.sh\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer_dev/train_tokenizer_copy.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer_dev/train_tokenizer_h100.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer_dev/train_tokenizer_overfit_tfrecord_10.sh\r\nslurm/dev/alfred/horeka/jobs_old/train_tokenizer_dev/train_tokenizer_single_batch.sh\r\nslurm/dev/alfred/horeka/jobs_old/validation/\r\nslurm/dev/alfred/horeka/jobs_old/validation/tokenizer_lr_tuning/\r\nslurm/dev/alfred/horeka/jobs_old/validation/tokenizer_lr_tuning/lr_tuning_tokenizer.sh\r\nslurm/dev/alfred/horeka/jobs_old/validation/tokenizer_lr_tuning/train_tokenizer_lr_general.sbatch\r\nslurm/dev/alfred/horeka/jobs_old/validation/tokenizer_without_optimizer/\r\nslurm/dev/alfred/horeka/jobs_old/validation/tokenizer_without_optimizer/lr_tuning_tokenizer.sh\r\nslurm/dev/alfred/horeka/jobs_old/validation/tokenizer_without_optimizer/train_tokenizer_lr_general.sbatch\r\nslurm/dev/mihir/horeka/\r\nslurm/dev/mihir/horeka/generate_dataset_10m.sh\r\nslurm/dev/mihir/horeka/train_tokenizer.sh\r\nslurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch\r\nslurm/dev/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample_gaussian_noise.sbatch\r\nslurm/dev/mihir/horeka/yolo-runs/sampling.sh\r\nslurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh\r\nslurm/jobs/alfred/\r\nslurm/jobs/alfred/berlin/\r\nslurm/jobs/alfred/berlin/atari/\r\nslurm/jobs/alfred/berlin/atari/atari_dynamics/\r\nslurm/jobs/alfred/berlin/atari/atari_dynamics/atari_dyn_maskgit_causal.sh\r\nslurm/jobs/alfred/berlin/atari/atari_dynamics/atari_dynamics.sbatch\r\nslurm/jobs/alfred/berlin/atari/atari_sampling/\r\nslurm/jobs/alfred/berlin/atari/atari_sampling/sample_causal.sbatch\r\nslurm/jobs/alfred/berlin/atari/atari_sampling/sample_maskgit.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_ablation/\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_baseline.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_mixed_prec.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_wsd.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_wsd_3e-5_3e-6.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_wsd_3e-6.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_ablation/coinrun_lam_wsd_8e-6.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_bigrun/\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_bigrun/coinrun_dynamics_reproduction.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_bigrun/coinrun_dynamics_reproduction_cotrain.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_bigrun/coinrun_lam_big_run.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_bigrun/coinrun_tokenizer_repoduction_ffn_512_n_blocks_8_full_prec.sbatch\r\nslurm/jobs/alfred/berl
in/coinrun/coinrun_bigrun/generate_data.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_bigrun/sample.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_bigrun/sample_cotrain.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_data/\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_data/generate_data.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_data/generate_data_npy.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_dynamics/\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_dynamics/coinrun_dyn_maskgit_causal.sh\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_dynamics/coinrun_dynamics.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_dynamics/coinrun_dynamics_batch_size_144.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_dynamics/coinrun_dynamics_batch_size_144_3e-5.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_dynamics/coinrun_dynamics_batch_size_144_3e-5_invest_nan.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_dynamics/coinrun_dynamics_batch_size_144_3e-5_invest_nan_restore_40k.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_dynamics/coinrun_dynamics_batch_size_144_cotraining.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_dynamics/coinrun_dynamics_batch_size_144_cotraining_3e-5.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_dynamics/coinrun_dynamics_maskgit_overfit.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_lam/\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_lam/lam_coinrun.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction/\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction/coinrun_dynamics_reproduction.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction/coinrun_dynamics_reproduction_cotrain.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction/coinrun_lam_reproduction.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction/coinrun_lam_reproduction_full_prec.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction/coinrun_lam_reproduction_full_prec_cosine.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction/coinrun_lam_reproduction_full_prec_w_restore.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction/coinrun_lam_reproduction_mix_prec_cosine.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction/coinrun_lam_reproduction_mix_prec_cosine_min_init_lt_3e-6.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction/coinrun_tokenizer_repoduction.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction/sample.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction/sample_cotrain.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_dynamics_reproduction.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_dynamics_reproduction_cotrain.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_lam_reproduction.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_lam_reproduction_dc_0.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_lam_reproduction_ffn_512.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_lam_reproduction_ffn_512_num_blocks_8.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_lam_reproduction_ffn_512_num_blocks_8_full_prec.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_lam_reproduction_lower_lr.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_lam_reproduction_lower_lr_0.5x
.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_lam_reproduction_lower_lr_3e6_1e5_0.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_lam_reproduction_lower_lr_3e6_3e5_0.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_tokenizer_repoduction.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_tokenizer_repoduction_ffn_512_n_blocks_8.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/coinrun_tokenizer_repoduction_ffn_512_n_blocks_8_full_prec.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/generate_data.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/sample.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_reproduction_10k/sample_cotrain.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_sampling/\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_sampling/sample_causal.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_sampling/sample_maskgit.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_sampling/sample_maskgit_cotrain_3e-5.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_sampling/sample_maskgit_non_cotrain_3e-5.sbatch\r\nslurm/jobs/alfred/berlin/coinrun/coinrun_sampling/sample_maskgit_overfit.sbatch\r\nslurm/jobs/alfred/berlin/downlaod/\r\nslurm/jobs/alfred/berlin/downlaod/download_actions.sbatch\r\nslurm/jobs/alfred/berlin/downlaod/download_actions.sh\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/og_coinrun_dynamics_reproduction_requeue.sbatch\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/og_coinrun_dynamics_reproduction_requeue_2.0.sbatch\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/og_coinrun_tokenizer_repoduction_requeue.sbatch\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/og_coinrun_tokenizer_repoduction_requeue_2.0.sbatch\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/sample.sbatch\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/sample_og_150k.sbatch\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/sample_og_175k.sbatch\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/sample_og_200k.sbatch\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/sample_og_spawner.sh\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/spawner.sh\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/spawner_sample.sh\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/requeue/\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/requeue/og_coinrun_dynamics_reproduction.sbatch\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/requeue/og_coinrun_lam_reproduction.sbatch\r\nslurm/jobs/alfred/berlin/jafar_og_reproduction/requeue/og_coinrun_tokenizer_repoduction.sbatch\r\nslurm/jobs/alfred/berlin/minecraft/\r\nslurm/jobs/alfred/berlin/minecraft/gt_act_sampling/\r\nslurm/jobs/alfred/berlin/minecraft/gt_act_sampling/sample_gt_act_maskgit.sbatch\r\nslurm/jobs/alfred/berlin/minecraft/minecraft_sampling/\r\nslurm/jobs/alfred/berlin/minecraft/minecraft_sampling/atari_dyn_maskgit_causal.sh\r\nslurm/jobs/alfred/berlin/minecraft/minecraft_sampling/sample_causal.sbatch\r\nslurm/jobs/alfred/berlin/minecraft/minecraft_sampling/sample_dynamics-maskgit-8-node-darkness-filter-3423250.sbatch\r\nslurm/jobs/alfred/helmholtz_cluster/\r\nslurm/jobs/alfr
ed/helmholtz_cluster/jafar_og_reproduction/\r\nslurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset.sbatch\r\nslurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/generate_dataset_10m.sbatch\r\nslurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch\r\nslurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch\r\nslurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch\r\nslurm/jobs/alfred/horeka/\r\nslurm/jobs/alfred/horeka/atari_dynamics/\r\nslurm/jobs/alfred/horeka/atari_dynamics/atari_dyn_maskgit_causal.sh\r\nslurm/jobs/alfred/horeka/atari_dynamics/atari_dynamics.sbatch\r\nslurm/jobs/alfred/horeka/dyn_gt_actions_ablation_prepend/\r\nslurm/jobs/alfred/horeka/dyn_gt_actions_ablation_prepend/dyn_gt_actions_ablation.sbatch\r\nslurm/jobs/alfred/horeka/dyn_gt_actions_ablation_prepend/dyn_gt_actions_maskgit_causal.sh\r\nslurm/jobs/alfred/horeka/misc/\r\nslurm/jobs/alfred/horeka/misc/preproc_mp4_to_npy_open_ai.sbatch\r\nslurm/jobs/alfred/horeka/misc/preproc_npy_to_tfrecord_open_ai.sbatch\r\nslurm/jobs/alfred/horeka/misc/train_dyn_knoms.sbatch\r\nslurm/jobs/alfred/horeka/misc/train_lam_knoms.sbatch\r\nslurm/jobs/alfred/horeka/misc/train_tokenizer_knoms.sbatch\r\nslurm/jobs/franz/\r\nslurm/jobs/franz/berlin/\r\nslurm/jobs/franz/berlin/coinrun/\r\nslurm/jobs/franz/berlin/coinrun/coinrun_dynamics/\r\nslurm/jobs/franz/berlin/coinrun/coinrun_dynamics/coinrun_dynamics_fp32_adam_moments.sh\r\nslurm/jobs/franz/berlin/coinrun/coinrun_dynamics/coinrun_dynamics_fp32_layernorm.sh\r\nslurm/jobs/franz/berlin/coinrun/coinrun_lam/\r\nslurm/jobs/franz/berlin/coinrun/coinrun_lam/lam_coinrun_nan_investigation_100k_to_107k.sbatch\r\nslurm/jobs/mihir/horeka/\r\nslurm/jobs/mihir/horeka/sbatch_holiday.sh\r\nslurm/jobs/mihir/horeka/atari/\r\nslurm/jobs/mihir/horeka/atari/sample_causal.sbatch\r\nslurm/jobs/mihir/horeka/atari/sample_maskgit.sbatch\r\nslurm/jobs/mihir/horeka/atari/train_dynamics_causal.sbatch\r\nslurm/jobs/mihir/horeka/atari/train_dynamics_maskgit.sbatch\r\nslurm/jobs/mihir/horeka/atari/train_tokenizer_lr_3e-5.sbatch\r\nslurm/jobs/mihir/horeka/causal_big_runs/\r\nslurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes.sbatch\r\nslurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_dev.sh\r\nslurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_filter_dark.sbatch\r\nslurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_8_nodes_filter_dark_req.sbatch\r\nslurm/jobs/mihir/horeka/causal_big_runs/train_dynamics_dev.sbatch\r\nslurm/jobs/mihir/horeka/coinrun/\r\nslurm/jobs/mihir/horeka/coinrun/sample_causal.sbatch\r\nslurm/jobs/mihir/horeka/coinrun/sample_maskgit.sbatch\r\nslurm/jobs/mihir/horeka/coinrun/train_dynamics_causal.sbatch\r\nslurm/jobs/mihir/horeka/coinrun/train_dynamics_maskgit.sbatch\r\nslurm/jobs/mihir/horeka/coinrun/train_tokenizer_lr_1e-4.sbatch\r\nslurm/jobs/mihir/horeka/jafar_og_reproduction/\r\nslurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset.sbatch\r\nslurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch\r\nslurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch\r\nslurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_lam_reproduction.sbatch\r\nslurm/jobs/mihir/horeka/jafar_og_reproduction/og_coinrun_tokenizer_repoduction.sbatch\r\nslurm/jobs/mihir/horeka/lam/\r\nslurm/jobs/mihir/horeka/lam/train_lam_minecraft_1node_dev.sbatch\r\nslurm/jobs/mihir/horeka/lam/tra
in_lam_minecraft_8node-darkness-filter-133M.sbatch\r\nslurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-311M.sbatch\r\nslurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch\r\nslurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch\r\nslurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node.sbatch\r\nslurm/jobs/mihir/horeka/lr_tuning/tokenizer/\r\nslurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4.sbatch\r\nslurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_2nodes.sbatch\r\nslurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_2nodes_req.sbatch\r\nslurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_4nodes.sbatch\r\nslurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_4nodes_req.sbatch\r\nslurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes.sbatch\r\nslurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_8nodes_dev.sbatch\r\nslurm/jobs/mihir/horeka/lr_tuning/tokenizer/train_tokenizer_lr_1e-4_dev.sbatch\r\nslurm/jobs/mihir/horeka/maskgit_big_runs/\r\nslurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes.sbatch\r\nslurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes_filter_dark.sbatch\r\nslurm/jobs/mihir/horeka/maskgit_big_runs/train_dynamics_8_nodes_filter_dark_req.sbatch\r\nslurm/jobs/mihir/horeka/preprocessing/\r\nslurm/jobs/mihir/horeka/preprocessing/preprocess_atari.sbatch\r\nslurm/utils/\r\nslurm/utils/exclude.txt\r\nslurm/utils/alfred/\r\nslurm/utils/alfred/scp_scripts/\r\nslurm/utils/alfred/scp_scripts/copy_to_berlin.sh\r\nslurm/utils/alfred/scp_scripts/copy_to_local.sh\r\nslurm/utils/mihir/\r\nslurm/utils/mihir/weekend-job-requeuer.sh\r\nslurm/utils/mihir/weekend-job-starter.sh\r\ntests/\r\ntests/test_dataloader.py\r\nutils/\r\nutils/dataloader.py\r\nutils/dataset_utils.py\r\nutils/lr_utils.py\r\nutils/nn.py\r\nutils/parameter_utils.py\r\nutils/preprocess.py\r\n",,terminal_output +6996,10390491,"TERMINAL",0,0,"\r\nsent 6,299,472 bytes received 8,739 bytes 664,022.21 bytes/sec\r\ntotal size is 128,388,545 speedup is 20.35\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs_2]633;D;0",,terminal_output +6997,10392823,"slurm/dev/mihir/horeka/generate_dataset_10m.sh",0,0,"",shellscript,tab +6998,10407228,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n\n\n# Log the sbatch script\ncat $0\nsource .venv/bin/activate\n\npython generate_dataset.py \\n --num_episodes 10000 \\n --output_dir /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m \\n --min_episode_length 1000",shellscript,tab +6999,10408610,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",670,0,"",shellscript,selection_mouse +7000,10408636,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",669,0,"",shellscript,selection_command +7001,10409163,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",622,0,"",shellscript,selection_mouse 
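The train_dynamics.py buffer captured earlier in this recording averages its cross-entropy only over masked token positions. A minimal sketch of that masked loss, using the same optax call as the recorded file (the variable names and shapes here are illustrative, not taken from the recording):

import jax.numpy as jnp
import optax

def masked_token_ce(token_logits, video_tokens, mask):
    # Per-position cross-entropy over the token vocabulary,
    # as in dynamics_loss_fn in the recorded train_dynamics.py...
    ce = optax.softmax_cross_entropy_with_integer_labels(token_logits, video_tokens)
    # ...then averaged only where the mask is active, so unmasked
    # (visible) tokens contribute nothing to the loss.
    return (mask * ce).sum() / mask.sum()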
+7002,10409741,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",618,0,"",shellscript,selection_mouse +7003,10411041,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",636,0,"",shellscript,selection_mouse +7004,10411859,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",638,0,"",shellscript,selection_mouse +7005,10412824,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",636,0,"",shellscript,selection_mouse +7006,10415210,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",628,0,"",shellscript,selection_mouse +7007,10415838,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",637,0,"",shellscript,selection_mouse +7008,10416852,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",638,0,"",shellscript,selection_command +7009,10417633,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",638,0,"_",shellscript,content +7010,10417634,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",639,0,"",shellscript,selection_keyboard +7011,10418280,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",639,0,"g",shellscript,content +7012,10418281,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",640,0,"",shellscript,selection_keyboard +7013,10418436,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",640,0,"t",shellscript,content +7014,10418437,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",641,0,"",shellscript,selection_keyboard +7015,10418835,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",641,0,"_",shellscript,content +7016,10418836,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",642,0,"",shellscript,selection_keyboard +7017,10418956,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",642,0,"a",shellscript,content +7018,10418957,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",643,0,"",shellscript,selection_keyboard +7019,10419065,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",643,0,"c",shellscript,content +7020,10419066,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",644,0,"",shellscript,selection_keyboard +7021,10419285,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",644,0,"t",shellscript,content +7022,10419286,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",645,0,"",shellscript,selection_keyboard +7023,10419386,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",645,0,"i",shellscript,content +7024,10419387,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",646,0,"",shellscript,selection_keyboard +7025,10419474,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",646,0,"o",shellscript,content +7026,10419474,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",647,0,"",shellscript,selection_keyboard +7027,10419595,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",647,0,"n",shellscript,content +7028,10419596,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",648,0,"",shellscript,selection_keyboard +7029,10419656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",648,0,"s",shellscript,content 
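Each content row above carries a byte offset, a replaced length, and the inserted text, so a file's state can be rebuilt by replaying rows in sequence order: rows +7009 through +7029 type the suffix "_gt_actions" one keystroke at a time, and row +7036 deletes those 11 characters again. A minimal replay sketch (the sample buffer is illustrative, and offsets here are string-relative rather than file-absolute as in the recording):

def apply_edit(buffer: str, offset: int, length: int, text: str) -> str:
    # A content event replaces `length` characters at `offset` with `text`;
    # insertions have length 0, deletions have empty text.
    return buffer[:offset] + text + buffer[offset + length:]

buffer = "coinrun_dataset_10m"
for ch in "_gt_actions":  # typed character by character, as in the log
    buffer = apply_edit(buffer, len(buffer), 0, ch)
buffer = apply_edit(buffer, len("coinrun_dataset_10m"), 11, "")  # row +7036 undoes it
assert buffer == "coinrun_dataset_10m"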
+7030,10419657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",649,0,"",shellscript,selection_keyboard +7031,10420938,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",681,0,"",shellscript,selection_mouse +7032,10421600,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",680,0,"",shellscript,selection_command +7033,10425532,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",468,0,"",shellscript,selection_mouse +7034,10431649,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",681,0,"",shellscript,selection_mouse +7035,10431653,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",680,0,"",shellscript,selection_command +7036,10432551,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",638,11,"",shellscript,content +7037,10433611,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch",637,0,"",shellscript,selection_command +7038,10436005,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m copy.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n\n\n# Log the sbatch script\ncat $0\nsource .venv/bin/activate\n\npython generate_dataset.py \\n --num_episodes 10000 \\n --output_dir /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m \\n --min_episode_length 1000",shellscript,tab +7039,10442438,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=12:00:00\n#SBATCH --partition=cpuonly\n#SBATCH --cpus-per-task=8\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/data_coinrun/%x_%j.log\n#SBATCH --job-name=generate_coinrun_dataset_10m\n\n\n# Log the sbatch script\ncat $0\nsource .venv/bin/activate\n\npython generate_dataset.py \\n --num_episodes 10000 \\n --output_dir /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_coinrun/coinrun_episodes_10m \\n --min_episode_length 1000",shellscript,tab +7040,10443423,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",638,0,"",shellscript,selection_mouse +7041,10444465,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",638,0,"_",shellscript,content +7042,10444466,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",639,0,"",shellscript,selection_keyboard +7043,10445031,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",639,0,"g",shellscript,content +7044,10445031,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",640,0,"",shellscript,selection_keyboard +7045,10445150,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",640,0,"t",shellscript,content 
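Rows +7038 through +7062 make the same change by hand: generate_dataset_10m.sbatch is duplicated and "_gt_actions" is typed into the copy. A hedged sketch of deriving the variant in one step (paths as in the log; which substrings the edit offsets 415 and 638 land on is my reading of the surrounding selections, not stated in the recording):

from pathlib import Path

src = Path("slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch")
dst = src.with_name("generate_dataset_10m_gt_actions.sbatch")
text = src.read_text()
# Suffix the SLURM job name and the dataset output directory with _gt_actions.
text = text.replace("generate_coinrun_dataset_10m", "generate_coinrun_dataset_10m_gt_actions")
text = text.replace("coinrun_episodes_10m", "coinrun_episodes_10m_gt_actions")
dst.write_text(text)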
+7046,10445151,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",641,0,"",shellscript,selection_keyboard +7047,10445308,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",641,0,"_",shellscript,content +7048,10445309,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",642,0,"",shellscript,selection_keyboard +7049,10445545,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",642,0,"a",shellscript,content +7050,10445545,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",643,0,"",shellscript,selection_keyboard +7051,10445688,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",643,0,"c",shellscript,content +7052,10445689,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",644,0,"",shellscript,selection_keyboard +7053,10445884,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",644,0,"t",shellscript,content +7054,10445884,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",645,0,"",shellscript,selection_keyboard +7055,10445960,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",645,0,"i",shellscript,content +7056,10445961,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",646,0,"",shellscript,selection_keyboard +7057,10446057,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",646,0,"o",shellscript,content +7058,10446057,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",647,0,"",shellscript,selection_keyboard +7059,10446156,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",647,0,"n",shellscript,content +7060,10446157,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",648,0,"",shellscript,selection_keyboard +7061,10446217,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",648,0,"s",shellscript,content +7062,10446217,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",649,0,"",shellscript,selection_keyboard +7063,10446434,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",648,0,"",shellscript,selection_command +7064,10447585,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",475,0,"",shellscript,selection_mouse +7065,10448084,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",318,0,"",shellscript,selection_mouse +7066,10448948,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",144,0,"",shellscript,selection_mouse +7067,10448959,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",143,0,"",shellscript,selection_command +7068,10449461,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",415,0,"",shellscript,selection_mouse +7069,10449464,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",414,0,"",shellscript,selection_command +7070,10449917,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",352,0,"",shellscript,selection_mouse 
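Both job scripts pass --min_episode_length 1000 to generate_dataset.py, whose tail (captured at the top of this excerpt) re-rolls episodes that come back too short before writing meta_data.json. A hypothetical reconstruction of that guard from the visible print statement; run_episode is a stand-in, not a function from the recording:

def collect_episode(run_episode, min_episode_length: int = 1000):
    # Sample until the environment yields an episode long enough to keep.
    while True:
        observations_seq, actions_seq = run_episode()
        if len(observations_seq) >= min_episode_length:
            return observations_seq, actions_seq
        print(f"Episode too short ({len(observations_seq)}), resampling...")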
+7071,10451891,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",416,0,"",shellscript,selection_mouse +7072,10452542,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",415,0,"",shellscript,selection_mouse +7073,10452551,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",414,0,"",shellscript,selection_command +7074,10452762,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",414,2,"m\n",shellscript,selection_mouse +7075,10452763,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",414,3,"m\n\n",shellscript,selection_mouse +7076,10452764,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",415,1,"\n",shellscript,selection_command +7077,10452840,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",415,26,"\n\n\n# Log the sbatch script",shellscript,selection_mouse +7078,10452841,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",415,60,"\n\n\n# Log the sbatch script\ncat $0\nsource .venv/bin/activate\n",shellscript,selection_mouse +7079,10452841,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",415,89,"\n\n\n# Log the sbatch script\ncat $0\nsource .venv/bin/activate\n\npython generate_dataset.py \",shellscript,selection_mouse +7080,10453870,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",415,0,"",shellscript,selection_mouse +7081,10453886,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",414,0,"",shellscript,selection_command +7082,10454247,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",415,0,"",shellscript,selection_command +7083,10454656,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",415,0,"_",shellscript,content +7084,10454657,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",416,0,"",shellscript,selection_keyboard +7085,10455283,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",416,0,"g",shellscript,content +7086,10455284,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",417,0,"",shellscript,selection_keyboard +7087,10455428,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",417,0,"t",shellscript,content +7088,10455429,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",418,0,"",shellscript,selection_keyboard +7089,10455612,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",418,0,"_",shellscript,content +7090,10455613,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",419,0,"",shellscript,selection_keyboard +7091,10455833,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",419,0,"a",shellscript,content +7092,10455833,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",420,0,"",shellscript,selection_keyboard +7093,10455991,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",420,0,"c",shellscript,content +7094,10455992,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",421,0,"",shellscript,selection_keyboard 
+7095,10456165,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",421,0,"t",shellscript,content +7096,10456165,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",422,0,"",shellscript,selection_keyboard +7097,10456247,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",422,0,"i",shellscript,content +7098,10456248,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",423,0,"",shellscript,selection_keyboard +7099,10456353,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",423,0,"o",shellscript,content +7100,10456354,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",424,0,"",shellscript,selection_keyboard +7101,10456469,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",424,0,"n",shellscript,content +7102,10456469,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",425,0,"",shellscript,selection_keyboard +7103,10456489,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",425,0,"s",shellscript,content +7104,10456489,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",426,0,"",shellscript,selection_keyboard +7105,10456682,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",425,0,"",shellscript,selection_command +7106,10459050,"slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",85,0,"",shellscript,selection_mouse +7107,10469079,"TERMINAL",0,0,"sync-runner-2",,terminal_command +7108,10469123,"TERMINAL",0,0,"]633;E;2025-09-04 12:51:26 sync-runner-2;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;Csending incremental file list\r\n",,terminal_output +7109,10469182,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/jafar_og_reproduction/\r\nslurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m.sbatch\r\nslurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch\r\n",,terminal_output +7110,10469250,"TERMINAL",0,0,"\r\nsent 32,573 bytes received 241 bytes 65,628.00 bytes/sec\r\ntotal size is 128,389,237 speedup is 3,912.64\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs_2]633;D;0",,terminal_output +7111,10473592,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch",,terminal_command +7112,10473683,"TERMINAL",0,0,"]633;E;2025-09-04 12:51:30 sbatch slurm/jobs/mihir/horeka/jafar_og_reproduction/generate_dataset_10m_gt_actions.sbatch;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;CSubmitted batch job 3465651\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs_2]633;D;0",,terminal_output +7113,10474436,"TERMINAL",0,0,"queue",,terminal_command +7114,10474479,"TERMINAL",0,0,"]633;E;2025-09-04 12:51:31 queue;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +7115,10474563,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Thu Sep 4 12:51:31 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3465195 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)3465285 accelerat train_la tum_cte0 PD\t0:00\t 1 (Priority)3465286 accelerat train_to tum_cte0 PD\t0:00\t 1 (Priority)3465651 cpuonly generate tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output +7116,10475566,"TERMINAL",0,0,"2\t",,terminal_output +7117,10476641,"TERMINAL",0,0,"3\t",,terminal_output 
+7118,10477649,"TERMINAL",0,0,"4\t",,terminal_output +7119,10478695,"TERMINAL",0,0,"5\t",,terminal_output +7120,10479738,"TERMINAL",0,0,"6\t",,terminal_output +7121,10480776,"TERMINAL",0,0,"7\t",,terminal_output +7122,10481816,"TERMINAL",0,0,"8\t",,terminal_output +7123,10482873,"TERMINAL",0,0,"40\t",,terminal_output +7124,10483927,"TERMINAL",0,0,"1\t",,terminal_output +7125,10484957,"TERMINAL",0,0,"2\t",,terminal_output +7126,10485992,"TERMINAL",0,0,"3\t",,terminal_output +7127,10487030,"TERMINAL",0,0,"4\t",,terminal_output +7128,10487660,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs_2]633;D;0",,terminal_output +7129,10490173,"TERMINAL",0,0,"dev",,terminal_command +7130,10586113,"input_pipeline/download/download_array_records.sh",0,0,"#!/bin/bash\n\n# Download and extract array records from Hugging Face\n# \n# This script performs a two-step process:\n# 1. Downloads compressed array records from a Hugging Face dataset repository\n# 2. Extracts the compressed tar files in parallel for better performance\n#\n# Usage:\n# ./download_array_records.sh [hf_download_dir] [final_dataset_dir]\n#\n# Arguments:\n# hf_download_dir - Directory to store compressed downloads (default: data/minecraft_arrayrecords_compressed)\n# final_dataset_dir - Directory for extracted array records (default: data/minecraft_arrayrecords)\n\n# Set default directories if not provided as arguments\nhf_download_dir=""${1:-data/minecraft_arrayrecords_compressed}"" \nfinal_dataset_dir=""${2:-data/minecraft_arrayrecords}"" \n\nmkdir -p $hf_download_dir\nmkdir -p $final_dataset_dir\n\n# Step 1: Download compressed dataset from Hugging Face\necho ""Starting download from Hugging Face...""\nrepo_id=p-doom/open_ai_minecraft_arrayrecords_chunked\nstart_time_hf_download=$(date +%s)\n\nHF_HUB_ENABLE_HF_TRANSFER=1 HF_HUB_DISABLE_SYMLINKS=1 \\nhuggingface-cli download --repo-type dataset $repo_id --local-dir $hf_download_dir\n\nend_time_hf_download=$(date +%s)\necho ""Download completed. 
Time taken: $((end_time_hf_download - start_time_hf_download)) seconds""\n\n# Step 2: Extract compressed array records in parallel\necho ""Starting parallel extraction of tar files...""\nnum_workers=64 # Number of parallel extraction processes\nstart_time_uncompress=$(date +%s)\n\n# Find all shard tar files ('*.tar*' glob assumed) and extract them in parallel:\nfind ""$hf_download_dir"" -name '*.tar*' -print0 | \\nxargs -0 -P $num_workers -I {} bash -c 'echo ""Extracting {}""; tar -xf ""{}"" -C ""'$final_dataset_dir'""'\n\nend_time_uncompress=$(date +%s)\n\n# Display timing summary\necho ""================================""\necho ""Extraction completed successfully!""\necho ""Uncompress time: $((end_time_uncompress - start_time_uncompress)) seconds""\necho ""Download time: $((end_time_hf_download - start_time_hf_download)) seconds""\necho ""Total time: $((end_time_uncompress - start_time_hf_download)) seconds""\necho ""Final dataset location: $final_dataset_dir""\n",shellscript,tab +7131,10592744,"TERMINAL",0,0,"bash",,terminal_focus +7132,10615770,"TERMINAL",0,0,"bash",,terminal_focus +7133,10627463,".venv/pyvenv.cfg",0,0,"home = /home/hk-project-p0023960/tum_cte0515/.local/share/uv/python/cpython-3.10.18-linux-x86_64-gnu/bin\nimplementation = CPython\nuv = 0.7.12\nversion_info = 3.10.18\ninclude-system-site-packages = false\nprompt = jasmine\n",properties,tab +7134,10651994,"TERMINAL",0,0,"queue",,terminal_command +7135,10652068,"TERMINAL",0,0,"]633;E;2025-09-04 12:54:29 queue;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Thu Sep 4 12:54:29 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3465195 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)3465285 accelerat train_la tum_cte0 PD\t0:00\t 1 (Priority)3465286 accelerat train_to tum_cte0 PD\t0:00\t 1 (Priority)3465651 cpuonly generate tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output +7136,10653107,"TERMINAL",0,0,"30\t",,terminal_output +7137,10654134,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +7138,10658827,"TERMINAL",0,0,"git branch",,terminal_command +7139,10658951,"TERMINAL",0,0,"]633;E;2025-09-04 12:54:36 git branch;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C[?1h=\r add-wandb-name-and-tags\r\n before-nnx\r\n causal-mem-reduce\r\n causal-spatiotemporal-kv-cache\r\n causal-st-transformer\r\n causal-transformer-dynamics-model\r\n causal-transformer-nnx-no-kv-cache\r\n* coinrun-gt-actions\r\n convert-to-jax-array-in-iter\r\n correct-batched-sampling\r\n dev\r\n dont-let-tf-see-gpu\r\n feat/darkness-filter\r\n feat/explicit-image-dims\r\n fix-action-padding-lam-future-information-access\r\n fix-sampling\r\n fix-transformer-forwardpass\r\n fix/spatiotemporal-pe-once-in-STTransformer\r\n grad-norm-log-and-clip\r\n grain-dataloader\r\n input_pipeline/add-npy2array_record\r\n logging-variants\r\n lr-schedules\r\n main\r\n maskgit-different-maskprob-per-sample\r\n maskgit-sampling-iterative-unmasking-fix\r\n metrics-logging-for-dynamics-model\r\n monkey-patch\r\n new-arch-sampling\r\n preprocess_video\r\n refactor-tmp\r\n revised-dataloader\r\n runner\r\n runner-grain\r\n sample-ali-branch\r\n sample-from-different-topologies\r\n sampling-startframe-indexing-fix\r\n speedup-tfrecord-preprocessing\r\n tmp\r\n\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +7140,10662555,"TERMINAL",0,0,"git status",,terminal_command +7141,10662621,"TERMINAL",0,0,"]633;E;2025-09-04 12:54:39 git status;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;COn branch coinrun-gt-actions\r\nUntracked
files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tkiller.sh\r\n\tkiller_partition.sh\r\n\tlog.log\r\n\tlogs/\r\n\toverfit_dir.zip\r\n\tread_tf_record.py\r\n\trequirements-franz.txt\r\n\tsamples/\r\n\tscripts_cremers/\r\n\tslurm/\r\n\tutils/visualizer.py\r\n\r\nnothing added to commit but untracked files present (use ""git add"" to track)\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +7142,10664125,"TERMINAL",0,0,"git push",,terminal_command +7143,10664190,"TERMINAL",0,0,"]633;E;2025-09-04 12:54:41 git push;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;Cfatal: The current branch coinrun-gt-actions has no upstream branch.\r\nTo push the current branch and set the remote as upstream, use\r\n\r\n git push --set-upstream origin coinrun-gt-actions\r\n\r\nTo have this happen automatically for branches without a tracking\r\nupstream, see 'push.autoSetupRemote' in 'git help config'.\r\n\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;128",,terminal_output +7144,10668551,"TERMINAL",0,0,"git push --set-upstream origin coinrun-gt-actions",,terminal_command +7145,10668599,"TERMINAL",0,0,"]633;E;2025-09-04 12:54:45 git push --set-upstream origin coinrun-gt-actions;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +7146,10670074,"TERMINAL",0,0,"Enumerating objects: 5, done.\r\nCounting objects: 20% (1/5)\rCounting objects: 40% (2/5)\rCounting objects: 60% (3/5)\rCounting objects: 80% (4/5)\rCounting objects: 100% (5/5)\rCounting objects: 100% (5/5), done.\r\nDelta compression using up to 152 threads\r\nCompressing objects: 33% (1/3)\rCompressing objects: 66% (2/3)\rCompressing objects: 100% (3/3)\rCompressing objects: 100% (3/3), done.\r\nWriting objects: 33% (1/3)\rWriting objects: 66% (2/3)\rWriting objects: 100% (3/3)\rWriting objects: 100% (3/3), 444 bytes | 444.00 KiB/s, done.\r\nTotal 3 (delta 2), reused 0 (delta 0), pack-reused 0\r\n",,terminal_output +7147,10670169,"TERMINAL",0,0,"remote: Resolving deltas: 0% (0/2)\rremote: Resolving deltas: 50% (1/2)\rremote: Resolving deltas: 100% (2/2)\rremote: Resolving deltas: 100% (2/2), completed with 2 local objects.\r\n",,terminal_output +7148,10670343,"TERMINAL",0,0,"remote: \r\nremote: Create a pull request for 'coinrun-gt-actions' on GitHub by visiting:\r\nremote: https://github.com/p-doom/jasmine/pull/new/coinrun-gt-actions\r\nremote: \r\nTo github.com:p-doom/jasmine.git\r\n * [new branch] coinrun-gt-actions -> coinrun-gt-actions\r\nbranch 'coinrun-gt-actions' set up to track 'origin/coinrun-gt-actions'.\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +7149,10950648,"TERMINAL",0,0,"git branch",,terminal_command +7150,10950697,"TERMINAL",0,0,"]633;E;2025-09-04 12:59:27 git branch;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C[?1h=\r",,terminal_output +7151,10950978,"TERMINAL",0,0," add-wandb-name-and-tags\r\n before-nnx\r\n causal-mem-reduce\r\n causal-spatiotemporal-kv-cache\r\n causal-st-transformer\r\n causal-transformer-dynamics-model\r\n causal-transformer-nnx-no-kv-cache\r\n* coinrun-gt-actions\r\n convert-to-jax-array-in-iter\r\n correct-batched-sampling\r\n dev\r\n dont-let-tf-see-gpu\r\n feat/darkness-filter\r\n feat/explicit-image-dims\r\n fix-action-padding-lam-future-information-access\r\n fix-sampling\r\n fix-transformer-forwardpass\r\n fix/spatiotemporal-pe-once-in-STTransformer\r\n grad-norm-log-and-clip\r\n grain-dataloader\r\n input_pipeline/add-npy2array_record\r\n logging-variants\r\n lr-schedules\r\n main\r\n maskgit-different-maskprob-per-sample\r\n 
maskgit-sampling-iterative-unmasking-fix\r\n metrics-logging-for-dynamics-model\r\n monkey-patch\r\n new-arch-sampling\r\n preprocess_video\r\n refactor-tmp\r\n revised-dataloader\r\n runner\r\n runner-grain\r\n sample-ali-branch\r\n sample-from-different-topologies\r\n sampling-startframe-indexing-fix\r\n speedup-tfrecord-preprocessing\r\n tmp\r\n\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +7152,10968881,"TERMINAL",0,0,"git checkout input_pipeline/add-npy2array_record",,terminal_command +7153,10968918,"TERMINAL",0,0,"]633;E;2025-09-04 12:59:46 git checkout input_pipeline/add-npy2array_record;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +7154,10968997,"TERMINAL",0,0,"Switched to branch 'input_pipeline/add-npy2array_record'\r\nYour branch is up to date with 'origin/input_pipeline/add-npy2array_record'.\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +7155,10971357,"",0,0,"Switched from branch 'coinrun-gt-actions' to 'input_pipeline/add-npy2array_record'",,git_branch_checkout +7156,10971919,"generate_dataset.py",0,0,"""""""\nGenerates a dataset of random-action CoinRun episodes.\nEpisodes are saved individually as memory-mapped files for efficient loading.\n""""""\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nfrom gym3 import types_np\nimport numpy as np\nfrom procgen import ProcgenGym3Env\nimport tyro\nimport pickle\nimport json\nfrom array_record.python.array_record_module import ArrayRecordWriter \n\n\n\n@dataclass\nclass Args:\n num_episodes: int = 10000\n output_dir: str = ""data/coinrun_episodes""\n min_episode_length: int = 50\n\n\nargs = tyro.cli(Args)\noutput_dir = Path(args.output_dir)\noutput_dir.mkdir(parents=True, exist_ok=True)\n\n# --- Generate episodes ---\ni = 0\nepisode_metadata = []\nwhile i < args.num_episodes:\n seed = np.random.randint(0, 10000)\n env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed)\n observations_seq = []\n\n # --- Run episode ---\n for j in range(1000):\n env.act(types_np.sample(env.ac_space, bshape=(env.num,)))\n rew, obs, first = env.observe()\n observations_seq.append(obs[""rgb""])\n if first:\n break\n\n # --- Save episode ---\n if len(observations_seq) >= args.min_episode_length:\n observations_data = np.concatenate(observations_seq, axis=0)\n episode_path = output_dir / f""episode_{i}.array_record"" \n\n # --- Save as ArrayRecord ---\n writer = ArrayRecordWriter(str(episode_path), ""group_size:1"")\n record = {""raw_video"": observations_data.tobytes(), ""sequence_length"": len(observations_seq)}\n writer.write(pickle.dumps(record))\n writer.close()\n\n episode_metadata.append({""path"": str(episode_path), ""length"": len(observations_seq)})\n print(f""Episode {i} completed, length: {len(observations_seq)}"")\n i += 1\n else:\n print(f""Episode too short ({len(observations_seq)}), resampling..."")\n\n# --- Save metadata ---\nmetadata = {\n ""env"": ""coinrun"",\n ""num_episodes"": args.num_episodes,\n ""avg_episode_len"": np.mean([ep[""length""] for ep in episode_metadata]),\n ""episode_metadata"": episode_metadata,\n}\nwith open(output_dir / ""metadata.json"", ""w"") as f:\n json.dump(metadata, f)\n\nprint(f""Dataset generated with {len(episode_metadata)} valid episodes"")\n",python,tab +7157,10977165,"TERMINAL",0,0,"bash",,terminal_focus +7158,10977408,"TERMINAL",0,0,"[[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/705"", 444], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/630"", 313] , [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/263"", 136], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/747"", 488 ], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/672"", 103], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/743"", 14 5], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/376"", 410], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/789"", 7 16], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/785"", 434], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/326"", 274], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/251"", 434], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/322"",  169], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/735"", 43], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/368"",  417], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/806"", 610], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/660"" , 268], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/731"", 128], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/364 "", 743], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/848"", 374], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/36 0"", 417], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/773"", 575], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/8 86"", 120], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/310"", 242], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/ 723"", 911], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/427"", 482], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert /352"", 376], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/85"", 201], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert /832"", 1327], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/465"", 177], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbe rt/878"", 627], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/949"", 838], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qb ert/80"", 477], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1034"", 767], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/q bert/119"", 361], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/411"", 417], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/ qbert/824"", 174], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/457"", 184], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens /qbert/528"", 307], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/382"", 68], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens /qbert/820"", 720], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/157"", 80], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens /qbert/866"", 601], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/937"", 852], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screen s/qbert/88"", 214], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/862"", 553], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screen s/qbert/199"", 309], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/403"", 850], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/scree ns/qbert/107"", 376], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/103"", 341], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/scre ens/qbert/516"", 347], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/149"", 495], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/scr eens/qbert/929"", 85], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/854"", 454], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/scr eens/qbert/925"", 261], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/850"", 333], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/sc reens/qbert/483"", 199], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/187"", 171], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/s creens/qbert/892"", 486], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/963"", 379], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/ screens/qbert/596"", 258], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/800"", 97], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/ 
screens/qbert/208"", 184], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/500"", 266], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1 /screens/qbert/913"", 211], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/546"", 170], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v 1/screens/qbert/471"", 458], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/542"", 309], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_ v1/screens/qbert/955"", 576], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/659"", 183], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari _v1/screens/qbert/584"", 80], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/997"", 1064], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atar i_v1/screens/qbert/51"", 287], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/580"", 255], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atar i_v1/screens/qbert/993"", 350], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/121"", 416], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/ata ri_v1/screens/qbert/534"", 827], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/605"", 109], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/at ari_v1/screens/qbert/530"", 307], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/163"", 97], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/at ari_v1/screens/qbert/647"", 77], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/718"", 889], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/at ari_v1/screens/qbert/59"", 638], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/1017"", 1429], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/ atari_v1/screens/qbert/643"", 836], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/689"", 151], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game /atari_v1/screens/qbert/635"", 233], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/268"", 231], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-gam e/atari_v1/screens/qbert/706"", 94], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/339"", 767], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-gam e/atari_v1/screens/qbert/631"", 402], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/677"", 191], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-ga me/atari_v1/screens/qbert/377"", 332], 
[""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-game/atari_v1/screens/qbert/214"", 378], [""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/per-g :",,terminal_output +7159,10982175,"TERMINAL",0,0,"\r ESCESCqq\r:",,terminal_output +7160,10983184,"TERMINAL",0,0,"\r[?1l>[?1049l]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_atari/array_records]633;D;0",,terminal_output +7161,10986721,"TERMINAL",0,0,"bash",,terminal_focus +7162,10986722,"TERMINAL",0,0,"bash",,terminal_focus +7163,10988101,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"import os\nimport numpy as np\nfrom PIL import Image\nimport tyro\nfrom dataclasses import dataclass\nimport pickle\nimport json\nimport multiprocessing as mp\nfrom array_record.python.array_record_module import ArrayRecordWriter\n\n\n@dataclass\nclass Args:\n input_path: str\n output_path: str\n original_fps: int = 60\n target_fps: int = 10\n target_width: int = 64 \n env_name: str = ""atari""\n\ndef preprocess_pngs(input_dir, output_path, original_fps, target_fps, target_width=None):\n print(f""Processing PNGs in {input_dir}"")\n try:\n png_files = sorted([\n f for f in os.listdir(input_dir)\n if f.lower().endswith('.png')\n ], key=lambda x: int(os.path.splitext(x)[0]))\n\n if not png_files:\n print(f""No PNG files found in {input_dir}"")\n return {""path"": input_dir, ""length"": 0} \n\n # Downsample indices\n n_total = len(png_files)\n if original_fps == target_fps:\n selected_indices = np.arange(n_total)\n else:\n n_target = int(np.floor(n_total * target_fps / original_fps))\n selected_indices = np.linspace(0, n_total-1, n_target, dtype=int)\n\n selected_files = [png_files[i] for i in selected_indices]\n\n # Load images\n frames = []\n for fname in selected_files:\n img = Image.open(os.path.join(input_dir, fname)).convert(""RGB"")\n if target_width is not None:\n w, h = img.size # PIL gives (width, height)\n if w != target_width:\n target_height = int(round(h * (target_width / float(w))))\n resample_filter = Image.LANCZOS\n img = img.resize((target_width, target_height), resample=resample_filter)\n frames.append(np.array(img))\n\n frames = np.stack(frames, axis=0) # (n_frames, H, W, 3)\n environment = os.path.basename(os.path.dirname(input_dir)) \n episode_id = os.path.basename(input_dir)\n # Write to array_record\n os.makedirs(output_path, exist_ok=True)\n out_file = os.path.join(\n output_path,\n f""{environment}_{episode_id}.array_record""\n )\n writer = ArrayRecordWriter(str(out_file), ""group_size:1"")\n record = {""raw_video"": frames.tobytes(), \n ""environment"": environment,\n ""sequence_length"": frames.shape[0]}\n writer.write(pickle.dumps(record))\n writer.close()\n print(f""Saved {frames.shape[0]} frames to {out_file}"")\n return {""path"": input_dir, ""length"": frames.shape[0]}\n except Exception as e:\n print(f""Error processing {input_dir}: {e}"")\n return {""path"": input_dir, ""length"": 0}\n\ndef main():\n args = tyro.cli(Args)\n os.makedirs(args.output_path, exist_ok=True)\n print(f""Output path: {args.output_path}"")\n\n games = [\n os.path.join(args.input_path, d)\n for d in os.listdir(args.input_path)\n if os.path.isdir(os.path.join(args.input_path, d))\n ]\n episodes = [\n os.path.join(game, d)\n for game in games\n for d in os.listdir(game)\n ]\n\n results = []\n num_processes = mp.cpu_count()\n print(f""Number of processes: {num_processes}"")\n pool_args = [\n (episode, args.output_path, args.original_fps, args.target_fps, 
args.target_width)\n for episode in episodes\n ]\n with mp.Pool(processes=num_processes) as pool:\n for result in pool.starmap(preprocess_pngs, pool_args):\n results.append(result)\n\n print(""Done converting png to array_record files"")\n\n # count the number of failed videos\n failed_videos = [result for result in results if result[""length""] == 0]\n short_videos = [result for result in results if result[""length""] < 1600]\n num_successful_videos = len(results) - len(failed_videos) - len(short_videos)\n print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short videos: {len(short_videos)}"")\n print(f""Number of successful videos: {num_successful_videos}"")\n print(f""Number of total videos: {len(results)}"")\n\n metadata = {\n ""env"": args.env_name,\n ""total_videos"": len(results),\n ""num_successful_videos"": len(results) - len(failed_videos) - len(short_videos),\n ""num_failed_videos"": len(failed_videos),\n ""num_short_videos"": len(short_videos),\n ""avg_episode_len"": np.mean([ep[""length""] for ep in results]),\n ""episode_metadata"": results,\n }\n\n with open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(metadata, f)\n\n print(""Done."")\n\nif __name__ == ""__main__"":\n main()",python,tab +7164,10994033,"generate_dataset.py",0,0,"",python,tab +7165,10996328,"input_pipeline/preprocess/video_to_array_records.py",0,0,"import ffmpeg\nimport numpy as np\nimport os\nimport tyro\nimport multiprocessing as mp\nfrom dataclasses import dataclass\nimport json\nimport pickle\nfrom array_record.python.array_record_module import ArrayRecordWriter\n\n\n@dataclass\nclass Args:\n input_path: str\n output_path: str\n target_width: int = 160\n target_height: int = 90\n target_fps: int = 10\n env_name: str = ""minecraft""\n\n\ndef preprocess_video(\n idx, in_filename, output_path, target_width, target_height, target_fps\n):\n print(f""Processing video {idx}, Filename: {in_filename}"")\n try:\n out, _ = (\n ffmpeg.input(in_filename)\n .filter(""fps"", fps=target_fps, round=""up"")\n .filter(""scale"", target_width, target_height)\n .output(""pipe:"", format=""rawvideo"", pix_fmt=""rgb24"")\n .run(capture_stdout=True, quiet=True)\n )\n\n output_path = os.path.join(\n output_path,\n os.path.splitext(os.path.basename(in_filename))[0] + "".array_record"",\n )\n\n writer = ArrayRecordWriter(str(output_path), ""group_size:1"")\n\n frame_size = target_height * target_width * 3\n n_frames = len(out) // frame_size\n frames = np.frombuffer(out, np.uint8).reshape(\n n_frames, target_height, target_width, 3\n )\n\n print(f""Saving video {idx} to {output_path}"")\n record = {""raw_video"": frames.tobytes(), ""sequence_length"": n_frames}\n writer.write(pickle.dumps(record))\n writer.close()\n\n return {""path"": in_filename, ""length"": n_frames}\n except Exception as e:\n print(f""Error processing video {idx} ({in_filename}): {e}"")\n return {""path"": in_filename, ""length"": 0}\n\n\ndef main():\n args = tyro.cli(Args)\n\n os.makedirs(args.output_path, exist_ok=True)\n print(f""Output path: {args.output_path}"")\n\n num_processes = mp.cpu_count()\n print(f""Number of processes: {num_processes}"")\n\n print(""Converting video to array_record files..."")\n pool_args = [\n (\n idx,\n os.path.join(args.input_path, in_filename),\n args.output_path,\n args.target_width,\n args.target_height,\n args.target_fps,\n )\n for idx, in_filename in enumerate(os.listdir(args.input_path))\n if in_filename.endswith("".mp4"") or in_filename.endswith("".webm"")\n ]\n\n 
results = []\n with mp.Pool(processes=num_processes) as pool:\n for result in pool.starmap(preprocess_video, pool_args):\n results.append(result)\n print(""Done converting video to array_record files"")\n\n # count the number of failed videos\n failed_videos = [result for result in results if result[""length""] == 0]\n short_videos = [result for result in results if result[""length""] < 1600]\n num_successful_videos = len(results) - len(failed_videos) - len(short_videos)\n print(f""Number of failed videos: {len(failed_videos)}"")\n print(f""Number of short videos: {len(short_videos)}"")\n print(f""Number of successful videos: {num_successful_videos}"")\n print(f""Number of total videos: {len(results)}"")\n\n metadata = {\n ""env"": args.env_name,\n ""total_videos"": len(results),\n ""num_successful_videos"": len(results) - len(failed_videos) - len(short_videos),\n ""num_failed_videos"": len(failed_videos),\n ""num_short_videos"": len(short_videos),\n ""avg_episode_len"": np.mean([ep[""length""] for ep in results]),\n ""episode_metadata"": results,\n }\n\n with open(os.path.join(args.output_path, ""metadata.json""), ""w"") as f:\n json.dump(metadata, f)\n\n\nif __name__ == ""__main__"":\n main()\n",python,tab +7166,11012041,"input_pipeline/preprocess/video_to_array_records.py",922,0,"",python,selection_mouse +7167,11012055,"input_pipeline/preprocess/video_to_array_records.py",921,0,"",python,selection_command +7168,11016432,"input_pipeline/preprocess/video_to_array_records.py",996,0,"",python,selection_mouse +7169,11019520,"input_pipeline/preprocess/video_to_array_records.py",1194,0,"",python,selection_mouse +7170,11019710,"input_pipeline/preprocess/video_to_array_records.py",1190,6,"frames",python,selection_mouse +7171,11020364,"input_pipeline/preprocess/video_to_array_records.py",1211,0,"",python,selection_mouse +7172,11020544,"input_pipeline/preprocess/video_to_array_records.py",1202,10,"frombuffer",python,selection_mouse +7173,11020973,"input_pipeline/preprocess/video_to_array_records.py",1225,0,"",python,selection_mouse +7174,11021144,"input_pipeline/preprocess/video_to_array_records.py",1221,5,"uint8",python,selection_mouse +7175,11021293,"input_pipeline/preprocess/video_to_array_records.py",1220,6,".uint8",python,selection_mouse +7176,11021323,"input_pipeline/preprocess/video_to_array_records.py",1218,8,"np.uint8",python,selection_mouse +7177,11021360,"input_pipeline/preprocess/video_to_array_records.py",1217,9," np.uint8",python,selection_mouse +7178,11021420,"input_pipeline/preprocess/video_to_array_records.py",1216,10,", np.uint8",python,selection_mouse +7179,11022214,"input_pipeline/preprocess/video_to_array_records.py",1213,0,"",python,selection_mouse +7180,11022368,"input_pipeline/preprocess/video_to_array_records.py",1213,3,"out",python,selection_mouse +7181,11022635,"input_pipeline/preprocess/video_to_array_records.py",1213,4,"out,",python,selection_mouse +7182,11022636,"input_pipeline/preprocess/video_to_array_records.py",1213,7,"out, np",python,selection_mouse +7183,11022636,"input_pipeline/preprocess/video_to_array_records.py",1213,8,"out, np.",python,selection_mouse +7184,11022637,"input_pipeline/preprocess/video_to_array_records.py",1213,13,"out, np.uint8",python,selection_mouse +7185,11030397,"generate_dataset.py",0,0,"",python,tab +7186,11032966,"generate_dataset.py",1463,0,"",python,selection_mouse +7187,11033121,"generate_dataset.py",1451,17,"observations_data",python,selection_mouse 
+7188,11042896,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +7189,11044743,"input_pipeline/preprocess/video_to_array_records.py",1210,0,"",python,selection_mouse +7190,11044895,"input_pipeline/preprocess/video_to_array_records.py",1202,10,"frombuffer",python,selection_mouse +7191,11045498,"input_pipeline/preprocess/video_to_array_records.py",1299,0,"",python,selection_mouse +7192,11045515,"input_pipeline/preprocess/video_to_array_records.py",1298,0,"",python,selection_command +7193,11045629,"input_pipeline/preprocess/video_to_array_records.py",1299,0,"",python,selection_mouse +7194,11045632,"input_pipeline/preprocess/video_to_array_records.py",1298,0,"",python,selection_command +7195,11046329,"input_pipeline/preprocess/video_to_array_records.py",1254,0,"",python,selection_mouse +7196,11046476,"input_pipeline/preprocess/video_to_array_records.py",1249,8,"n_frames",python,selection_mouse +7197,11048039,"input_pipeline/preprocess/video_to_array_records.py",1296,0,"",python,selection_mouse +7198,11048426,"input_pipeline/preprocess/video_to_array_records.py",1299,0,"",python,selection_mouse +7199,11048440,"input_pipeline/preprocess/video_to_array_records.py",1298,0,"",python,selection_command +7200,11050909,"input_pipeline/preprocess/video_to_array_records.py",1268,0,"",python,selection_mouse +7201,11051048,"input_pipeline/preprocess/video_to_array_records.py",1259,13,"target_height",python,selection_mouse +7202,11052064,"input_pipeline/preprocess/video_to_array_records.py",1215,0,"",python,selection_mouse +7203,11052195,"input_pipeline/preprocess/video_to_array_records.py",1213,3,"out",python,selection_mouse +7204,11073420,"generate_dataset.py",0,0,"",python,tab +7205,11074921,"generate_dataset.py",1564,0,"",python,selection_mouse +7206,11074958,"generate_dataset.py",1563,0,"",python,selection_command +7207,11075539,"generate_dataset.py",1587,0,"",python,selection_mouse +7208,11075542,"generate_dataset.py",1586,0,"",python,selection_command +7209,11076113,"generate_dataset.py",1564,0,"",python,selection_mouse +7210,11076116,"generate_dataset.py",1563,0,"",python,selection_command +7211,11076880,"TERMINAL",0,0,"bash",,terminal_focus +7212,11078528,"TERMINAL",0,0,"queue",,terminal_command +7213,11078592,"TERMINAL",0,0,"]633;E;2025-09-04 13:01:35 queue;98e58a9e-4278-4ee6-ae9b-7614f41efecb]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Thu Sep 4 13:01:35 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3465195 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)3465286 accelerat train_to tum_cte0 PD\t0:00\t 1 (Priority)3465285 accelerat train_la tum_cte0 PD\t0:00\t 1 (Priority)3465651 cpuonly generate tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output +7214,11079624,"TERMINAL",0,0,"6\t",,terminal_output +7215,11080236,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs_2]633;D;0",,terminal_output +7216,11081778,"TERMINAL",0,0,"queue",,terminal_command +7217,11081829,"TERMINAL",0,0,"]633;E;2025-09-04 13:01:39 queue;98e58a9e-4278-4ee6-ae9b-7614f41efecb]633;C",,terminal_output +7218,11081883,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1990.localdomain: Thu Sep 4 13:01:39 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3465195 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)3465286 accelerat train_to tum_cte0 PD\t0:00\t 1 (Priority)3465285 accelerat train_la tum_cte0 PD\t0:00\t 1 (Priority)3465651 cpuonly generate tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output 
+7219,11082936,"TERMINAL",0,0,"40\t",,terminal_output +7220,11083931,"TERMINAL",0,0,"1\t",,terminal_output +7221,11084964,"TERMINAL",0,0,"2\t",,terminal_output +7222,11086011,"TERMINAL",0,0,"3\t",,terminal_output +7223,11087093,"TERMINAL",0,0,"4\t",,terminal_output +7224,11088097,"TERMINAL",0,0,"5\t",,terminal_output +7225,11089140,"TERMINAL",0,0,"6\t",,terminal_output +7226,11090209,"TERMINAL",0,0,"7\t",,terminal_output +7227,11091236,"TERMINAL",0,0,"8\t",,terminal_output +7228,11092271,"TERMINAL",0,0,"9\t",,terminal_output +7229,11093319,"TERMINAL",0,0,"50\t",,terminal_output +7230,11094405,"TERMINAL",0,0,"1\t",,terminal_output +7231,11095393,"TERMINAL",0,0,"2\t",,terminal_output +7232,11096437,"TERMINAL",0,0,"3\t",,terminal_output +7233,11097489,"TERMINAL",0,0,"4\t",,terminal_output +7234,11098509,"TERMINAL",0,0,"5\t",,terminal_output +7235,11099577,"TERMINAL",0,0,"6\t",,terminal_output +7236,11100603,"TERMINAL",0,0,"7\t",,terminal_output +7237,11101648,"TERMINAL",0,0,"8\t",,terminal_output +7238,11102300,"TERMINAL",0,0,"bash",,terminal_focus +7239,11102723,"TERMINAL",0,0,"9\t",,terminal_output +7240,11103662,"TERMINAL",0,0,"logs",,terminal_command +7241,11103692,"TERMINAL",0,0,"]633;E;2025-09-04 13:02:00 logs;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir]633;D;0",,terminal_output +7242,11103755,"TERMINAL",0,0,"2:00\t",,terminal_output +7243,11104759,"TERMINAL",0,0,"ls",,terminal_command +7244,11104805,"TERMINAL",0,0,"1\t",,terminal_output +7245,11104815,"TERMINAL",0,0,"]633;E;2025-09-04 13:02:01 ls;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +7246,11104936,"TERMINAL",0,0,"atari train_dyn_yolorun_3333026.log train_lam_action_space_scaling_50_3329789.log train_tokenizer_batch_size_scaling_4_node_3321524.log\r\nbig_run train_dyn_yolorun_3333448.log train_lam_action_space_scaling_50_3329804.log train_tokenizer_batch_size_scaling_8_node_3320176.log\r\nbig-runs train_dyn_yolorun_3335345.log train_lam_action_space_scaling_50_3331286.log train_tokenizer_batch_size_scaling_8_node_3321525.log\r\ncausal train_dyn_yolorun_3335362.log train_lam_action_space_scaling_6_3318549.log train_tokenizer_minecraft_overfit_sample_3309656.log\r\ncoinrun train_dyn_yolorun_3348592.log train_lam_action_space_scaling_6_3320178.log train_tokenizer_model_size_scaling_127M_3317233.log\r\ndata_coinrun train_dyn_yolorun_new_arch_3351743.log train_lam_action_space_scaling_6_3321528.log train_tokenizer_model_size_scaling_127M_3318554.log\r\nlam train_dyn_yolorun_new_arch_3352103.log train_lam_action_space_scaling_6_3329790.log train_tokenizer_model_size_scaling_140M_3313562.log\r\nmaskgit train_dyn_yolorun_new_arch_3352115.log train_lam_action_space_scaling_6_3329805.log train_tokenizer_model_size_scaling_140M_3316019.log\r\nmaskgit-maskprob-fix train_dyn_yolorun_new_arch_3358457.log train_lam_action_space_scaling_6_3331287.log train_tokenizer_model_size_scaling_200M_3313563.log\r\npreprocess train_lam_action_space_scaling_10_3320179.log train_lam_action_space_scaling_8_3318550.log train_tokenizer_model_size_scaling_200M_3316020.log\r\ntrain_dyn_causal_180M_3372931.log train_lam_action_space_scaling_10_3321529.log train_lam_action_space_scaling_8_3329791.log train_tokenizer_model_size_scaling_227M_3317234.log\r\ntrain_dyn_causal_180M_3372963.log train_lam_action_space_scaling_10_3329786.log train_lam_action_space_scaling_8_3329806.log 
train_tokenizer_model_size_scaling_227M_3318555.log\r\ntrain_dyn_causal_180M_3372969.log train_lam_action_space_scaling_10_3329801.log train_lam_action_space_scaling_8_3331288.log train_tokenizer_model_size_scaling_227M_3320173.log\r\ntrain_dyn_causal_180M_3373107.log train_lam_action_space_scaling_10_3331283.log train_lam_minecraft_overfit_sample_3309655.log train_tokenizer_model_size_scaling_227M_3321523.log\r\ntrain_dyn_causal_255M_3372932.log train_lam_action_space_scaling_12_3318546.log train_lam_model_size_scaling_38M_3317098.log train_tokenizer_model_size_scaling_37M_3313565.log\r\ntrain_dyn_causal_255M_3372970.log train_lam_action_space_scaling_12_3320177.log train_lam_model_size_scaling_38M_3317115.log train_tokenizer_model_size_scaling_37M_3316022.log\r\ntrain_dyn_causal_255M_3373108.log train_lam_action_space_scaling_12_3321527.log train_lam_model_size_scaling_38M_3317231.log train_tokenizer_model_size_scaling_37M_3317232.log\r\ntrain_dyn_causal_356M_3372934.log train_lam_action_space_scaling_12_3329787.log train_tokenizer_batch_size_scaling_16_node_3321526.log train_tokenizer_model_size_scaling_37M_3317239.log\r\ntrain_dyn_causal_356M_3372971.log train_lam_action_space_scaling_12_3329802.log train_tokenizer_batch_size_scaling_1_node_3318551.log train_tokenizer_model_size_scaling_37M_3318556.log\r\ntrain_dyn_causal_356M_3373109.log train_lam_action_space_scaling_12_3331284.log train_tokenizer_batch_size_scaling_2_node_3318552.log train_tokenizer_model_size_scaling_74M_3318557.log\r\ntrain_dyn_causal_500M_3372936.log train_lam_action_space_scaling_20_3318547.log train_tokenizer_batch_size_scaling_2_node_3330806.log train_tokenizer_model_size_scaling_74M_3320174.log\r\ntrain_dyn_causal_500M_3372972.log train_lam_action_space_scaling_20_3329788.log train_tokenizer_batch_size_scaling_2_node_3330848.log train_tokenizer_model_size_scaling_74M_3321522.log\r\ntrain_dyn_causal_500M_3373110.log train_lam_action_space_scaling_20_3329803.log train_tokenizer_batch_size_scaling_2_node_3331282.log train_tokenizer_model_size_scaling_80M_3313564.log\r\ntrain_dyn_new_arch-bugfixed-spatial-shift_3359343.log train_lam_action_space_scaling_20_3331285.log train_tokenizer_batch_size_scaling_4_node_3318553.log train_tokenizer_model_size_scaling_80M_3316026.log\r\ntrain_dyn_new_arch-bugfixed-temporal-shift_3359349.log train_lam_action_space_scaling_50_3320180.log train_tokenizer_batch_size_scaling_4_node_3320175.log yoloruns\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir]633;D;0",,terminal_output +7247,11105875,"TERMINAL",0,0,"2\t",,terminal_output +7248,11106356,"TERMINAL",0,0,"cd lam/",,terminal_command +7249,11106374,"TERMINAL",0,0,"]633;E;2025-09-04 13:02:03 cd lam/;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam]633;D;0",,terminal_output +7250,11106642,"TERMINAL",0,0,"ls",,terminal_command +7251,11106692,"TERMINAL",0,0,"]633;E;2025-09-04 13:02:03 ls;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +7252,11106893,"TERMINAL",0,0,"4\t",,terminal_output +7253,11106893,"TERMINAL",0,0,"train_lam_minecraft_8node_3431870.log train_lam_minecraft_8node_3454890.log train_lam_minecraft_8node_darkness_filter_133M_3454956.log train_lam_minecraft_8node_darkness_filter_37M_3457969.log\r\ntrain_lam_minecraft_8node_3431875.log train_lam_minecraft_8node_3454917.log train_lam_minecraft_8node_darkness_filter_133M_3465196.log 
train_lam_minecraft_8node_darkness_filter_37M_3463210.log\r\ntrain_lam_minecraft_8node_3431876.log train_lam_minecraft_8node_3454941.log train_lam_minecraft_8node_darkness_filter_311M_3454955.log train_lam_minecraft_8node_darkness_filter_400M_3454954.log\r\ntrain_lam_minecraft_8node_3431885.log train_lam_minecraft_8node_3454944.log train_lam_minecraft_8node_darkness_filter_311M_3465197.log train_lam_minecraft_8node_darkness_filter_400M_3465198.log\r\ntrain_lam_minecraft_8node_3431895.log train_lam_minecraft_8node_3454948.log train_lam_minecraft_8node_darkness_filter_37M_3454953.log\r\n]0;tum_cte0515@hkn1990:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam]633;D;0",,terminal_output +7254,11107932,"TERMINAL",0,0,"5\t",,terminal_output +7255,11108947,"TERMINAL",0,0,"6\t",,terminal_output +7256,11110073,"TERMINAL",0,0,"7\t",,terminal_output +7257,11111096,"TERMINAL",0,0,"8\t",,terminal_output +7258,11111928,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/train_lam_minecraft_8node_darkness_filter_400M_3465198.log",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=8\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/%x_%j.log\n#SBATCH --job-name=train_lam_minecraft_8node_darkness_filter_400M\n#SBATCH --reservation=llmtum\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\n# slurm_job_id=$SLURM_JOB_ID\nslurm_job_id=3454954\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/mihir/lam/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_lam.py \\n --save_ckpt \\n --restore_ckpt \\n --wandb_id $slurm_job_id \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=160 \\n --init_lr=0 \\n --max_lr=1e-4 \\n --darkness_threshold=50 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=lam-minecraft-8-node-darkness-filter-400M-$slurm_job_id \\n --tags lam minecraft 8-node darkness-filter 400M \\n --entity instant-uv \\n --project jafar \\n --num_latents=100 \\n --model_dim=1024 \\n --num_blocks=12 \\n --num_heads=16 \\n --latent_dim=64 \\n --ffn_dim=4096 \\n --data_dir $array_records_dir &\n\nchild_pid=$!\n\nwait $child_pid\n/var/spool/slurmd/job3465198/slurm_script: line 39: .venv/bin/activate: No such file or 
directory\nSLURM_JOB_USER=tum_cte0515\nSLURM_TASKS_PER_NODE=4(x8)\nSLURM_JOB_UID=999226\nSLURM_TASK_PID=2876239\nSLURM_JOB_GPUS=0,1,2,3\nSLURM_LOCALID=0\nSLURM_SUBMIT_DIR=/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs\nSLURMD_NODENAME=hkn0532\nSLURM_JOB_START_TIME=1756977884\nSLURM_CLUSTER_NAME=hk\nSLURM_JOB_END_TIME=1757150684\nSLURM_CPUS_ON_NODE=24\nSLURM_JOB_CPUS_PER_NODE=24(x8)\nSLURM_GPUS_ON_NODE=4\nSLURM_GTIDS=0\nSLURM_JOB_PARTITION=accelerated\nSLURM_TRES_PER_TASK=cpu=5\nSLURM_OOM_KILL_STEP=0\nSLURM_JOB_NUM_NODES=8\nSLURM_JOBID=3465198\nSLURM_JOB_QOS=normal\nSLURM_PROCID=0\nSLURM_CPUS_PER_TASK=5\nSLURM_JOB_RESERVATION=llmtum\nSLURM_NTASKS=32\nSLURM_TOPOLOGY_ADDR=hkibb.hkibbi1.hkibbi1e11.hkn0532\nSLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.switch.node\nSLURM_SCRIPT_CONTEXT=prolog_task\nSLURM_NODELIST=hkn[0532-0533,0536,0722-0723,0728,0731,0813]\nSLURM_JOB_ACCOUNT=hk-project-p0023960\nSLURM_PRIO_PROCESS=0\nSLURM_NPROCS=32\nSLURM_NNODES=8\nSLURM_SUBMIT_HOST=hkn1990.localdomain\nSLURM_JOB_ID=3465198\nSLURM_NODEID=0\nSLURM_CONF=/etc/slurm/slurm.conf\nSLURM_JOB_NAME=train_lam_minecraft_8node_darkness_filter_400M\nSLURM_NTASKS_PER_NODE=4\nSLURM_JOB_GID=502226\nSLURM_JOB_NODELIST=hkn[0532-0533,0536,0722-0723,0728,0731,0813]\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nGpuFreq=control_disabled\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\nsrun: error: hkn0723: tasks 16-19: Exited with exit code 1\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\n import grain\nModuleNotFoundError: No module named 'grain'\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\nTraceback (most recent call last):\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\n import grain\nModuleNotFoundError: No module named 'grain'\n import grain\nModuleNotFoundError: No module named 'grain'\n import grain\nModuleNotFoundError: No module named 'grain'\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\n import grain\nModuleNotFoundError: No module named 'grain'\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\n import grain\nModuleNotFoundError: No module named 'grain'\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\n import grain\nModuleNotFoundError: No module named 'grain'\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\n import grain\nModuleNotFoundError: No module named 'grain'\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\n import grain\nModuleNotFoundError: No module named 'grain'\nTraceback (most recent call last):\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\n import grain\nModuleNotFoundError: No module named 'grain'\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \n import grain\nModuleNotFoundError: No module named 'grain'\n import grain\nTraceback (most recent call last):\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine_jobs/train_lam.py"", line 16, in \nModuleNotFoundError: No module named 'grain'\n import grain\nModuleNotFoundError: No module named 'grain'\n import grain\nModuleNotFoundError: No module named 'grain'\nsrun: error: hkn0813: tasks 28-31: Exited with exit code 1\nsrun: error: hkn0536: tasks 8-11: Exited with exit code 1\nsrun: error: hkn0722: tasks 12-15: Exited with exit code 1\nsrun: error: hkn0728: tasks 20-23: Exited with exit code 1\nsrun: error: hkn0533: tasks 4-7: Exited with exit code 1\nsrun: error: hkn0532: tasks 0-3: Exited with exit code 1\nsrun: error: hkn0731: tasks 24-27: Exited with exit code 1\n\n============================= JOB FEEDBACK =============================\n\nJob ID: 3465198\nCluster: hk\nUser/Group: tum_cte0515/hk-project-p0023960\nAccount: hk-project-p0023960\nState: FAILED (exit code 1)\nPartition: accelerated\nNodes: 8\nCores per node: 24\nNodelist: hkn[0532-0533,0536,0722-0723,0728,0731,0813]\nCPU Utilized: 00:01:36\nCPU Efficiency: 0.42% of 06:17:36 core-walltime\nJob Wall-clock time: 00:01:58\nStarttime: Thu Sep 4 11:24:44 2025\nEndtime: Thu Sep 4 11:26:42 2025\nMemory Utilized: 4.20 GB (estimated maximum)\nMemory Efficiency: 0.00% of 0.00 MB (0.00 MB/node)\nEnergy Consumed: 575026 Joule / 159.729444444444 Watthours\nAverage node power draw: 4873.10169491525 Watt\n",log,tab +7259,11112116,"TERMINAL",0,0,"9\t",,terminal_output +7260,11112523,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/train_lam_minecraft_8node_darkness_filter_400M_3465198.log",914,0,"",log,selection_mouse +7261,11113144,"TERMINAL",0,0,"10\t",,terminal_output +7262,11113180,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/train_lam_minecraft_8node_darkness_filter_400M_3465198.log",11704,0,"",log,selection_command +7263,11114143,"TERMINAL",0,0,"1\t",,terminal_output +7264,11115298,"TERMINAL",0,0,"2\t",,terminal_output +7265,11116218,"TERMINAL",0,0,"3\t",,terminal_output +7266,11117393,"TERMINAL",0,0,"4\t",,terminal_output +7267,11118356,"TERMINAL",0,0,"5\t",,terminal_output +7268,11120314,"TERMINAL",0,0,"6\t",,terminal_output +7269,11121358,"TERMINAL",0,0,"8\t",,terminal_output +7270,11122360,"TERMINAL",0,0,"9\t",,terminal_output +7271,11123486,"TERMINAL",0,0,"20\t",,terminal_output +7272,11124512,"TERMINAL",0,0,"1\t",,terminal_output +7273,11125534,"TERMINAL",0,0,"2\t",,terminal_output +7274,11126558,"TERMINAL",0,0,"3\t",,terminal_output +7275,11127584,"TERMINAL",0,0,"4\t",,terminal_output +7276,11128613,"TERMINAL",0,0,"5\t",,terminal_output 
+7277,11129688,"TERMINAL",0,0,"6\t",,terminal_output +7278,11130566,"requirements.txt",0,0,"# core requirements\ndm_pix>=0.4.3\neinops>=0.8.0\nflax>=0.10.7\njax[cuda12]>=0.6.2\noptax>=0.2.3\ntyro>=0.8.5\nwandb>=0.17.4\ngrain>=0.2.10\narray-record>=0.7.2\n\n# input pipeline\nffmpeg-python==0.2.0\nhf-transfer==0.1.9\nhuggingface-hub[cli]>=0.34.3\npre-commit>=4.2.0\nprocgen>=0.10.7\ntqdm>=4.67.1",pip-requirements,tab +7279,11130709,"TERMINAL",0,0,"7\t",,terminal_output +7280,11131783,"TERMINAL",0,0,"8\t",,terminal_output +7281,11132789,"TERMINAL",0,0,"9\t",,terminal_output +7282,11134029,"TERMINAL",0,0,"31\t",,terminal_output +7283,11134875,"TERMINAL",0,0,"2\t",,terminal_output +7284,11135980,"TERMINAL",0,0,"3\t",,terminal_output +7285,11136985,"TERMINAL",0,0,"4\t",,terminal_output +7286,11138034,"TERMINAL",0,0,"5\t",,terminal_output +7287,11138591,"TERMINAL",0,0,"runner",,terminal_command +7288,11138621,"TERMINAL",0,0,"]633;E;2025-09-04 13:02:35 runner;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +7289,11139158,"TERMINAL",0,0,"6\t",,terminal_output +7290,11140105,"TERMINAL",0,0,"7\t",,terminal_output +7291,11141205,"TERMINAL",0,0,"8\t",,terminal_output +7292,11142188,"TERMINAL",0,0,"9\t",,terminal_output +7293,11143251,"TERMINAL",0,0,"40\t",,terminal_output +7294,11144288,"TERMINAL",0,0,"1\t",,terminal_output +7295,11145379,"TERMINAL",0,0,"2\t",,terminal_output +7296,11146362,"TERMINAL",0,0,"3\t",,terminal_output +7297,11147451,"TERMINAL",0,0,"4\t",,terminal_output +7298,11148474,"TERMINAL",0,0,"5\t",,terminal_output +7299,11149602,"TERMINAL",0,0,"6\t",,terminal_output +7300,11150627,"TERMINAL",0,0,"7\t",,terminal_output +7301,11151700,"TERMINAL",0,0,"8\t",,terminal_output +7302,11152640,"TERMINAL",0,0,"9\t",,terminal_output +7303,11153700,"TERMINAL",0,0,"50\t",,terminal_output +7304,11154717,"TERMINAL",0,0,"1\t",,terminal_output +7305,11155759,"TERMINAL",0,0,"2\t",,terminal_output +7306,11156797,"TERMINAL",0,0,"3\t",,terminal_output +7307,11157841,"TERMINAL",0,0,"5\t",,terminal_output +7308,11158919,"TERMINAL",0,0,"6\t",,terminal_output +7309,11159942,"TERMINAL",0,0,"7\t",,terminal_output +7310,11160973,"TERMINAL",0,0,"8\t",,terminal_output +7311,11161990,"TERMINAL",0,0,"9\t",,terminal_output +7312,11163123,"TERMINAL",0,0,"3:00\t",,terminal_output +7313,11164143,"TERMINAL",0,0,"1\t",,terminal_output +7314,11165165,"TERMINAL",0,0,"2\t",,terminal_output +7315,11166193,"TERMINAL",0,0,"3\t",,terminal_output +7316,11167192,"TERMINAL",0,0,"4\t",,terminal_output +7317,11168239,"TERMINAL",0,0,"5\t",,terminal_output +7318,11169276,"TERMINAL",0,0,"6\t",,terminal_output +7319,11170388,"TERMINAL",0,0,"7\t",,terminal_output +7320,11171413,"TERMINAL",0,0,"8\t",,terminal_output +7321,11172435,"TERMINAL",0,0,"9\t",,terminal_output +7322,11173460,"TERMINAL",0,0,"10\t",,terminal_output +7323,11174489,"TERMINAL",0,0,"1\t",,terminal_output +7324,11175609,"TERMINAL",0,0,"2\t",,terminal_output +7325,11176570,"TERMINAL",0,0,"3\t",,terminal_output +7326,11177609,"TERMINAL",0,0,"4\t",,terminal_output +7327,11178680,"TERMINAL",0,0,"5\t",,terminal_output +7328,11179708,"TERMINAL",0,0,"6\t",,terminal_output +7329,11180734,"TERMINAL",0,0,"7\t",,terminal_output +7330,11181769,"TERMINAL",0,0,"8\t",,terminal_output +7331,11182808,"TERMINAL",0,0,"9\t",,terminal_output +7332,11183848,"TERMINAL",0,0,"21\t",,terminal_output +7333,11184928,"TERMINAL",0,0,"2\t",,terminal_output +7334,11185954,"TERMINAL",0,0,"3\t",,terminal_output 
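Note that the requirements.txt opened in record 7278 already pins grain>=0.2.10 and array-record>=0.7.2, so the ModuleNotFoundError above points at an environment that was never built at the expected path, not at a missing pin. A one-off rebuild along these lines would restore it; venv/pip is an assumed toolchain, since the recording only shows that a .venv directory was expected in the submit directory.

# Sketch: recreate the .venv expected by the sbatch script.
cd ~/Projects/jasmine_jobs
python -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt    # pulls in grain>=0.2.10, jax[cuda12], etc.
python -c "import grain"           # sanity check before resubmitting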
+7335,11186976,"TERMINAL",0,0,"4\t",,terminal_output +7336,11188004,"TERMINAL",0,0,"5\t",,terminal_output +7337,11189157,"TERMINAL",0,0,"6\t",,terminal_output +7338,11190085,"TERMINAL",0,0,"7\t",,terminal_output +7339,11191120,"TERMINAL",0,0,"8\t",,terminal_output +7340,11192159,"TERMINAL",0,0,"9\t",,terminal_output +7341,11193201,"TERMINAL",0,0,"30\t",,terminal_output +7342,11194243,"TERMINAL",0,0,"1\t",,terminal_output +7343,11195285,"TERMINAL",0,0,"2\t",,terminal_output +7344,11196332,"TERMINAL",0,0,"3\t",,terminal_output +7345,11197374,"TERMINAL",0,0,"4\t",,terminal_output +7346,11198418,"TERMINAL",0,0,"5\t",,terminal_output +7347,11199463,"TERMINAL",0,0,"6\t",,terminal_output +7348,11200599,"TERMINAL",0,0,"7\t",,terminal_output +7349,11201556,"TERMINAL",0,0,"8\t",,terminal_output +7350,11202609,"TERMINAL",0,0,"9\t",,terminal_output +7351,11203644,"TERMINAL",0,0,"40\t",,terminal_output +7352,11204685,"TERMINAL",0,0,"1\t",,terminal_output +7353,11205732,"TERMINAL",0,0,"2\t",,terminal_output +7354,11206779,"TERMINAL",0,0,"3\t",,terminal_output +7355,11207829,"TERMINAL",0,0,"5\t",,terminal_output +7356,11208874,"TERMINAL",0,0,"6\t",,terminal_output +7357,11209922,"TERMINAL",0,0,"7\t",,terminal_output +7358,11210964,"TERMINAL",0,0,"8\t",,terminal_output +7359,11212011,"TERMINAL",0,0,"9\t",,terminal_output +7360,11213057,"TERMINAL",0,0,"50\t",,terminal_output +7361,11214101,"TERMINAL",0,0,"1\t",,terminal_output +7362,11215141,"TERMINAL",0,0,"2\t",,terminal_output +7363,11216180,"TERMINAL",0,0,"3\t",,terminal_output +7364,11217235,"TERMINAL",0,0,"4\t",,terminal_output +7365,11218265,"TERMINAL",0,0,"5\t",,terminal_output +7366,11219316,"TERMINAL",0,0,"6\t",,terminal_output +7367,11220362,"TERMINAL",0,0,"7\t",,terminal_output +7368,11221399,"TERMINAL",0,0,"8\t",,terminal_output +7369,11222444,"TERMINAL",0,0,"9\t",,terminal_output +7370,11223475,"TERMINAL",0,0,"4:00\t",,terminal_output +7371,11224515,"TERMINAL",0,0,"1\t",,terminal_output +7372,11225554,"TERMINAL",0,0,"2\t",,terminal_output +7373,11226590,"TERMINAL",0,0,"3\t",,terminal_output +7374,11227631,"TERMINAL",0,0,"4\t",,terminal_output +7375,11228666,"TERMINAL",0,0,"5\t",,terminal_output +7376,11229709,"TERMINAL",0,0,"6\t",,terminal_output +7377,11230755,"TERMINAL",0,0,"7\t",,terminal_output +7378,11231803,"TERMINAL",0,0,"8\t",,terminal_output +7379,11232832,"TERMINAL",0,0,"10\t",,terminal_output +7380,11233875,"TERMINAL",0,0,"1\t",,terminal_output +7381,11234927,"TERMINAL",0,0,"2\t",,terminal_output +7382,11235958,"TERMINAL",0,0,"3\t",,terminal_output +7383,11236995,"TERMINAL",0,0,"4\t",,terminal_output +7384,11238060,"TERMINAL",0,0,"5\t",,terminal_output +7385,11239083,"TERMINAL",0,0,"6\t",,terminal_output +7386,11240125,"TERMINAL",0,0,"7\t",,terminal_output +7387,11242108,"TERMINAL",0,0,"8\t",,terminal_output +7388,11243113,"TERMINAL",0,0,"20\t",,terminal_output +7389,11244157,"TERMINAL",0,0,"1\t",,terminal_output +7390,11245190,"TERMINAL",0,0,"2\t",,terminal_output +7391,11246244,"TERMINAL",0,0,"3\t",,terminal_output +7392,11247277,"TERMINAL",0,0,"4\t",,terminal_output +7393,11248637,"TERMINAL",0,0,"5\t",,terminal_output +7394,11249361,"TERMINAL",0,0,"6\t",,terminal_output +7395,11250386,"TERMINAL",0,0,"7\t",,terminal_output +7396,11251447,"TERMINAL",0,0,"8\t",,terminal_output +7397,11252486,"TERMINAL",0,0,"9\t",,terminal_output +7398,11253541,"TERMINAL",0,0,"30\t",,terminal_output +7399,11254580,"TERMINAL",0,0,"1\t",,terminal_output +7400,11255634,"TERMINAL",0,0,"2\t",,terminal_output 
+7401,11256680,"TERMINAL",0,0,"3\t",,terminal_output +7402,11257699,"TERMINAL",0,0,"4\t",,terminal_output +7403,11258734,"TERMINAL",0,0,"5\t",,terminal_output +7404,11259767,"TERMINAL",0,0,"6\t",,terminal_output +7405,11260809,"TERMINAL",0,0,"7\t",,terminal_output +7406,11261853,"TERMINAL",0,0,"9\t",,terminal_output +7407,11262923,"TERMINAL",0,0,"40\t",,terminal_output +7408,11263963,"TERMINAL",0,0,"1\t",,terminal_output +7409,11265001,"TERMINAL",0,0,"2\t",,terminal_output +7410,11266024,"TERMINAL",0,0,"3\t",,terminal_output +7411,11267109,"TERMINAL",0,0,"4\t",,terminal_output +7412,11267397,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +7413,11268122,"TERMINAL",0,0,"5\t",,terminal_output +7414,11269155,"TERMINAL",0,0,"6\t",,terminal_output +7415,11270237,"TERMINAL",0,0,"7\t",,terminal_output +7416,11271277,"TERMINAL",0,0,"8\t",,terminal_output +7417,11272317,"TERMINAL",0,0,"9\t",,terminal_output +7418,11273431,"TERMINAL",0,0,"50\t",,terminal_output +7419,11274571,"TERMINAL",0,0,"1\t",,terminal_output +7420,11275437,"TERMINAL",0,0,"2\t",,terminal_output +7421,11276485,"TERMINAL",0,0,"3\t",,terminal_output +7422,11277526,"TERMINAL",0,0,"4\t",,terminal_output +7423,11278569,"TERMINAL",0,0,"5\t",,terminal_output +7424,11279613,"TERMINAL",0,0,"6\t",,terminal_output +7425,11280667,"TERMINAL",0,0,"7\t",,terminal_output +7426,11281664,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +7427,11281817,"TERMINAL",0,0,"8\t",,terminal_output +7428,11282743,"TERMINAL",0,0,"9\t",,terminal_output +7429,11283800,"TERMINAL",0,0,"5:00\t",,terminal_output +7430,11284830,"TERMINAL",0,0,"2\t",,terminal_output +7431,11285875,"TERMINAL",0,0,"3\t",,terminal_output +7432,11286938,"TERMINAL",0,0,"4\t",,terminal_output +7433,11287963,"TERMINAL",0,0,"5\t",,terminal_output +7434,11289010,"TERMINAL",0,0,"6\t",,terminal_output +7435,11290056,"TERMINAL",0,0,"7\t",,terminal_output +7436,11291103,"TERMINAL",0,0,"8\t",,terminal_output +7437,11292158,"TERMINAL",0,0,"9\t",,terminal_output +7438,11293193,"TERMINAL",0,0,"10\t",,terminal_output +7439,11294254,"TERMINAL",0,0,"1\t",,terminal_output +7440,11295306,"TERMINAL",0,0,"2\t",,terminal_output +7441,11296333,"TERMINAL",0,0,"3\t",,terminal_output +7442,11297389,"TERMINAL",0,0,"4\t",,terminal_output +7443,11298436,"TERMINAL",0,0,"5\t",,terminal_output +7444,11299487,"TERMINAL",0,0,"6\t",,terminal_output +7445,11300544,"TERMINAL",0,0,"7\t",,terminal_output +7446,11301580,"TERMINAL",0,0,"8\t",,terminal_output +7447,11302636,"TERMINAL",0,0,"9\t",,terminal_output +7448,11303674,"TERMINAL",0,0,"20\t",,terminal_output +7449,11304717,"TERMINAL",0,0,"1\t",,terminal_output +7450,11305776,"TERMINAL",0,0,"2\t",,terminal_output +7451,11306806,"TERMINAL",0,0,"3\t",,terminal_output +7452,11307853,"TERMINAL",0,0,"5\t",,terminal_output +7453,11308902,"TERMINAL",0,0,"6\t",,terminal_output +7454,11309943,"TERMINAL",0,0,"7\t",,terminal_output +7455,11310984,"TERMINAL",0,0,"8\t",,terminal_output +7456,11312027,"TERMINAL",0,0,"9\t",,terminal_output +7457,11313077,"TERMINAL",0,0,"30\t",,terminal_output +7458,11314127,"TERMINAL",0,0,"1\t",,terminal_output +7459,11315173,"TERMINAL",0,0,"2\t",,terminal_output +7460,11316225,"TERMINAL",0,0,"3\t",,terminal_output +7461,11317278,"TERMINAL",0,0,"4\t",,terminal_output +7462,11318314,"TERMINAL",0,0,"5\t",,terminal_output +7463,11319359,"TERMINAL",0,0,"6\t",,terminal_output +7464,11320402,"TERMINAL",0,0,"7\t",,terminal_output +7465,11321455,"TERMINAL",0,0,"8\t",,terminal_output 
+7466,11322482,"TERMINAL",0,0,"9\t",,terminal_output +7467,11323526,"TERMINAL",0,0,"40\t",,terminal_output +7468,11324567,"TERMINAL",0,0,"1\t",,terminal_output +7469,11325625,"TERMINAL",0,0,"2\t",,terminal_output +7470,11326676,"TERMINAL",0,0,"3\t",,terminal_output +7471,11327698,"TERMINAL",0,0,"4\t",,terminal_output +7472,11328736,"TERMINAL",0,0,"5\t",,terminal_output +7473,11329781,"TERMINAL",0,0,"6\t",,terminal_output +7474,11330839,"TERMINAL",0,0,"8\t",,terminal_output +7475,11331876,"TERMINAL",0,0,"9\t",,terminal_output +7476,11332923,"TERMINAL",0,0,"50\t",,terminal_output +7477,11333971,"TERMINAL",0,0,"1\t",,terminal_output +7478,11335022,"TERMINAL",0,0,"2\t",,terminal_output +7479,11336089,"TERMINAL",0,0,"3\t",,terminal_output +7480,11337119,"TERMINAL",0,0,"4\t",,terminal_output +7481,11338202,"TERMINAL",0,0,"5\t",,terminal_output +7482,11339189,"TERMINAL",0,0,"6\t",,terminal_output +7483,11340230,"TERMINAL",0,0,"7\t",,terminal_output +7484,11341268,"TERMINAL",0,0,"8\t",,terminal_output +7485,11342458,"TERMINAL",0,0,"9\t",,terminal_output +7486,11343361,"TERMINAL",0,0,"6:00\t",,terminal_output +7487,11344401,"TERMINAL",0,0,"1\t",,terminal_output +7488,11345443,"TERMINAL",0,0,"2\t",,terminal_output +7489,11346474,"TERMINAL",0,0,"3\t",,terminal_output +7490,11347514,"TERMINAL",0,0,"4\t",,terminal_output +7491,11348553,"TERMINAL",0,0,"5\t",,terminal_output +7492,11349589,"TERMINAL",0,0,"6\t",,terminal_output +7493,11350632,"TERMINAL",0,0,"7\t",,terminal_output +7494,11351705,"TERMINAL",0,0,"8\t",,terminal_output +7495,11352712,"TERMINAL",0,0,"9\t",,terminal_output +7496,11353756,"TERMINAL",0,0,"10\t",,terminal_output +7497,11354807,"TERMINAL",0,0,"1\t",,terminal_output +7498,11355857,"TERMINAL",0,0,"3\t",,terminal_output +7499,11356894,"TERMINAL",0,0,"4\t",,terminal_output +7500,11357938,"TERMINAL",0,0,"5\t",,terminal_output +7501,11359003,"TERMINAL",0,0,"6\t",,terminal_output +7502,11360005,"TERMINAL",0,0,"7\t",,terminal_output +7503,11361049,"TERMINAL",0,0,"8\t",,terminal_output +7504,11363265,"TERMINAL",0,0,"9\t",,terminal_output +7505,11364320,"TERMINAL",0,0,"21\t",,terminal_output +7506,11365402,"TERMINAL",0,0,"2\t",,terminal_output +7507,11366409,"TERMINAL",0,0,"3\t",,terminal_output +7508,11367457,"TERMINAL",0,0,"4\t",,terminal_output +7509,11368499,"TERMINAL",0,0,"5\t",,terminal_output +7510,11369548,"TERMINAL",0,0,"6\t",,terminal_output +7511,11370588,"TERMINAL",0,0,"7\t",,terminal_output +7512,11371633,"TERMINAL",0,0,"8\t",,terminal_output +7513,11372695,"TERMINAL",0,0,"9\t",,terminal_output +7514,11373718,"TERMINAL",0,0,"30\t",,terminal_output +7515,11374765,"TERMINAL",0,0,"1\t",,terminal_output +7516,11375800,"TERMINAL",0,0,"2\t",,terminal_output +7517,11376845,"TERMINAL",0,0,"4\t",,terminal_output +7518,11377904,"TERMINAL",0,0,"5\t",,terminal_output +7519,11378951,"TERMINAL",0,0,"6\t",,terminal_output +7520,11380006,"TERMINAL",0,0,"7\t",,terminal_output +7521,11381039,"TERMINAL",0,0,"8\t",,terminal_output +7522,11382115,"TERMINAL",0,0,"9\t",,terminal_output +7523,11383122,"TERMINAL",0,0,"40\t",,terminal_output +7524,11384171,"TERMINAL",0,0,"1\t",,terminal_output +7525,11385217,"TERMINAL",0,0,"2\t",,terminal_output +7526,11386268,"TERMINAL",0,0,"3\t",,terminal_output +7527,11387305,"TERMINAL",0,0,"4\t",,terminal_output +7528,11388356,"TERMINAL",0,0,"5\t",,terminal_output +7529,11389389,"TERMINAL",0,0,"6\t",,terminal_output +7530,11390438,"TERMINAL",0,0,"7\t",,terminal_output +7531,11391483,"TERMINAL",0,0,"8\t",,terminal_output 
+7532,11392530,"TERMINAL",0,0,"9\t",,terminal_output +7533,11393567,"TERMINAL",0,0,"50\t",,terminal_output +7534,11394605,"TERMINAL",0,0,"1\t",,terminal_output +7535,11395645,"TERMINAL",0,0,"2\t",,terminal_output +7536,11396715,"TERMINAL",0,0,"3\t",,terminal_output +7537,11397729,"TERMINAL",0,0,"4\t",,terminal_output +7538,11398776,"TERMINAL",0,0,"5\t",,terminal_output +7539,11399807,"TERMINAL",0,0,"6\t",,terminal_output +7540,11400856,"TERMINAL",0,0,"8\t",,terminal_output +7541,11401883,"TERMINAL",0,0,"9\t",,terminal_output +7542,11402925,"TERMINAL",0,0,"7:00\t",,terminal_output +7543,11403969,"TERMINAL",0,0,"1\t",,terminal_output +7544,11405004,"TERMINAL",0,0,"2\t",,terminal_output +7545,11406040,"TERMINAL",0,0,"3\t",,terminal_output +7546,11407114,"TERMINAL",0,0,"4\t",,terminal_output +7547,11408119,"TERMINAL",0,0,"5\t",,terminal_output +7548,11409167,"TERMINAL",0,0,"6\t",,terminal_output +7549,11410196,"TERMINAL",0,0,"7\t",,terminal_output +7550,11411235,"TERMINAL",0,0,"8\t",,terminal_output +7551,11412281,"TERMINAL",0,0,"9\t",,terminal_output +7552,11413326,"TERMINAL",0,0,"10\t",,terminal_output +7553,11414365,"TERMINAL",0,0,"1\t",,terminal_output +7554,11415412,"TERMINAL",0,0,"2\t",,terminal_output +7555,11416451,"TERMINAL",0,0,"3\t",,terminal_output +7556,11417484,"TERMINAL",0,0,"4\t",,terminal_output +7557,11418519,"TERMINAL",0,0,"5\t",,terminal_output +7558,11419565,"TERMINAL",0,0,"6\t",,terminal_output +7559,11420608,"TERMINAL",0,0,"7\t",,terminal_output +7560,11421661,"TERMINAL",0,0,"8\t",,terminal_output +7561,11422697,"TERMINAL",0,0,"9\t",,terminal_output +7562,11423738,"TERMINAL",0,0,"20\t",,terminal_output +7563,11424782,"TERMINAL",0,0,"1\t",,terminal_output +7564,11425818,"TERMINAL",0,0,"3\t",,terminal_output +7565,11426865,"TERMINAL",0,0,"4\t",,terminal_output +7566,11427938,"TERMINAL",0,0,"5\t",,terminal_output +7567,11428977,"TERMINAL",0,0,"6\t",,terminal_output +7568,11430078,"TERMINAL",0,0,"7\t",,terminal_output +7569,11431069,"TERMINAL",0,0,"8\t",,terminal_output +7570,11432122,"TERMINAL",0,0,"9\t",,terminal_output +7571,11433148,"TERMINAL",0,0,"30\t",,terminal_output +7572,11434174,"TERMINAL",0,0,"1\t",,terminal_output +7573,11435204,"TERMINAL",0,0,"2\t",,terminal_output +7574,11436332,"TERMINAL",0,0,"3\t",,terminal_output +7575,11437282,"TERMINAL",0,0,"4\t",,terminal_output +7576,11438371,"TERMINAL",0,0,"5\t",,terminal_output +7577,11439368,"TERMINAL",0,0,"6\t",,terminal_output +7578,11440418,"TERMINAL",0,0,"7\t",,terminal_output +7579,11441546,"TERMINAL",0,0,"8\t",,terminal_output +7580,11442569,"TERMINAL",0,0,"9\t",,terminal_output +7581,11443526,"TERMINAL",0,0,"40\t",,terminal_output +7582,11444576,"TERMINAL",0,0,"1\t",,terminal_output +7583,11445654,"TERMINAL",0,0,"2\t",,terminal_output +7584,11446645,"TERMINAL",0,0,"3\t",,terminal_output +7585,11447717,"TERMINAL",0,0,"4\t",,terminal_output +7586,11448749,"TERMINAL",0,0,"5\t",,terminal_output +7587,11449772,"TERMINAL",0,0,"6\t",,terminal_output +7588,11450810,"input_pipeline/preprocess/video_to_array_records.py",3151,0,"",python,selection_mouse +7589,11450835,"input_pipeline/preprocess/video_to_array_records.py",3150,0,"",python,selection_command +7590,11450847,"TERMINAL",0,0,"7\t",,terminal_output +7591,11451992,"TERMINAL",0,0,"9\t",,terminal_output +7592,11453015,"TERMINAL",0,0,"50\t",,terminal_output +7593,11454044,"TERMINAL",0,0,"1\t",,terminal_output +7594,11455233,"TERMINAL",0,0,"2\t",,terminal_output +7595,11456085,"TERMINAL",0,0,"3\t",,terminal_output +7596,11457084,"TERMINAL",0,0,"4\t",,terminal_output 
+7597,11458080,"TERMINAL",0,0,"watch",,terminal_focus +7598,11458153,"TERMINAL",0,0,"5\t",,terminal_output +7599,11459263,"TERMINAL",0,0,"6\t",,terminal_output +7600,11460391,"TERMINAL",0,0,"7\t",,terminal_output +7601,11460804,"TERMINAL",0,0,"bash",,terminal_focus +7602,11461250,"TERMINAL",0,0,"8\t",,terminal_output +7603,11462299,"TERMINAL",0,0,"9\t",,terminal_output +7604,11462821,"TERMINAL",0,0,"scancel 3465195",,terminal_command +7605,11462872,"TERMINAL",0,0,"]633;E;2025-09-04 13:08:00 scancel 3465195;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +7606,11463357,"TERMINAL",0,0,"\r8:00\t",,terminal_output +7607,11464484,"TERMINAL",0,0,"1\t",,terminal_output +7608,11465507,"TERMINAL",0,0,"2\t",,terminal_output +7609,11466493,"TERMINAL",0,0,"3\t",,terminal_output +7610,11467557,"TERMINAL",0,0,"4\t",,terminal_output +7611,11468580,"TERMINAL",0,0,"5\t",,terminal_output +7612,11469606,"TERMINAL",0,0,"6\t",,terminal_output +7613,11470635,"TERMINAL",0,0,"7\t",,terminal_output +7614,11471739,"TERMINAL",0,0,"8\t",,terminal_output +7615,11472762,"TERMINAL",0,0,"9\t",,terminal_output +7616,11473775,"TERMINAL",0,0,"10\t",,terminal_output +7617,11474389,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/lam/train_lam_minecraft_8node_darkness_filter_400M_3465198.log",0,0,"",log,tab +7618,11474809,"TERMINAL",0,0,"1\t",,terminal_output +7619,11475870,"TERMINAL",0,0,"3\t",,terminal_output +7620,11476914,"TERMINAL",0,0,"4\t",,terminal_output +7621,11477952,"TERMINAL",0,0,"5\t",,terminal_output +7622,11478869,"TERMINAL",0,0,"python",,terminal_command +7623,11478897,"TERMINAL",0,0,"]633;E;2025-09-04 13:08:16 python;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +7624,11479023,"TERMINAL",0,0,"6\t",,terminal_output +7625,11479186,"TERMINAL",0,0,"Python 3.10.18 (main, Jun 4 2025, 17:36:27) [Clang 20.1.4 ] on linux\r\nType ""help"", ""copyright"", ""credits"" or ""license"" for more information.\r\n",,terminal_output +7626,11479741,"TERMINAL",0,0,">>> ",,terminal_output +7627,11480020,"TERMINAL",0,0,"i",,terminal_output +7628,11480031,"TERMINAL",0,0,"7\t",,terminal_output +7629,11480159,"TERMINAL",0,0,"[?25lm[?25h",,terminal_output +7630,11480415,"TERMINAL",0,0,"[?25lp[?25h[?25lo[?25h",,terminal_output +7631,11480601,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +7632,11480762,"TERMINAL",0,0,"[?25lt[?25h",,terminal_output +7633,11480859,"TERMINAL",0,0,"[?25l [?25h",,terminal_output +7634,11481023,"TERMINAL",0,0,"[?25lg[?25h",,terminal_output +7635,11481087,"TERMINAL",0,0,"[?25lr[?25h",,terminal_output +7636,11481088,"TERMINAL",0,0,"8\t",,terminal_output +7637,11481319,"TERMINAL",0,0,"[?25la[?25h[?25li[?25h[?25ln[?25h",,terminal_output +7638,11481526,"TERMINAL",0,0,"\r\n",,terminal_output +7639,11482136,"TERMINAL",0,0,"9\t",,terminal_output +7640,11483224,"TERMINAL",0,0,"20\t",,terminal_output +7641,11484451,"TERMINAL",0,0,"15la6to",,terminal_output +7642,11485578,"TERMINAL",0,0,"2\t",,terminal_output +7643,11486522,"TERMINAL",0,0,"3\t",,terminal_output +7644,11487625,"TERMINAL",0,0,"4\t",,terminal_output +7645,11488651,"TERMINAL",0,0,"5\t",,terminal_output +7646,11489721,"TERMINAL",0,0,"6\t",,terminal_output +7647,11490802,"TERMINAL",0,0,"7\t",,terminal_output +7648,11491769,"TERMINAL",0,0,"8\t",,terminal_output +7649,11492822,"TERMINAL",0,0,"9\t",,terminal_output +7650,11493868,"TERMINAL",0,0,"31\t",,terminal_output +7651,11494918,"TERMINAL",0,0,"2\t",,terminal_output 
+7652,11496026,"TERMINAL",0,0,"3\t",,terminal_output +7653,11497049,"TERMINAL",0,0,"4\t",,terminal_output +7654,11497944,"TERMINAL",0,0,">>> ",,terminal_output +7655,11498097,"TERMINAL",0,0,"5\t",,terminal_output +7656,11499202,"TERMINAL",0,0,"6\t",,terminal_output +7657,11500160,"TERMINAL",0,0,"7\t",,terminal_output +7658,11501255,"TERMINAL",0,0,"8\t",,terminal_output +7659,11502276,"TERMINAL",0,0,"9\t",,terminal_output +7660,11503397,"TERMINAL",0,0,"40\t",,terminal_output +7661,11503903,"TERMINAL",0,0,"^D\r\n",,terminal_output +7662,11504198,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +7663,11504359,"TERMINAL",0,0,"1\t",,terminal_output +7664,11505445,"TERMINAL",0,0,"2\t",,terminal_output +7665,11506469,"TERMINAL",0,0,"3\t",,terminal_output +7666,11507602,"TERMINAL",0,0,"4\t",,terminal_output +7667,11508535,"TERMINAL",0,0,"5\t",,terminal_output +7668,11509642,"TERMINAL",0,0,"6\t",,terminal_output +7669,11510666,"TERMINAL",0,0,"7\t",,terminal_output +7670,11511694,"TERMINAL",0,0,"8\t",,terminal_output +7671,11512741,"TERMINAL",0,0,"9\t",,terminal_output +7672,11513754,"TERMINAL",0,0,"50\t",,terminal_output +7673,11514803,"TERMINAL",0,0,"1\t",,terminal_output +7674,11515841,"TERMINAL",0,0,"3\t",,terminal_output +7675,11516889,"TERMINAL",0,0,"4\t",,terminal_output +7676,11517940,"TERMINAL",0,0,"5\t",,terminal_output +7677,11518262,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch",,terminal_command +7678,11518329,"TERMINAL",0,0,"]633;E;2025-09-04 13:08:55 sbatch slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-400M.sbatch;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;CSubmitted batch job 3465675\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +7679,11519026,"TERMINAL",0,0,"\r63465675 accelerat train_la tum_cte0 PD\t0:00\t 8 (Resources)",,terminal_output +7680,11520034,"TERMINAL",0,0,"7\t",,terminal_output +7681,11521081,"TERMINAL",0,0,"8\t",,terminal_output +7682,11521833,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-311M.sbatch",,terminal_command +7683,11521904,"TERMINAL",0,0,"]633;E;2025-09-04 13:08:59 sbatch slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-311M.sbatch;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;CSubmitted batch job 3465676\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +7684,11522131,"TERMINAL",0,0,"\r93465676 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)",,terminal_output +7685,11523184,"TERMINAL",0,0,"9:00\t",,terminal_output +7686,11524224,"TERMINAL",0,0,"1\t",,terminal_output +7687,11524770,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-133M.sbatch",,terminal_command +7688,11524828,"TERMINAL",0,0,"]633;E;2025-09-04 13:09:01 sbatch slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-133M.sbatch;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;CSubmitted batch job 3465677\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +7689,11525300,"TERMINAL",0,0,"\r23465677 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)",,terminal_output +7690,11526362,"TERMINAL",0,0,"3\t",,terminal_output +7691,11527460,"TERMINAL",0,0,"4\t",,terminal_output +7692,11527723,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch",,terminal_command +7693,11527778,"TERMINAL",0,0,"]633;E;2025-09-04 13:09:04 sbatch 
slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-base.sbatch;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;CSubmitted batch job 3465678\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +7694,11528485,"TERMINAL",0,0,"\r53465678 accelerat train_la tum_cte0 PD\t0:00\t 8 (Priority)",,terminal_output +7695,11529507,"TERMINAL",0,0,"6\t",,terminal_output +7696,11530635,"TERMINAL",0,0,"7\t",,terminal_output +7697,11531699,"TERMINAL",0,0,"8\t",,terminal_output +7698,11532683,"TERMINAL",0,0,"9\t",,terminal_output +7699,11533710,"TERMINAL",0,0,"10\t",,terminal_output +7700,11534729,"TERMINAL",0,0,"1\t",,terminal_output +7701,11535769,"TERMINAL",0,0,"2\t",,terminal_output +7702,11536788,"TERMINAL",0,0,"3\t",,terminal_output +7703,11537907,"TERMINAL",0,0,"5\t",,terminal_output +7704,11538919,"TERMINAL",0,0,"6\t",,terminal_output +7705,11539972,"TERMINAL",0,0,"7\t",,terminal_output +7706,11541082,"TERMINAL",0,0,"8\t",,terminal_output +7707,11542105,"TERMINAL",0,0,"9\t",,terminal_output +7708,11543127,"TERMINAL",0,0,"20\t",,terminal_output +7709,11544152,"TERMINAL",0,0,"1\t",,terminal_output +7710,11545176,"TERMINAL",0,0,"2\t",,terminal_output +7711,11546206,"TERMINAL",0,0,"3\t",,terminal_output +7712,11547327,"TERMINAL",0,0,"4\t",,terminal_output +7713,11548302,"TERMINAL",0,0,"5\t",,terminal_output +7714,11549337,"TERMINAL",0,0,"6\t",,terminal_output +7715,11550400,"TERMINAL",0,0,"7\t",,terminal_output +7716,11551448,"TERMINAL",0,0,"8\t",,terminal_output +7717,11552480,"TERMINAL",0,0,"9\t",,terminal_output +7718,11553578,"TERMINAL",0,0,"30\t",,terminal_output +7719,11554603,"TERMINAL",0,0,"1\t",,terminal_output +7720,11555633,"TERMINAL",0,0,"2\t",,terminal_output +7721,11556706,"TERMINAL",0,0,"3\t",,terminal_output +7722,11557771,"TERMINAL",0,0,"4\t",,terminal_output +7723,11558796,"TERMINAL",0,0,"5\t",,terminal_output +7724,11559827,"TERMINAL",0,0,"6\t",,terminal_output +7725,11560878,"TERMINAL",0,0,"8\t",,terminal_output +7726,11561931,"TERMINAL",0,0,"9\t",,terminal_output +7727,11562969,"TERMINAL",0,0,"40\t",,terminal_output +7728,11564035,"TERMINAL",0,0,"1\t",,terminal_output +7729,11565057,"TERMINAL",0,0,"2\t",,terminal_output +7730,11566170,"TERMINAL",0,0,"3\t",,terminal_output +7731,11567154,"TERMINAL",0,0,"4\t",,terminal_output +7732,11568222,"TERMINAL",0,0,"5\t",,terminal_output +7733,11569248,"TERMINAL",0,0,"6\t",,terminal_output +7734,11570472,"TERMINAL",0,0,"7\t",,terminal_output +7735,11571348,"TERMINAL",0,0,"8\t",,terminal_output +7736,11572413,"TERMINAL",0,0,"9\t",,terminal_output +7737,11573542,"TERMINAL",0,0,"50\t",,terminal_output +7738,11574564,"TERMINAL",0,0,"1\t",,terminal_output +7739,11575593,"TERMINAL",0,0,"2\t",,terminal_output +7740,11576615,"TERMINAL",0,0,"3\t",,terminal_output +7741,11577661,"TERMINAL",0,0,"4\t",,terminal_output +7742,11578694,"TERMINAL",0,0,"5\t",,terminal_output +7743,11579787,"TERMINAL",0,0,"6\t",,terminal_output +7744,11580816,"TERMINAL",0,0,"7\t",,terminal_output +7745,11581839,"TERMINAL",0,0,"9\t",,terminal_output +7746,11583370,"TERMINAL",0,0,"10:00\t",,terminal_output +7747,11584337,"TERMINAL",0,0,"1\t",,terminal_output +7748,11585390,"TERMINAL",0,0,"2\t",,terminal_output +7749,11586427,"TERMINAL",0,0,"3\t",,terminal_output +7750,11587470,"TERMINAL",0,0,"4\t",,terminal_output +7751,11588530,"TERMINAL",0,0,"5\t",,terminal_output +7752,11589572,"TERMINAL",0,0,"6\t",,terminal_output +7753,11590622,"TERMINAL",0,0,"7\t",,terminal_output +7754,11591707,"TERMINAL",0,0,"8\t",,terminal_output 
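From here the session cancels the stale job (scancel 3465195) and resubmits the four darkness-filter LAM variants (400M, 311M, 133M, base) with one sbatch call each, yielding jobs 3465675 through 3465678. The same sequence can be expressed as a loop over the size suffixes; the loop is an illustration, as the recording shows four separate commands.

# Sketch: submit all four recorded job variants in one pass.
for size in 400M 311M 133M base; do
    sbatch "slurm/jobs/mihir/horeka/lam/train_lam_minecraft_8node-darkness-filter-${size}.sbatch"
done
squeue --me    # verify the new jobs are queued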
+7755,11592711,"TERMINAL",0,0,"9\t",,terminal_output +7756,11593773,"TERMINAL",0,0,"10\t",,terminal_output +7757,11594809,"TERMINAL",0,0,"1\t",,terminal_output +7758,11595852,"TERMINAL",0,0,"3\t",,terminal_output +7759,11596895,"TERMINAL",0,0,"4\t",,terminal_output +7760,11597946,"TERMINAL",0,0,"5\t",,terminal_output +7761,11599001,"TERMINAL",0,0,"6\t",,terminal_output +7762,11600042,"TERMINAL",0,0,"7\t",,terminal_output +7763,11601089,"TERMINAL",0,0,"8\t",,terminal_output +7764,11602137,"TERMINAL",0,0,"9\t",,terminal_output +7765,11603189,"TERMINAL",0,0,"20\t",,terminal_output +7766,11604226,"TERMINAL",0,0,"1\t",,terminal_output +7767,11605551,"TERMINAL",0,0,"25Resources)678Priority)",,terminal_output +7768,11606603,"TERMINAL",0,0,"3\t",,terminal_output +7769,11607647,"TERMINAL",0,0,"4\t",,terminal_output +7770,11608694,"TERMINAL",0,0,"5\t",,terminal_output +7771,11609739,"TERMINAL",0,0,"6\t",,terminal_output +7772,11610820,"TERMINAL",0,0,"7\t",,terminal_output +7773,11611833,"TERMINAL",0,0,"9\t",,terminal_output +7774,11612879,"TERMINAL",0,0,"30\t",,terminal_output +7775,11613922,"TERMINAL",0,0,"1\t",,terminal_output +7776,11614962,"TERMINAL",0,0,"2\t",,terminal_output +7777,11616003,"TERMINAL",0,0,"3\t",,terminal_output +7778,11617049,"TERMINAL",0,0,"4\t",,terminal_output +7779,11618092,"TERMINAL",0,0,"5\t",,terminal_output +7780,11619140,"TERMINAL",0,0,"6\t",,terminal_output +7781,11620190,"TERMINAL",0,0,"7\t",,terminal_output +7782,11621232,"TERMINAL",0,0,"8\t",,terminal_output +7783,11622280,"TERMINAL",0,0,"9\t",,terminal_output +7784,11623324,"TERMINAL",0,0,"40\t",,terminal_output +7785,11624361,"TERMINAL",0,0,"1\t",,terminal_output +7786,11625461,"TERMINAL",0,0,"2\t",,terminal_output +7787,11626482,"TERMINAL",0,0,"3\t",,terminal_output +7788,11627506,"TERMINAL",0,0,"4\t",,terminal_output +7789,11628634,"TERMINAL",0,0,"5\t",,terminal_output +7790,11629589,"TERMINAL",0,0,"6\t",,terminal_output +7791,11630681,"TERMINAL",0,0,"7\t",,terminal_output +7792,11631713,"TERMINAL",0,0,"8\t",,terminal_output +7793,11632832,"TERMINAL",0,0,"9\t",,terminal_output +7794,11633775,"TERMINAL",0,0,"50\t",,terminal_output +7795,11634812,"TERMINAL",0,0,"1\t",,terminal_output +7796,11635850,"TERMINAL",0,0,"3\t",,terminal_output +7797,11636936,"TERMINAL",0,0,"4\t",,terminal_output +7798,11637962,"TERMINAL",0,0,"5\t",,terminal_output +7799,11639078,"TERMINAL",0,0,"6\t",,terminal_output +7800,11640122,"TERMINAL",0,0,"7\t",,terminal_output +7801,11641124,"TERMINAL",0,0,"8\t",,terminal_output +7802,11642152,"TERMINAL",0,0,"9\t",,terminal_output +7803,11643275,"TERMINAL",0,0,"1:00\t",,terminal_output +7804,11644227,"TERMINAL",0,0,"1\t",,terminal_output +7805,11645383,"TERMINAL",0,0,"2\t",,terminal_output +7806,11646405,"TERMINAL",0,0,"3\t",,terminal_output +7807,11647382,"TERMINAL",0,0,"4\t",,terminal_output +7808,11648505,"TERMINAL",0,0,"5\t",,terminal_output +7809,11649522,"TERMINAL",0,0,"6\t",,terminal_output +7810,11650551,"TERMINAL",0,0,"7\t",,terminal_output +7811,11651585,"TERMINAL",0,0,"8\t",,terminal_output +7812,11652702,"TERMINAL",0,0,"9\t",,terminal_output +7813,11653721,"TERMINAL",0,0,"10\t",,terminal_output +7814,11654721,"TERMINAL",0,0,"1\t",,terminal_output +7815,11655772,"TERMINAL",0,0,"2\t",,terminal_output +7816,11656807,"TERMINAL",0,0,"3\t",,terminal_output +7817,11657853,"TERMINAL",0,0,"5\t",,terminal_output +7818,11658890,"TERMINAL",0,0,"6\t",,terminal_output +7819,11659935,"TERMINAL",0,0,"7\t",,terminal_output +7820,11661304,"TERMINAL",0,0,"8\t",,terminal_output 
+7821,11662259,"TERMINAL",0,0,"9\t",,terminal_output +7822,11663307,"TERMINAL",0,0,"20\t",,terminal_output +7823,11664353,"TERMINAL",0,0,"1\t",,terminal_output +7824,11665498,"TERMINAL",0,0,"2\t",,terminal_output +7825,11666439,"TERMINAL",0,0,"3\t",,terminal_output +7826,11667546,"TERMINAL",0,0,"4\t",,terminal_output +7827,11668620,"TERMINAL",0,0,"5\t",,terminal_output +7828,11669589,"TERMINAL",0,0,"6\t",,terminal_output +7829,11670726,"TERMINAL",0,0,"7\t",,terminal_output +7830,11671702,"TERMINAL",0,0,"8\t",,terminal_output +7831,11672769,"TERMINAL",0,0,"9\t",,terminal_output +7832,11673791,"TERMINAL",0,0,"30\t",,terminal_output +7833,11674830,"TERMINAL",0,0,"1\t",,terminal_output +7834,11675946,"TERMINAL",0,0,"3\t",,terminal_output +7835,11676887,"TERMINAL",0,0,"4\t",,terminal_output +7836,11677950,"TERMINAL",0,0,"5\t",,terminal_output +7837,11678969,"TERMINAL",0,0,"6\t",,terminal_output +7838,11680037,"TERMINAL",0,0,"7\t",,terminal_output +7839,11681061,"TERMINAL",0,0,"8\t",,terminal_output +7840,11682190,"TERMINAL",0,0,"9\t",,terminal_output +7841,11683129,"TERMINAL",0,0,"40\t",,terminal_output +7842,11684172,"TERMINAL",0,0,"1\t",,terminal_output +7843,11685211,"TERMINAL",0,0,"2\t",,terminal_output +7844,11686247,"TERMINAL",0,0,"3\t",,terminal_output +7845,11687283,"TERMINAL",0,0,"4\t",,terminal_output +7846,11688321,"TERMINAL",0,0,"5\t",,terminal_output +7847,11689465,"TERMINAL",0,0,"6\t",,terminal_output +7848,11690394,"TERMINAL",0,0,"7\t",,terminal_output +7849,11691444,"TERMINAL",0,0,"8\t",,terminal_output +7850,11692505,"TERMINAL",0,0,"9\t",,terminal_output +7851,11693530,"TERMINAL",0,0,"50\t",,terminal_output +7852,11694566,"TERMINAL",0,0,"1\t",,terminal_output +7853,11695605,"TERMINAL",0,0,"2\t",,terminal_output +7854,11696640,"TERMINAL",0,0,"3\t",,terminal_output +7855,11697678,"TERMINAL",0,0,"4\t",,terminal_output +7856,11698711,"TERMINAL",0,0,"5\t",,terminal_output +7857,11699751,"TERMINAL",0,0,"6\t",,terminal_output +7858,11700787,"TERMINAL",0,0,"7\t",,terminal_output +7859,11701839,"TERMINAL",0,0,"9\t",,terminal_output +7860,11702860,"TERMINAL",0,0,"2:00\t",,terminal_output +7861,11703896,"TERMINAL",0,0,"1\t",,terminal_output +7862,11704935,"TERMINAL",0,0,"2\t",,terminal_output +7863,11705984,"TERMINAL",0,0,"3\t",,terminal_output +7864,11707008,"TERMINAL",0,0,"4\t",,terminal_output +7865,11708051,"TERMINAL",0,0,"5\t",,terminal_output +7866,11709087,"TERMINAL",0,0,"6\t",,terminal_output +7867,11710128,"TERMINAL",0,0,"7\t",,terminal_output +7868,11711162,"TERMINAL",0,0,"8\t",,terminal_output +7869,11712200,"TERMINAL",0,0,"9\t",,terminal_output +7870,11713238,"TERMINAL",0,0,"10\t",,terminal_output +7871,11714280,"TERMINAL",0,0,"1\t",,terminal_output +7872,11715319,"TERMINAL",0,0,"2\t",,terminal_output +7873,11716357,"TERMINAL",0,0,"3\t",,terminal_output +7874,11717409,"TERMINAL",0,0,"4\t",,terminal_output +7875,11718445,"TERMINAL",0,0,"5\t",,terminal_output +7876,11719481,"TERMINAL",0,0,"6\t",,terminal_output +7877,11720519,"TERMINAL",0,0,"7\t",,terminal_output +7878,11721559,"TERMINAL",0,0,"8\t",,terminal_output +7879,11722595,"TERMINAL",0,0,"9\t",,terminal_output +7880,11723632,"TERMINAL",0,0,"20\t",,terminal_output +7881,11724671,"TERMINAL",0,0,"1\t",,terminal_output +7882,11726696,"TERMINAL",0,0,"2\t",,terminal_output +7883,11727731,"TERMINAL",0,0,"4\t",,terminal_output +7884,11728759,"TERMINAL",0,0,"5\t",,terminal_output +7885,11729800,"TERMINAL",0,0,"6\t",,terminal_output +7886,11730852,"TERMINAL",0,0,"8\t",,terminal_output 
+7887,11731874,"TERMINAL",0,0,"9\t",,terminal_output +7888,11732975,"TERMINAL",0,0,"30\t",,terminal_output +7889,11734012,"TERMINAL",0,0,"1\t",,terminal_output +7890,11735011,"TERMINAL",0,0,"2\t",,terminal_output +7891,11736049,"TERMINAL",0,0,"3\t",,terminal_output +7892,11737096,"TERMINAL",0,0,"4\t",,terminal_output +7893,11738120,"TERMINAL",0,0,"5\t",,terminal_output +7894,11739158,"TERMINAL",0,0,"6\t",,terminal_output +7895,11740208,"TERMINAL",0,0,"7\t",,terminal_output +7896,11741248,"TERMINAL",0,0,"8\t",,terminal_output +7897,11742285,"TERMINAL",0,0,"9\t",,terminal_output +7898,11743314,"TERMINAL",0,0,"40\t",,terminal_output +7899,11744376,"TERMINAL",0,0,"1\t",,terminal_output +7900,11745397,"TERMINAL",0,0,"2\t",,terminal_output +7901,11746435,"TERMINAL",0,0,"3\t",,terminal_output +7902,11747508,"TERMINAL",0,0,"4\t",,terminal_output +7903,11748519,"TERMINAL",0,0,"5\t",,terminal_output +7904,11749561,"TERMINAL",0,0,"6\t",,terminal_output +7905,11750273,"TERMINAL",0,0,"clear",,terminal_command +7906,11750291,"TERMINAL",0,0,"]633;E;2025-09-04 13:12:47 clear;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C]0;tum_cte0515@hkn1990:~/Projects/jasmine_jobs]633;D;0",,terminal_output +7907,11750594,"TERMINAL",0,0,"7\t",,terminal_output +7908,11751633,"TERMINAL",0,0,"8\t",,terminal_output +7909,11752674,"TERMINAL",0,0,"9\t",,terminal_output +7910,11753709,"TERMINAL",0,0,"50\t",,terminal_output +7911,11754748,"TERMINAL",0,0,"1\t",,terminal_output +7912,11755793,"TERMINAL",0,0,"2\t",,terminal_output +7913,11756831,"TERMINAL",0,0,"4\t",,terminal_output +7914,11757864,"TERMINAL",0,0,"5\t",,terminal_output +7915,11758902,"TERMINAL",0,0,"6\t",,terminal_output +7916,11759958,"TERMINAL",0,0,"7\t",,terminal_output +7917,11760982,"TERMINAL",0,0,"8\t",,terminal_output +7918,11762021,"TERMINAL",0,0,"9\t",,terminal_output +7919,11763073,"TERMINAL",0,0,"3:00\t",,terminal_output +7920,11763902,"TERMINAL",0,0,"watch",,terminal_focus +7921,11764099,"TERMINAL",0,0,"1\t",,terminal_output +7922,11765144,"TERMINAL",0,0,"2\t",,terminal_output +7923,11766189,"TERMINAL",0,0,"3\t",,terminal_output +7924,11767230,"TERMINAL",0,0,"4\t",,terminal_output +7925,11768265,"TERMINAL",0,0,"5\t",,terminal_output +7926,11769304,"TERMINAL",0,0,"6\t",,terminal_output +7927,11770344,"TERMINAL",0,0,"7\t",,terminal_output +7928,11771384,"TERMINAL",0,0,"8\t",,terminal_output +7929,11772422,"TERMINAL",0,0,"9\t",,terminal_output +7930,11773465,"TERMINAL",0,0,"10\t",,terminal_output +7931,11774505,"TERMINAL",0,0,"1\t",,terminal_output +7932,11775545,"TERMINAL",0,0,"2\t",,terminal_output +7933,11776584,"TERMINAL",0,0,"3\t",,terminal_output +7934,11777626,"TERMINAL",0,0,"4\t",,terminal_output +7935,11778658,"TERMINAL",0,0,"5\t",,terminal_output +7936,11779700,"TERMINAL",0,0,"6\t",,terminal_output +7937,11780737,"TERMINAL",0,0,"7\t",,terminal_output +7938,11781776,"TERMINAL",0,0,"8\t",,terminal_output +7939,11782814,"TERMINAL",0,0,"9\t",,terminal_output +7940,11783850,"TERMINAL",0,0,"21\t",,terminal_output +7941,11784892,"TERMINAL",0,0,"2\t",,terminal_output +7942,11785936,"TERMINAL",0,0,"3\t",,terminal_output +7943,11786971,"TERMINAL",0,0,"4\t",,terminal_output +7944,11788015,"TERMINAL",0,0,"5\t",,terminal_output +7945,11789052,"TERMINAL",0,0,"6\t",,terminal_output +7946,11790091,"TERMINAL",0,0,"7\t",,terminal_output +7947,11791133,"TERMINAL",0,0,"8\t",,terminal_output +7948,11792171,"TERMINAL",0,0,"9\t",,terminal_output +7949,11793206,"TERMINAL",0,0,"30\t",,terminal_output +7950,11794249,"TERMINAL",0,0,"1\t",,terminal_output 
+7951,11795292,"TERMINAL",0,0,"2\t",,terminal_output +7952,11796328,"TERMINAL",0,0,"3\t",,terminal_output +7953,11797406,"TERMINAL",0,0,"4\t",,terminal_output +7954,11798445,"TERMINAL",0,0,"5\t",,terminal_output +7955,11799485,"TERMINAL",0,0,"6\t",,terminal_output +7956,11800532,"TERMINAL",0,0,"7\t",,terminal_output +7957,11801570,"TERMINAL",0,0,"8\t",,terminal_output +7958,11802614,"TERMINAL",0,0,"9\t",,terminal_output +7959,11803648,"TERMINAL",0,0,"40\t",,terminal_output +7960,11804685,"TERMINAL",0,0,"1\t",,terminal_output +7961,11805722,"TERMINAL",0,0,"2\t",,terminal_output +7962,11806762,"TERMINAL",0,0,"3\t",,terminal_output +7963,11807809,"TERMINAL",0,0,"4\t",,terminal_output +7964,11808844,"TERMINAL",0,0,"6\t",,terminal_output +7965,11809885,"TERMINAL",0,0,"7\t",,terminal_output +7966,11810923,"TERMINAL",0,0,"8\t",,terminal_output +7967,11811957,"TERMINAL",0,0,"9\t",,terminal_output +7968,11813005,"TERMINAL",0,0,"50\t",,terminal_output +7969,11814039,"TERMINAL",0,0,"1\t",,terminal_output +7970,11815078,"TERMINAL",0,0,"2\t",,terminal_output +7971,11816116,"TERMINAL",0,0,"3\t",,terminal_output +7972,11817162,"TERMINAL",0,0,"4\t",,terminal_output +7973,11818197,"TERMINAL",0,0,"5\t",,terminal_output +7974,11819248,"TERMINAL",0,0,"6\t",,terminal_output +7975,11820270,"TERMINAL",0,0,"7\t",,terminal_output +7976,11821309,"TERMINAL",0,0,"8\t",,terminal_output +7977,11822345,"TERMINAL",0,0,"9\t",,terminal_output +7978,11823387,"TERMINAL",0,0,"4:00\t",,terminal_output +7979,11824431,"TERMINAL",0,0,"1\t",,terminal_output +7980,11825468,"TERMINAL",0,0,"2\t",,terminal_output +7981,11826509,"TERMINAL",0,0,"3\t",,terminal_output +7982,11827545,"TERMINAL",0,0,"4\t",,terminal_output +7983,11828583,"TERMINAL",0,0,"5\t",,terminal_output +7984,11829622,"TERMINAL",0,0,"6\t",,terminal_output +7985,11830669,"TERMINAL",0,0,"7\t",,terminal_output +7986,11831708,"TERMINAL",0,0,"8\t",,terminal_output +7987,11832766,"TERMINAL",0,0,"9\t",,terminal_output +7988,11833791,"TERMINAL",0,0,"10\t",,terminal_output +7989,11834825,"TERMINAL",0,0,"2\t",,terminal_output +7990,11835883,"TERMINAL",0,0,"3\t",,terminal_output +7991,11836909,"TERMINAL",0,0,"4\t",,terminal_output +7992,11837948,"TERMINAL",0,0,"5\t",,terminal_output +7993,11838984,"TERMINAL",0,0,"6\t",,terminal_output +7994,11840020,"TERMINAL",0,0,"7\t",,terminal_output +7995,11841061,"TERMINAL",0,0,"8\t",,terminal_output +7996,11842136,"TERMINAL",0,0,"9\t",,terminal_output +7997,11843131,"TERMINAL",0,0,"20\t",,terminal_output +7998,11844171,"TERMINAL",0,0,"1\t",,terminal_output +7999,11845215,"TERMINAL",0,0,"2\t",,terminal_output +8000,11846256,"TERMINAL",0,0,"3\t",,terminal_output +8001,11847889,"TERMINAL",0,0,"4\t",,terminal_output +8002,11848905,"TERMINAL",0,0,"6\t",,terminal_output +8003,11849933,"TERMINAL",0,0,"7\t",,terminal_output +8004,11850964,"TERMINAL",0,0,"8\t",,terminal_output +8005,11852001,"TERMINAL",0,0,"9\t",,terminal_output +8006,11853045,"TERMINAL",0,0,"30\t",,terminal_output +8007,11854076,"TERMINAL",0,0,"1\t",,terminal_output +8008,11855114,"TERMINAL",0,0,"2\t",,terminal_output +8009,11856157,"TERMINAL",0,0,"3\t",,terminal_output +8010,11857200,"TERMINAL",0,0,"4\t",,terminal_output +8011,11858237,"TERMINAL",0,0,"5\t",,terminal_output +8012,11859271,"TERMINAL",0,0,"6\t",,terminal_output +8013,11860315,"TERMINAL",0,0,"7\t",,terminal_output +8014,11861376,"TERMINAL",0,0,"8\t",,terminal_output +8015,11862449,"TERMINAL",0,0,"9\t",,terminal_output +8016,11863464,"TERMINAL",0,0,"40\t",,terminal_output 
+8017,11864500,"TERMINAL",0,0,"1\t",,terminal_output +8018,11865542,"TERMINAL",0,0,"2\t",,terminal_output +8019,11866576,"TERMINAL",0,0,"3\t",,terminal_output +8020,11867615,"TERMINAL",0,0,"4\t",,terminal_output +8021,11868650,"TERMINAL",0,0,"5\t",,terminal_output +8022,11869695,"TERMINAL",0,0,"6\t",,terminal_output +8023,11870733,"TERMINAL",0,0,"7\t",,terminal_output +8024,11871773,"TERMINAL",0,0,"8\t",,terminal_output +8025,11872808,"TERMINAL",0,0,"9\t",,terminal_output +8026,11873845,"TERMINAL",0,0,"51\t",,terminal_output +8027,11874889,"TERMINAL",0,0,"2\t",,terminal_output +8028,11875934,"TERMINAL",0,0,"3\t",,terminal_output +8029,11876992,"TERMINAL",0,0,"4\t",,terminal_output +8030,11878020,"TERMINAL",0,0,"5\t",,terminal_output +8031,11879051,"TERMINAL",0,0,"6\t",,terminal_output +8032,11880091,"TERMINAL",0,0,"7\t",,terminal_output +8033,11881133,"TERMINAL",0,0,"8\t",,terminal_output +8034,11882172,"TERMINAL",0,0,"9\t",,terminal_output +8035,11883210,"TERMINAL",0,0,"5:00\t",,terminal_output +8036,11884246,"TERMINAL",0,0,"1\t",,terminal_output +8037,11885281,"TERMINAL",0,0,"2\t",,terminal_output +8038,11886327,"TERMINAL",0,0,"3\t",,terminal_output +8039,11887365,"TERMINAL",0,0,"4\t",,terminal_output +8040,11888405,"TERMINAL",0,0,"5\t",,terminal_output +8041,11889447,"TERMINAL",0,0,"6\t",,terminal_output +8042,11890489,"TERMINAL",0,0,"7\t",,terminal_output +8043,11891523,"TERMINAL",0,0,"8\t",,terminal_output +8044,11892569,"TERMINAL",0,0,"9\t",,terminal_output +8045,11893609,"TERMINAL",0,0,"10\t",,terminal_output +8046,11894647,"TERMINAL",0,0,"1\t",,terminal_output +8047,11895684,"TERMINAL",0,0,"2\t",,terminal_output +8048,11896723,"TERMINAL",0,0,"3\t",,terminal_output +8049,11897767,"TERMINAL",0,0,"4\t",,terminal_output +8050,11898809,"TERMINAL",0,0,"5\t",,terminal_output +8051,11899847,"TERMINAL",0,0,"7\t",,terminal_output +8052,11900886,"TERMINAL",0,0,"8\t",,terminal_output +8053,11901931,"TERMINAL",0,0,"9\t",,terminal_output +8054,11902968,"TERMINAL",0,0,"20\t",,terminal_output +8055,11904007,"TERMINAL",0,0,"1\t",,terminal_output +8056,11905056,"TERMINAL",0,0,"2\t",,terminal_output +8057,11906091,"TERMINAL",0,0,"3\t",,terminal_output +8058,11907140,"TERMINAL",0,0,"4\t",,terminal_output +8059,11908174,"TERMINAL",0,0,"5\t",,terminal_output +8060,11909226,"TERMINAL",0,0,"6\t",,terminal_output +8061,11910265,"TERMINAL",0,0,"7\t",,terminal_output +8062,11911318,"TERMINAL",0,0,"8\t",,terminal_output +8063,11912353,"TERMINAL",0,0,"9\t",,terminal_output +8064,11913387,"TERMINAL",0,0,"30\t",,terminal_output +8065,11914427,"TERMINAL",0,0,"1\t",,terminal_output +8066,11915468,"TERMINAL",0,0,"2\t",,terminal_output +8067,11916504,"TERMINAL",0,0,"3\t",,terminal_output +8068,11917541,"TERMINAL",0,0,"4\t",,terminal_output +8069,11918582,"TERMINAL",0,0,"5\t",,terminal_output +8070,11919617,"TERMINAL",0,0,"6\t",,terminal_output +8071,11920654,"TERMINAL",0,0,"7\t",,terminal_output +8072,11921699,"TERMINAL",0,0,"8\t",,terminal_output +8073,11922737,"TERMINAL",0,0,"9\t",,terminal_output +8074,11923771,"TERMINAL",0,0,"40\t",,terminal_output +8075,11924819,"TERMINAL",0,0,"1\t",,terminal_output +8076,11925860,"TERMINAL",0,0,"3\t",,terminal_output +8077,11926893,"TERMINAL",0,0,"4\t",,terminal_output +8078,11927931,"TERMINAL",0,0,"5\t",,terminal_output +8079,11928978,"TERMINAL",0,0,"6\t",,terminal_output +8080,11930014,"TERMINAL",0,0,"7\t",,terminal_output +8081,11931074,"TERMINAL",0,0,"8\t",,terminal_output +8082,11932091,"TERMINAL",0,0,"9\t",,terminal_output 
+8083,11933150,"TERMINAL",0,0,"50\t",,terminal_output +8084,11934189,"TERMINAL",0,0,"1\t",,terminal_output +8085,11935236,"TERMINAL",0,0,"2\t",,terminal_output +8086,11936254,"TERMINAL",0,0,"3\t",,terminal_output +8087,11937305,"TERMINAL",0,0,"4\t",,terminal_output +8088,11938336,"TERMINAL",0,0,"5\t",,terminal_output +8089,11939372,"TERMINAL",0,0,"6\t",,terminal_output +8090,11940411,"TERMINAL",0,0,"7\t",,terminal_output +8091,11941458,"TERMINAL",0,0,"8\t",,terminal_output +8092,11942499,"TERMINAL",0,0,"9\t",,terminal_output +8093,11943543,"TERMINAL",0,0,"6:00\t",,terminal_output +8094,11944586,"TERMINAL",0,0,"1\t",,terminal_output +8095,11945626,"TERMINAL",0,0,"2\t",,terminal_output +8096,11946703,"TERMINAL",0,0,"3\t",,terminal_output +8097,11947725,"TERMINAL",0,0,"4\t",,terminal_output +8098,11948777,"TERMINAL",0,0,"5\t",,terminal_output +8099,11949784,"TERMINAL",0,0,"6\t",,terminal_output +8100,11950833,"TERMINAL",0,0,"8\t",,terminal_output +8101,11951867,"TERMINAL",0,0,"9\t",,terminal_output +8102,11952907,"TERMINAL",0,0,"10\t",,terminal_output +8103,11953961,"TERMINAL",0,0,"1\t",,terminal_output +8104,11954990,"TERMINAL",0,0,"2\t",,terminal_output +8105,11956027,"TERMINAL",0,0,"3\t",,terminal_output +8106,11957072,"TERMINAL",0,0,"4\t",,terminal_output +8107,11958109,"TERMINAL",0,0,"5\t",,terminal_output +8108,11959147,"TERMINAL",0,0,"6\t",,terminal_output +8109,11960191,"TERMINAL",0,0,"7\t",,terminal_output +8110,11961227,"TERMINAL",0,0,"8\t",,terminal_output +8111,11962679,"TERMINAL",0,0,"9\t",,terminal_output +8112,11963727,"TERMINAL",0,0,"20\t",,terminal_output +8113,11964814,"TERMINAL",0,0,"1\t",,terminal_output +8114,11965826,"TERMINAL",0,0,"2\t",,terminal_output +8115,11966876,"TERMINAL",0,0,"4\t",,terminal_output +8116,11969247,"TERMINAL",0,0,"5\t",,terminal_output +8117,11970251,"TERMINAL",0,0,"7\t",,terminal_output +8118,11971302,"TERMINAL",0,0,"8\t",,terminal_output +8119,11972339,"TERMINAL",0,0,"9\t",,terminal_output +8120,11973385,"TERMINAL",0,0,"30\t",,terminal_output +8121,11974428,"TERMINAL",0,0,"1\t",,terminal_output +8122,11975513,"TERMINAL",0,0,"2\t",,terminal_output +8123,11976544,"TERMINAL",0,0,"3\t",,terminal_output +8124,11977552,"TERMINAL",0,0,"4\t",,terminal_output +8125,11978591,"TERMINAL",0,0,"5\t",,terminal_output +8126,11979635,"TERMINAL",0,0,"6\t",,terminal_output +8127,11980683,"TERMINAL",0,0,"7\t",,terminal_output +8128,11981719,"TERMINAL",0,0,"8\t",,terminal_output +8129,11982755,"TERMINAL",0,0,"9\t",,terminal_output +8130,11983807,"TERMINAL",0,0,"40\t",,terminal_output +8131,11984834,"TERMINAL",0,0,"2\t",,terminal_output +8132,11985872,"TERMINAL",0,0,"3\t",,terminal_output +8133,11986934,"TERMINAL",0,0,"4\t",,terminal_output +8134,11987959,"TERMINAL",0,0,"5\t",,terminal_output +8135,11989014,"TERMINAL",0,0,"6\t",,terminal_output +8136,11990053,"TERMINAL",0,0,"7\t",,terminal_output +8137,11991089,"TERMINAL",0,0,"8\t",,terminal_output +8138,11992148,"TERMINAL",0,0,"9\t",,terminal_output +8139,11993175,"TERMINAL",0,0,"50\t",,terminal_output +8140,11994212,"TERMINAL",0,0,"1\t",,terminal_output +8141,11995247,"TERMINAL",0,0,"2\t",,terminal_output +8142,11996283,"TERMINAL",0,0,"3\t",,terminal_output +8143,11997330,"TERMINAL",0,0,"4\t",,terminal_output +8144,11998366,"TERMINAL",0,0,"5\t",,terminal_output +8145,11999408,"TERMINAL",0,0,"6\t",,terminal_output +8146,12000446,"TERMINAL",0,0,"7\t",,terminal_output +8147,12001481,"TERMINAL",0,0,"8\t",,terminal_output +8148,12002519,"TERMINAL",0,0,"9\t",,terminal_output 
+8149,12003564,"TERMINAL",0,0,"7:00\t",,terminal_output +8150,12004610,"TERMINAL",0,0,"1\t",,terminal_output +8151,12005659,"TERMINAL",0,0,"2\t",,terminal_output +8152,12006697,"TERMINAL",0,0,"3\t",,terminal_output +8153,12007732,"TERMINAL",0,0,"4\t",,terminal_output +8154,12008770,"TERMINAL",0,0,"5\t",,terminal_output +8155,12009806,"TERMINAL",0,0,"6\t",,terminal_output +8156,12010844,"TERMINAL",0,0,"8\t",,terminal_output +8157,12011891,"TERMINAL",0,0,"9\t",,terminal_output +8158,12012919,"TERMINAL",0,0,"10\t",,terminal_output +8159,12013963,"TERMINAL",0,0,"1\t",,terminal_output +8160,12015001,"TERMINAL",0,0,"2\t",,terminal_output +8161,12016046,"TERMINAL",0,0,"3\t",,terminal_output +8162,12017083,"TERMINAL",0,0,"4\t",,terminal_output +8163,12018120,"TERMINAL",0,0,"5\t",,terminal_output +8164,12019155,"TERMINAL",0,0,"6\t",,terminal_output +8165,12020193,"TERMINAL",0,0,"7\t",,terminal_output +8166,12021239,"TERMINAL",0,0,"8\t",,terminal_output +8167,12022287,"TERMINAL",0,0,"9\t",,terminal_output +8168,12023317,"TERMINAL",0,0,"20\t",,terminal_output +8169,12024361,"TERMINAL",0,0,"1\t",,terminal_output +8170,12025400,"TERMINAL",0,0,"2\t",,terminal_output +8171,12026439,"TERMINAL",0,0,"3\t",,terminal_output +8172,12027477,"TERMINAL",0,0,"4\t",,terminal_output +8173,12028511,"TERMINAL",0,0,"5\t",,terminal_output +8174,12029548,"TERMINAL",0,0,"6\t",,terminal_output +8175,12030589,"TERMINAL",0,0,"7\t",,terminal_output +8176,12031626,"TERMINAL",0,0,"8\t",,terminal_output +8177,12032662,"TERMINAL",0,0,"9\t",,terminal_output +8178,12033712,"TERMINAL",0,0,"30\t",,terminal_output +8179,12034743,"TERMINAL",0,0,"1\t",,terminal_output +8180,12035780,"TERMINAL",0,0,"2\t",,terminal_output +8181,12036844,"TERMINAL",0,0,"4\t",,terminal_output +8182,12037898,"TERMINAL",0,0,"5\t",,terminal_output +8183,12038923,"TERMINAL",0,0,"6\t",,terminal_output +8184,12039951,"TERMINAL",0,0,"7\t",,terminal_output +8185,12040991,"TERMINAL",0,0,"8\t",,terminal_output +8186,12042032,"TERMINAL",0,0,"9\t",,terminal_output +8187,12043068,"TERMINAL",0,0,"40\t",,terminal_output +8188,12044107,"TERMINAL",0,0,"1\t",,terminal_output +8189,12045153,"TERMINAL",0,0,"2\t",,terminal_output +8190,12046199,"TERMINAL",0,0,"3\t",,terminal_output +8191,12047238,"TERMINAL",0,0,"4\t",,terminal_output +8192,12048273,"TERMINAL",0,0,"5\t",,terminal_output +8193,12049312,"TERMINAL",0,0,"6\t",,terminal_output +8194,12050360,"TERMINAL",0,0,"7\t",,terminal_output +8195,12051400,"TERMINAL",0,0,"8\t",,terminal_output +8196,12052437,"TERMINAL",0,0,"9\t",,terminal_output +8197,12053477,"TERMINAL",0,0,"50\t",,terminal_output +8198,12054525,"TERMINAL",0,0,"1\t",,terminal_output +8199,12055568,"TERMINAL",0,0,"2\t",,terminal_output +8200,12056605,"TERMINAL",0,0,"3\t",,terminal_output +8201,12057652,"TERMINAL",0,0,"4\t",,terminal_output +8202,12058686,"TERMINAL",0,0,"5\t",,terminal_output +8203,12059737,"TERMINAL",0,0,"6\t",,terminal_output +8204,12060771,"TERMINAL",0,0,"7\t",,terminal_output +8205,12061804,"TERMINAL",0,0,"8\t",,terminal_output +8206,12062842,"TERMINAL",0,0,"8:00\t",,terminal_output +8207,12063893,"TERMINAL",0,0,"1\t",,terminal_output +8208,12064936,"TERMINAL",0,0,"2\t",,terminal_output +8209,12065979,"TERMINAL",0,0,"3\t",,terminal_output +8210,12067006,"TERMINAL",0,0,"4\t",,terminal_output +8211,12068053,"TERMINAL",0,0,"5\t",,terminal_output +8212,12069099,"TERMINAL",0,0,"6\t",,terminal_output +8213,12070139,"TERMINAL",0,0,"7\t",,terminal_output +8214,12071175,"TERMINAL",0,0,"8\t",,terminal_output 
+8215,12072227,"TERMINAL",0,0,"9\t",,terminal_output +8216,12073265,"TERMINAL",0,0,"10\t",,terminal_output +8217,12074316,"TERMINAL",0,0,"1\t",,terminal_output +8218,12075356,"TERMINAL",0,0,"2\t",,terminal_output +8219,12076386,"TERMINAL",0,0,"3\t",,terminal_output +8220,12077423,"TERMINAL",0,0,"4\t",,terminal_output +8221,12078469,"TERMINAL",0,0,"5\t",,terminal_output +8222,12079507,"TERMINAL",0,0,"6\t",,terminal_output +8223,12080546,"TERMINAL",0,0,"7\t",,terminal_output +8224,12081581,"TERMINAL",0,0,"8\t",,terminal_output +8225,12082628,"TERMINAL",0,0,"9\t",,terminal_output +8226,12083665,"TERMINAL",0,0,"20\t",,terminal_output +8227,12084703,"TERMINAL",0,0,"1\t",,terminal_output +8228,12085737,"TERMINAL",0,0,"2\t",,terminal_output +8229,12086776,"TERMINAL",0,0,"3\t",,terminal_output +8230,12087816,"TERMINAL",0,0,"4\t",,terminal_output +8231,12088855,"TERMINAL",0,0,"6\t",,terminal_output +8232,12090378,"TERMINAL",0,0,"7\t",,terminal_output +8233,12091468,"TERMINAL",0,0,"8\t",,terminal_output +8234,12092470,"TERMINAL",0,0,"9\t",,terminal_output +8235,12093503,"TERMINAL",0,0,"30\t",,terminal_output +8236,12094564,"TERMINAL",0,0,"1\t",,terminal_output +8237,12095592,"TERMINAL",0,0,"2\t",,terminal_output +8238,12096640,"TERMINAL",0,0,"3\t",,terminal_output +8239,12097677,"TERMINAL",0,0,"4\t",,terminal_output +8240,12098726,"TERMINAL",0,0,"5\t",,terminal_output +8241,12099771,"TERMINAL",0,0,"6\t",,terminal_output +8242,12100837,"TERMINAL",0,0,"7\t",,terminal_output +8243,12101877,"TERMINAL",0,0,"9\t",,terminal_output +8244,12102919,"TERMINAL",0,0,"40\t",,terminal_output +8245,12103974,"TERMINAL",0,0,"1\t",,terminal_output +8246,12105001,"TERMINAL",0,0,"2\t",,terminal_output +8247,12106040,"TERMINAL",0,0,"3\t",,terminal_output +8248,12107085,"TERMINAL",0,0,"4\t",,terminal_output +8249,12108124,"TERMINAL",0,0,"5\t",,terminal_output +8250,12109168,"TERMINAL",0,0,"6\t",,terminal_output +8251,12110219,"TERMINAL",0,0,"7\t",,terminal_output +8252,12111263,"TERMINAL",0,0,"8\t",,terminal_output +8253,12112303,"TERMINAL",0,0,"9\t",,terminal_output +8254,12113350,"TERMINAL",0,0,"50\t",,terminal_output +8255,12114395,"TERMINAL",0,0,"1\t",,terminal_output +8256,12115441,"TERMINAL",0,0,"2\t",,terminal_output +8257,12116487,"TERMINAL",0,0,"3\t",,terminal_output +8258,12117532,"TERMINAL",0,0,"4\t",,terminal_output +8259,12118569,"TERMINAL",0,0,"5\t",,terminal_output +8260,12119609,"TERMINAL",0,0,"6\t",,terminal_output +8261,12120646,"TERMINAL",0,0,"7\t",,terminal_output +8262,12121723,"TERMINAL",0,0,"8\t",,terminal_output +8263,12122728,"TERMINAL",0,0,"9\t",,terminal_output +8264,12123768,"TERMINAL",0,0,"9:00\t",,terminal_output +8265,12124807,"TERMINAL",0,0,"1\t",,terminal_output +8266,12125867,"TERMINAL",0,0,"3\t",,terminal_output +8267,12126894,"TERMINAL",0,0,"4\t",,terminal_output +8268,12127927,"TERMINAL",0,0,"5\t",,terminal_output +8269,12128996,"TERMINAL",0,0,"6\t",,terminal_output +8270,12130011,"TERMINAL",0,0,"7\t",,terminal_output +8271,12131051,"TERMINAL",0,0,"8\t",,terminal_output +8272,12132084,"TERMINAL",0,0,"9\t",,terminal_output +8273,12133131,"TERMINAL",0,0,"10\t",,terminal_output +8274,12134167,"TERMINAL",0,0,"1\t",,terminal_output +8275,12135216,"TERMINAL",0,0,"2\t",,terminal_output +8276,12136251,"TERMINAL",0,0,"3\t",,terminal_output +8277,12137289,"TERMINAL",0,0,"4\t",,terminal_output +8278,12138322,"TERMINAL",0,0,"5\t",,terminal_output +8279,12139359,"TERMINAL",0,0,"6\t",,terminal_output +8280,12140402,"TERMINAL",0,0,"7\t",,terminal_output 
+8281,12141445,"TERMINAL",0,0,"8\t",,terminal_output +8282,12142502,"TERMINAL",0,0,"9\t",,terminal_output +8283,12143521,"TERMINAL",0,0,"20\t",,terminal_output +8284,12144563,"TERMINAL",0,0,"1\t",,terminal_output +8285,12145608,"TERMINAL",0,0,"2\t",,terminal_output +8286,12146644,"TERMINAL",0,0,"3\t",,terminal_output +8287,12147687,"TERMINAL",0,0,"4\t",,terminal_output +8288,12148722,"TERMINAL",0,0,"5\t",,terminal_output +8289,12149759,"TERMINAL",0,0,"6\t",,terminal_output +8290,12150809,"TERMINAL",0,0,"7\t",,terminal_output +8291,12151849,"TERMINAL",0,0,"9\t",,terminal_output +8292,12152884,"TERMINAL",0,0,"30\t",,terminal_output +8293,12153918,"TERMINAL",0,0,"1\t",,terminal_output +8294,12154972,"TERMINAL",0,0,"2\t",,terminal_output +8295,12155999,"TERMINAL",0,0,"3\t",,terminal_output +8296,12157038,"TERMINAL",0,0,"4\t",,terminal_output +8297,12158080,"TERMINAL",0,0,"5\t",,terminal_output +8298,12159117,"TERMINAL",0,0,"6\t",,terminal_output +8299,12160157,"TERMINAL",0,0,"7\t",,terminal_output +8300,12161195,"TERMINAL",0,0,"8\t",,terminal_output +8301,12162244,"TERMINAL",0,0,"9\t",,terminal_output +8302,12163279,"TERMINAL",0,0,"40\t",,terminal_output +8303,12164315,"TERMINAL",0,0,"1\t",,terminal_output +8304,12165383,"TERMINAL",0,0,"2\t",,terminal_output +8305,12166421,"TERMINAL",0,0,"3\t",,terminal_output +8306,12167459,"TERMINAL",0,0,"4\t",,terminal_output +8307,12168494,"TERMINAL",0,0,"5\t",,terminal_output +8308,12169539,"TERMINAL",0,0,"6\t",,terminal_output +8309,12170570,"TERMINAL",0,0,"7\t",,terminal_output +8310,12171650,"TERMINAL",0,0,"8\t",,terminal_output +8311,12172644,"TERMINAL",0,0,"9\t",,terminal_output +8312,12173693,"TERMINAL",0,0,"50\t",,terminal_output +8313,12174075,"generate_dataset.py",0,0,"",python,tab +8314,12174734,"TERMINAL",0,0,"1\t",,terminal_output +8315,12175780,"TERMINAL",0,0,"2\t",,terminal_output +8316,12176825,"TERMINAL",0,0,"3\t",,terminal_output +8317,12177878,"TERMINAL",0,0,"5\t",,terminal_output +8318,12178912,"TERMINAL",0,0,"6\t",,terminal_output +8319,12179959,"TERMINAL",0,0,"7\t",,terminal_output +8320,12181005,"TERMINAL",0,0,"8\t",,terminal_output +8321,12182051,"TERMINAL",0,0,"9\t",,terminal_output +8322,12183100,"TERMINAL",0,0,"20:00\t",,terminal_output +8323,12184136,"TERMINAL",0,0,"1\t",,terminal_output +8324,12185190,"TERMINAL",0,0,"2\t",,terminal_output +8325,12186233,"TERMINAL",0,0,"3\t",,terminal_output +8326,12187273,"TERMINAL",0,0,"4\t",,terminal_output +8327,12188322,"TERMINAL",0,0,"5\t",,terminal_output +8328,12189363,"TERMINAL",0,0,"6\t",,terminal_output +8329,12189935,"generate_dataset.py",1389,0,"",python,selection_mouse +8330,12190173,"generate_dataset.py",1389,12,"episode_path",python,selection_mouse +8331,12190401,"TERMINAL",0,0,"7\t",,terminal_output +8332,12190549,"generate_dataset.py",1459,0,"",python,selection_mouse +8333,12190705,"generate_dataset.py",1451,17,"observations_data",python,selection_mouse +8334,12191444,"TERMINAL",0,0,"8\t",,terminal_output +8335,12192469,"TERMINAL",0,0,"9\t",,terminal_output +8336,12193407,"generate_dataset.py",1192,0,"",python,selection_mouse +8337,12193503,"TERMINAL",0,0,"10\t",,terminal_output +8338,12194048,"generate_dataset.py",1184,17,"observations_data",python,selection_mouse +8339,12194544,"TERMINAL",0,0,"1\t",,terminal_output +8340,12195573,"TERMINAL",0,0,"2\t",,terminal_output +8341,12196607,"TERMINAL",0,0,"3\t",,terminal_output +8342,12197645,"TERMINAL",0,0,"4\t",,terminal_output +8343,12198387,"generate_dataset.py",1195,0,"",python,selection_mouse 
+8344,12198388,"generate_dataset.py",1184,17,"observations_data",python,selection_mouse +8345,12198686,"TERMINAL",0,0,"5\t",,terminal_output +8346,12199722,"TERMINAL",0,0,"6\t",,terminal_output +8347,12200762,"TERMINAL",0,0,"7\t",,terminal_output +8348,12201797,"TERMINAL",0,0,"8\t",,terminal_output +8349,12202837,"TERMINAL",0,0,"20\t",,terminal_output +8350,12203877,"TERMINAL",0,0,"1\t",,terminal_output +8351,12204915,"TERMINAL",0,0,"2\t",,terminal_output +8352,12205956,"TERMINAL",0,0,"3\t",,terminal_output +8353,12207003,"TERMINAL",0,0,"4\t",,terminal_output +8354,12208039,"TERMINAL",0,0,"5\t",,terminal_output +8355,12209100,"TERMINAL",0,0,"6\t",,terminal_output +8356,12210124,"TERMINAL",0,0,"7\t",,terminal_output +8357,12211551,"TERMINAL",0,0,"8\t",,terminal_output +8358,12212589,"TERMINAL",0,0,"9\t",,terminal_output +8359,12213631,"TERMINAL",0,0,"30\t",,terminal_output +8360,12214670,"TERMINAL",0,0,"1\t",,terminal_output +8361,12215711,"TERMINAL",0,0,"2\t",,terminal_output +8362,12216747,"TERMINAL",0,0,"3\t",,terminal_output +8363,12217788,"TERMINAL",0,0,"4\t",,terminal_output +8364,12218876,"TERMINAL",0,0,"6\t",,terminal_output +8365,12219915,"TERMINAL",0,0,"7\t",,terminal_output +8366,12220953,"TERMINAL",0,0,"8\t",,terminal_output +8367,12221993,"TERMINAL",0,0,"9\t",,terminal_output +8368,12223002,"TERMINAL",0,0,"40\t",,terminal_output +8369,12224074,"TERMINAL",0,0,"1\t",,terminal_output +8370,12225115,"TERMINAL",0,0,"2\t",,terminal_output +8371,12226156,"TERMINAL",0,0,"3\t",,terminal_output +8372,12227167,"TERMINAL",0,0,"4\t",,terminal_output +8373,12228249,"TERMINAL",0,0,"5\t",,terminal_output +8374,12229289,"TERMINAL",0,0,"6\t",,terminal_output +8375,12230331,"TERMINAL",0,0,"7\t",,terminal_output +8376,12231327,"TERMINAL",0,0,"8\t",,terminal_output +8377,12232373,"TERMINAL",0,0,"9\t",,terminal_output +8378,12233400,"TERMINAL",0,0,"50\t",,terminal_output +8379,12234525,"TERMINAL",0,0,"1\t",,terminal_output +8380,12235559,"TERMINAL",0,0,"2\t",,terminal_output +8381,12237361,"TERMINAL",0,0,"3\t",,terminal_output +8382,12237560,"TERMINAL",0,0,"4\t",,terminal_output +8383,12238632,"TERMINAL",0,0,"5\t",,terminal_output +8384,12239673,"TERMINAL",0,0,"6\t",,terminal_output +8385,12240667,"TERMINAL",0,0,"7\t",,terminal_output +8386,12241702,"TERMINAL",0,0,"8\t",,terminal_output +8387,12242773,"TERMINAL",0,0,"9\t",,terminal_output +8388,12243641,"generate_dataset.py",1587,0,"",python,selection_mouse +8389,12243664,"generate_dataset.py",1586,0,"",python,selection_command +8390,12243772,"TERMINAL",0,0,"1:00\t",,terminal_output +8391,12244179,"generate_dataset.py",1311,0,"",python,selection_mouse +8392,12244817,"TERMINAL",0,0,"1\t",,terminal_output +8393,12245898,"TERMINAL",0,0,"3\t",,terminal_output +8394,12246938,"TERMINAL",0,0,"4\t",,terminal_output +8395,12247937,"TERMINAL",0,0,"5\t",,terminal_output +8396,12249139,"TERMINAL",0,0,"6\t",,terminal_output +8397,12250034,"TERMINAL",0,0,"7\t",,terminal_output +8398,12251186,"TERMINAL",0,0,"8\t",,terminal_output +8399,12252136,"TERMINAL",0,0,"9\t",,terminal_output +8400,12253175,"TERMINAL",0,0,"10\t",,terminal_output +8401,12254176,"TERMINAL",0,0,"1\t",,terminal_output +8402,12255221,"TERMINAL",0,0,"2\t",,terminal_output +8403,12256254,"TERMINAL",0,0,"3\t",,terminal_output +8404,12257291,"TERMINAL",0,0,"4\t",,terminal_output +8405,12258336,"TERMINAL",0,0,"5\t",,terminal_output +8406,12259369,"TERMINAL",0,0,"6\t",,terminal_output +8407,12260414,"TERMINAL",0,0,"7\t",,terminal_output +8408,12261464,"TERMINAL",0,0,"8\t",,terminal_output 
+8409,12262496,"TERMINAL",0,0,"9\t",,terminal_output +8410,12263531,"TERMINAL",0,0,"20\t",,terminal_output +8411,12265294,"TERMINAL",0,0,"1\t",,terminal_output +8412,12266332,"TERMINAL",0,0,"3\t",,terminal_output +8413,12267388,"TERMINAL",0,0,"4\t",,terminal_output +8414,12268465,"TERMINAL",0,0,"5\t",,terminal_output +8415,12269509,"TERMINAL",0,0,"6\t",,terminal_output +8416,12270533,"TERMINAL",0,0,"7\t",,terminal_output +8417,12271585,"TERMINAL",0,0,"8\t",,terminal_output +8418,12272600,"TERMINAL",0,0,"9\t",,terminal_output +8419,12273633,"TERMINAL",0,0,"30\t",,terminal_output +8420,12274713,"TERMINAL",0,0,"1\t",,terminal_output +8421,12275727,"TERMINAL",0,0,"2\t",,terminal_output +8422,12276760,"TERMINAL",0,0,"3\t",,terminal_output +8423,12277798,"TERMINAL",0,0,"4\t",,terminal_output +8424,12278888,"TERMINAL",0,0,"6\t",,terminal_output +8425,12279922,"TERMINAL",0,0,"7\t",,terminal_output +8426,12280949,"TERMINAL",0,0,"8\t",,terminal_output +8427,12281957,"TERMINAL",0,0,"9\t",,terminal_output +8428,12283004,"TERMINAL",0,0,"40\t",,terminal_output +8429,12284049,"TERMINAL",0,0,"1\t",,terminal_output +8430,12285082,"TERMINAL",0,0,"2\t",,terminal_output +8431,12286163,"TERMINAL",0,0,"3\t",,terminal_output +8432,12287159,"TERMINAL",0,0,"4\t",,terminal_output +8433,12288197,"TERMINAL",0,0,"5\t",,terminal_output +8434,12289234,"TERMINAL",0,0,"6\t",,terminal_output +8435,12290273,"TERMINAL",0,0,"7\t",,terminal_output +8436,12291303,"TERMINAL",0,0,"8\t",,terminal_output +8437,12292343,"TERMINAL",0,0,"9\t",,terminal_output +8438,12293448,"TERMINAL",0,0,"50\t",,terminal_output +8439,12294445,"TERMINAL",0,0,"1\t",,terminal_output +8440,12295480,"TERMINAL",0,0,"2\t",,terminal_output +8441,12296519,"TERMINAL",0,0,"3\t",,terminal_output +8442,12297597,"TERMINAL",0,0,"4\t",,terminal_output +8443,12298595,"TERMINAL",0,0,"5\t",,terminal_output +8444,12299631,"TERMINAL",0,0,"6\t",,terminal_output +8445,12300676,"TERMINAL",0,0,"7\t",,terminal_output +8446,12301704,"TERMINAL",0,0,"8\t",,terminal_output +8447,12302087,"generate_dataset.py",2067,0,"",python,selection_mouse +8448,12302226,"generate_dataset.py",2056,16,"episode_metadata",python,selection_mouse +8449,12302748,"TERMINAL",0,0,"9\t",,terminal_output +8450,12303795,"TERMINAL",0,0,"2:00\t",,terminal_output +8451,12304805,"generate_dataset.py",2043,0,"",python,selection_mouse +8452,12304841,"TERMINAL",0,0,"2\t",,terminal_output +8453,12305000,"generate_dataset.py",2037,16,"episode_metadata",python,selection_mouse +8454,12305545,"generate_dataset.py",2075,0,"",python,selection_mouse +8455,12305554,"generate_dataset.py",2074,0,"",python,selection_command +8456,12305676,"generate_dataset.py",2075,0,"",python,selection_mouse +8457,12305687,"generate_dataset.py",2074,0,"",python,selection_command +8458,12305872,"TERMINAL",0,0,"3\t",,terminal_output +8459,12306179,"generate_dataset.py",2064,0,"",python,selection_mouse +8460,12306338,"generate_dataset.py",2056,16,"episode_metadata",python,selection_mouse +8461,12306899,"TERMINAL",0,0,"4\t",,terminal_output +8462,12307935,"TERMINAL",0,0,"5\t",,terminal_output +8463,12308979,"TERMINAL",0,0,"6\t",,terminal_output +8464,12310022,"TERMINAL",0,0,"7\t",,terminal_output +8465,12311060,"TERMINAL",0,0,"8\t",,terminal_output +8466,12312097,"TERMINAL",0,0,"9\t",,terminal_output +8467,12313138,"TERMINAL",0,0,"10\t",,terminal_output +8468,12314177,"TERMINAL",0,0,"1\t",,terminal_output +8469,12315210,"TERMINAL",0,0,"2\t",,terminal_output +8470,12316249,"TERMINAL",0,0,"3\t",,terminal_output 
+8471,12317294,"TERMINAL",0,0,"4\t",,terminal_output +8472,12318117,"input_pipeline/preprocess/video_to_array_records.py",0,0,"",python,tab +8473,12318343,"TERMINAL",0,0,"5\t",,terminal_output +8474,12319229,"input_pipeline/preprocess/video_to_array_records.py",3476,0,"",python,selection_mouse +8475,12319365,"TERMINAL",0,0,"6\t",,terminal_output +8476,12319409,"input_pipeline/preprocess/video_to_array_records.py",3472,7,"results",python,selection_mouse +8477,12320399,"TERMINAL",0,0,"7\t",,terminal_output +8478,12321452,"TERMINAL",0,0,"8\t",,terminal_output +8479,12322483,"TERMINAL",0,0,"9\t",,terminal_output +8480,12323528,"TERMINAL",0,0,"20\t",,terminal_output +8481,12324566,"TERMINAL",0,0,"1\t",,terminal_output +8482,12325613,"TERMINAL",0,0,"2\t",,terminal_output +8483,12326697,"TERMINAL",0,0,"3\t",,terminal_output +8484,12327695,"TERMINAL",0,0,"4\t",,terminal_output +8485,12328738,"TERMINAL",0,0,"5\t",,terminal_output +8486,12329776,"TERMINAL",0,0,"6\t",,terminal_output +8487,12330464,"TERMINAL",0,0,"bash",,terminal_focus +8488,12330820,"TERMINAL",0,0,"7\t",,terminal_output +8489,12332838,"TERMINAL",0,0,"9\t",,terminal_output +8490,12333418,"input_pipeline/preprocess/pngs_to_array_records.py",0,0,"",python,tab +8491,12333921,"TERMINAL",0,0,"31\t",,terminal_output +8492,12334400,"generate_dataset.py",0,0,"",python,tab +8493,12334959,"TERMINAL",0,0,"2\t",,terminal_output +8494,12335973,"TERMINAL",0,0,"3\t",,terminal_output +8495,12337019,"TERMINAL",0,0,"4\t",,terminal_output +8496,12338045,"TERMINAL",0,0,"5\t",,terminal_output +8497,12339087,"TERMINAL",0,0,"6\t",,terminal_output +8498,12340163,"TERMINAL",0,0,"7\t",,terminal_output +8499,12341213,"TERMINAL",0,0,"8\t",,terminal_output +8500,12342207,"TERMINAL",0,0,"9\t",,terminal_output +8501,12343282,"TERMINAL",0,0,"40\t",,terminal_output +8502,12344267,"TERMINAL",0,0,"1\t",,terminal_output +8503,12345304,"TERMINAL",0,0,"2\t",,terminal_output +8504,12346338,"TERMINAL",0,0,"3\t",,terminal_output +8505,12347373,"TERMINAL",0,0,"4\t",,terminal_output +8506,12348410,"TERMINAL",0,0,"5\t",,terminal_output +8507,12349442,"TERMINAL",0,0,"6\t",,terminal_output +8508,12350488,"TERMINAL",0,0,"7\t",,terminal_output +8509,12351513,"TERMINAL",0,0,"8\t",,terminal_output +8510,12352546,"TERMINAL",0,0,"9\t",,terminal_output +8511,12353583,"TERMINAL",0,0,"50\t",,terminal_output +8512,12354618,"TERMINAL",0,0,"1\t",,terminal_output +8513,12355664,"TERMINAL",0,0,"2\t",,terminal_output +8514,12356688,"TERMINAL",0,0,"3\t",,terminal_output +8515,12357726,"TERMINAL",0,0,"4\t",,terminal_output +8516,12358765,"TERMINAL",0,0,"5\t",,terminal_output +8517,12359811,"TERMINAL",0,0,"6\t",,terminal_output +8518,12360845,"TERMINAL",0,0,"8\t",,terminal_output +8519,12361881,"TERMINAL",0,0,"9\t",,terminal_output +8520,12362919,"TERMINAL",0,0,"3:00\t",,terminal_output +8521,12363955,"TERMINAL",0,0,"1\t",,terminal_output +8522,12364992,"TERMINAL",0,0,"2\t",,terminal_output +8523,12366037,"TERMINAL",0,0,"3\t",,terminal_output +8524,12367061,"TERMINAL",0,0,"4\t",,terminal_output +8525,12368100,"TERMINAL",0,0,"5\t",,terminal_output +8526,12369134,"TERMINAL",0,0,"6\t",,terminal_output +8527,12370177,"TERMINAL",0,0,"7\t",,terminal_output +8528,12371215,"TERMINAL",0,0,"8\t",,terminal_output +8529,12372260,"TERMINAL",0,0,"9\t",,terminal_output +8530,12373309,"TERMINAL",0,0,"10\t",,terminal_output +8531,12374351,"TERMINAL",0,0,"1\t",,terminal_output +8532,12375391,"TERMINAL",0,0,"2\t",,terminal_output +8533,12376429,"TERMINAL",0,0,"3\t",,terminal_output 
+8534,12377463,"TERMINAL",0,0,"4\t",,terminal_output +8535,12378507,"TERMINAL",0,0,"5\t",,terminal_output +8536,12379542,"TERMINAL",0,0,"6\t",,terminal_output +8537,12380577,"TERMINAL",0,0,"7\t",,terminal_output +8538,12381623,"TERMINAL",0,0,"8\t",,terminal_output +8539,12382651,"TERMINAL",0,0,"9\t",,terminal_output +8540,12383690,"TERMINAL",0,0,"20\t",,terminal_output +8541,12384730,"TERMINAL",0,0,"1\t",,terminal_output +8542,12385769,"TERMINAL",0,0,"2\t",,terminal_output +8543,12386807,"TERMINAL",0,0,"3\t",,terminal_output +8544,12387843,"TERMINAL",0,0,"5\t",,terminal_output +8545,12388886,"TERMINAL",0,0,"6\t",,terminal_output +8546,12389924,"TERMINAL",0,0,"7\t",,terminal_output +8547,12390965,"TERMINAL",0,0,"8\t",,terminal_output +8548,12392003,"TERMINAL",0,0,"9\t",,terminal_output +8549,12393043,"TERMINAL",0,0,"30\t",,terminal_output +8550,12394086,"TERMINAL",0,0,"1\t",,terminal_output +8551,12395131,"TERMINAL",0,0,"2\t",,terminal_output +8552,12396176,"TERMINAL",0,0,"3\t",,terminal_output +8553,12397215,"TERMINAL",0,0,"4\t",,terminal_output +8554,12398256,"TERMINAL",0,0,"5\t",,terminal_output +8555,12399297,"TERMINAL",0,0,"6\t",,terminal_output +8556,12400332,"TERMINAL",0,0,"7\t",,terminal_output +8557,12401370,"TERMINAL",0,0,"8\t",,terminal_output +8558,12402413,"TERMINAL",0,0,"9\t",,terminal_output +8559,12403443,"TERMINAL",0,0,"40\t",,terminal_output +8560,12404479,"TERMINAL",0,0,"1\t",,terminal_output +8561,12405541,"TERMINAL",0,0,"2\t",,terminal_output +8562,12406578,"TERMINAL",0,0,"3\t",,terminal_output +8563,12407616,"TERMINAL",0,0,"4\t",,terminal_output +8564,12408658,"TERMINAL",0,0,"5\t",,terminal_output +8565,12409686,"TERMINAL",0,0,"6\t",,terminal_output +8566,12410724,"TERMINAL",0,0,"7\t",,terminal_output +8567,12411765,"TERMINAL",0,0,"8\t",,terminal_output +8568,12412798,"TERMINAL",0,0,"9\t",,terminal_output +8569,12413843,"TERMINAL",0,0,"51\t",,terminal_output +8570,12414892,"TERMINAL",0,0,"2\t",,terminal_output +8571,12415924,"TERMINAL",0,0,"3\t",,terminal_output +8572,12416972,"TERMINAL",0,0,"4\t",,terminal_output +8573,12418039,"TERMINAL",0,0,"5\t",,terminal_output +8574,12419040,"TERMINAL",0,0,"6\t",,terminal_output +8575,12420077,"TERMINAL",0,0,"7\t",,terminal_output +8576,12421111,"TERMINAL",0,0,"8\t",,terminal_output +8577,12422152,"TERMINAL",0,0,"9\t",,terminal_output +8578,12423185,"TERMINAL",0,0,"4:00\t",,terminal_output +8579,12424218,"TERMINAL",0,0,"1\t",,terminal_output +8580,12427664,"TERMINAL",0,0,"2\t3\t4\t",,terminal_output +8581,12428384,"TERMINAL",0,0,"5\t",,terminal_output +8582,12429413,"TERMINAL",0,0,"6\t",,terminal_output +8583,12430462,"TERMINAL",0,0,"7\t",,terminal_output +8584,12431507,"TERMINAL",0,0,"8\t",,terminal_output +8585,12432546,"TERMINAL",0,0,"9\t",,terminal_output +8586,12433586,"TERMINAL",0,0,"10\t",,terminal_output +8587,12434617,"TERMINAL",0,0,"1\t",,terminal_output +8588,12435655,"TERMINAL",0,0,"2\t",,terminal_output +8589,12436689,"TERMINAL",0,0,"3\t",,terminal_output +8590,12437726,"TERMINAL",0,0,"4\t",,terminal_output +8591,12438766,"TERMINAL",0,0,"5\t",,terminal_output +8592,12439819,"TERMINAL",0,0,"6\t",,terminal_output +8593,12440841,"TERMINAL",0,0,"8\t",,terminal_output +8594,12441906,"TERMINAL",0,0,"9\t",,terminal_output +8595,12442915,"TERMINAL",0,0,"20\t",,terminal_output +8596,12443959,"TERMINAL",0,0,"1\t",,terminal_output +8597,12444988,"TERMINAL",0,0,"2\t",,terminal_output +8598,12446046,"TERMINAL",0,0,"3\t",,terminal_output +8599,12447071,"TERMINAL",0,0,"4\t",,terminal_output 
+8600,12448104,"TERMINAL",0,0,"5\t",,terminal_output +8601,12449150,"TERMINAL",0,0,"6\t",,terminal_output +8602,12450185,"TERMINAL",0,0,"7\t",,terminal_output +8603,12451229,"TERMINAL",0,0,"8\t",,terminal_output +8604,12452288,"TERMINAL",0,0,"9\t",,terminal_output +8605,12452780,"generate_dataset.py",1213,0,"",python,selection_mouse +8606,12453487,"generate_dataset.py",1244,0,"",python,selection_command +8607,12454185,"TERMINAL",0,0,"30\t",,terminal_output +8608,12455235,"generate_dataset.py",1244,0,"astype(np.uint8)",python,content +8609,12455253,"TERMINAL",0,0,"2\t",,terminal_output +8610,12456270,"TERMINAL",0,0,"3\t",,terminal_output +8611,12456798,"generate_dataset.py",1243,0,"",python,selection_mouse +8612,12457318,"TERMINAL",0,0,"4\t",,terminal_output +8613,12457490,"generate_dataset.py",1243,0,".",python,content +8614,12457491,"generate_dataset.py",1244,0,"",python,selection_keyboard +8615,12458339,"TERMINAL",0,0,"5\t",,terminal_output +8616,12459377,"TERMINAL",0,0,"6\t",,terminal_output +8617,12459896,"generate_dataset.py",1243,1,"",python,content +8618,12460204,"generate_dataset.py",1244,0,"",python,selection_command +8619,12460445,"TERMINAL",0,0,"7\t",,terminal_output +8620,12460867,"generate_dataset.py",1244,0,".",python,content +8621,12460867,"generate_dataset.py",1245,0,"",python,selection_keyboard +8622,12461465,"TERMINAL",0,0,"8\t",,terminal_output +8623,12462500,"TERMINAL",0,0,"9\t",,terminal_output +8624,12463533,"TERMINAL",0,0,"40\t",,terminal_output +8625,12463673,"TERMINAL",0,0,"dev",,terminal_command +8626,12464572,"TERMINAL",0,0,"1\t",,terminal_output +8627,12465610,"TERMINAL",0,0,"2\t",,terminal_output +8628,12466675,"TERMINAL",0,0,"3\t",,terminal_output +8629,12467462,"TERMINAL",0,0,"git branch",,terminal_command +8630,12467550,"TERMINAL",0,0,"]633;E;2025-09-04 13:24:44 git branch;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C[?1h=\r add-wandb-name-and-tags\r\n before-nnx\r\n causal-mem-reduce\r\n causal-spatiotemporal-kv-cache\r\n causal-st-transformer\r\n causal-transformer-dynamics-model\r\n causal-transformer-nnx-no-kv-cache\r\n coinrun-gt-actions\r\n convert-to-jax-array-in-iter\r\n correct-batched-sampling\r\n dev\r\n dont-let-tf-see-gpu\r\n feat/darkness-filter\r\n feat/explicit-image-dims\r\n fix-action-padding-lam-future-information-access\r\n fix-sampling\r\n fix-transformer-forwardpass\r\n fix/spatiotemporal-pe-once-in-STTransformer\r\n grad-norm-log-and-clip\r\n grain-dataloader\r\n* input_pipeline/add-npy2array_record\r\n logging-variants\r\n lr-schedules\r\n main\r\n maskgit-different-maskprob-per-sample\r\n maskgit-sampling-iterative-unmasking-fix\r\n metrics-logging-for-dynamics-model\r\n monkey-patch\r\n new-arch-sampling\r\n preprocess_video\r\n refactor-tmp\r\n revised-dataloader\r\n runner\r\n runner-grain\r\n sample-ali-branch\r\n sample-from-different-topologies\r\n sampling-startframe-indexing-fix\r\n speedup-tfrecord-preprocessing\r\n tmp\r\n\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +8631,12467760,"TERMINAL",0,0,"4\t",,terminal_output +8632,12468733,"TERMINAL",0,0,"5\t",,terminal_output +8633,12469822,"TERMINAL",0,0,"6\t",,terminal_output +8634,12470142,"TERMINAL",0,0,"git status",,terminal_command +8635,12470190,"TERMINAL",0,0,"]633;E;2025-09-04 13:24:47 git status;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;COn branch input_pipeline/add-npy2array_record\r\nYour branch is up to date with 'origin/input_pipeline/add-npy2array_record'.\r\n\r\nChanges not staged for commit:\r\n (use ""git add ..."" to update what will 
be committed)\r\n (use ""git restore ..."" to discard changes in working directory)\r\n\tmodified: generate_dataset.py\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tkiller.sh\r\n\tkiller_partition.sh\r\n\tlog.log\r\n\tlogs/\r\n\toverfit_dir.zip\r\n\tread_tf_record.py\r\n\trequirements-franz.txt\r\n\tsamples/\r\n\tscripts_cremers/\r\n\tslurm/\r\n\tutils/visualizer.py\r\n\r\nno changes added to commit (use ""git add"" and/or ""git commit -a"")\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +8636,12470866,"TERMINAL",0,0,"8\t",,terminal_output +8637,12471891,"TERMINAL",0,0,"9\t",,terminal_output +8638,12471931,"TERMINAL",0,0,"git diff",,terminal_command +8639,12471998,"TERMINAL",0,0,"]633;E;2025-09-04 13:24:49 git diff;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C[?1h=\rdiff --git a/generate_dataset.py b/generate_dataset.py\r\nindex 52f7689..eea0b69 100644\r\n--- a/generate_dataset.py\r\n+++ b/generate_dataset.py\r\n@@ -45,7 +45,7 @@ while i < args.num_episodes:\r\n \r\n # --- Save episode ---\r\n if len(observations_seq) >= args.min_episode_length:\r\n- observations_data = np.concatenate(observations_seq, axis=0)\r\n+ observations_data = np.concatenate(observations_seq, axis=0).astype(np.uint8)\r\n episode_path = output_dir / f""episode_{i}.array_record"" \r\n \r\n # --- Save as ArrayRecord ---\r\n\r[?1l>]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +8640,12472934,"TERMINAL",0,0,"50\t",,terminal_output +8641,12473973,"TERMINAL",0,0,"1\t",,terminal_output +8642,12475063,"TERMINAL",0,0,"2\t",,terminal_output +8643,12476101,"TERMINAL",0,0,"3\t",,terminal_output +8644,12477108,"TERMINAL",0,0,"4\t",,terminal_output +8645,12478160,"TERMINAL",0,0,"5\t",,terminal_output +8646,12479199,"TERMINAL",0,0,"6\t",,terminal_output +8647,12480242,"TERMINAL",0,0,"7\t",,terminal_output +8648,12481321,"TERMINAL",0,0,"8\t",,terminal_output +8649,12482331,"TERMINAL",0,0,"9\t",,terminal_output +8650,12483385,"TERMINAL",0,0,"5:00\t",,terminal_output +8651,12484412,"TERMINAL",0,0,"1\t",,terminal_output +8652,12485452,"TERMINAL",0,0,"2\t",,terminal_output +8653,12486488,"TERMINAL",0,0,"3\t",,terminal_output +8654,12487543,"TERMINAL",0,0,"4\t",,terminal_output +8655,12488559,"TERMINAL",0,0,"5\t",,terminal_output +8656,12489597,"TERMINAL",0,0,"6\t",,terminal_output +8657,12490633,"TERMINAL",0,0,"7\t",,terminal_output +8658,12491708,"TERMINAL",0,0,"8\t",,terminal_output +8659,12492710,"TERMINAL",0,0,"9\t",,terminal_output +8660,12493792,"TERMINAL",0,0,"10\t",,terminal_output +8661,12494796,"TERMINAL",0,0,"1\t",,terminal_output +8662,12495843,"TERMINAL",0,0,"3\t",,terminal_output +8663,12496878,"TERMINAL",0,0,"4\t",,terminal_output +8664,12497907,"TERMINAL",0,0,"5\t",,terminal_output +8665,12498964,"TERMINAL",0,0,"6\t",,terminal_output +8666,12499988,"TERMINAL",0,0,"7\t",,terminal_output +8667,12501023,"TERMINAL",0,0,"8\t",,terminal_output +8668,12502068,"TERMINAL",0,0,"9\t",,terminal_output +8669,12503105,"TERMINAL",0,0,"20\t",,terminal_output +8670,12504154,"TERMINAL",0,0,"1\t",,terminal_output +8671,12505191,"TERMINAL",0,0,"2\t",,terminal_output +8672,12506227,"TERMINAL",0,0,"3\t",,terminal_output +8673,12506785,"TERMINAL",0,0,"git commit -am ""converted frames to unit8 during coinrun datagen""",,terminal_command +8674,12506825,"TERMINAL",0,0,"]633;E;2025-09-04 13:25:24 git commit -am ""converted frames to unit8 during coinrun datagen"";86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output 
+8675,12506985,"TERMINAL",0,0,"[input_pipeline/add-npy2array_record 84e90b5] converted frames to unit8 during coinrun datagen\r\n 1 file changed, 1 insertion(+), 1 deletion(-)\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +8676,12507272,"TERMINAL",0,0,"4\t",,terminal_output +8677,12508312,"TERMINAL",0,0,"5\t",,terminal_output +8678,12509383,"TERMINAL",0,0,"6\t",,terminal_output +8679,12509415,"TERMINAL",0,0,"git push",,terminal_command +8680,12509464,"TERMINAL",0,0,"]633;E;2025-09-04 13:25:26 git push;86057cda-56ec-4e2d-8be5-07b868da4eb8]633;C",,terminal_output +8681,12510380,"TERMINAL",0,0,"7\t",,terminal_output +8682,12510774,"TERMINAL",0,0,"Enumerating objects: 5, done.\r\nCounting objects: 20% (1/5)\rCounting objects: 40% (2/5)\rCounting objects: 60% (3/5)\rCounting objects: 80% (4/5)\rCounting objects: 100% (5/5)\rCounting objects: 100% (5/5), done.\r\nDelta compression using up to 152 threads\r\nCompressing objects: 33% (1/3)\rCompressing objects: 66% (2/3)\rCompressing objects: 100% (3/3)\rCompressing objects: 100% (3/3), done.\r\nWriting objects: 33% (1/3)\rWriting objects: 66% (2/3)\rWriting objects: 100% (3/3)\rWriting objects: 100% (3/3), 340 bytes | 340.00 KiB/s, done.\r\nTotal 3 (delta 2), reused 0 (delta 0), pack-reused 0\r\n",,terminal_output +8683,12510878,"TERMINAL",0,0,"remote: Resolving deltas: 0% (0/2)\rremote: Resolving deltas: 50% (1/2)\rremote: Resolving deltas: 100% (2/2)\rremote: Resolving deltas: 100% (2/2), completed with 2 local objects.\r\n",,terminal_output +8684,12511173,"TERMINAL",0,0,"To github.com:p-doom/jasmine.git\r\n e44cb26..84e90b5 input_pipeline/add-npy2array_record -> input_pipeline/add-npy2array_record\r\n]0;tum_cte0515@hkn1990:~/Projects/jasmine]633;D;0",,terminal_output +8685,12511441,"TERMINAL",0,0,"8\t",,terminal_output +8686,12512459,"TERMINAL",0,0,"9\t",,terminal_output +8687,12513497,"TERMINAL",0,0,"30\t",,terminal_output +8688,12514535,"TERMINAL",0,0,"1\t",,terminal_output +8689,12515574,"TERMINAL",0,0,"2\t",,terminal_output +8690,12516606,"TERMINAL",0,0,"3\t",,terminal_output +8691,12517649,"TERMINAL",0,0,"4\t",,terminal_output +8692,12518685,"TERMINAL",0,0,"5\t",,terminal_output +8693,12519735,"TERMINAL",0,0,"6\t",,terminal_output +8694,12520774,"TERMINAL",0,0,"7\t",,terminal_output +8695,12521814,"TERMINAL",0,0,"8\t",,terminal_output +8696,12522852,"TERMINAL",0,0,"40\t",,terminal_output +8697,12523890,"TERMINAL",0,0,"1\t",,terminal_output +8698,12524927,"TERMINAL",0,0,"2\t",,terminal_output +8699,12525964,"TERMINAL",0,0,"3\t",,terminal_output +8700,12527002,"TERMINAL",0,0,"4\t",,terminal_output +8701,12528048,"TERMINAL",0,0,"5\t",,terminal_output +8702,12529083,"TERMINAL",0,0,"6\t",,terminal_output +8703,12530123,"TERMINAL",0,0,"7\t",,terminal_output +8704,12531166,"TERMINAL",0,0,"8\t",,terminal_output +8705,12532208,"TERMINAL",0,0,"9\t",,terminal_output +8706,12533272,"TERMINAL",0,0,"50\t",,terminal_output +8707,12534313,"TERMINAL",0,0,"1\t",,terminal_output +8708,12535352,"TERMINAL",0,0,"2\t",,terminal_output +8709,12536392,"TERMINAL",0,0,"3\t",,terminal_output +8710,12537425,"TERMINAL",0,0,"4\t",,terminal_output +8711,12538461,"TERMINAL",0,0,"5\t",,terminal_output +8712,12539499,"TERMINAL",0,0,"6\t",,terminal_output +8713,12540533,"TERMINAL",0,0,"7\t",,terminal_output +8714,12541575,"TERMINAL",0,0,"8\t",,terminal_output +8715,12542612,"TERMINAL",0,0,"9\t",,terminal_output +8716,12543655,"TERMINAL",0,0,"6:00\t",,terminal_output +8717,12544689,"TERMINAL",0,0,"1\t",,terminal_output 
+8718,12545725,"TERMINAL",0,0,"2\t",,terminal_output +8719,12546766,"TERMINAL",0,0,"3\t",,terminal_output +8720,12547805,"TERMINAL",0,0,"4\t",,terminal_output +8721,12548839,"TERMINAL",0,0,"6\t",,terminal_output +8722,12549877,"TERMINAL",0,0,"7\t",,terminal_output +8723,12550913,"TERMINAL",0,0,"8\t",,terminal_output +8724,12552063,"TERMINAL",0,0,"9\t",,terminal_output +8725,12552998,"TERMINAL",0,0,"10\t",,terminal_output +8726,12554148,"TERMINAL",0,0,"1\t",,terminal_output +8727,12555077,"TERMINAL",0,0,"2\t",,terminal_output +8728,12556121,"TERMINAL",0,0,"3\t",,terminal_output +8729,12557157,"TERMINAL",0,0,"4\t",,terminal_output +8730,12558193,"TERMINAL",0,0,"5\t",,terminal_output +8731,12559231,"TERMINAL",0,0,"6\t",,terminal_output +8732,12560277,"TERMINAL",0,0,"7\t",,terminal_output +8733,12561318,"TERMINAL",0,0,"8\t",,terminal_output +8734,12562362,"TERMINAL",0,0,"9\t",,terminal_output +8735,12563399,"TERMINAL",0,0,"20\t",,terminal_output +8736,12564435,"TERMINAL",0,0,"1\t",,terminal_output +8737,12565478,"TERMINAL",0,0,"2\t",,terminal_output +8738,12566519,"TERMINAL",0,0,"3\t",,terminal_output +8739,12567569,"TERMINAL",0,0,"4\t",,terminal_output +8740,12568606,"TERMINAL",0,0,"5\t",,terminal_output +8741,12569654,"TERMINAL",0,0,"6\t",,terminal_output +8742,12570699,"TERMINAL",0,0,"7\t",,terminal_output +8743,12571749,"TERMINAL",0,0,"8\t",,terminal_output +8744,12572787,"TERMINAL",0,0,"9\t",,terminal_output +8745,12573825,"TERMINAL",0,0,"31\t",,terminal_output +8746,12575491,"TERMINAL",0,0,"26to5la",,terminal_output +8747,12576547,"TERMINAL",0,0,"3\t",,terminal_output +8748,12577580,"TERMINAL",0,0,"4\t",,terminal_output +8749,12578623,"TERMINAL",0,0,"5\t",,terminal_output +8750,12579658,"TERMINAL",0,0,"6\t",,terminal_output +8751,12580693,"TERMINAL",0,0,"7\t",,terminal_output +8752,12581733,"TERMINAL",0,0,"8\t",,terminal_output +8753,12582777,"TERMINAL",0,0,"9\t",,terminal_output +8754,12583826,"TERMINAL",0,0,"40\t",,terminal_output +8755,12584869,"TERMINAL",0,0,"2\t",,terminal_output +8756,12585912,"TERMINAL",0,0,"3\t",,terminal_output +8757,12586960,"TERMINAL",0,0,"4\t",,terminal_output +8758,12588002,"TERMINAL",0,0,"5\t",,terminal_output +8759,12589058,"TERMINAL",0,0,"6\t",,terminal_output +8760,12590085,"TERMINAL",0,0,"7\t",,terminal_output +8761,12591123,"TERMINAL",0,0,"8\t",,terminal_output +8762,12592158,"TERMINAL",0,0,"9\t",,terminal_output +8763,12593204,"TERMINAL",0,0,"50\t",,terminal_output +8764,12594250,"TERMINAL",0,0,"1\t",,terminal_output +8765,12595300,"TERMINAL",0,0,"2\t",,terminal_output +8766,12596334,"TERMINAL",0,0,"3\t",,terminal_output +8767,12597577,"TERMINAL",0,0,"4\t",,terminal_output +8768,12598604,"TERMINAL",0,0,"5\t",,terminal_output +8769,12599642,"TERMINAL",0,0,"6\t",,terminal_output +8770,12600686,"TERMINAL",0,0,"7\t",,terminal_output +8771,12601733,"TERMINAL",0,0,"8\t",,terminal_output +8772,12602776,"TERMINAL",0,0,"9\t",,terminal_output +8773,12603815,"TERMINAL",0,0,"7:00\t",,terminal_output +8774,12604853,"TERMINAL",0,0,"2\t",,terminal_output +8775,12605889,"TERMINAL",0,0,"3\t",,terminal_output +8776,12606927,"TERMINAL",0,0,"4\t",,terminal_output +8777,12607971,"TERMINAL",0,0,"5\t",,terminal_output +8778,12609020,"TERMINAL",0,0,"6\t",,terminal_output +8779,12610063,"TERMINAL",0,0,"7\t",,terminal_output +8780,12611091,"TERMINAL",0,0,"8\t",,terminal_output +8781,12612156,"TERMINAL",0,0,"9\t",,terminal_output +8782,12613166,"TERMINAL",0,0,"10\t",,terminal_output +8783,12614232,"TERMINAL",0,0,"1\t",,terminal_output 
+8784,12615250,"TERMINAL",0,0,"2\t",,terminal_output +8785,12616305,"TERMINAL",0,0,"3\t",,terminal_output +8786,12617331,"TERMINAL",0,0,"4\t",,terminal_output +8787,12618371,"TERMINAL",0,0,"5\t",,terminal_output +8788,12619407,"TERMINAL",0,0,"6\t",,terminal_output +8789,12620441,"TERMINAL",0,0,"7\t",,terminal_output +8790,12621481,"TERMINAL",0,0,"8\t",,terminal_output +8791,12622522,"TERMINAL",0,0,"9\t",,terminal_output +8792,12623569,"TERMINAL",0,0,"20\t",,terminal_output +8793,12624614,"TERMINAL",0,0,"1\t",,terminal_output +8794,12625652,"TERMINAL",0,0,"2\t",,terminal_output +8795,12626699,"TERMINAL",0,0,"3\t",,terminal_output +8796,12627730,"TERMINAL",0,0,"4\t",,terminal_output +8797,12628777,"TERMINAL",0,0,"5\t",,terminal_output +8798,12629815,"TERMINAL",0,0,"6\t",,terminal_output +8799,12630854,"TERMINAL",0,0,"8\t",,terminal_output +8800,12631897,"TERMINAL",0,0,"9\t",,terminal_output +8801,12632948,"TERMINAL",0,0,"30\t",,terminal_output +8802,12633985,"TERMINAL",0,0,"1\t",,terminal_output +8803,12635033,"TERMINAL",0,0,"2\t",,terminal_output +8804,12636073,"TERMINAL",0,0,"3\t",,terminal_output +8805,12637115,"TERMINAL",0,0,"4\t",,terminal_output +8806,12638167,"TERMINAL",0,0,"5\t",,terminal_output +8807,12639230,"TERMINAL",0,0,"6\t",,terminal_output +8808,12640260,"TERMINAL",0,0,"7\t",,terminal_output +8809,12641351,"TERMINAL",0,0,"8\t",,terminal_output +8810,12642446,"TERMINAL",0,0,"9\t",,terminal_output +8811,12643479,"TERMINAL",0,0,"40\t",,terminal_output +8812,12644517,"TERMINAL",0,0,"1\t",,terminal_output +8813,12645559,"TERMINAL",0,0,"2\t",,terminal_output +8814,12646601,"TERMINAL",0,0,"3\t",,terminal_output +8815,12647633,"TERMINAL",0,0,"4\t",,terminal_output +8816,12648673,"TERMINAL",0,0,"5\t",,terminal_output +8817,12649714,"TERMINAL",0,0,"6\t",,terminal_output +8818,12650752,"TERMINAL",0,0,"7\t",,terminal_output +8819,12651790,"TERMINAL",0,0,"8\t",,terminal_output +8820,12652833,"TERMINAL",0,0,"50\t",,terminal_output +8821,12653882,"TERMINAL",0,0,"1\t",,terminal_output +8822,12654918,"TERMINAL",0,0,"2\t",,terminal_output +8823,12655964,"TERMINAL",0,0,"3\t",,terminal_output +8824,12657004,"TERMINAL",0,0,"4\t",,terminal_output +8825,12658043,"TERMINAL",0,0,"5\t",,terminal_output +8826,12659084,"TERMINAL",0,0,"6\t",,terminal_output +8827,12660122,"TERMINAL",0,0,"7\t",,terminal_output +8828,12661163,"TERMINAL",0,0,"8\t",,terminal_output +8829,12662204,"TERMINAL",0,0,"9\t",,terminal_output +8830,12663245,"TERMINAL",0,0,"8:00\t",,terminal_output +8831,12664283,"TERMINAL",0,0,"1\t",,terminal_output +8832,12665321,"TERMINAL",0,0,"2\t",,terminal_output +8833,12666366,"TERMINAL",0,0,"3\t",,terminal_output +8834,12667410,"TERMINAL",0,0,"4\t",,terminal_output +8835,12668447,"TERMINAL",0,0,"5\t",,terminal_output +8836,12669489,"TERMINAL",0,0,"6\t",,terminal_output +8837,12670530,"TERMINAL",0,0,"7\t",,terminal_output +8838,12671577,"TERMINAL",0,0,"8\t",,terminal_output +8839,12672627,"TERMINAL",0,0,"9\t",,terminal_output +8840,12673661,"TERMINAL",0,0,"10\t",,terminal_output +8841,12674699,"TERMINAL",0,0,"1\t",,terminal_output +8842,12675739,"TERMINAL",0,0,"2\t",,terminal_output +8843,12676774,"TERMINAL",0,0,"3\t",,terminal_output +8844,12677819,"TERMINAL",0,0,"4\t",,terminal_output +8845,12678866,"TERMINAL",0,0,"6\t",,terminal_output +8846,12679912,"TERMINAL",0,0,"7\t",,terminal_output +8847,12680948,"TERMINAL",0,0,"8\t",,terminal_output +8848,12681989,"TERMINAL",0,0,"9\t",,terminal_output +8849,12683029,"TERMINAL",0,0,"20\t",,terminal_output 
+8850,12684073,"TERMINAL",0,0,"1\t",,terminal_output +8851,12685112,"TERMINAL",0,0,"2\t",,terminal_output +8852,12686157,"TERMINAL",0,0,"3\t",,terminal_output +8853,12687204,"TERMINAL",0,0,"4\t",,terminal_output +8854,12688259,"TERMINAL",0,0,"5\t",,terminal_output +8855,12689279,"TERMINAL",0,0,"6\t",,terminal_output +8856,12690327,"TERMINAL",0,0,"7\t",,terminal_output +8857,12691362,"TERMINAL",0,0,"8\t",,terminal_output +8858,12692408,"TERMINAL",0,0,"9\t",,terminal_output +8859,12693474,"TERMINAL",0,0,"30\t",,terminal_output +8860,12694514,"TERMINAL",0,0,"1\t",,terminal_output +8861,12696813,"TERMINAL",0,0,"25la6to",,terminal_output +8862,12697857,"TERMINAL",0,0,"5\t",,terminal_output +8863,12698896,"TERMINAL",0,0,"6\t",,terminal_output +8864,12699931,"TERMINAL",0,0,"7\t",,terminal_output +8865,12700976,"TERMINAL",0,0,"8\t",,terminal_output +8866,12702018,"TERMINAL",0,0,"9\t",,terminal_output +8867,12703064,"TERMINAL",0,0,"40\t",,terminal_output +8868,12704101,"TERMINAL",0,0,"1\t",,terminal_output +8869,12705143,"TERMINAL",0,0,"2\t",,terminal_output +8870,12706176,"TERMINAL",0,0,"3\t",,terminal_output +8871,12707225,"TERMINAL",0,0,"4\t",,terminal_output +8872,12708321,"TERMINAL",0,0,"5\t",,terminal_output +8873,12709315,"TERMINAL",0,0,"6\t",,terminal_output +8874,12710356,"TERMINAL",0,0,"7\t",,terminal_output +8875,12711398,"TERMINAL",0,0,"8\t",,terminal_output +8876,12712443,"TERMINAL",0,0,"9\t",,terminal_output +8877,12713487,"TERMINAL",0,0,"50\t",,terminal_output +8878,12714534,"TERMINAL",0,0,"1\t",,terminal_output +8879,12715581,"TERMINAL",0,0,"2\t",,terminal_output +8880,12716624,"TERMINAL",0,0,"3\t",,terminal_output +8881,12717663,"TERMINAL",0,0,"4\t",,terminal_output +8882,12718710,"TERMINAL",0,0,"5\t",,terminal_output +8883,12719750,"TERMINAL",0,0,"6\t",,terminal_output +8884,12720798,"TERMINAL",0,0,"7\t",,terminal_output +8885,12721846,"TERMINAL",0,0,"9\t",,terminal_output +8886,12722886,"TERMINAL",0,0,"9:00\t",,terminal_output +8887,12723930,"TERMINAL",0,0,"1\t",,terminal_output +8888,12724979,"TERMINAL",0,0,"2\t",,terminal_output +8889,12726023,"TERMINAL",0,0,"3\t",,terminal_output +8890,12727072,"TERMINAL",0,0,"4\t",,terminal_output +8891,12728125,"TERMINAL",0,0,"5\t",,terminal_output +8892,12729170,"TERMINAL",0,0,"6\t",,terminal_output +8893,12730214,"TERMINAL",0,0,"7\t",,terminal_output +8894,12731262,"TERMINAL",0,0,"8\t",,terminal_output +8895,12732329,"TERMINAL",0,0,"9\t",,terminal_output +8896,12733352,"TERMINAL",0,0,"10\t",,terminal_output +8897,12734400,"TERMINAL",0,0,"1\t",,terminal_output +8898,12735442,"TERMINAL",0,0,"2\t",,terminal_output +8899,12736491,"TERMINAL",0,0,"3\t",,terminal_output +8900,12737538,"TERMINAL",0,0,"4\t",,terminal_output +8901,12738577,"TERMINAL",0,0,"5\t",,terminal_output +8902,12739614,"TERMINAL",0,0,"6\t",,terminal_output +8903,12740659,"TERMINAL",0,0,"7\t",,terminal_output +8904,12741726,"TERMINAL",0,0,"8\t",,terminal_output +8905,12742772,"TERMINAL",0,0,"9\t",,terminal_output +8906,12743814,"TERMINAL",0,0,"20\t",,terminal_output +8907,12744862,"TERMINAL",0,0,"2\t",,terminal_output +8908,12745905,"TERMINAL",0,0,"3\t",,terminal_output +8909,12746961,"TERMINAL",0,0,"4\t",,terminal_output +8910,12747996,"TERMINAL",0,0,"5\t",,terminal_output +8911,12749036,"TERMINAL",0,0,"6\t",,terminal_output +8912,12750082,"TERMINAL",0,0,"7\t",,terminal_output +8913,12751134,"TERMINAL",0,0,"8\t",,terminal_output +8914,12752179,"TERMINAL",0,0,"9\t",,terminal_output +8915,12753222,"TERMINAL",0,0,"30\t",,terminal_output 
+8916,12754265,"TERMINAL",0,0,"1\t",,terminal_output +8917,12755316,"TERMINAL",0,0,"2\t",,terminal_output +8918,12756356,"TERMINAL",0,0,"3\t",,terminal_output +8919,12757423,"TERMINAL",0,0,"4\t",,terminal_output +8920,12758468,"TERMINAL",0,0,"5\t",,terminal_output +8921,12759509,"TERMINAL",0,0,"6\t",,terminal_output +8922,12760697,"TERMINAL",0,0,"7\t",,terminal_output +8923,12761784,"TERMINAL",0,0,"8\t",,terminal_output +8924,12762820,"TERMINAL",0,0,"9\t",,terminal_output +8925,12763861,"TERMINAL",0,0,"41\t",,terminal_output +8926,12764898,"TERMINAL",0,0,"2\t",,terminal_output +8927,12765955,"TERMINAL",0,0,"3\t",,terminal_output +8928,12766985,"TERMINAL",0,0,"4\t",,terminal_output +8929,12768036,"TERMINAL",0,0,"5\t",,terminal_output +8930,12769085,"TERMINAL",0,0,"6\t",,terminal_output +8931,12770091,"TERMINAL",0,0,"7\t",,terminal_output +8932,12771173,"TERMINAL",0,0,"8\t",,terminal_output +8933,12772178,"TERMINAL",0,0,"9\t",,terminal_output +8934,12773223,"TERMINAL",0,0,"50\t",,terminal_output +8935,12774269,"TERMINAL",0,0,"1\t",,terminal_output +8936,12775310,"TERMINAL",0,0,"2\t",,terminal_output +8937,12776351,"TERMINAL",0,0,"3\t",,terminal_output +8938,12777392,"TERMINAL",0,0,"4\t",,terminal_output +8939,12778432,"TERMINAL",0,0,"5\t",,terminal_output +8940,12779478,"TERMINAL",0,0,"6\t",,terminal_output +8941,12780526,"TERMINAL",0,0,"7\t",,terminal_output +8942,12781568,"TERMINAL",0,0,"8\t",,terminal_output +8943,12782618,"TERMINAL",0,0,"9\t",,terminal_output +8944,12783662,"TERMINAL",0,0,"30:00\t",,terminal_output +8945,12784706,"TERMINAL",0,0,"1\t",,terminal_output +8946,12785749,"TERMINAL",0,0,"2\t",,terminal_output +8947,12786797,"TERMINAL",0,0,"3\t",,terminal_output +8948,12787845,"TERMINAL",0,0,"5\t",,terminal_output +8949,12788888,"TERMINAL",0,0,"6\t",,terminal_output +8950,12789937,"TERMINAL",0,0,"7\t",,terminal_output +8951,12790984,"TERMINAL",0,0,"8\t",,terminal_output +8952,12792025,"TERMINAL",0,0,"9\t",,terminal_output +8953,12793068,"TERMINAL",0,0,"10\t",,terminal_output +8954,12794117,"TERMINAL",0,0,"1\t",,terminal_output +8955,12795157,"TERMINAL",0,0,"2\t",,terminal_output +8956,12796199,"TERMINAL",0,0,"3\t",,terminal_output +8957,12797239,"TERMINAL",0,0,"4\t",,terminal_output +8958,12798306,"TERMINAL",0,0,"5\t",,terminal_output +8959,12799333,"TERMINAL",0,0,"6\t",,terminal_output +8960,12800379,"TERMINAL",0,0,"7\t",,terminal_output +8961,12801419,"TERMINAL",0,0,"8\t",,terminal_output +8962,12802464,"TERMINAL",0,0,"9\t",,terminal_output +8963,12803509,"TERMINAL",0,0,"20\t",,terminal_output +8964,12804580,"TERMINAL",0,0,"1\t",,terminal_output +8965,12805608,"TERMINAL",0,0,"2\t",,terminal_output +8966,12806706,"TERMINAL",0,0,"3\t",,terminal_output +8967,12807698,"TERMINAL",0,0,"4\t",,terminal_output +8968,12808735,"TERMINAL",0,0,"5\t",,terminal_output +8969,12809786,"TERMINAL",0,0,"6\t",,terminal_output +8970,12810823,"TERMINAL",0,0,"8\t",,terminal_output +8971,12811872,"TERMINAL",0,0,"9\t",,terminal_output +8972,12812911,"TERMINAL",0,0,"30\t",,terminal_output +8973,12813969,"TERMINAL",0,0,"1\t",,terminal_output +8974,12815005,"TERMINAL",0,0,"2\t",,terminal_output +8975,12816057,"TERMINAL",0,0,"3\t",,terminal_output +8976,12818012,"TERMINAL",0,0,"46to5la",,terminal_output +8977,12819056,"TERMINAL",0,0,"6\t",,terminal_output +8978,12820104,"TERMINAL",0,0,"7\t",,terminal_output +8979,12821167,"TERMINAL",0,0,"8\t",,terminal_output +8980,12822209,"TERMINAL",0,0,"9\t",,terminal_output +8981,12823255,"TERMINAL",0,0,"40\t",,terminal_output 
+8982,12824301,"TERMINAL",0,0,"1\t",,terminal_output +8983,12825343,"TERMINAL",0,0,"2\t",,terminal_output +8984,12826388,"TERMINAL",0,0,"3\t",,terminal_output +8985,12827434,"TERMINAL",0,0,"4\t",,terminal_output +8986,12828477,"TERMINAL",0,0,"5\t",,terminal_output +8987,12829522,"TERMINAL",0,0,"6\t",,terminal_output +8988,12830553,"TERMINAL",0,0,"7\t",,terminal_output +8989,12831594,"TERMINAL",0,0,"8\t",,terminal_output +8990,12832679,"TERMINAL",0,0,"9\t",,terminal_output +8991,12833683,"TERMINAL",0,0,"50\t",,terminal_output +8992,12834724,"TERMINAL",0,0,"1\t",,terminal_output +8993,12835763,"TERMINAL",0,0,"2\t",,terminal_output +8994,12836880,"TERMINAL",0,0,"3\t",,terminal_output +8995,12837854,"TERMINAL",0,0,"5\t",,terminal_output +8996,12838890,"TERMINAL",0,0,"6\t",,terminal_output +8997,12839931,"TERMINAL",0,0,"7\t",,terminal_output +8998,12840971,"TERMINAL",0,0,"8\t",,terminal_output +8999,12842009,"TERMINAL",0,0,"9\t",,terminal_output +9000,12843061,"TERMINAL",0,0,"1:00\t",,terminal_output +9001,12844087,"TERMINAL",0,0,"1\t",,terminal_output +9002,12845121,"TERMINAL",0,0,"2\t",,terminal_output +9003,12846164,"TERMINAL",0,0,"3\t",,terminal_output +9004,12847208,"TERMINAL",0,0,"4\t",,terminal_output +9005,12848257,"TERMINAL",0,0,"5\t",,terminal_output +9006,12849316,"TERMINAL",0,0,"6\t",,terminal_output +9007,12850349,"TERMINAL",0,0,"7\t",,terminal_output +9008,12851389,"TERMINAL",0,0,"8\t",,terminal_output +9009,12852433,"TERMINAL",0,0,"9\t",,terminal_output +9010,12853486,"TERMINAL",0,0,"10\t",,terminal_output +9011,12854589,"TERMINAL",0,0,"1\t",,terminal_output +9012,12855631,"TERMINAL",0,0,"2\t",,terminal_output +9013,12856731,"TERMINAL",0,0,"3\t",,terminal_output +9014,12857658,"TERMINAL",0,0,"4\t",,terminal_output +9015,12858705,"TERMINAL",0,0,"5\t",,terminal_output +9016,12859750,"TERMINAL",0,0,"6\t",,terminal_output +9017,12860789,"TERMINAL",0,0,"7\t",,terminal_output +9018,12861831,"TERMINAL",0,0,"9\t",,terminal_output +9019,12862869,"TERMINAL",0,0,"20\t",,terminal_output +9020,12863916,"TERMINAL",0,0,"1\t",,terminal_output +9021,12864955,"TERMINAL",0,0,"2\t",,terminal_output +9022,12865990,"TERMINAL",0,0,"3\t",,terminal_output +9023,12867032,"TERMINAL",0,0,"4\t",,terminal_output +9024,12868096,"TERMINAL",0,0,"5\t",,terminal_output +9025,12869122,"TERMINAL",0,0,"6\t",,terminal_output +9026,12870166,"TERMINAL",0,0,"7\t",,terminal_output +9027,12871194,"TERMINAL",0,0,"8\t",,terminal_output +9028,12872236,"TERMINAL",0,0,"9\t",,terminal_output +9029,12873270,"TERMINAL",0,0,"30\t",,terminal_output +9030,12874333,"TERMINAL",0,0,"1\t",,terminal_output +9031,12875365,"TERMINAL",0,0,"2\t",,terminal_output +9032,12876394,"TERMINAL",0,0,"3\t",,terminal_output +9033,12877434,"TERMINAL",0,0,"4\t",,terminal_output +9034,12878469,"TERMINAL",0,0,"5\t",,terminal_output +9035,12879564,"TERMINAL",0,0,"6\t",,terminal_output +9036,12880587,"TERMINAL",0,0,"7\t",,terminal_output +9037,12881614,"TERMINAL",0,0,"8\t",,terminal_output +9038,12882740,"TERMINAL",0,0,"9\t",,terminal_output +9039,12883765,"TERMINAL",0,0,"40\t",,terminal_output +9040,12884786,"TERMINAL",0,0,"1\t",,terminal_output +9041,12885810,"TERMINAL",0,0,"2\t",,terminal_output +9042,12886837,"TERMINAL",0,0,"4\t",,terminal_output +9043,12887961,"TERMINAL",0,0,"5\t",,terminal_output +9044,12888986,"TERMINAL",0,0,"6\t",,terminal_output +9045,12890011,"TERMINAL",0,0,"7\t",,terminal_output +9046,12891033,"TERMINAL",0,0,"8\t",,terminal_output +9047,12892020,"TERMINAL",0,0,"9\t",,terminal_output 
+9048,12893080,"TERMINAL",0,0,"50\t",,terminal_output +9049,12894216,"TERMINAL",0,0,"1\t",,terminal_output +9050,12895136,"TERMINAL",0,0,"2\t",,terminal_output +9051,12896176,"TERMINAL",0,0,"3\t",,terminal_output +9052,12897233,"TERMINAL",0,0,"4\t",,terminal_output +9053,12898266,"TERMINAL",0,0,"5\t",,terminal_output +9054,12899322,"TERMINAL",0,0,"6\t",,terminal_output +9055,12901251,"TERMINAL",0,0,"7\t",,terminal_output +9056,12902303,"TERMINAL",0,0,"9\t",,terminal_output +9057,12903431,"TERMINAL",0,0,"2:00\t",,terminal_output +9058,12904425,"TERMINAL",0,0,"1\t",,terminal_output +9059,12905465,"TERMINAL",0,0,"2\t",,terminal_output +9060,12906468,"TERMINAL",0,0,"3\t",,terminal_output +9061,12907522,"TERMINAL",0,0,"4\t",,terminal_output +9062,12908647,"TERMINAL",0,0,"5\t",,terminal_output +9063,12909671,"TERMINAL",0,0,"6\t",,terminal_output +9064,12910695,"TERMINAL",0,0,"7\t",,terminal_output +9065,12911710,"TERMINAL",0,0,"8\t",,terminal_output +9066,12912739,"TERMINAL",0,0,"9\t",,terminal_output +9067,12913869,"TERMINAL",0,0,"10\t",,terminal_output +9068,12914894,"TERMINAL",0,0,"2\t",,terminal_output +9069,12915917,"TERMINAL",0,0,"3\t",,terminal_output +9070,12916942,"TERMINAL",0,0,"4\t",,terminal_output +9071,12918071,"TERMINAL",0,0,"5\t",,terminal_output +9072,12919090,"TERMINAL",0,0,"6\t",,terminal_output +9073,12920126,"TERMINAL",0,0,"7\t",,terminal_output +9074,12921143,"TERMINAL",0,0,"8\t",,terminal_output +9075,12922223,"TERMINAL",0,0,"9\t",,terminal_output +9076,12923231,"TERMINAL",0,0,"20\t",,terminal_output +9077,12924277,"TERMINAL",0,0,"1\t",,terminal_output +9078,12925442,"TERMINAL",0,0,"2\t",,terminal_output +9079,12926373,"TERMINAL",0,0,"3\t",,terminal_output +9080,12927419,"TERMINAL",0,0,"4\t",,terminal_output +9081,12928468,"TERMINAL",0,0,"5\t",,terminal_output +9082,12929513,"TERMINAL",0,0,"6\t",,terminal_output +9083,12930560,"TERMINAL",0,0,"7\t",,terminal_output +9084,12931696,"TERMINAL",0,0,"8\t",,terminal_output +9085,12932656,"TERMINAL",0,0,"9\t",,terminal_output +9086,12933711,"TERMINAL",0,0,"30\t",,terminal_output +9087,12934749,"TERMINAL",0,0,"1\t",,terminal_output +9088,12935884,"TERMINAL",0,0,"2\t",,terminal_output +9089,12936840,"TERMINAL",0,0,"4\t",,terminal_output +9090,12937887,"TERMINAL",0,0,"5\t",,terminal_output +9091,12939201,"TERMINAL",0,0,"6\t",,terminal_output +9092,12940243,"TERMINAL",0,0,"7\t",,terminal_output +9093,12941290,"TERMINAL",0,0,"8\t",,terminal_output +9094,12942336,"TERMINAL",0,0,"9\t",,terminal_output +9095,12943385,"TERMINAL",0,0,"40\t",,terminal_output +9096,12944433,"TERMINAL",0,0,"1\t",,terminal_output +9097,12945466,"TERMINAL",0,0,"2\t",,terminal_output +9098,12946534,"TERMINAL",0,0,"3\t",,terminal_output +9099,12947662,"TERMINAL",0,0,"4\t",,terminal_output +9100,12948678,"TERMINAL",0,0,"5\t",,terminal_output +9101,12949709,"TERMINAL",0,0,"6\t",,terminal_output +9102,12950691,"TERMINAL",0,0,"7\t",,terminal_output +9103,12951734,"TERMINAL",0,0,"8\t",,terminal_output +9104,12952884,"TERMINAL",0,0,"9\t",,terminal_output +9105,12953908,"TERMINAL",0,0,"51\t",,terminal_output +9106,12954933,"TERMINAL",0,0,"2\t",,terminal_output +9107,12955956,"TERMINAL",0,0,"3\t",,terminal_output +9108,12956971,"TERMINAL",0,0,"4\t",,terminal_output +9109,12958106,"TERMINAL",0,0,"5\t",,terminal_output +9110,12959130,"TERMINAL",0,0,"6\t",,terminal_output +9111,12960258,"TERMINAL",0,0,"7\t",,terminal_output +9112,12961179,"TERMINAL",0,0,"8\t",,terminal_output +9113,12962203,"TERMINAL",0,0,"9\t",,terminal_output 
+9114,12963233,"TERMINAL",0,0,"3:00\t",,terminal_output +9115,12964279,"TERMINAL",0,0,"1\t",,terminal_output +9116,12965377,"TERMINAL",0,0,"2\t",,terminal_output +9117,12966373,"TERMINAL",0,0,"3\t",,terminal_output +9118,12967429,"TERMINAL",0,0,"4\t",,terminal_output +9119,12968471,"TERMINAL",0,0,"5\t",,terminal_output +9120,12969576,"TERMINAL",0,0,"6\t",,terminal_output +9121,12970600,"TERMINAL",0,0,"7\t",,terminal_output +9122,12971623,"TERMINAL",0,0,"8\t",,terminal_output +9123,12972661,"TERMINAL",0,0,"9\t",,terminal_output +9124,12973703,"TERMINAL",0,0,"10\t",,terminal_output +9125,12974798,"TERMINAL",0,0,"1\t",,terminal_output +9126,12975795,"TERMINAL",0,0,"2\t",,terminal_output +9127,12976846,"TERMINAL",0,0,"4\t",,terminal_output +9128,12977990,"TERMINAL",0,0,"5\t",,terminal_output +9129,12978934,"TERMINAL",0,0,"6\t",,terminal_output +9130,12979985,"TERMINAL",0,0,"7\t",,terminal_output +9131,12981044,"TERMINAL",0,0,"8\t",,terminal_output +9132,12982172,"TERMINAL",0,0,"9\t",,terminal_output +9133,12983121,"TERMINAL",0,0,"20\t",,terminal_output +9134,12984166,"TERMINAL",0,0,"1\t",,terminal_output +9135,12985211,"TERMINAL",0,0,"2\t",,terminal_output +9136,12986254,"TERMINAL",0,0,"3\t",,terminal_output +9137,12987305,"TERMINAL",0,0,"4\t",,terminal_output +9138,12988359,"TERMINAL",0,0,"5\t",,terminal_output +9139,12989394,"TERMINAL",0,0,"6\t",,terminal_output +9140,12990438,"TERMINAL",0,0,"7\t",,terminal_output +9141,12991495,"TERMINAL",0,0,"8\t",,terminal_output +9142,12992523,"TERMINAL",0,0,"9\t",,terminal_output +9143,12993640,"TERMINAL",0,0,"30\t",,terminal_output +9144,12994614,"TERMINAL",0,0,"1\t",,terminal_output +9145,12995688,"TERMINAL",0,0,"2\t",,terminal_output +9146,12996711,"TERMINAL",0,0,"3\t",,terminal_output +9147,12997754,"TERMINAL",0,0,"4\t",,terminal_output +9148,12998814,"TERMINAL",0,0,"5\t",,terminal_output +9149,12999886,"TERMINAL",0,0,"7\t",,terminal_output +9150,13000912,"TERMINAL",0,0,"8\t",,terminal_output +9151,13002038,"TERMINAL",0,0,"9\t",,terminal_output +9152,13003062,"TERMINAL",0,0,"40\t",,terminal_output +9153,13004087,"TERMINAL",0,0,"1\t",,terminal_output +9154,13005111,"TERMINAL",0,0,"2\t",,terminal_output +9155,13006124,"TERMINAL",0,0,"3\t",,terminal_output +9156,13007203,"TERMINAL",0,0,"4\t",,terminal_output +9157,13008212,"TERMINAL",0,0,"5\t",,terminal_output +9158,13009261,"TERMINAL",0,0,"6\t",,terminal_output +9159,13010308,"TERMINAL",0,0,"7\t",,terminal_output +9160,13011349,"TERMINAL",0,0,"8\t",,terminal_output +9161,13012396,"TERMINAL",0,0,"9\t",,terminal_output +9162,13013438,"TERMINAL",0,0,"50\t",,terminal_output +9163,13014493,"TERMINAL",0,0,"1\t",,terminal_output +9164,13015541,"TERMINAL",0,0,"2\t",,terminal_output +9165,13016582,"TERMINAL",0,0,"3\t",,terminal_output +9166,13017621,"TERMINAL",0,0,"4\t",,terminal_output +9167,13018677,"TERMINAL",0,0,"5\t",,terminal_output +9168,13019705,"TERMINAL",0,0,"6\t",,terminal_output +9169,13020761,"TERMINAL",0,0,"7\t",,terminal_output +9170,13021809,"TERMINAL",0,0,"8\t",,terminal_output +9171,13022835,"TERMINAL",0,0,"4:00\t",,terminal_output +9172,13023885,"TERMINAL",0,0,"1\t",,terminal_output +9173,13024935,"TERMINAL",0,0,"2\t",,terminal_output +9174,13025978,"TERMINAL",0,0,"3\t",,terminal_output +9175,13027023,"TERMINAL",0,0,"4\t",,terminal_output +9176,13028067,"TERMINAL",0,0,"5\t",,terminal_output +9177,13029111,"TERMINAL",0,0,"6\t",,terminal_output +9178,13030169,"TERMINAL",0,0,"7\t",,terminal_output +9179,13031213,"TERMINAL",0,0,"8\t",,terminal_output 
+9180,13032252,"TERMINAL",0,0,"9\t",,terminal_output +9181,13033310,"TERMINAL",0,0,"10\t",,terminal_output +9182,13034343,"TERMINAL",0,0,"1\t",,terminal_output +9183,13035396,"TERMINAL",0,0,"2\t",,terminal_output +9184,13036422,"TERMINAL",0,0,"3\t",,terminal_output +9185,13037459,"TERMINAL",0,0,"4\t",,terminal_output +9186,13038500,"TERMINAL",0,0,"5\t",,terminal_output +9187,13039549,"TERMINAL",0,0,"6\t",,terminal_output +9188,13040588,"TERMINAL",0,0,"7\t",,terminal_output +9189,13041636,"TERMINAL",0,0,"8\t",,terminal_output +9190,13042682,"TERMINAL",0,0,"9\t",,terminal_output +9191,13043724,"TERMINAL",0,0,"20\t",,terminal_output +9192,13044771,"TERMINAL",0,0,"1\t",,terminal_output +9193,13045837,"TERMINAL",0,0,"2\t",,terminal_output +9194,13046879,"TERMINAL",0,0,"4\t",,terminal_output +9195,13047916,"TERMINAL",0,0,"5\t",,terminal_output +9196,13048968,"TERMINAL",0,0,"6\t",,terminal_output +9197,13050012,"TERMINAL",0,0,"7\t",,terminal_output +9198,13051057,"TERMINAL",0,0,"8\t",,terminal_output +9199,13052106,"TERMINAL",0,0,"9\t",,terminal_output +9200,13053145,"TERMINAL",0,0,"30\t",,terminal_output +9201,13054193,"TERMINAL",0,0,"1\t",,terminal_output +9202,13055235,"TERMINAL",0,0,"2\t",,terminal_output +9203,13056281,"TERMINAL",0,0,"3\t",,terminal_output +9204,13057328,"TERMINAL",0,0,"4\t",,terminal_output +9205,13058382,"TERMINAL",0,0,"5\t",,terminal_output +9206,13060700,"TERMINAL",0,0,"6 Rhkn1665",,terminal_output +9207,13061738,"TERMINAL",0,0,"81",,terminal_output +9208,13062780,"TERMINAL",0,0,"92",,terminal_output +9209,13063827,"TERMINAL",0,0,"404",,terminal_output +9210,13064858,"TERMINAL",0,0,"25",,terminal_output +9211,13065897,"TERMINAL",0,0,"36",,terminal_output +9212,13066949,"TERMINAL",0,0,"47",,terminal_output +9213,13067990,"TERMINAL",0,0,"58",,terminal_output +9214,13069025,"TERMINAL",0,0,"69",,terminal_output +9215,13070067,"TERMINAL",0,0,"710",,terminal_output +9216,13071128,"TERMINAL",0,0,"81",,terminal_output +9217,13072182,"TERMINAL",0,0,"92",,terminal_output +9218,13073174,"TERMINAL",0,0,"503",,terminal_output +9219,13074225,"TERMINAL",0,0,"14",,terminal_output +9220,13075252,"TERMINAL",0,0,"25",,terminal_output +9221,13076305,"TERMINAL",0,0,"36",,terminal_output +9222,13077334,"TERMINAL",0,0,"47",,terminal_output +9223,13078449,"TERMINAL",0,0,"58",,terminal_output +9224,13079454,"TERMINAL",0,0,"69",,terminal_output +9225,13080465,"TERMINAL",0,0,"720",,terminal_output +9226,13081500,"TERMINAL",0,0,"81",,terminal_output +9227,13082625,"TERMINAL",0,0,"92",,terminal_output +9228,13083573,"TERMINAL",0,0,"5:003",,terminal_output +9229,13084679,"TERMINAL",0,0,"14",,terminal_output +9230,13085645,"TERMINAL",0,0,"25",,terminal_output +9231,13086710,"TERMINAL",0,0,"36",,terminal_output +9232,13087746,"TERMINAL",0,0,"47",,terminal_output +9233,13088770,"TERMINAL",0,0,"58",,terminal_output +9234,13089897,"TERMINAL",0,0,"630",,terminal_output +9235,13090844,"TERMINAL",0,0,"81",,terminal_output +9236,13091951,"TERMINAL",0,0,"92",,terminal_output +9237,13092970,"TERMINAL",0,0,"103",,terminal_output +9238,13093993,"TERMINAL",0,0,"14",,terminal_output +9239,13095119,"TERMINAL",0,0,"25",,terminal_output +9240,13096143,"TERMINAL",0,0,"36",,terminal_output +9241,13097167,"TERMINAL",0,0,"47",,terminal_output +9242,13098192,"TERMINAL",0,0,"58",,terminal_output +9243,13099223,"TERMINAL",0,0,"69",,terminal_output +9244,13100240,"TERMINAL",0,0,"740",,terminal_output +9245,13101256,"TERMINAL",0,0,"81",,terminal_output +9246,13102391,"TERMINAL",0,0,"92",,terminal_output 
+9247,13103338,"TERMINAL",0,0,"203",,terminal_output +9248,13104377,"TERMINAL",0,0,"14",,terminal_output +9249,13105464,"TERMINAL",0,0,"25",,terminal_output +9250,13106468,"TERMINAL",0,0,"36",,terminal_output +9251,13107510,"TERMINAL",0,0,"47",,terminal_output +9252,13108553,"TERMINAL",0,0,"58",,terminal_output +9253,13109594,"TERMINAL",0,0,"69",,terminal_output +9254,13110683,"TERMINAL",0,0,"750",,terminal_output +9255,13111699,"TERMINAL",0,0,"81",,terminal_output +9256,13112835,"TERMINAL",0,0,"92",,terminal_output +9257,13113857,"TERMINAL",0,0,"303",,terminal_output +9258,13114883,"TERMINAL",0,0,"15",,terminal_output +9259,13115907,"TERMINAL",0,0,"36",,terminal_output +9260,13116931,"TERMINAL",0,0,"47",,terminal_output +9261,13118051,"TERMINAL",0,0,"58",,terminal_output +9262,13119080,"TERMINAL",0,0,"69",,terminal_output +9263,13120104,"TERMINAL",0,0,"71:00",,terminal_output +9264,13121128,"TERMINAL",0,0,"81",,terminal_output +9265,13122153,"TERMINAL",0,0,"92",,terminal_output +9266,13123279,"TERMINAL",0,0,"403",,terminal_output +9267,13124214,"TERMINAL",0,0,"14",,terminal_output +9268,13125259,"TERMINAL",0,0,"25",,terminal_output +9269,13126303,"TERMINAL",0,0,"36",,terminal_output +9270,13127376,"TERMINAL",0,0,"47",,terminal_output +9271,13128402,"TERMINAL",0,0,"58",,terminal_output +9272,13129440,"TERMINAL",0,0,"69",,terminal_output +9273,13130484,"TERMINAL",0,0,"710",,terminal_output +9274,13131520,"TERMINAL",0,0,"81",,terminal_output +9275,13132597,"TERMINAL",0,0,"92",,terminal_output +9276,13133622,"TERMINAL",0,0,"503",,terminal_output +9277,13134762,"TERMINAL",0,0,"14",,terminal_output +9278,13135691,"TERMINAL",0,0,"25",,terminal_output +9279,13136735,"TERMINAL",0,0,"36",,terminal_output +9280,13137820,"TERMINAL",0,0,"47",,terminal_output +9281,13138846,"TERMINAL",0,0,"69",,terminal_output +9282,13139971,"TERMINAL",0,0,"720",,terminal_output +9283,13140994,"TERMINAL",0,0,"81",,terminal_output +9284,13142017,"TERMINAL",0,0,"92",,terminal_output +9285,13143044,"TERMINAL",0,0,"6:003",,terminal_output +9286,13144068,"TERMINAL",0,0,"14",,terminal_output +9287,13145192,"TERMINAL",0,0,"25",,terminal_output +9288,13146217,"TERMINAL",0,0,"36",,terminal_output +9289,13147248,"TERMINAL",0,0,"47",,terminal_output +9290,13148266,"TERMINAL",0,0,"58",,terminal_output +9291,13149290,"TERMINAL",0,0,"69",,terminal_output +9292,13150314,"TERMINAL",0,0,"730",,terminal_output +9293,13151350,"TERMINAL",0,0,"81",,terminal_output +9294,13152387,"TERMINAL",0,0,"92",,terminal_output +9295,13153432,"TERMINAL",0,0,"103",,terminal_output +9296,13154473,"TERMINAL",0,0,"14",,terminal_output +9297,13155514,"TERMINAL",0,0,"25",,terminal_output +9298,13156553,"TERMINAL",0,0,"36",,terminal_output +9299,13157701,"TERMINAL",0,0,"47",,terminal_output +9300,13158651,"TERMINAL",0,0,"58",,terminal_output +9301,13159734,"TERMINAL",0,0,"69",,terminal_output +9302,13160758,"TERMINAL",0,0,"740",,terminal_output +9303,13161771,"TERMINAL",0,0,"81",,terminal_output +9304,13162907,"TERMINAL",0,0,"93",,terminal_output +9305,13163854,"TERMINAL",0,0,"214",,terminal_output +9306,13164963,"TERMINAL",0,0,"25",,terminal_output +9307,13165927,"TERMINAL",0,0,"36",,terminal_output +9308,13166964,"TERMINAL",0,0,"47",,terminal_output +9309,13168008,"TERMINAL",0,0,"58",,terminal_output +9310,13169053,"TERMINAL",0,0,"69",,terminal_output +9311,13170100,"TERMINAL",0,0,"750",,terminal_output +9312,13171204,"TERMINAL",0,0,"81",,terminal_output +9313,13172188,"TERMINAL",0,0,"92",,terminal_output +9314,13173240,"TERMINAL",0,0,"303",,terminal_output 
+9315,13174277,"TERMINAL",0,0,"14",,terminal_output +9316,13175317,"TERMINAL",0,0,"25",,terminal_output +9317,13176360,"TERMINAL",0,0,"36",,terminal_output +9318,13177415,"TERMINAL",0,0,"47",,terminal_output +9319,13178446,"TERMINAL",0,0,"58",,terminal_output +9320,13179485,"TERMINAL",0,0,"69",,terminal_output +9321,13180530,"TERMINAL",0,0,"72:00",,terminal_output +9322,13181944,"TERMINAL",0,0,"82",,terminal_output +9323,13182966,"TERMINAL",0,0,"403",,terminal_output +9324,13184045,"TERMINAL",0,0,"14",,terminal_output +9325,13185045,"TERMINAL",0,0,"25",,terminal_output +9326,13186153,"TERMINAL",0,0,"36",,terminal_output +9327,13187179,"TERMINAL",0,0,"47",,terminal_output +9328,13188178,"TERMINAL",0,0,"58",,terminal_output +9329,13189225,"TERMINAL",0,0,"69",,terminal_output +9330,13190270,"TERMINAL",0,0,"710",,terminal_output +9331,13191315,"TERMINAL",0,0,"81",,terminal_output +9332,13192354,"TERMINAL",0,0,"92",,terminal_output +9333,13193409,"TERMINAL",0,0,"503",,terminal_output +9334,13194442,"TERMINAL",0,0,"14",,terminal_output +9335,13195492,"TERMINAL",0,0,"25",,terminal_output +9336,13196538,"TERMINAL",0,0,"36",,terminal_output +9337,13197583,"TERMINAL",0,0,"47",,terminal_output +9338,13198618,"TERMINAL",0,0,"58",,terminal_output +9339,13199670,"TERMINAL",0,0,"69",,terminal_output +9340,13200800,"TERMINAL",0,0,"720",,terminal_output +9341,13201750,"TERMINAL",0,0,"81",,terminal_output +9342,13202846,"TERMINAL",0,0,"93",,terminal_output +9343,13203859,"TERMINAL",0,0,"7:014",,terminal_output +9344,13204896,"TERMINAL",0,0,"25",,terminal_output +9345,13206023,"TERMINAL",0,0,"36",,terminal_output +9346,13207043,"TERMINAL",0,0,"47",,terminal_output +9347,13208018,"TERMINAL",0,0,"58",,terminal_output +9348,13209062,"TERMINAL",0,0,"69",,terminal_output +9349,13210087,"TERMINAL",0,0,"730",,terminal_output +9350,13211132,"TERMINAL",0,0,"81",,terminal_output +9351,13212205,"TERMINAL",0,0,"92",,terminal_output +9352,13213292,"TERMINAL",0,0,"103",,terminal_output +9353,13214313,"TERMINAL",0,0,"14",,terminal_output +9354,13215286,"TERMINAL",0,0,"25",,terminal_output +9355,13216332,"TERMINAL",0,0,"36",,terminal_output +9356,13217375,"TERMINAL",0,0,"47",,terminal_output +9357,13218412,"TERMINAL",0,0,"58",,terminal_output +9358,13219457,"TERMINAL",0,0,"69",,terminal_output +9359,13221171,"TERMINAL",0,0,"741",,terminal_output +9360,13222242,"TERMINAL",0,0,"92",,terminal_output +9361,13223325,"TERMINAL",0,0,"203",,terminal_output +9362,13224325,"TERMINAL",0,0,"14",,terminal_output +9363,13225424,"TERMINAL",0,0,"25",,terminal_output +9364,13226389,"TERMINAL",0,0,"36",,terminal_output +9365,13227436,"TERMINAL",0,0,"47",,terminal_output +9366,13228477,"TERMINAL",0,0,"58",,terminal_output +9367,13229550,"TERMINAL",0,0,"69",,terminal_output +9368,13230595,"TERMINAL",0,0,"750",,terminal_output +9369,13231640,"TERMINAL",0,0,"81",,terminal_output +9370,13232642,"TERMINAL",0,0,"92",,terminal_output +9371,13233689,"TERMINAL",0,0,"303",,terminal_output +9372,13234731,"TERMINAL",0,0,"14",,terminal_output +9373,13235811,"TERMINAL",0,0,"25",,terminal_output +9374,13236848,"TERMINAL",0,0,"37",,terminal_output +9375,13237898,"TERMINAL",0,0,"58",,terminal_output +9376,13238898,"TERMINAL",0,0,"69",,terminal_output +9377,13239940,"TERMINAL",0,0,"73:00",,terminal_output +9378,13240978,"TERMINAL",0,0,"81",,terminal_output +9379,13242025,"TERMINAL",0,0,"92",,terminal_output +9380,13243065,"TERMINAL",0,0,"403",,terminal_output +9381,13244108,"TERMINAL",0,0,"14",,terminal_output 
+9382,13245145,"TERMINAL",0,0,"25",,terminal_output +9383,13246190,"TERMINAL",0,0,"36",,terminal_output +9384,13247228,"TERMINAL",0,0,"47",,terminal_output +9385,13248310,"TERMINAL",0,0,"58",,terminal_output +9386,13249314,"TERMINAL",0,0,"69",,terminal_output +9387,13250359,"TERMINAL",0,0,"710",,terminal_output +9388,13251397,"TERMINAL",0,0,"81",,terminal_output +9389,13252442,"TERMINAL",0,0,"92",,terminal_output +9390,13253483,"TERMINAL",0,0,"503",,terminal_output +9391,13254532,"TERMINAL",0,0,"14",,terminal_output +9392,13255574,"TERMINAL",0,0,"25",,terminal_output +9393,13256713,"TERMINAL",0,0,"36",,terminal_output +9394,13257654,"TERMINAL",0,0,"47",,terminal_output +9395,13258700,"TERMINAL",0,0,"58",,terminal_output +9396,13259779,"TERMINAL",0,0,"69",,terminal_output +9397,13260804,"TERMINAL",0,0,"720",,terminal_output +9398,13261818,"TERMINAL",0,0,"82",,terminal_output +9399,13262956,"TERMINAL",0,0,"8:003",,terminal_output +9400,13263902,"TERMINAL",0,0,"14",,terminal_output +9401,13264935,"TERMINAL",0,0,"25",,terminal_output +9402,13266028,"TERMINAL",0,0,"36",,terminal_output +9403,13267014,"TERMINAL",0,0,"47",,terminal_output +9404,13268048,"TERMINAL",0,0,"58",,terminal_output +9405,13269088,"TERMINAL",0,0,"69",,terminal_output +9406,13270225,"TERMINAL",0,0,"730",,terminal_output +9407,13271249,"TERMINAL",0,0,"81",,terminal_output +9408,13272246,"TERMINAL",0,0,"92",,terminal_output +9409,13273282,"TERMINAL",0,0,"103",,terminal_output +9410,13274320,"TERMINAL",0,0,"14",,terminal_output +9411,13275355,"TERMINAL",0,0,"25",,terminal_output +9412,13276398,"TERMINAL",0,0,"36",,terminal_output +9413,13277446,"TERMINAL",0,0,"47",,terminal_output +9414,13278476,"TERMINAL",0,0,"58",,terminal_output +9415,13279519,"TERMINAL",0,0,"69",,terminal_output +9416,13280568,"TERMINAL",0,0,"740",,terminal_output +9417,13281603,"TERMINAL",0,0,"81",,terminal_output +9418,13282644,"TERMINAL",0,0,"92",,terminal_output +9419,13283684,"TERMINAL",0,0,"203",,terminal_output +9420,13284727,"TERMINAL",0,0,"14",,terminal_output +9421,13285790,"TERMINAL",0,0,"25",,terminal_output +9422,13286810,"TERMINAL",0,0,"37",,terminal_output +9423,13287847,"TERMINAL",0,0,"58",,terminal_output +9424,13288966,"TERMINAL",0,0,"69",,terminal_output +9425,13289930,"TERMINAL",0,0,"750",,terminal_output +9426,13291014,"TERMINAL",0,0,"81",,terminal_output +9427,13292001,"TERMINAL",0,0,"92",,terminal_output +9428,13293060,"TERMINAL",0,0,"303",,terminal_output +9429,13294077,"TERMINAL",0,0,"14",,terminal_output +9430,13295212,"TERMINAL",0,0,"25",,terminal_output +9431,13296236,"TERMINAL",0,0,"36",,terminal_output +9432,13297201,"TERMINAL",0,0,"47",,terminal_output +9433,13298284,"TERMINAL",0,0,"58",,terminal_output +9434,13299308,"TERMINAL",0,0,"69",,terminal_output +9435,13300333,"TERMINAL",0,0,"74:00",,terminal_output +9436,13301379,"TERMINAL",0,0,"81",,terminal_output +9437,13303209,"TERMINAL",0,0,"93",,terminal_output +9438,13304218,"TERMINAL",0,0,"414",,terminal_output +9439,13305208,"TERMINAL",0,0,"25",,terminal_output +9440,13306258,"TERMINAL",0,0,"36",,terminal_output +9441,13307296,"TERMINAL",0,0,"47",,terminal_output +9442,13308338,"TERMINAL",0,0,"58",,terminal_output +9443,13309424,"TERMINAL",0,0,"69",,terminal_output +9444,13310426,"TERMINAL",0,0,"710",,terminal_output +9445,13311479,"TERMINAL",0,0,"81",,terminal_output +9446,13312511,"TERMINAL",0,0,"92",,terminal_output +9447,13313564,"TERMINAL",0,0,"503",,terminal_output +9448,13314601,"TERMINAL",0,0,"14",,terminal_output +9449,13315648,"TERMINAL",0,0,"25",,terminal_output 
+9450,13316692,"TERMINAL",0,0,"36",,terminal_output +9451,13317734,"TERMINAL",0,0,"47",,terminal_output +9452,13318830,"TERMINAL",0,0,"59",,terminal_output +9453,13319833,"TERMINAL",0,0,"720",,terminal_output +9454,13320874,"TERMINAL",0,0,"81",,terminal_output +9455,13321925,"TERMINAL",0,0,"92",,terminal_output +9456,13322969,"TERMINAL",0,0,"9:003",,terminal_output +9457,13324016,"TERMINAL",0,0,"14",,terminal_output +9458,13325063,"TERMINAL",0,0,"25",,terminal_output +9459,13326156,"TERMINAL",0,0,"36",,terminal_output +9460,13327196,"TERMINAL",0,0,"47",,terminal_output +9461,13328201,"TERMINAL",0,0,"58",,terminal_output +9462,13329242,"TERMINAL",0,0,"69",,terminal_output +9463,13330290,"TERMINAL",0,0,"730",,terminal_output +9464,13331346,"TERMINAL",0,0,"81",,terminal_output +9465,13332397,"TERMINAL",0,0,"92",,terminal_output +9466,13333413,"TERMINAL",0,0,"103",,terminal_output +9467,13334461,"TERMINAL",0,0,"14",,terminal_output +9468,13335509,"TERMINAL",0,0,"25",,terminal_output +9469,13336553,"TERMINAL",0,0,"36",,terminal_output +9470,13337599,"TERMINAL",0,0,"47",,terminal_output +9471,13338676,"TERMINAL",0,0,"58",,terminal_output +9472,13339758,"TERMINAL",0,0,"69",,terminal_output +9473,13340781,"TERMINAL",0,0,"740",,terminal_output +9474,13341784,"TERMINAL",0,0,"81",,terminal_output +9475,13342884,"TERMINAL",0,0,"93",,terminal_output +9476,13343954,"TERMINAL",0,0,"214",,terminal_output +9477,13344980,"TERMINAL",0,0,"25",,terminal_output +9478,13345985,"TERMINAL",0,0,"36",,terminal_output +9479,13347004,"TERMINAL",0,0,"47",,terminal_output +9480,13348152,"TERMINAL",0,0,"58",,terminal_output +9481,13349164,"TERMINAL",0,0,"69",,terminal_output +9482,13350201,"TERMINAL",0,0,"750",,terminal_output +9483,13351231,"TERMINAL",0,0,"81",,terminal_output +9484,13352232,"TERMINAL",0,0,"92",,terminal_output +9485,13353275,"TERMINAL",0,0,"303",,terminal_output +9486,13354324,"TERMINAL",0,0,"14",,terminal_output +9487,13355372,"TERMINAL",0,0,"25",,terminal_output +9488,13356458,"TERMINAL",0,0,"36",,terminal_output +9489,13357458,"TERMINAL",0,0,"47",,terminal_output +9490,13358506,"TERMINAL",0,0,"58",,terminal_output +9491,13359549,"TERMINAL",0,0,"69",,terminal_output +9492,13360595,"TERMINAL",0,0,"75:00",,terminal_output +9493,13361696,"TERMINAL",0,0,"81",,terminal_output +9494,13362680,"TERMINAL",0,0,"92",,terminal_output +9495,13363819,"TERMINAL",0,0,"403",,terminal_output +9496,13364844,"TERMINAL",0,0,"14",,terminal_output +9497,13365866,"TERMINAL",0,0,"26",,terminal_output +9498,13366892,"TERMINAL",0,0,"47",,terminal_output +9499,13368017,"TERMINAL",0,0,"58",,terminal_output +9500,13368965,"TERMINAL",0,0,"69",,terminal_output +9501,13370065,"TERMINAL",0,0,"710",,terminal_output +9502,13371083,"TERMINAL",0,0,"81",,terminal_output +9503,13372113,"TERMINAL",0,0,"92",,terminal_output +9504,13373137,"TERMINAL",0,0,"503",,terminal_output +9505,13374265,"TERMINAL",0,0,"14",,terminal_output +9506,13375288,"TERMINAL",0,0,"25",,terminal_output +9507,13376243,"TERMINAL",0,0,"36",,terminal_output +9508,13377291,"TERMINAL",0,0,"47",,terminal_output +9509,13378360,"TERMINAL",0,0,"58",,terminal_output +9510,13379379,"TERMINAL",0,0,"69",,terminal_output +9511,13380428,"TERMINAL",0,0,"720",,terminal_output +9512,13381483,"TERMINAL",0,0,"81",,terminal_output +9513,13382559,"TERMINAL",0,0,"92",,terminal_output +9514,13383592,"TERMINAL",0,0,"40:003",,terminal_output +9515,13384635,"TERMINAL",0,0,"14",,terminal_output +9516,13385840,"TERMINAL",0,0,"25",,terminal_output 
+9517,13386779,"TERMINAL",0,0,"36",,terminal_output +9518,13387884,"TERMINAL",0,0,"58",,terminal_output +9519,13388909,"TERMINAL",0,0,"69",,terminal_output +9520,13389933,"TERMINAL",0,0,"730",,terminal_output +9521,13391058,"TERMINAL",0,0,"81",,terminal_output +9522,13392083,"TERMINAL",0,0,"92",,terminal_output +9523,13393125,"TERMINAL",0,0,"103",,terminal_output +9524,13394131,"TERMINAL",0,0,"14",,terminal_output +9525,13395272,"TERMINAL",0,0,"25",,terminal_output +9526,13396280,"TERMINAL",0,0,"36",,terminal_output +9527,13397244,"TERMINAL",0,0,"47",,terminal_output +9528,13398288,"TERMINAL",0,0,"58",,terminal_output +9529,13399353,"TERMINAL",0,0,"69",,terminal_output +9530,13400379,"TERMINAL",0,0,"740",,terminal_output +9531,13401426,"TERMINAL",0,0,"81",,terminal_output +9532,13402485,"TERMINAL",0,0,"92",,terminal_output +9533,13403551,"TERMINAL",0,0,"203",,terminal_output +9534,13404579,"TERMINAL",0,0,"14",,terminal_output +9535,13405611,"TERMINAL",0,0,"25",,terminal_output +9536,13406695,"TERMINAL",0,0,"36",,terminal_output +9537,13407700,"TERMINAL",0,0,"47",,terminal_output +9538,13408741,"TERMINAL",0,0,"58",,terminal_output +9539,13409787,"TERMINAL",0,0,"650",,terminal_output +9540,13410832,"TERMINAL",0,0,"81",,terminal_output +9541,13411894,"TERMINAL",0,0,"92",,terminal_output +9542,13412923,"TERMINAL",0,0,"303",,terminal_output +9543,13413973,"TERMINAL",0,0,"14",,terminal_output +9544,13415023,"TERMINAL",0,0,"25",,terminal_output +9545,13416062,"TERMINAL",0,0,"36",,terminal_output +9546,13417108,"TERMINAL",0,0,"47",,terminal_output +9547,13418154,"TERMINAL",0,0,"58",,terminal_output +9548,13419210,"TERMINAL",0,0,"69",,terminal_output +9549,13420241,"TERMINAL",0,0,"76:00",,terminal_output +9550,13421286,"TERMINAL",0,0,"81",,terminal_output +9551,13422389,"TERMINAL",0,0,"92",,terminal_output +9552,13424329,"TERMINAL",0,0,"404",,terminal_output +9553,13425363,"TERMINAL",0,0,"25",,terminal_output +9554,13426409,"TERMINAL",0,0,"36",,terminal_output +9555,13427458,"TERMINAL",0,0,"47",,terminal_output +9556,13428529,"TERMINAL",0,0,"58",,terminal_output +9557,13429546,"TERMINAL",0,0,"69",,terminal_output +9558,13430593,"TERMINAL",0,0,"710",,terminal_output +9559,13431650,"TERMINAL",0,0,"81",,terminal_output +9560,13432680,"TERMINAL",0,0,"92",,terminal_output +9561,13433729,"TERMINAL",0,0,"503",,terminal_output +9562,13434770,"TERMINAL",0,0,"14",,terminal_output +9563,13435815,"TERMINAL",0,0,"26",,terminal_output +9564,13436926,"TERMINAL",0,0,"47",,terminal_output +9565,13437916,"TERMINAL",0,0,"58",,terminal_output +9566,13438954,"TERMINAL",0,0,"69",,terminal_output +9567,13439993,"TERMINAL",0,0,"720",,terminal_output +9568,13441038,"TERMINAL",0,0,"81",,terminal_output +9569,13442082,"TERMINAL",0,0,"92",,terminal_output +9570,13443125,"TERMINAL",0,0,"1:003",,terminal_output +9571,13444205,"TERMINAL",0,0,"14",,terminal_output +9572,13445233,"TERMINAL",0,0,"25",,terminal_output +9573,13446354,"TERMINAL",0,0,"36",,terminal_output +9574,13447378,"TERMINAL",0,0,"47",,terminal_output +9575,13448360,"TERMINAL",0,0,"58",,terminal_output +9576,13449404,"TERMINAL",0,0,"69",,terminal_output +9577,13450455,"TERMINAL",0,0,"730",,terminal_output +9578,13451509,"TERMINAL",0,0,"81",,terminal_output +9579,13452602,"TERMINAL",0,0,"92",,terminal_output +9580,13453595,"TERMINAL",0,0,"103",,terminal_output +9581,13454626,"TERMINAL",0,0,"14",,terminal_output +9582,13455776,"TERMINAL",0,0,"25",,terminal_output +9583,13456715,"TERMINAL",0,0,"36",,terminal_output +9584,13457826,"TERMINAL",0,0,"47",,terminal_output 
+9585,13458849,"TERMINAL",0,0,"59",,terminal_output +9586,13459921,"TERMINAL",0,0,"740",,terminal_output +9587,13460997,"TERMINAL",0,0,"81",,terminal_output +9588,13462024,"TERMINAL",0,0,"92",,terminal_output +9589,13463047,"TERMINAL",0,0,"203",,terminal_output +9590,13464073,"TERMINAL",0,0,"14",,terminal_output +9591,13465096,"TERMINAL",0,0,"25",,terminal_output +9592,13466222,"TERMINAL",0,0,"36",,terminal_output +9593,13467244,"TERMINAL",0,0,"47",,terminal_output +9594,13468268,"TERMINAL",0,0,"58",,terminal_output +9595,13469264,"TERMINAL",0,0,"69",,terminal_output +9596,13470316,"TERMINAL",0,0,"750",,terminal_output +9597,13471356,"TERMINAL",0,0,"81",,terminal_output +9598,13472412,"TERMINAL",0,0,"92",,terminal_output +9599,13473449,"TERMINAL",0,0,"303",,terminal_output +9600,13474507,"TERMINAL",0,0,"14",,terminal_output +9601,13475539,"TERMINAL",0,0,"25",,terminal_output +9602,13476579,"TERMINAL",0,0,"36",,terminal_output +9603,13477622,"TERMINAL",0,0,"47",,terminal_output +9604,13478672,"TERMINAL",0,0,"58",,terminal_output +9605,13479719,"TERMINAL",0,0,"69",,terminal_output +9606,13480865,"TERMINAL",0,0,"77:00",,terminal_output +9607,13481809,"TERMINAL",0,0,"82",,terminal_output +9608,13482912,"TERMINAL",0,0,"403",,terminal_output +9609,13483896,"TERMINAL",0,0,"14",,terminal_output +9610,13484964,"TERMINAL",0,0,"25",,terminal_output +9611,13486087,"TERMINAL",0,0,"36",,terminal_output +9612,13487112,"TERMINAL",0,0,"47",,terminal_output +9613,13488147,"TERMINAL",0,0,"58",,terminal_output +9614,13489157,"TERMINAL",0,0,"69",,terminal_output +9615,13490184,"TERMINAL",0,0,"710",,terminal_output +9616,13491313,"TERMINAL",0,0,"81",,terminal_output +9617,13492270,"TERMINAL",0,0,"92",,terminal_output +9618,13493357,"TERMINAL",0,0,"503",,terminal_output +9619,13494380,"TERMINAL",0,0,"14",,terminal_output +9620,13495404,"TERMINAL",0,0,"25",,terminal_output +9621,13496445,"TERMINAL",0,0,"36",,terminal_output +9622,13497486,"TERMINAL",0,0,"47",,terminal_output +9623,13498545,"TERMINAL",0,0,"58",,terminal_output +9624,13499570,"TERMINAL",0,0,"69",,terminal_output +9625,13500615,"TERMINAL",0,0,"720",,terminal_output +9626,13501668,"TERMINAL",0,0,"81",,terminal_output +9627,13502707,"TERMINAL",0,0,"92",,terminal_output +9628,13503804,"TERMINAL",0,0,"2:003",,terminal_output +9629,13504826,"TERMINAL",0,0,"15",,terminal_output +9630,13505954,"TERMINAL",0,0,"36",,terminal_output +9631,13506978,"TERMINAL",0,0,"47",,terminal_output +9632,13508000,"TERMINAL",0,0,"58",,terminal_output +9633,13508976,"TERMINAL",0,0,"69",,terminal_output +9634,13510026,"TERMINAL",0,0,"730",,terminal_output +9635,13511074,"TERMINAL",0,0,"81",,terminal_output +9636,13512198,"TERMINAL",0,0,"92",,terminal_output +9637,13513223,"TERMINAL",0,0,"103",,terminal_output +9638,13514249,"TERMINAL",0,0,"14",,terminal_output +9639,13515295,"TERMINAL",0,0,"25",,terminal_output +9640,13516398,"TERMINAL",0,0,"36",,terminal_output +9641,13517352,"TERMINAL",0,0,"47",,terminal_output +9642,13518450,"TERMINAL",0,0,"58",,terminal_output +9643,13519440,"TERMINAL",0,0,"69",,terminal_output +9644,13520484,"TERMINAL",0,0,"740",,terminal_output +9645,13521534,"TERMINAL",0,0,"81",,terminal_output +9646,13522571,"TERMINAL",0,0,"92",,terminal_output +9647,13523614,"TERMINAL",0,0,"203",,terminal_output +9648,13524660,"TERMINAL",0,0,"14",,terminal_output +9649,13525717,"TERMINAL",0,0,"25",,terminal_output +9650,13526752,"TERMINAL",0,0,"36",,terminal_output +9651,13527867,"TERMINAL",0,0,"48",,terminal_output +9652,13528890,"TERMINAL",0,0,"69",,terminal_output 
+9653,13529887,"TERMINAL",0,0,"750",,terminal_output +9654,13530939,"TERMINAL",0,0,"81",,terminal_output +9655,13531974,"TERMINAL",0,0,"92",,terminal_output +9656,13533090,"TERMINAL",0,0,"303",,terminal_output +9657,13534063,"TERMINAL",0,0,"14",,terminal_output +9658,13535137,"TERMINAL",0,0,"25",,terminal_output +9659,13536264,"TERMINAL",0,0,"36",,terminal_output +9660,13537233,"TERMINAL",0,0,"47",,terminal_output +9661,13538313,"TERMINAL",0,0,"58",,terminal_output +9662,13539339,"TERMINAL",0,0,"69",,terminal_output +9663,13540361,"TERMINAL",0,0,"78:00",,terminal_output +9664,13541393,"TERMINAL",0,0,"81",,terminal_output +9665,13542442,"TERMINAL",0,0,"92",,terminal_output +9666,13543540,"TERMINAL",0,0,"403",,terminal_output +9667,13546307,"TERMINAL",0,0,"16",,terminal_output +9668,13547315,"TERMINAL",0,0,"47",,terminal_output +9669,13548350,"TERMINAL",0,0,"58",,terminal_output +9670,13549395,"TERMINAL",0,0,"69",,terminal_output +9671,13550443,"TERMINAL",0,0,"710",,terminal_output +9672,13551486,"TERMINAL",0,0,"81",,terminal_output +9673,13552546,"TERMINAL",0,0,"92",,terminal_output +9674,13553580,"TERMINAL",0,0,"503",,terminal_output +9675,13554731,"TERMINAL",0,0,"14",,terminal_output +9676,18031894,"",0,0,"Switched from branch 'input_pipeline/add-npy2array_record' to 'validation-loss'",,git_branch_checkout +9677,18231922,"",0,0,"Switched from branch 'validation-loss' to 'coinrun-gt-actions'",,git_branch_checkout +9678,18251884,"",0,0,"Switched from branch 'coinrun-gt-actions' to 'validation-loss'",,git_branch_checkout +9679,21172097,"",0,0,"Switched from branch 'validation-loss' to 'coinrun-gt-actions'",,git_branch_checkout +9680,21992171,"",0,0,"Switched from branch 'coinrun-gt-actions' to 'validation-loss'",,git_branch_checkout +9681,22137171,"",0,0,"Switched from branch 'validation-loss' to 'coinrun-gt-actions'",,git_branch_checkout +9682,23202252,"",0,0,"Switched from branch 'coinrun-gt-actions' to 'validation-loss'",,git_branch_checkout diff --git a/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-f124d61c-9077-4922-ba81-6a4a84e52adc1758789659004-2025_09_25-10.41.29.766/source.csv b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-f124d61c-9077-4922-ba81-6a4a84e52adc1758789659004-2025_09_25-10.41.29.766/source.csv new file mode 100644 index 0000000000000000000000000000000000000000..4a82c6bd19999e1d16a1bf8070d09a260538dd6e --- /dev/null +++ b/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-f124d61c-9077-4922-ba81-6a4a84e52adc1758789659004-2025_09_25-10.41.29.766/source.csv @@ -0,0 +1,726 @@ +Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type +2,861,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"10:41:29 AM [info] Activating crowd-code\n10:41:29 AM [info] Recording started\n10:41:29 AM [info] Initializing git provider using file system watchers...\n",Log,tab +3,868,"TERMINAL",0,0,"idling",,terminal_command +4,984,"extension-output-pdoom-org.crowd-code-#1-crowd-code",153,0,"10:41:30 AM [info] Git repository found\n10:41:30 AM [info] Git provider initialized successfully\n10:41:30 AM [info] Initial git state: [object Object]\n",Log,content +5,988,"TERMINAL",0,0,"]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1991.localdomain: Thu Sep 25 10:41:30 2025Partition dev_cpuonly: 11 nodes idle\rPartition cpuonly: 230 nodes idle\rPartition dev_accelerated:\t 1 nodes idle\rPartition accelerated: 34 nodes idle\rPartition dev_accelerated-h100 :\t 0 nodes idle\rPartition accelerated-h100:\t 0 nodes 
idle\rPartition large:\t 5 nodes idle\rPartition accelerated-h200:\t 2 nodes idle",,terminal_output +6,1622,"TERMINAL",0,0,"1",,terminal_output +7,3322,"TERMINAL",0,0,"2",,terminal_output +8,6424,"TERMINAL",0,0,"345",,terminal_output +9,6805,"TERMINAL",0,0,"6",,terminal_output +10,8683,"TERMINAL",0,0,"7",,terminal_output +11,8853,"TERMINAL",0,0,"8",,terminal_output +12,9507,"TERMINAL",0,0,"bash",,terminal_focus +13,10058,"TERMINAL",0,0,"9",,terminal_output +14,10098,"TERMINAL",0,0,"watch",,terminal_focus +15,10923,"TERMINAL",0,0,"40",,terminal_output +16,11229,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output +17,219473,"utils/visualizer.py",0,0,"import matplotlib.pyplot as plt\nimport numpy as np\nimport jax\n\ndef visualize_and_save_batch(batch, filename=""batch_visualization.png"", max_samples=8):\n """"""\n Visualizes a batch of videos (or images) and saves as a PNG.\n Args:\n batch: jax/numpy array of shape (B, T, H, W, C) or (B, H, W, C)\n filename: output filename\n max_samples: maximum number of batch elements to visualize\n """"""\n\n arr = np.asarray(batch)\n if arr.ndim == 5:\n # (B, T, H, W, C): show first frame of each sample\n arr = arr[:, 0] # (B, H, W, C)\n arr = arr[:max_samples]\n B = arr.shape[0]\n fig, axes = plt.subplots(1, B, figsize=(B * 2.5, 2.5))\n if B == 1:\n axes = [axes]\n for i in range(B):\n axes[i].imshow(arr[i].clip(0, 1))\n axes[i].axis(""off"")\n plt.tight_layout()\n plt.savefig(filename)\n plt.close(fig)",python,tab +18,222230,"utils/visualizer.py",870,0,"",python,selection_mouse +19,222236,"utils/visualizer.py",869,0,"",python,selection_command +20,245564,"utils/visualizer.py",870,0,"",python,selection_mouse +21,245571,"utils/visualizer.py",869,0,"",python,selection_command +22,250963,"utils/visualizer.py",870,0,"",python,selection_mouse +23,250966,"utils/visualizer.py",869,0,"",python,selection_command +24,251088,"utils/visualizer.py",869,1,")",python,selection_mouse +25,251095,"utils/visualizer.py",870,0,"",python,selection_command +26,251221,"utils/visualizer.py",449,421,"f arr.ndim == 5:\n # (B, T, H, W, C): show first frame of each sample\n arr = arr[:, 0] # (B, H, W, C)\n arr = arr[:max_samples]\n B = arr.shape[0]\n fig, axes = plt.subplots(1, B, figsize=(B * 2.5, 2.5))\n if B == 1:\n axes = [axes]\n for i in range(B):\n axes[i].imshow(arr[i].clip(0, 1))\n axes[i].axis(""off"")\n plt.tight_layout()\n plt.savefig(filename)\n plt.close(fig)",python,selection_mouse +27,251222,"utils/visualizer.py",63,807,"def visualize_and_save_batch(batch, filename=""batch_visualization.png"", max_samples=8):\n """"""\n Visualizes a batch of videos (or images) and saves as a PNG.\n Args:\n batch: jax/numpy array of shape (B, T, H, W, C) or (B, H, W, C)\n filename: output filename\n max_samples: maximum number of batch elements to visualize\n """"""\n\n arr = np.asarray(batch)\n if arr.ndim == 5:\n # (B, T, H, W, C): show first frame of each sample\n arr = arr[:, 0] # (B, H, W, C)\n arr = arr[:max_samples]\n B = arr.shape[0]\n fig, axes = plt.subplots(1, B, figsize=(B * 2.5, 2.5))\n if B == 1:\n axes = [axes]\n for i in range(B):\n axes[i].imshow(arr[i].clip(0, 1))\n axes[i].axis(""off"")\n plt.tight_layout()\n plt.savefig(filename)\n plt.close(fig)",python,selection_mouse +28,251223,"utils/visualizer.py",0,870,"import matplotlib.pyplot as plt\nimport numpy as np\nimport jax\n\ndef visualize_and_save_batch(batch, filename=""batch_visualization.png"", max_samples=8):\n """"""\n Visualizes a batch of videos (or images) and saves 
as a PNG.\n Args:\n batch: jax/numpy array of shape (B, T, H, W, C) or (B, H, W, C)\n filename: output filename\n max_samples: maximum number of batch elements to visualize\n """"""\n\n arr = np.asarray(batch)\n if arr.ndim == 5:\n # (B, T, H, W, C): show first frame of each sample\n arr = arr[:, 0] # (B, H, W, C)\n arr = arr[:max_samples]\n B = arr.shape[0]\n fig, axes = plt.subplots(1, B, figsize=(B * 2.5, 2.5))\n if B == 1:\n axes = [axes]\n for i in range(B):\n axes[i].imshow(arr[i].clip(0, 1))\n axes[i].axis(""off"")\n plt.tight_layout()\n plt.savefig(filename)\n plt.close(fig)",python,selection_mouse +29,286181,"slurm/jobs/mihir/horeka/breakout/noise_schedule_runs/train_dyn_single_gpu.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/breakout/dyn/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/breakout/dyn/%x_%j.log\n#SBATCH --job-name=train_dyn_default_breakout_longer\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n# source .venv/bin/activate\n\narray_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_breakout/breakout_episodes_perfect/train\narray_records_dir_val=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_breakout/breakout_episodes_perfect/val\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# lam_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/lam/interactive/3512576\n# tokenizer_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/tokenizer/interactive/3512502\n\nlam_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/lam/interactive/3518963\ntokenizer_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/tokenizer/interactive/3518963\n\nenv | grep SLURM\n\nexport PYTHONUNBUFFERED=1\n\nsrun python jasmine/train_dynamics.py \\n --save_ckpt \\n --image_height=10 \\n --image_width=10 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=120 \\n --init_lr=0 \\n --max_lr=3e-5 \\n --log_image_interval=250 \\n --log_checkpoint_interval=250 \\n --dyna_type=maskgit \\n --log \\n --name=breakout-dyn-noise-lvl-default-$slurm_job_id \\n --tags dyn breakout noise-lvl default \\n --entity instant-uv \\n --project jafar \\n --patch_size 4 \\n --lam_patch_size 4 \\n --warmup_steps 100 \\n --wsd_decay_steps 1000 \\n --num_steps 5000 \\n --data_dir $array_records_dir_train \\n --val_data_dir $array_records_dir_val \\n --tokenizer_checkpoint $tokenizer_checkpoint \\n --lam_checkpoint $lam_checkpoint \\n --val_interval 500 \\n --eval_full_frame \\n",shellscript,tab +30,290489,"TERMINAL",0,0,"bash",,terminal_focus +31,291110,"TERMINAL",0,0,"bash",,terminal_focus +32,292578,"TERMINAL",0,0,"queue",,terminal_command +33,292634,"TERMINAL",0,0,"]633;C",,terminal_output +34,292684,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1991.localdomain: Thu Sep 25 10:46:22 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)",,terminal_output 
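The utils/visualizer.py source captured in rows 17-28 above is self-contained enough to exercise directly. A minimal usage sketch, assuming the repository root is on PYTHONPATH; the random array is a hypothetical stand-in for a real dataloader batch and is not part of the recording:

import numpy as np
from utils.visualizer import visualize_and_save_batch

# A fake (B, T, H, W, C) video batch with values in [0, 1]. The helper plots
# the first frame of each of the first max_samples videos in one row of
# subplots and writes the grid to a PNG.
batch = np.random.rand(4, 16, 90, 160, 3)
visualize_and_save_batch(batch, filename="batch_visualization.png", max_samples=8)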
+35,293283,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output +36,304392,"TERMINAL",0,0,"salloc --time=03:00:00 --partition=accelerated --nodes=1 --gres=gpu:1 --cpus-per-task=8",,terminal_command +37,304444,"TERMINAL",0,0,"]633;Csalloc: Pending job allocation 3520226\r\nsalloc: job 3520226 queued and waiting for resources\r\n",,terminal_output +38,384883,"TERMINAL",0,0,"salloc: job 3520226 has been allocated resources\r\nsalloc: Granted job allocation 3520226\r\n",,terminal_output +39,384994,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output +40,412956,"TERMINAL",0,0,"salloc: Prolog hung on node hkn0730\r\n",,terminal_output +41,417667,"TERMINAL",0,0,"salloc: Nodes hkn0730 are ready for job\r\n",,terminal_output +42,418530,"TERMINAL",0,0,"]0;tum_cte0515@hkn0730:~/Projects/jasmine[?2004h[tum_cte0515@hkn0730 jasmine]$ ",,terminal_output +43,1109716,"TERMINAL",0,0,"s",,terminal_output +44,1109904,"TERMINAL",0,0,"o",,terminal_output +45,1110036,"TERMINAL",0,0,"u",,terminal_output +46,1110088,"TERMINAL",0,0,"r",,terminal_output +47,1110367,"TERMINAL",0,0,"c",,terminal_output +48,1110418,"TERMINAL",0,0,"e",,terminal_output +49,1110497,"TERMINAL",0,0," ",,terminal_output +50,1110618,"TERMINAL",0,0,"v.",,terminal_output +51,1110732,"TERMINAL",0,0,"",,terminal_output +52,1110990,"TERMINAL",0,0,"b",,terminal_output +53,1111395,"TERMINAL",0,0,"",,terminal_output +54,1111478,"TERMINAL",0,0,"",,terminal_output +55,1111548,"TERMINAL",0,0,"e",,terminal_output +56,1111764,"TERMINAL",0,0,"",,terminal_output +57,1111982,"TERMINAL",0,0,"b",,terminal_output +58,1112379,"TERMINAL",0,0,"",,terminal_output +59,1112600,"TERMINAL",0,0,"n",,terminal_output +60,1112725,"TERMINAL",0,0,"",,terminal_output +61,1113072,"TERMINAL",0,0,"",,terminal_output +62,1113246,"TERMINAL",0,0,"",,terminal_output +63,1113356,"TERMINAL",0,0,"",,terminal_output +64,1113565,"TERMINAL",0,0,".",,terminal_output +65,1114240,"TERMINAL",0,0,"v",,terminal_output +66,1114295,"TERMINAL",0,0,"e",,terminal_output +67,1114503,"TERMINAL",0,0,"nv/",,terminal_output +68,1114860,"TERMINAL",0,0,"b",,terminal_output +69,1114994,"TERMINAL",0,0,"a",,terminal_output +70,1115063,"TERMINAL",0,0,"c",,terminal_output +71,1115292,"TERMINAL",0,0,"",,terminal_output +72,1115555,"TERMINAL",0,0,"",,terminal_output +73,1115746,"TERMINAL",0,0,"",,terminal_output +74,1115987,"TERMINAL",0,0,"j",,terminal_output +75,1116087,"TERMINAL",0,0,"",,terminal_output +76,1116469,"TERMINAL",0,0,"",,terminal_output +77,1116572,"TERMINAL",0,0,"i",,terminal_output +78,1116701,"TERMINAL",0,0,"n/",,terminal_output +79,1117110,"TERMINAL",0,0,"ac",,terminal_output +80,1117278,"TERMINAL",0,0,"tivate",,terminal_output +81,1117670,"TERMINAL",0,0,"\r\n[?2004l\r]0;tum_cte0515@hkn0730:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0730 jasmine]$ ",,terminal_output +82,1236625,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=01:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/sampling/maskgit/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/sampling/maskgit/%x_%j.log\n#SBATCH --job-name=coinrun_sample_maskgit\n\n# Unload modules that may interfere\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n\n# 
Activate virtual environment\n# source .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_breakout/breakout_episodes_perfect/val\n\nCHECKPOINT_PATH=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn/interactive/3519264\n\n\necho ""Sampling from checkpoint: $CHECKPOINT_PATH""\n\nsrun python jasmine/sample.py \\n --checkpoint ""$CHECKPOINT_PATH"" \\n --data_dir=$array_records_dir \\n --seq_len=16 \\n --batch_size=12 \\n --start_frame=4 \\n --image_height=10 \\n --image_width=10 \\n --dyna_type=maskgit \\n --lam_patch_size=4 \",shellscript,tab +83,1239278,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1190,0,"",shellscript,selection_mouse +84,1239306,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1189,0,"",shellscript,selection_command +85,1241484,"TERMINAL",0,0,"bash",,terminal_focus +86,1245631,"TERMINAL",0,0,"git branch",,terminal_command +87,1245696,"TERMINAL",0,0,"]633;C[?1h=\r action-mapper\r\n* add-noise-to-combat-exposure-bias\r\n add-wandb-name-and-tags\r\n before-nnx\r\n causal-mem-reduce\r\n causal-spatiotemporal-kv-cache\r\n causal-st-transformer\r\n causal-transformer-dynamics-model\r\n causal-transformer-nnx-no-kv-cache\r\n coinrun-gt-actions\r\n:",,terminal_output +88,1248964,"TERMINAL",0,0,"\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output +89,1254076,"TERMINAL",0,0,"git diff",,terminal_command +90,1256345,"TERMINAL",0,0,"git merge main",,terminal_command +91,1256394,"TERMINAL",0,0,"]633;C",,terminal_output +92,1256475,"TERMINAL",0,0,"Already up to date.\r\n]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output +93,1267984,"TERMINAL",0,0,"git checkout main",,terminal_command +94,1268030,"TERMINAL",0,0,"]633;C",,terminal_output +95,1268214,"TERMINAL",0,0,"Switched to branch 'main'\r\nYour branch is up to date with 'origin/main'.\r\n]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output +96,1269174,"TERMINAL",0,0,"git pull",,terminal_command +97,1269223,"TERMINAL",0,0,"]633;C",,terminal_output +98,1270510,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",0,0,"Switched from branch 'add-noise-to-combat-exposure-bias' to 'main'",shellscript,git_branch_checkout +99,1271322,"TERMINAL",0,0,"remote: Enumerating objects: 29, done.\r\nremote: Counting objects: 3% (1/29)\rremote: Counting objects: 6% (2/29)\rremote: Counting objects: 10% (3/29)\rremote: Counting objects: 13% (4/29)\rremote: Counting objects: 17% (5/29)\rremote: Counting objects: 20% (6/29)\rremote: Counting objects: 24% (7/29)\rremote: Counting objects: 27% (8/29)\rremote: Counting objects: 31% (9/29)\rremote: Counting objects: 34% (10/29)\rremote: Counting objects: 37% (11/29)\rremote: Counting objects: 41% (12/29)\rremote: Counting objects: 44% (13/29)\rremote: Counting objects: 48% (14/29)\rremote: Counting objects: 51% (15/29)\rremote: Counting objects: 55% (16/29)\rremote: Counting objects: 58% (17/29)\rremote: Counting objects: 62% (18/29)\rremote: Counting objects: 65% (19/29)\rremote: Counting objects: 68% (20/29)\rremote: Counting objects: 72% (21/29)\rremote: Counting objects: 75% (22/29)\rremote: Counting objects: 79% (23/29)\rremote: Counting objects: 82% (24/29)\rremote: Counting objects: 86% (25/29)\rremote: Counting objects: 89% (26/29)\rremote: Counting objects: 93% (27/29)\rremote: Counting objects: 96% (28/29)\rremote: Counting objects: 100% (29/29)\rremote: Counting objects: 100% (29/29), done.\r\nremote: Compressing objects: 16% (1/6)\rremote: Compressing objects: 33% 
(2/6)\rremote: Compressing objects: 50% (3/6)\rremote: Compressing objects: 66% (4/6)\rremote: Compressing objects: 83% (5/6)\rremote: Compressing objects: 100% (6/6)\rremote: Compressing objects: 100% (6/6), done.\r\nremote: Total 17 (delta 11), reused 16 (delta 11), pack-reused 0 (from 0)\r\nUnpacking objects: 5% (1/17)\r",,terminal_output +100,1271430,"TERMINAL",0,0,"Unpacking objects: 11% (2/17)\r",,terminal_output +101,1271613,"TERMINAL",0,0,"Unpacking objects: 17% (3/17)\rUnpacking objects: 23% (4/17)\rUnpacking objects: 29% (5/17)\rUnpacking objects: 35% (6/17)\rUnpacking objects: 41% (7/17)\rUnpacking objects: 47% (8/17)\rUnpacking objects: 52% (9/17)\rUnpacking objects: 58% (10/17)\r",,terminal_output +102,1271666,"TERMINAL",0,0,"Unpacking objects: 64% (11/17)\rUnpacking objects: 70% (12/17)\rUnpacking objects: 76% (13/17)\rUnpacking objects: 82% (14/17)\rUnpacking objects: 88% (15/17)\rUnpacking objects: 94% (16/17)\rUnpacking objects: 100% (17/17)\rUnpacking objects: 100% (17/17), 2.64 KiB | 6.00 KiB/s, done.\r\n",,terminal_output +103,1272039,"TERMINAL",0,0,"From github.com:p-doom/jasmine\r\n 0a37bdd..8993b1d main -> origin/main\r\n 896c37a..b59335a atari-rainbow-agent-capture -> origin/atari-rainbow-agent-capture\r\n * [new branch] move-genie-to-jasmine -> origin/move-genie-to-jasmine\r\nUpdating 0a37bdd..8993b1d\r\nFast-forward\r\n",,terminal_output +104,1272130,"TERMINAL",0,0," genie.py => jasmine/genie.py | 0\r\n 1 file changed, 0 insertions(+), 0 deletions(-)\r\n rename genie.py => jasmine/genie.py (100%)\r\n]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output +105,1274306,"TERMINAL",0,0,"git checkout add-noise-to-combat-exposure-bias",,terminal_command +106,1274341,"TERMINAL",0,0,"]633;CSwitched to branch 'add-noise-to-combat-exposure-bias'\r\nYour branch is up to date with 'origin/add-noise-to-combat-exposure-bias'.\r\n]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output +107,1275510,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",0,0,"Switched from branch 'main' to 'add-noise-to-combat-exposure-bias'",shellscript,git_branch_checkout +108,1276004,"TERMINAL",0,0,"git merge main",,terminal_command +109,1276055,"TERMINAL",0,0,"]633;C",,terminal_output +110,1276127,"TERMINAL",0,0,"hint: Waiting for your editor to close the file... 
",,terminal_output +111,1276219,"TERMINAL",0,0,"[?1049h[>4;2m[?1h=[?2004h[?1004h[?12h[?12l[?25l""~/Projects/jasmine/.git/MERGE_MSG"" 6L, 285B▽ Pzz\[0%m [>c]10;?]11;?Merge branch 'main' into add-noise-to-combat-exposure-bias\r\n# Please enter a commit message to explain why this merge is necessary,# especially if it merges an updated upstream into a topic branch.#\r\n# Lines starting with '#' will be ignored, and an empty message aborts\r\n# the commit.\r\n~ ~ ~ ~ 1,1All[?25h",,terminal_output +112,1276310,"TERMINAL",0,0,"P+q436f\P+q6b75\P+q6b64\P+q6b72\P+q6b6c\P+q2332\P+q2334\P+q2569\P+q2a37\P+q6b31\[?12$p[?25l/3333/3333 [?25h[?25l/f6f6/e3e3 [?25h",,terminal_output +113,1277272,"TERMINAL",0,0,"[?25l::[?25h",,terminal_output +114,1277389,"TERMINAL",0,0,"w",,terminal_output +115,1277500,"TERMINAL",0,0,"q",,terminal_output +116,1277826,"TERMINAL",0,0,"\r[?25l[?2004l[>4;m"".git/MERGE_MSG"" 6L, 285B written\r\r\r\n[?1004l[?2004l[?1l>[?25h[>4;m[?1049l\rMerge made by the 'ort' strategy.\r\n",,terminal_output +117,1277838,"TERMINAL",0,0," genie.py => jasmine/genie.py | 0\r\n 1 file changed, 0 insertions(+), 0 deletions(-)\r\n rename genie.py => jasmine/genie.py (100%)\r\n]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output +118,1283778,"TERMINAL",0,0,"srun",,terminal_focus +119,1284930,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=01:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/sampling/maskgit/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/sampling/maskgit/%x_%j.log\n#SBATCH --job-name=coinrun_sample_maskgit\n\n# Unload modules that may interfere\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n\n# Activate virtual environment\n# source .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_breakout/breakout_episodes_perfect/val\n\nCHECKPOINT_PATH=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn/interactive/3519264\n\n\necho ""Sampling from checkpoint: $CHECKPOINT_PATH""\n\nsrun python jasmine/sample.py \\n --checkpoint ""$CHECKPOINT_PATH"" \\n --data_dir=$array_records_dir \\n --seq_len=16 \\n --batch_size=12 \\n --start_frame=4 \\n --image_height=10 \\n --image_width=10 \\n --dyna_type=maskgit \\n --lam_patch_size=4 \",shellscript,tab +120,1284931,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1165,0,"",shellscript,selection_mouse +121,1285074,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1164,0,"",shellscript,selection_command +122,1285458,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1190,0,"",shellscript,selection_mouse +123,1285470,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1189,0,"",shellscript,selection_command +124,1289627,"jasmine/sample.py",0,0,"from dataclasses import dataclass\nimport time\nimport os\nimport optax\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nimport orbax.checkpoint as ocp\nfrom PIL import Image, ImageDraw\nimport tyro\nfrom flax import nnx\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int 
= 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n print_action_indices: bool = True\n output_dir: str = ""gifs/""\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 1\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n use_gt_actions: bool = False\n # Dynamics checkpoint\n dyna_type: str = ""maskgit""\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\n\nif __name__ == ""__main__"":\n """"""\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n S: sequence length\n H: height\n W: width\n E: B * (S - 1)\n """"""\n jax.distributed.initialize()\n\n rng = jax.random.key(args.seed)\n\n # --- Load Genie checkpoint ---\n rngs = nnx.Rngs(rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_actions=args.num_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n use_gt_actions=args.use_gt_actions,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n # FIXME (f.srambical): implement spatiotemporal KV caching and set decode=True\n decode=False,\n rngs=rngs,\n )\n\n # Need to delete lam decoder for checkpoint loading\n if not args.use_gt_actions:\n assert genie.lam is not None\n del genie.lam.decoder\n\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n checkpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n dummy_tx = optax.adamw(\n learning_rate=optax.linear_schedule(0.0001, 0.0001, 10000),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n dummy_optimizer = nnx.ModelAndOptimizer(genie, dummy_tx)\n\n abstract_optimizer = nnx.eval_shape(lambda: dummy_optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n 
model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(dummy_optimizer, restored_optimizer_state)\n\n # --- Define sampling function ---\n def _sampling_fn(model: Genie, batch: dict) -> jax.Array:\n """"""Runs Genie.sample with pre-defined generation hyper-parameters.""""""\n assert args.dyna_type in [\n ""maskgit"",\n ""causal"",\n ], f""Invalid dynamics type: {args.dyna_type}""\n frames, _ = model.sample(\n batch,\n args.seq_len,\n args.temperature,\n args.sample_argmax,\n args.maskgit_steps,\n )\n return frames\n\n # --- Define autoregressive sampling loop ---\n def _autoreg_sample(genie, rng, batch):\n batch[""videos""] = batch[""videos""][:, : args.start_frame]\n batch[""rng""] = rng\n generated_vid_BSHWC = _sampling_fn(genie, batch)\n return generated_vid_BSHWC\n\n # --- Get video + latent actions ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n # We don't use workers in order to avoid grain shutdown issues (https://github.com/google/grain/issues/398)\n num_workers=0,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n dataloader = iter(dataloader)\n batch = next(dataloader)\n gt_video = jnp.asarray(batch[""videos""], dtype=jnp.float32) / 255.0\n batch[""videos""] = gt_video.astype(args.dtype)\n # Get latent actions for all videos in the batch\n action_batch_E = None\n if not args.use_gt_actions:\n action_batch_E = genie.vq_encode(batch, training=False)\n batch[""latent_actions""] = action_batch_E\n\n # --- Sample + evaluate video ---\n recon_video_BSHWC = _autoreg_sample(genie, rng, batch)\n recon_video_BSHWC = recon_video_BSHWC.astype(jnp.float32)\n\n gt = gt_video.clip(0, 1)[:, args.start_frame :]\n recon = recon_video_BSHWC.clip(0, 1)[:, args.start_frame :]\n\n ssim_vmap = jax.vmap(pix.ssim, in_axes=(0, 0))\n psnr_vmap = jax.vmap(pix.psnr, in_axes=(0, 0))\n ssim = ssim_vmap(gt, recon)\n psnr = psnr_vmap(gt, recon)\n per_frame_ssim = ssim.mean(0)\n per_frame_psnr = psnr.mean(0)\n avg_ssim = ssim.mean()\n avg_psnr = psnr.mean()\n\n print(""Per-frame SSIM:\n"", per_frame_ssim)\n print(""Per-frame PSNR:\n"", per_frame_psnr)\n\n print(f""SSIM: {avg_ssim}"")\n print(f""PSNR: {avg_psnr}"")\n\n # --- Construct video ---\n true_videos = (gt_video * 255).astype(np.uint8)\n pred_videos = (recon_video_BSHWC * 255).astype(np.uint8)\n video_comparison = np.zeros((2, *recon_video_BSHWC.shape), dtype=np.uint8)\n video_comparison[0] = true_videos[:, : args.seq_len]\n video_comparison[1] = pred_videos\n frames = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n # --- Save video ---\n imgs = [Image.fromarray(img) for img in frames]\n # Write actions on each frame, on each row (i.e., for each video in the batch, on the GT row)\n B = batch[""videos""].shape[0]\n if action_batch_E is not None:\n action_batch_BSm11 = jnp.reshape(action_batch_E, (B, args.seq_len - 1, 1))\n else:\n action_batch_BSm11 = jnp.reshape(\n batch[""actions""][:, :-1], (B, args.seq_len - 1, 1)\n )\n for t, img in enumerate(imgs[1:]):\n d = ImageDraw.Draw(img)\n for row in range(B):\n if args.print_action_indices:\n action = action_batch_BSm11[row, t, 0]\n y_offset = row * batch[""videos""].shape[2] + 2\n d.text((2, y_offset), f""{action}"", fill=255)\n\n 
os.makedirs(args.output_dir, exist_ok=True)\n imgs[0].save(\n os.path.join(args.output_dir, f""generation_{time.time()}.gif""),\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n )\n",python,tab +125,1294920,"jasmine/sample.py",785,0,"",python,selection_mouse +126,1295067,"jasmine/sample.py",781,11,"start_frame",python,selection_mouse +127,1295904,"jasmine/sample.py",756,0,"",python,selection_mouse +128,1296048,"jasmine/sample.py",750,13,"sample_argmax",python,selection_mouse +129,1296850,"jasmine/sample.py",595,0,"",python,selection_mouse +130,1297005,"jasmine/sample.py",586,20,"print_action_indices",python,selection_mouse +131,1301548,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",0,0,"",shellscript,tab +132,1303568,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1190,0,"\n ",shellscript,content +133,1303747,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1195,0,"print_action_indices",shellscript,content +134,1304756,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1195,0,"-",shellscript,content +135,1304757,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1196,0,"",shellscript,selection_keyboard +136,1304932,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1196,0,"-",shellscript,content +137,1304933,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1197,0,"",shellscript,selection_keyboard +138,1307438,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1197,0,"n",shellscript,content +139,1307440,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1198,0,"",shellscript,selection_keyboard +140,1307787,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1198,0,"o",shellscript,content +141,1307788,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1199,0,"",shellscript,selection_keyboard +142,1308098,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1199,0,"-",shellscript,content +143,1308099,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1200,0,"",shellscript,selection_keyboard +144,1309165,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1199,1,"",shellscript,content +145,1309795,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1199,20,"",shellscript,content +146,1310362,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1198,1,"",shellscript,content +147,1310557,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1197,1,"",shellscript,content +148,1312863,"jasmine/sample.py",0,0,"",python,tab +149,1315044,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",0,0,"",shellscript,tab +150,1316522,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1197,0,"print_action_indices",shellscript,content +151,1318154,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1197,0,"n",shellscript,content +152,1318155,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1198,0,"",shellscript,selection_keyboard +153,1318422,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1198,0,"o",shellscript,content +154,1318423,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1199,0,"",shellscript,selection_keyboard +155,1318772,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1199,0,"-",shellscript,content +156,1318773,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1200,0,"",shellscript,selection_keyboard +157,1319814,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1219,0,"",shellscript,selection_command +158,1320052,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1218,0,"",shellscript,selection_command 
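Rows 124-158 show why the sbatch edit spells the flag with a no- prefix: jasmine/sample.py builds its CLI with tyro.cli over a dataclass, and tyro exposes a defaulted boolean field as an enable/disable flag pair. A minimal sketch of that behavior (hypothetical standalone script; the exact flag spelling, dashes versus underscores, depends on how tyro is configured in the codebase):

from dataclasses import dataclass
import tyro

@dataclass
class Args:
    # Defaults to True, so the generated CLI offers both an enabling and a
    # disabling variant of the flag; passing the "no"-variant turns it off.
    print_action_indices: bool = True

if __name__ == "__main__":
    args = tyro.cli(Args)
    print(args.print_action_indices)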
+159,1320488,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1217,0,"",shellscript,selection_command +160,1320513,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1216,0,"",shellscript,selection_command +161,1320582,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1215,0,"",shellscript,selection_command +162,1320595,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1214,0,"",shellscript,selection_command +163,1320708,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1213,0,"",shellscript,selection_command +164,1321084,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1212,1,"",shellscript,content +165,1321575,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1212,0,"-",shellscript,content +166,1321576,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1213,0,"",shellscript,selection_keyboard +167,1322310,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1201,0,"",shellscript,selection_command +168,1322494,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1202,0,"",shellscript,selection_command +169,1322646,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1203,0,"",shellscript,selection_command +170,1322790,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1204,0,"",shellscript,selection_command +171,1323192,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1205,0,"",shellscript,selection_command +172,1323403,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1205,1,"",shellscript,content +173,1323855,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1205,0,".",shellscript,content +174,1323856,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1206,0,"",shellscript,selection_keyboard +175,1324293,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1206,1,"",shellscript,content +176,1325178,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1205,1,"",shellscript,content +177,1325458,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1205,0,"-",shellscript,content +178,1325459,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1206,0,"",shellscript,selection_keyboard +179,1325590,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1206,0,"a",shellscript,content +180,1325591,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1207,0,"",shellscript,selection_keyboard +181,1326866,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1220,0," ",shellscript,content +182,1326867,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1221,0,"",shellscript,selection_keyboard +183,1327330,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1221,0,"\",shellscript,content +184,1327331,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1222,0,"",shellscript,selection_keyboard +185,1327545,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1222,0,"\n ",shellscript,content +186,1329200,"jasmine/sample.py",0,0,"",python,tab +187,1330798,"jasmine/sample.py",632,0,"",python,selection_mouse +188,1331066,"jasmine/sample.py",624,10,"output_dir",python,selection_mouse +189,1333585,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",0,0,"",shellscript,tab +190,1334748,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1227,0,"-",shellscript,content +191,1334749,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1228,0,"",shellscript,selection_keyboard +192,1334850,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1228,0,"-",shellscript,content 
+193,1334851,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1229,0,"",shellscript,selection_keyboard +194,1335011,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1229,0,"output_dir",shellscript,content +195,1337134,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1239,0," ",shellscript,content +196,1337135,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1240,0,"",shellscript,selection_keyboard +197,1337670,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1240,0,"""",shellscript,content +198,1337671,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1241,0,"",shellscript,selection_keyboard +199,1338292,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1241,0,"g",shellscript,content +200,1338293,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1242,0,"",shellscript,selection_keyboard +201,1338354,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1242,0,"i",shellscript,content +202,1338355,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1243,0,"",shellscript,selection_keyboard +203,1339068,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1243,0,"f",shellscript,content +204,1339069,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1244,0,"",shellscript,selection_keyboard +205,1339375,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1244,0,"x",shellscript,content +206,1339376,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1245,0,"",shellscript,selection_keyboard +207,1339850,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1244,1,"",shellscript,content +208,1339882,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1244,0,"s",shellscript,content +209,1339882,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1245,0,"",shellscript,selection_keyboard +210,1340542,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1245,0,"/",shellscript,content +211,1340543,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1246,0,"",shellscript,selection_keyboard +212,1355967,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1246,0,"5",shellscript,content +213,1355968,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1247,0,"",shellscript,selection_keyboard +214,1355995,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1247,0,"0",shellscript,content +215,1355995,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1248,0,"",shellscript,selection_keyboard +216,1356136,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1248,0,"k",shellscript,content +217,1356137,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1249,0,"",shellscript,selection_keyboard +218,1357068,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1249,0,"/",shellscript,content +219,1357070,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1250,0,"",shellscript,selection_keyboard +220,1366412,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1250,0,"l",shellscript,content +221,1366414,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1251,0,"",shellscript,selection_keyboard +222,1366459,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1251,0,"a",shellscript,content +223,1366460,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1252,0,"",shellscript,selection_keyboard +224,1366533,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1252,0,"m",shellscript,content +225,1366534,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1253,0,"",shellscript,selection_keyboard 
+226,1367479,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1253,0,"-",shellscript,content +227,1367480,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1254,0,"",shellscript,selection_keyboard +228,1367604,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1254,0,"a",shellscript,content +229,1367605,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1255,0,"",shellscript,selection_keyboard +230,1367712,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1255,0,"c",shellscript,content +231,1367713,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1256,0,"",shellscript,selection_keyboard +232,1367900,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1256,0,"t",shellscript,content +233,1367901,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1257,0,"",shellscript,selection_keyboard +234,1368012,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1257,0,"i",shellscript,content +235,1368015,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1258,0,"",shellscript,selection_keyboard +236,1368131,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1258,0,"o",shellscript,content +237,1368132,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1259,0,"",shellscript,selection_keyboard +238,1368247,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1259,0,"n",shellscript,content +239,1368249,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1260,0,"",shellscript,selection_keyboard +240,1368333,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1260,0,"s",shellscript,content +241,1368334,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1261,0,"",shellscript,selection_keyboard +242,1371869,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1261,0,"""",shellscript,content +243,1371870,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1262,0,"",shellscript,selection_keyboard +244,1389281,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",0,0,"",shellscript,tab +245,1389282,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",771,0,"",shellscript,selection_mouse +246,1390264,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,0,"",shellscript,selection_command +247,1390727,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",769,0,"",shellscript,selection_command +248,1391028,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,0,"",shellscript,selection_command +249,1391912,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,100,"",shellscript,content +250,1391935,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",769,0,"",shellscript,selection_command +251,1392303,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,0,"",shellscript,selection_command +252,1392777,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn/train_dyn_default_breakout_longer/3519480",shellscript,content +253,1393717,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",891,0,"",shellscript,selection_command +254,1400707,"TERMINAL",0,0,"s",,terminal_output +255,1400771,"TERMINAL",0,0,"h",,terminal_output +256,1400837,"TERMINAL",0,0," ",,terminal_output +257,1401143,"TERMINAL",0,0,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",,terminal_output +258,1402135,"TERMINAL",0,0,"\rslurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch\r\n[?2004l\r",,terminal_output +259,1402302,"TERMINAL",0,0,"Sampling from checkpoint: 
/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn/train_dyn_default_breakout_longer/3519480\r\n",,terminal_output +260,1402445,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +261,1410490,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py"", line 86, in <module>\r\n genie = Genie(\r\n ^^^^^^\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/lib64/python3.12/site-packages/flax/nnx/pytreelib.py"", line 289, in __call__\r\n return _graph_node_meta_call(cls, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/lib64/python3.12/site-packages/flax/nnx/pytreelib.py"", line 301, in _graph_node_meta_call\r\n cls._pytree_meta_construct(node, *args, **kwargs)\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/lib64/python3.12/site-packages/flax/nnx/pytreelib.py"", line 292, in _pytree_meta_construct\r\n self.__init__(*args, **kwargs)\r\nTypeError: Genie.__init__() missing 2 required positional arguments: 'max_noise_level' and 'noise_buckets'\r\nW0925 11:05:00.161678 2974972 pjrt_client.cc:1469] WatchJobStateAsync failed for task goo.gle/debugstr job_name: ""jax_worker"": CANCELLED: CANCELLED\r\nAdditional GRPC error information from remote target coordination_service while calling /tensorflow.CoordinationService/WatchJobState:\r\n:UNKNOWN:Error received from peer {grpc_status:1, grpc_message:""CANCELLED""} [type.googleapis.com/tensorflow.DerivedStatus='']\r\n",,terminal_output +262,1411122,"TERMINAL",0,0,"srun: error: hkn0730: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0730:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0730 jasmine]$ ",,terminal_output +263,1436772,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",0,0,"from dataclasses import dataclass\nimport time\nimport os\nimport optax\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nimport orbax.checkpoint as ocp\nfrom PIL import Image, ImageDraw\nimport tyro\nfrom flax import nnx\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n print_action_indices: bool = True\n output_dir: str = ""gifs/""\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 1\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n use_gt_actions: bool = False\n # Dynamics checkpoint\n dyna_type: str = ""maskgit""\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\n\nif __name__ == ""__main__"":\n 
""""""\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n S: sequence length\n H: height\n W: width\n E: B * (S - 1)\n """"""\n jax.distributed.initialize()\n\n rng = jax.random.key(args.seed)\n\n # --- Load Genie checkpoint ---\n rngs = nnx.Rngs(rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_actions=args.num_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n use_gt_actions=args.use_gt_actions,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n # FIXME (f.srambical): implement spatiotemporal KV caching and set decode=True\n decode=False,\n rngs=rngs,\n )\n\n # Need to delete lam decoder for checkpoint loading\n if not args.use_gt_actions:\n assert genie.lam is not None\n del genie.lam.decoder\n\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n checkpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n dummy_tx = optax.adamw(\n learning_rate=optax.linear_schedule(0.0001, 0.0001, 10000),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n dummy_optimizer = nnx.ModelAndOptimizer(genie, dummy_tx)\n\n abstract_optimizer = nnx.eval_shape(lambda: dummy_optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(dummy_optimizer, restored_optimizer_state)\n\n # --- Define sampling function ---\n def _sampling_fn(model: Genie, batch: dict) -> jax.Array:\n """"""Runs Genie.sample with pre-defined generation hyper-parameters.""""""\n assert args.dyna_type in [\n ""maskgit"",\n ""causal"",\n ], f""Invalid dynamics type: {args.dyna_type}""\n frames, _ = model.sample(\n batch,\n args.seq_len,\n args.temperature,\n args.sample_argmax,\n args.maskgit_steps,\n )\n return frames\n\n # --- Define autoregressive sampling loop ---\n def _autoreg_sample(genie, rng, batch):\n batch[""videos""] = batch[""videos""][:, : args.start_frame]\n batch[""rng""] = rng\n generated_vid_BSHWC = _sampling_fn(genie, batch)\n return generated_vid_BSHWC\n\n # --- Get video + latent actions ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n dataloader = get_dataloader(\n 
array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n # We don't use workers in order to avoid grain shutdown issues (https://github.com/google/grain/issues/398)\n num_workers=0,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n dataloader = iter(dataloader)\n batch = next(dataloader)\n gt_video = jnp.asarray(batch[""videos""], dtype=jnp.float32) / 255.0\n batch[""videos""] = gt_video.astype(args.dtype)\n # Get latent actions for all videos in the batch\n action_batch_E = None\n if not args.use_gt_actions:\n action_batch_E = genie.vq_encode(batch, training=False)\n batch[""latent_actions""] = action_batch_E\n\n # --- Sample + evaluate video ---\n recon_video_BSHWC = _autoreg_sample(genie, rng, batch)\n recon_video_BSHWC = recon_video_BSHWC.astype(jnp.float32)\n\n gt = gt_video.clip(0, 1)[:, args.start_frame :]\n recon = recon_video_BSHWC.clip(0, 1)[:, args.start_frame :]\n\n ssim_vmap = jax.vmap(pix.ssim, in_axes=(0, 0))\n psnr_vmap = jax.vmap(pix.psnr, in_axes=(0, 0))\n ssim = ssim_vmap(gt, recon)\n psnr = psnr_vmap(gt, recon)\n per_frame_ssim = ssim.mean(0)\n per_frame_psnr = psnr.mean(0)\n avg_ssim = ssim.mean()\n avg_psnr = psnr.mean()\n\n print(""Per-frame SSIM:\n"", per_frame_ssim)\n print(""Per-frame PSNR:\n"", per_frame_psnr)\n\n print(f""SSIM: {avg_ssim}"")\n print(f""PSNR: {avg_psnr}"")\n\n # --- Construct video ---\n true_videos = (gt_video * 255).astype(np.uint8)\n pred_videos = (recon_video_BSHWC * 255).astype(np.uint8)\n video_comparison = np.zeros((2, *recon_video_BSHWC.shape), dtype=np.uint8)\n video_comparison[0] = true_videos[:, : args.seq_len]\n video_comparison[1] = pred_videos\n frames = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n # --- Save video ---\n imgs = [Image.fromarray(img) for img in frames]\n # Write actions on each frame, on each row (i.e., for each video in the batch, on the GT row)\n B = batch[""videos""].shape[0]\n if action_batch_E is not None:\n action_batch_BSm11 = jnp.reshape(action_batch_E, (B, args.seq_len - 1, 1))\n else:\n action_batch_BSm11 = jnp.reshape(\n batch[""actions""][:, :-1], (B, args.seq_len - 1, 1)\n )\n for t, img in enumerate(imgs[1:]):\n d = ImageDraw.Draw(img)\n for row in range(B):\n if args.print_action_indices:\n action = action_batch_BSm11[row, t, 0]\n y_offset = row * batch[""videos""].shape[2] + 2\n d.text((2, y_offset), f""{action}"", fill=255)\n\n os.makedirs(args.output_dir, exist_ok=True)\n imgs[0].save(\n os.path.join(args.output_dir, f""generation_{time.time()}.gif""),\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n )\n",python,tab +264,1436774,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1968,0,"",python,selection_command +265,1439120,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2706,0,"",python,selection_mouse +266,1439149,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2705,0,"",python,selection_command +267,1444843,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",0,0,"",python,tab +268,1446988,"jasmine/models/dynamics.py",0,0,"from typing import Dict\n\nimport jax\nimport jax.numpy as jnp\nimport flax.nnx as nnx\n\nfrom utils.nn import STTransformer, Transformer\n\n\nclass DynamicsMaskGIT(nnx.Module):\n """"""\n MaskGIT dynamics model\n\n Dimension keys:\n B: batch size\n T: sequence length\n N: number of patches per frame\n L: latent 
dimension\n V: vocabulary size (number of latents)\n """"""\n\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_latents: int,\n latent_action_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n max_noise_level: float,\n noise_buckets: int,\n mask_limit: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_latents = num_latents\n self.latent_action_dim = latent_action_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.max_noise_level = max_noise_level\n self.noise_buckets = noise_buckets\n self.mask_limit = mask_limit\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.transformer = STTransformer(\n self.model_dim,\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=rngs)\n self.mask_token = nnx.Param(\n nnx.initializers.lecun_uniform()(rngs.params(), (1, 1, 1, self.model_dim))\n )\n self.action_up = nnx.Linear(\n self.latent_action_dim,\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.noise_level_embed = nnx.Embed(\n self.noise_buckets, self.model_dim, rngs=rngs\n )\n\n def __call__(\n self,\n batch: Dict[str, jax.Array],\n ) -> tuple[jax.Array, jax.Array]:\n # --- Mask videos ---\n video_tokens_BTN = batch[""video_tokens""]\n latent_actions_BTm11L = batch[""latent_actions""]\n vid_embed_BTNM = self.patch_embed(video_tokens_BTN)\n\n B, T, N, M = vid_embed_BTNM.shape\n rng, _rng_prob, *_rngs_mask = jax.random.split(batch[""mask_rng""], B + 2)\n mask_prob = jax.random.uniform(_rng_prob, shape=(B,), minval=self.mask_limit)\n per_sample_shape = vid_embed_BTNM.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(jnp.asarray(_rngs_mask), mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed_BTNM = jnp.where(\n jnp.expand_dims(mask, -1), self.mask_token.value, vid_embed_BTNM\n )\n\n # --- Sample noise ---\n rng, _rng_noise = jax.random.split(rng)\n noise_level_B11 = jax.random.uniform(\n _rng_noise, shape=(B,), minval=0.0, maxval=self.max_noise_level\n ).reshape(B, 1, 1)\n noise_bucket_idx_B11 = jnp.floor(\n (noise_level_B11 / self.max_noise_level) * self.noise_buckets\n ).astype(jnp.int32)\n noise_level_embed_B11M = self.noise_level_embed(noise_bucket_idx_B11)\n noise_level_embed_BT1M = jnp.tile(noise_level_embed_B11M, (1, T, 1, 1))\n vid_embed_BTNM += jnp.expand_dims(noise_level_B11, -1)\n\n # --- Predict transition ---\n act_embed_BTm11M = self.action_up(latent_actions_BTm11L)\n padded_act_embed_BT1M = jnp.pad(\n act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0))\n )\n vid_embed_BTNp2M = jnp.concatenate(\n [padded_act_embed_BT1M, noise_level_embed_BT1M, vid_embed_BTNM], axis=2\n )\n logits_BTNp2V = self.transformer(vid_embed_BTNp2M)\n logits_BTNV = logits_BTNp2V[:, :, 2:]\n return logits_BTNV, mask\n\n\nclass DynamicsCausal(nnx.Module):\n """"""Causal dynamics model""""""\n\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_latents: int,\n latent_action_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n 
decode: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_latents = num_latents\n self.latent_action_dim = latent_action_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.decode = decode\n\n self.transformer = Transformer(\n self.model_dim,\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=self.decode,\n rngs=rngs,\n )\n self.patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=rngs)\n self.action_up = nnx.Linear(\n self.latent_action_dim,\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(\n self,\n batch: Dict[str, jax.Array],\n ) -> tuple[jax.Array, jax.Array]:\n video_tokens_BTN = batch[""video_tokens""]\n latent_actions_BTm11L = batch[""latent_actions""]\n vid_embed_BTNM = self.patch_embed(video_tokens_BTN)\n act_embed_BTm11M = self.action_up(latent_actions_BTm11L)\n padded_act_embed_BT1M = jnp.pad(\n act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0))\n )\n vid_embed_BTNp1M = jnp.concatenate(\n [padded_act_embed_BT1M, vid_embed_BTNM], axis=2\n )\n logits_BTNp1V = self.transformer(vid_embed_BTNp1M)\n logits_BTNV = logits_BTNp1V[:, :, :-1]\n return logits_BTNV, jnp.ones_like(video_tokens_BTN)\n",python,tab +269,1455153,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",0,0,"",python,tab +270,1459254,"jasmine/train_dynamics.py",0,0,"import os\n\n\nos.environ.setdefault(""XLA_PYTHON_CLIENT_MEM_FRACTION"", ""0.98"")\n\nfrom dataclasses import dataclass, field\nimport itertools\nfrom typing import cast, Optional\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.train_utils import (\n get_lr_schedule,\n count_parameters_by_component,\n print_mem_stats,\n print_compiled_memory_stats,\n print_compiled_cost_analysis,\n)\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_type: str = ""maskgit"" # 
supported options: maskgit, causal\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n max_noise_level: float = 0.7\n noise_buckets: int = 10\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n use_gt_actions: bool = False\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n val_data_dir: str = """"\n val_interval: int = 20_000\n val_steps: int = 50\n eval_full_frame: bool = False\n val_maskgit_steps: int = 25\n val_temperature: float = 1\n val_sample_argmax: bool = False\n wandb_id: str = """"\n\n\ndef build_model(args: Args, rng: jax.Array) -> tuple[Genie, jax.Array]:\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_actions=args.num_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n use_gt_actions=args.use_gt_actions,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n max_noise_level=args.max_noise_level,\n noise_buckets=args.noise_buckets,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n decode=False,\n rngs=rngs,\n )\n if args.use_gt_actions:\n assert (\n not args.lam_checkpoint\n ), ""Cannot use LAM when using ground-truth actions.""\n else:\n assert genie.lam is not None\n del genie.lam.decoder\n return genie, rng\n\n\ndef build_optimizer(genie: Genie, args: Args) -> nnx.ModelAndOptimizer:\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.param_dtype, # moments in full precision\n )\n optimizer = nnx.ModelAndOptimizer(genie, tx)\n return optimizer\n\n\ndef build_mesh_and_sharding(\n num_devices: int,\n) -> tuple[Mesh, NamedSharding, NamedSharding, NamedSharding]:\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n actions_sharding = NamedSharding(mesh, PartitionSpec(""data"", None))\n return mesh, replicated_sharding, videos_sharding, actions_sharding\n\n\ndef shard_optimizer_states(\n optimizer: nnx.ModelAndOptimizer, replicated_sharding: NamedSharding\n) -> None:\n model_state = 
nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n\ndef build_dataloader(args: Args, data_dir: str) -> grain.DataLoaderIterator:\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(data_dir, x)\n for x in os.listdir(data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n return grain_iterator\n\n\ndef build_checkpoint_manager(args: Args) -> Optional[ocp.CheckpointManager]:\n if args.restore_ckpt or args.save_ckpt:\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""train_dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""train_dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n if args.val_data_dir:\n handler_registry.add(\n ""val_dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(\n ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler\n ),\n )\n handler_registry.add(\n ""val_dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(\n ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler\n ),\n )\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n return checkpoint_manager\n else:\n return None\n\n\ndef restore_or_initialize_components(\n args: Args,\n checkpoint_manager: Optional[ocp.CheckpointManager],\n optimizer: nnx.ModelAndOptimizer,\n train_iterator: grain.DataLoaderIterator,\n rng: jax.Array,\n replicated_sharding: NamedSharding,\n val_iterator: Optional[grain.DataLoaderIterator],\n restore_step: Optional[int] = None,\n) -> tuple[\n int,\n nnx.ModelAndOptimizer,\n grain.DataLoaderIterator,\n grain.DataLoaderIterator,\n jax.Array,\n]:\n step = 0\n if checkpoint_manager and restore_step is None:\n restore_step = checkpoint_manager.latest_step()\n if args.restore_ckpt:\n assert checkpoint_manager is not None\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n if val_iterator:\n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n 
train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n val_dataloader_state=grain.checkpoint.CheckpointRestore(val_iterator), # type: ignore\n )\n else:\n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(), args=restore_args\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n train_iterator = restored[""train_dataloader_state""]\n if val_iterator:\n val_iterator = restored[""val_dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n rng, _rng = jax.random.split(rng)\n optimizer = restore_genie_components(optimizer, replicated_sharding, _rng, args)\n return step, optimizer, train_iterator, val_iterator, rng\n\n\ndef _calculate_step_metrics(\n outputs: dict[str, jax.Array],\n gt: jax.Array,\n num_actions: int,\n num_patch_latents: int,\n) -> tuple[jax.Array, dict]:\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt_val = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt_val, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt_val, recon)).mean()\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]),\n size=num_patch_latents,\n fill_value=0,\n )\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n if ""lam_indices"" in outputs.keys():\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]),\n size=num_actions,\n fill_value=0,\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n metrics[""codebook_usage_lam""] = codebook_usage_lam\n return ce_loss, metrics\n\n\ndef main(args: Args) -> None:\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n genie, rng = build_model(args, rng)\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n 
""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n optimizer = build_optimizer(genie, args)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n _, replicated_sharding, videos_sharding, actions_sharding = build_mesh_and_sharding(\n num_devices\n )\n\n shard_optimizer_states(optimizer, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n checkpoint_manager = build_checkpoint_manager(args)\n\n # --- Create DataLoaderIterator from dataloader ---\n train_iterator = build_dataloader(args, args.data_dir)\n val_iterator = None\n if args.val_data_dir:\n val_iterator = build_dataloader(args, args.val_data_dir)\n\n # --- Restore checkpoint ---\n step, optimizer, train_iterator, val_iterator, rng = (\n restore_or_initialize_components(\n args,\n checkpoint_manager,\n optimizer,\n train_iterator,\n rng,\n replicated_sharding,\n val_iterator,\n )\n )\n\n # --- Define loss and train step (close over args) ---\n def dynamics_loss_fn(\n model: Genie,\n inputs: dict,\n ) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n outputs = model(inputs)\n ce_loss, metrics = _calculate_step_metrics(\n outputs, gt, args.num_actions, args.num_patch_latents\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n @nnx.jit(donate_argnums=0)\n def train_step(\n optimizer: nnx.ModelAndOptimizer, inputs: dict\n ) -> tuple[jax.Array, jax.Array, dict]:\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n model.train()\n return dynamics_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(\n optimizer.model\n )\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n @nnx.jit\n def val_step(genie: Genie, inputs: dict) -> dict:\n """"""Evaluate model and compute metrics""""""\n genie.eval()\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n (loss, (recon, metrics)) = dynamics_loss_fn(genie, inputs)\n val_output = {""loss"": loss, ""recon"": recon, ""metrics"": metrics}\n\n # --- Evaluate full frame prediction (sampling) ---\n if args.eval_full_frame:\n inputs[""videos""] = gt.astype(args.dtype)\n tokenizer_outputs = genie.tokenizer.vq_encode(\n inputs[""videos""], training=False\n )\n tokens_full_frame = tokenizer_outputs[""indices""]\n lam_indices_E = None\n if not args.use_gt_actions:\n lam_indices_E = genie.vq_encode(inputs, training=False)\n inputs[""latent_actions""] = lam_indices_E\n inputs[""videos""] = inputs[""videos""][\n :, :-1\n ] # remove last frame for generation\n recon_full_frame, logits_full_frame = genie.sample(\n inputs,\n args.seq_len,\n args.val_temperature,\n args.val_sample_argmax,\n args.val_maskgit_steps,\n )\n # Calculate metrics for the last frame only\n step_outputs = {\n ""recon"": recon_full_frame[:, -1],\n ""token_logits"": logits_full_frame[:, -1],\n ""video_tokens"": tokens_full_frame[:, -1],\n ""mask"": jnp.ones_like(tokens_full_frame[:, -1]),\n }\n if lam_indices_E is not None:\n lam_indices_B = lam_indices_E.reshape((-1, args.seq_len - 1))[:, -1]\n 
step_outputs[""lam_indices""] = lam_indices_B\n\n loss_full_frame, metrics_full_frame = _calculate_step_metrics(\n step_outputs, gt[:, -1], args.num_actions, args.num_patch_latents\n )\n val_output.update(\n {\n ""loss_full_frame"": loss_full_frame,\n ""recon_full_frame"": recon_full_frame,\n ""metrics_full_frame"": metrics_full_frame,\n }\n )\n return val_output\n\n def calculate_validation_metrics(val_dataloader, genie, rng):\n step = 0\n loss_per_step = []\n metrics_per_step = []\n loss_full_frame_per_step = []\n metrics_full_frame_per_step = []\n batch = None\n recon = None\n recon_full_frame = None\n for batch in val_dataloader:\n rng, _rng_mask = jax.random.split(rng, 2)\n batch[""rng""] = _rng_mask\n val_outputs = val_step(genie, batch)\n loss_per_step.append(val_outputs[""loss""])\n metrics_per_step.append(val_outputs[""metrics""])\n recon = val_outputs[""recon""]\n if args.eval_full_frame:\n loss_full_frame_per_step.append(val_outputs[""loss_full_frame""])\n metrics_full_frame_per_step.append(val_outputs[""metrics_full_frame""])\n recon_full_frame = val_outputs[""recon_full_frame""]\n step += 1\n if step > args.val_steps:\n break\n\n if step < args.val_steps:\n print(\n f""Warning: Your validation dataset is too small to make val_steps many steps. Made {step} steps, expected {args.val_steps}""\n )\n\n val_metrics = {\n f""val_{key}"": np.mean([float(m[key]) for m in metrics_per_step])\n for key in metrics_per_step[0].keys()\n }\n val_metrics[""val_loss""] = np.mean(loss_per_step)\n if args.eval_full_frame:\n val_metrics_full_frame = {\n f""val_full_frame_{key}"": np.mean(\n [float(m[key]) for m in metrics_full_frame_per_step]\n )\n for key in metrics_full_frame_per_step[0].keys()\n }\n val_metrics.update(val_metrics_full_frame)\n val_metrics[""val_full_frame_loss""] = np.mean(loss_full_frame_per_step)\n return val_metrics, batch, recon, recon_full_frame\n\n # --- TRAIN LOOP ---\n dataloader_train = (\n {\n ""videos"": jax.make_array_from_process_local_data(\n videos_sharding, local_data=elem[""videos""]\n ),\n ""actions"": (\n jax.make_array_from_process_local_data(\n actions_sharding, elem[""actions""]\n )\n if args.use_gt_actions\n else None\n ),\n }\n for elem in train_iterator\n )\n dataloader_val = None\n if val_iterator:\n dataloader_val = (\n {\n ""videos"": jax.make_array_from_process_local_data(\n videos_sharding, elem[""videos""]\n ),\n ""actions"": (\n jax.make_array_from_process_local_data(\n actions_sharding, elem[""actions""]\n )\n if args.use_gt_actions\n else None\n ),\n }\n for elem in val_iterator\n )\n if jax.process_index() == 0:\n first_batch = next(dataloader_train)\n first_batch[""rng""] = rng # type: ignore\n compiled = train_step.lower(optimizer, first_batch).compile()\n print_compiled_memory_stats(compiled.memory_analysis())\n print_compiled_cost_analysis(compiled.cost_analysis())\n # Do not skip the first batch during training\n dataloader_train = itertools.chain([first_batch], dataloader_train)\n print(f""Starting training from step {step}..."")\n first_step = step\n while step < args.num_steps:\n for batch in dataloader_train:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n batch[""rng""] = _rng_mask\n loss, recon, metrics = train_step(optimizer, batch)\n if step == first_step:\n print_mem_stats(""After params initialized"")\n step += 1\n\n # --- Validation loss ---\n val_results = {}\n if dataloader_val and step % args.val_interval == 0:\n rng, _rng_mask_val = jax.random.split(rng, 2)\n print(""Calculating validation 
metrics..."")\n val_metrics, val_gt_batch, val_recon, val_recon_full_frame = (\n calculate_validation_metrics(\n dataloader_val, optimizer.model, _rng_mask_val\n )\n )\n print(f""Step {step}, validation loss: {val_metrics['val_loss']}"")\n val_results = {\n ""metrics"": val_metrics,\n ""gt_batch"": val_gt_batch,\n ""recon"": val_recon,\n ""full_frame"": val_recon_full_frame,\n }\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n log_dict = {""loss"": loss, ""step"": step, **metrics}\n if val_results:\n log_dict.update(val_results[""metrics""])\n wandb.log(log_dict)\n if step % args.log_image_interval == 0:\n gt_seq = batch[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if val_results:\n val_results[""gt_seq_val""] = (\n val_results[""gt_batch""][""videos""][0].astype(jnp.float32)\n / 255.0\n )\n val_results[""recon_seq_val""] = val_results[""recon""][0].clip(\n 0, 1\n )\n val_comparison_seq = jnp.concatenate(\n (val_results[""gt_seq_val""], val_results[""recon_seq_val""]),\n axis=1,\n )\n val_results[""val_comparison_seq""] = einops.rearrange(\n val_comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if args.eval_full_frame:\n val_results[""full_frame_seq_val""] = val_results[\n ""full_frame""\n ][0].clip(0, 1)\n val_results[""val_full_frame_comparison_seq""] = (\n jnp.concatenate(\n (\n val_results[""gt_seq_val""],\n val_results[""full_frame_seq_val""],\n ),\n axis=1,\n )\n )\n val_results[""val_full_frame_comparison_seq""] = (\n einops.rearrange(\n val_results[""val_full_frame_comparison_seq""] * 255,\n ""t h w c -> h (t w) c"",\n )\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n if val_results:\n log_images.update(\n dict(\n val_image=wandb.Image(\n np.asarray(\n val_results[""gt_seq_val""][args.seq_len - 1]\n )\n ),\n val_recon=wandb.Image(\n np.asarray(\n val_results[""recon_seq_val""][\n args.seq_len - 1\n ]\n )\n ),\n val_true_vs_recon=wandb.Image(\n np.asarray(\n val_results[""val_comparison_seq""].astype(\n np.uint8\n )\n )\n ),\n )\n )\n if args.eval_full_frame:\n log_images.update(\n dict(\n val_full_frame=wandb.Image(\n np.asarray(\n val_results[""full_frame_seq_val""][\n args.seq_len - 1\n ]\n )\n ),\n val_true_vs_full_frame=wandb.Image(\n np.asarray(\n val_results[\n ""val_full_frame_comparison_seq""\n ].astype(np.uint8)\n )\n ),\n )\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n assert checkpoint_manager is not None\n optimizer_state = nnx.state(optimizer)\n if val_iterator:\n ckpt_manager_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n ),\n val_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n val_iterator # type: ignore\n ),\n )\n else:\n ckpt_manager_args = ocp.args.Composite(\n 
model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n ),\n )\n checkpoint_manager.save(step, args=ckpt_manager_args)\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n if checkpoint_manager:\n checkpoint_manager.close()\n\n\nif __name__ == ""__main__"":\n args = tyro.cli(Args)\n main(args)\n",python,tab +271,1459950,"jasmine/train_dynamics.py",2470,0,"",python,selection_mouse +272,1460012,"jasmine/train_dynamics.py",2469,0,"",python,selection_command +273,1465603,"jasmine/train_dynamics.py",4118,0,"",python,selection_mouse +274,1473476,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",0,0,"",python,tab +275,1474635,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2706,0,"\n max_noise_level=args.max_noise_level,\n noise_buckets=args.noise_buckets,",python,content +276,1474671,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2715,0,"",python,selection_command +277,1475543,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2752,0,"",python,selection_command +278,1475861,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2751,0,"",python,selection_command +279,1477586,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2736,15,"",python,content +280,1477950,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2735,1,"",python,content +281,1478208,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2734,1,"",python,content +282,1478433,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2731,3,"",python,content +283,1478757,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2731,0,"ß",python,content +284,1478758,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2732,0,"",python,selection_keyboard +285,1480233,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2731,1,"",python,content +286,1480353,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2731,0,"0",python,content +287,1480354,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2732,0,"",python,selection_keyboard +288,1480724,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2732,0,".",python,content +289,1480725,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2733,0,"",python,selection_keyboard +290,1481202,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2761,0,"",python,selection_command +291,1481552,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2760,0,"",python,selection_command +292,1482981,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2757,4,"",python,content +293,1483110,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2757,1,"",python,content +294,1483408,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2757,13,"",python,content 
+295,1484457,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2757,0,"0",python,content +296,1484458,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2758,0,"",python,selection_keyboard +297,1484606,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2758,0,".",python,content +298,1484607,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2759,0,"",python,selection_keyboard +299,1521366,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",649,0,"",python,selection_mouse +300,1521728,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",649,0,"\n ",python,content +301,1522645,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",654,0,"n",python,content +302,1522646,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",655,0,"",python,selection_keyboard +303,1522875,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",655,0,"i",python,content +304,1522877,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",656,0,"",python,selection_keyboard +305,1523188,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",655,1,"",python,content +306,1523374,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",655,0,"p",python,content +307,1523376,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",656,0,"",python,selection_keyboard +308,1523459,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",656,0,"i",python,content +309,1523461,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",657,0,"",python,selection_keyboard +310,1523559,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",657,0,"s",python,content +311,1523560,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",658,0,"",python,selection_keyboard +312,1523707,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",658,0,"e",python,content +313,1523709,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",659,0,"",python,selection_keyboard +314,1523895,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",658,1,"",python,content +315,1524022,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",657,1,"",python,content +316,1524160,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",656,1,"",python,content +317,1524388,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",655,1,"",python,content +318,1525022,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",655,0,"o",python,content +319,1525024,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",656,0,"",python,selection_keyboard +320,1525024,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",656,0,"i",python,content +321,1525025,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",657,0,"",python,selection_keyboard 
+322,1525428,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",657,0,"s",python,content +323,1525430,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",658,0,"",python,selection_keyboard +324,1525578,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",658,0,"e",python,content +325,1525580,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",659,0,"",python,selection_keyboard +326,1525839,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",659,0,"_",python,content +327,1525840,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",660,0,"",python,selection_keyboard +328,1526199,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",660,0,"l",python,content +329,1526199,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",661,0,"",python,selection_keyboard +330,1526329,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",661,0,"e",python,content +331,1526330,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",662,0,"",python,selection_keyboard +332,1526522,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",662,0,"v",python,content +333,1526523,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",663,0,"",python,selection_keyboard +334,1526629,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",663,0,"e",python,content +335,1526630,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",664,0,"",python,selection_keyboard +336,1526831,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",664,0,"l",python,content +337,1526832,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",665,0,"",python,selection_keyboard +338,1527266,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",665,0,":",python,content +339,1527267,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",666,0,"",python,selection_keyboard +340,1527476,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",666,0," ",python,content +341,1527477,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",667,0,"",python,selection_keyboard +342,1527859,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",667,0,"f",python,content +343,1527859,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",668,0,"",python,selection_keyboard +344,1527977,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",668,0,"l",python,content +345,1527978,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",669,0,"",python,selection_keyboard +346,1528140,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",669,0,"o",python,content +347,1528141,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",670,0,"",python,selection_keyboard +348,1528326,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",670,0,"a",python,content 
+349,1528327,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",671,0,"",python,selection_keyboard +350,1528546,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",671,0,"r",python,content +351,1528548,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",672,0,"",python,selection_keyboard +352,1528953,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",672,0," ",python,content +353,1528954,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",673,0,"",python,selection_keyboard +354,1529161,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",672,1,"",python,content +355,1529302,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",671,1,"",python,content +356,1529416,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",671,0,"t",python,content +357,1529417,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",672,0,"",python,selection_keyboard +358,1529648,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",672,0," ",python,content +359,1529650,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",673,0,"",python,selection_keyboard +360,1529816,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",673,0,"=",python,content +361,1529817,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",674,0,"",python,selection_keyboard +362,1529912,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",674,0," ",python,content +363,1529913,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",675,0,"",python,selection_keyboard +364,1530186,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",675,0,"0",python,content +365,1530187,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",676,0,"",python,selection_keyboard +366,1530675,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",676,0,".",python,content +367,1530676,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",677,0,"",python,selection_keyboard +368,1531412,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",676,0,"",python,selection_command +369,1532137,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",650,28,"",python,content +370,1532226,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",654,0,"",python,selection_command +371,1532226,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",669,0,"",python,selection_command +372,1532690,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",693,0,"",python,selection_command +373,1532746,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",721,0,"",python,selection_command +374,1532753,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",750,0,"",python,selection_command +375,1532790,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",781,0,"",python,selection_command 
+376,1532814,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",806,0,"",python,selection_command +377,1532868,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",833,0,"",python,selection_command +378,1532907,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",862,0,"",python,selection_command +379,1532961,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",896,0,"",python,selection_command +380,1532966,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",927,0,"",python,selection_command +381,1532990,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",961,0,"",python,selection_command +382,1533004,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",985,0,"",python,selection_command +383,1533037,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1019,0,"",python,selection_command +384,1533058,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1052,0,"",python,selection_command +385,1533088,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1073,0,"",python,selection_command +386,1533146,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1096,0,"",python,selection_command +387,1533171,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1124,0,"",python,selection_command +388,1533202,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1156,0,"",python,selection_command +389,1533216,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1181,0,"",python,selection_command +390,1533328,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1210,0,"",python,selection_command +391,1533559,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1238,0,"",python,selection_command +392,1534203,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1210,0,"",python,selection_command +393,1534723,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1181,0,"",python,selection_command +394,1534763,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1156,0,"",python,selection_command +395,1534795,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1124,0,"",python,selection_command +396,1534811,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1096,0,"",python,selection_command +397,1534830,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1073,0,"",python,selection_command +398,1534860,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1052,0,"",python,selection_command +399,1534922,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",1019,0,"",python,selection_command +400,1534945,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",985,0,"",python,selection_command +401,1534959,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",961,0,"",python,selection_command 
+402,1534997,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",927,0,"",python,selection_command +403,1535013,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",896,0,"",python,selection_command +404,1535046,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",862,0,"",python,selection_command +405,1535088,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",833,0,"",python,selection_command +406,1535130,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",806,0,"",python,selection_command +407,1535142,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",781,0,"",python,selection_command +408,1535674,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",801,0,"\n noise_level: float = 0.",python,content +409,1535705,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",806,0,"",python,selection_command +410,1537041,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",829,0,"\n ",python,content +411,1538009,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",834,0,"n",python,content +412,1538010,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",835,0,"",python,selection_keyboard +413,1538180,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",835,0,"o",python,content +414,1538181,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",836,0,"",python,selection_keyboard +415,1538266,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",836,0,"i",python,content +416,1538267,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",837,0,"",python,selection_keyboard +417,1538321,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",837,0,"s",python,content +418,1538323,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",838,0,"",python,selection_keyboard +419,1538456,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",838,0,"e",python,content +420,1538458,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",839,0,"",python,selection_keyboard +421,1538622,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",839,0,"_",python,content +422,1538624,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",840,0,"",python,selection_keyboard +423,1538942,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",840,0,"b",python,content +424,1538943,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",841,0,"",python,selection_keyboard +425,1539122,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",841,0,"u",python,content +426,1539122,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",842,0,"",python,selection_keyboard +427,1539204,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",842,0,"c",python,content 
+428,1539205,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",843,0,"",python,selection_keyboard +429,1539322,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",843,0,"k",python,content +430,1539323,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",844,0,"",python,selection_keyboard +431,1539424,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",844,0,"e",python,content +432,1539426,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",845,0,"",python,selection_keyboard +433,1539738,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",845,0,"t",python,content +434,1539739,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",846,0,"",python,selection_keyboard +435,1539838,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",846,0,"s",python,content +436,1539840,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",847,0,"",python,selection_keyboard +437,1540100,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",847,0,":",python,content +438,1540101,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",848,0,"",python,selection_keyboard +439,1540846,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",848,0," ",python,content +440,1540847,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",849,0,"",python,selection_keyboard +441,1541056,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",849,0,"i",python,content +442,1541057,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",850,0,"",python,selection_keyboard +443,1541077,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",850,0,"n",python,content +444,1541078,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",851,0,"",python,selection_keyboard +445,1541180,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",851,0,"t",python,content +446,1541181,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",852,0,"",python,selection_keyboard +447,1541500,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",852,0," ",python,content +448,1541501,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",853,0,"",python,selection_keyboard +449,1541659,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",853,0,"=",python,content +450,1541660,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",854,0,"",python,selection_keyboard +451,1541793,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",854,0," ",python,content +452,1541794,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",855,0,"",python,selection_keyboard +453,1542188,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",855,0,"1",python,content +454,1542189,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",856,0,"",python,selection_keyboard 
+455,1543344,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",856,0,"0",python,content +456,1543345,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",857,0,"",python,selection_keyboard +457,1543464,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",856,0,"",python,selection_command +458,1546654,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2815,0,"",python,selection_mouse +459,1547743,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2814,1,"",python,content +460,1547866,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2813,1,"",python,content +461,1548093,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2813,0,"a",python,content +462,1548094,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2814,0,"",python,selection_keyboard +463,1548534,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2814,0,"r",python,content +464,1548535,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2815,0,"",python,selection_keyboard +465,1549155,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2815,0,"g",python,content +466,1549156,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2816,0,"",python,selection_keyboard +467,1549309,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2816,0,"s",python,content +468,1549310,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2817,0,"",python,selection_keyboard +469,1549452,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2817,0,".",python,content +470,1549453,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2818,0,"",python,selection_keyboard +471,1549895,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2818,0,"n",python,content +472,1549896,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2819,0,"",python,selection_keyboard +473,1550126,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2819,0,"o",python,content +474,1550127,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2820,0,"",python,selection_keyboard +475,1550226,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2820,0,"i",python,content +476,1550227,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2821,0,"",python,selection_keyboard +477,1551439,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2821,0,"s",python,content +478,1551441,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2822,0,"",python,selection_keyboard +479,1551640,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2822,0,"e",python,content +480,1551642,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2823,0,"",python,selection_keyboard +481,1551833,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2823,0,"_",python,content 
+482,1551834,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2824,0,"",python,selection_keyboard +483,1552375,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2824,0,"u",python,content +484,1552376,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2825,0,"",python,selection_keyboard +485,1552699,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2824,1,"",python,content +486,1553084,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2824,0,"b",python,content +487,1553085,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2825,0,"",python,selection_keyboard +488,1553632,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py",2818,7,"noise_buckets",python,content +489,1591929,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",,terminal_output +490,1593069,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +491,1593189,"TERMINAL",0,0,"Sampling from checkpoint: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn/train_dyn_default_breakout_longer/3519480\r\n",,terminal_output +492,1593359,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +493,1603282,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/jasmine/sample.py"", line 156, in \r\n restored = checkpoint_manager.restore(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/lib64/python3.12/site-packages/orbax/checkpoint/checkpoint_manager.py"", line 1673, in restore\r\n restored = self._checkpointer.restore(restore_directory, args=args)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/lib64/python3.12/site-packages/orbax/checkpoint/_src/checkpointers/async_checkpointer.py"", line 571, in restore\r\n return super().restore(directory, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/lib64/python3.12/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 306, in restore\r\n restored = self._restore(directory, args=ckpt_args)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/lib64/python3.12/site-packages/orbax/checkpoint/_src/checkpointers/checkpointer.py"", line 328, in _restore\r\n return self._handler.restore(directory, args=args)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/lib64/python3.12/site-packages/orbax/checkpoint/_src/handlers/composite_checkpoint_handler.py"", line 857, in restore\r\n restored[item_name] = handler.restore(\r\n ^^^^^^^^^^^^^^^^\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/lib64/python3.12/site-packages/orbax/checkpoint/_src/handlers/pytree_checkpoint_handler.py"", line 835, in restore\r\n return self._handler_impl.restore(directory, args=args)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File 
""/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/lib64/python3.12/site-packages/orbax/checkpoint/_src/handlers/base_pytree_checkpoint_handler.py"", line 949, in restore\r\n raise ValueError(\r\nValueError: User-provided restore item and on-disk value metadata tree structures do not match:\r\nmodel.dynamics.noise_level_embed:\r\n - Source: {'embedding': {'value': ShapeDtypeStruct(shape=(10, 512), dtype=float32)}}\r\n - Target: MISSING\r\n\r\nopt_state.0.mu.dynamics.noise_level_embed:\r\n - Source: {'embedding': {'value': ShapeDtypeStruct(shape=(10, 512), dtype=bfloat16)}}\r\n - Target: MISSING\r\n\r\nopt_state.0.nu.dynamics.noise_level_embed:\r\n - Source: {'embedding': {'value': ShapeDtypeStruct(shape=(10, 512), dtype=float32)}}\r\n - Target: MISSING\r\n",,terminal_output +494,1608392,"TERMINAL",0,0,"W0925 11:08:18.062256 2975697 pjrt_client.cc:1469] WatchJobStateAsync failed for task goo.gle/debugstr job_name: ""jax_worker"": UNAVAILABLE: Cancelling all calls\r\nAdditional GRPC error information from remote target coordination_service while calling /tensorflow.CoordinationService/WatchJobState:\r\n:UNKNOWN:Error received from peer {grpc_message:""Cancelling all calls"", grpc_status:14}\r\n",,terminal_output +495,1608903,"TERMINAL",0,0,"srun: error: hkn0730: task 0: Exited with exit code 1\r\n]0;tum_cte0515@hkn0730:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0730 jasmine]$ ",,terminal_output +496,1630642,"TERMINAL",0,0,"gi",,terminal_output +497,1630708,"TERMINAL",0,0,"t",,terminal_output +498,1630773,"TERMINAL",0,0," ",,terminal_output +499,1630966,"TERMINAL",0,0,"s",,terminal_output +500,1631031,"TERMINAL",0,0,"t",,terminal_output +501,1631148,"TERMINAL",0,0,"a",,terminal_output +502,1631200,"TERMINAL",0,0,"t",,terminal_output +503,1631308,"TERMINAL",0,0,"u",,terminal_output +504,1631373,"TERMINAL",0,0,"s",,terminal_output +505,1631548,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +506,1631833,"TERMINAL",0,0,"On branch add-noise-to-combat-exposure-bias\r\n",,terminal_output +507,1631948,"TERMINAL",0,0,"Your branch is ahead of 'origin/add-noise-to-combat-exposure-bias' by 2 commits.\r\n (use ""git push"" to publish your local commits)\r\n\r\nLast commands done (2 commands done):\r\n pick ba37453 feat: generate coinrun dataset with val split\r\n pick faadd10 feat: implemented validation loss for all three models\r\nNext commands to do (26 remaining commands):\r\n pick 9a17dbb fix: pass val data path to dataloader\r\n pick 6e69cdb fix typo in image logging\r\n (use ""git rebase --edit-todo"" to view and edit)\r\nYou are currently editing a commit while rebasing branch 'gt-actions' on 'c7522f2'.\r\n (use ""git commit --amend"" to amend the current commit)\r\n (use ""git rebase --continue"" once you are satisfied with your changes)\r\n\r\nChanges not staged for commit:\r\n (use ""git add ..."" to update what will be committed)\r\n (use ""git restore ..."" to discard changes in working directory)\r\n\tmodified: jasmine/sample.py\r\n\r\nUntracked files:\r\n (use ""git add ..."" to include in what will be committed)\r\n\tdiff.diff\r\n\tdiff2.diff\r\n\tinput_pipeline/\r\n\tkiller.sh\r\n\tkiller_partition.sh\r\n\tlog.log\r\n\toverfit_dir.zip\r\n\trequirements-franz.txt\r\n\tsamples/\r\n\tscripts_cremers/\r\n\tslurm/\r\n\ttest.py\r\n\tutils/\r\n\tuv.lock\r\n\r\nno changes added to commit (use ""git add"" and/or ""git commit -a"")\r\n]0;tum_cte0515@hkn0730:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0730 jasmine]$ ",,terminal_output 
+508,1634125,"TERMINAL",0,0,"g",,terminal_output +509,1634188,"TERMINAL",0,0,"i",,terminal_output +510,1634253,"TERMINAL",0,0,"t",,terminal_output +511,1634317,"TERMINAL",0,0," ",,terminal_output +512,1634508,"TERMINAL",0,0,"c",,terminal_output +513,1634649,"TERMINAL",0,0,"o",,terminal_output +514,1634725,"TERMINAL",0,0,"m",,terminal_output +515,1634919,"TERMINAL",0,0,"i",,terminal_output +516,1635295,"TERMINAL",0,0,"",,terminal_output +517,1635471,"TERMINAL",0,0,"m",,terminal_output +518,1635587,"TERMINAL",0,0,"i",,terminal_output +519,1635712,"TERMINAL",0,0,"t",,terminal_output +520,1635777,"TERMINAL",0,0," ",,terminal_output +521,1636549,"TERMINAL",0,0,"-",,terminal_output +522,1636765,"TERMINAL",0,0,"a",,terminal_output +523,1636774,"TERMINAL",0,0,"m",,terminal_output +524,1636898,"TERMINAL",0,0," ",,terminal_output +525,1637078,"TERMINAL",0,0,"""",,terminal_output +526,1637340,"TERMINAL",0,0,"a",,terminal_output +527,1637469,"TERMINAL",0,0,"d",,terminal_output +528,1637628,"TERMINAL",0,0,"d",,terminal_output +529,1637758,"TERMINAL",0,0,"e",,terminal_output +530,1637865,"TERMINAL",0,0,"d",,terminal_output +531,1637919,"TERMINAL",0,0," ",,terminal_output +532,1638061,"TERMINAL",0,0,"n",,terminal_output +533,1638284,"TERMINAL",0,0,"oi",,terminal_output +534,1638349,"TERMINAL",0,0,"s",,terminal_output +535,1638769,"TERMINAL",0,0,"e",,terminal_output +536,1638877,"TERMINAL",0,0," ",,terminal_output +537,1639052,"TERMINAL",0,0,"e",,terminal_output +538,1639443,"TERMINAL",0,0,"",,terminal_output +539,1639556,"TERMINAL",0,0,"",,terminal_output +540,1639684,"TERMINAL",0,0,"l",,terminal_output +541,1639749,"TERMINAL",0,0,"e",,terminal_output +542,1640127,"TERMINAL",0,0,"",,terminal_output +543,1640249,"TERMINAL",0,0,"",,terminal_output +544,1640489,"TERMINAL",0,0," ",,terminal_output +545,1640614,"TERMINAL",0,0,"le",,terminal_output +546,1640757,"TERMINAL",0,0,"v",,terminal_output +547,1640872,"TERMINAL",0,0,"e",,terminal_output +548,1640929,"TERMINAL",0,0,"l",,terminal_output +549,1640996,"TERMINAL",0,0," ",,terminal_output +550,1641502,"TERMINAL",0,0,"to",,terminal_output +551,1641640,"TERMINAL",0,0," ",,terminal_output +552,1641704,"TERMINAL",0,0,"s",,terminal_output +553,1641927,"TERMINAL",0,0,"a",,terminal_output +554,1641993,"TERMINAL",0,0,"m",,terminal_output +555,1642230,"TERMINAL",0,0,"pl",,terminal_output +556,1642365,"TERMINAL",0,0,"e",,terminal_output +557,1642491,"TERMINAL",0,0,".",,terminal_output +558,1642700,"TERMINAL",0,0,"p",,terminal_output +559,1642838,"TERMINAL",0,0,"y",,terminal_output +560,1643036,"TERMINAL",0,0,"""",,terminal_output +561,1643298,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +562,1644019,"TERMINAL",0,0,"black....................................................................",,terminal_output +563,1644794,"TERMINAL",0,0,"Failed\r\n- hook id: black\r\n- files were modified by this hook\r\n\r\nreformatted jasmine/sample.py\r\n\r\nAll done! 
✨ 🍰 ✨\r\n1 file reformatted.\r\n\r\n]0;tum_cte0515@hkn0730:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0730 jasmine]$ ",,terminal_output +564,1645486,"TERMINAL",0,0,"git commit -am ""added noise level to sample.py""",,terminal_output +565,1645595,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +566,1645822,"TERMINAL",0,0,"black....................................................................",,terminal_output +567,1645965,"TERMINAL",0,0,"Passed\r\n",,terminal_output +568,1646091,"TERMINAL",0,0,"[add-noise-to-combat-exposure-bias 69d3b80] added noise level to sample.py\r\n 1 file changed, 4 insertions(+)\r\n]0;tum_cte0515@hkn0730:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0730 jasmine]$ ",,terminal_output +569,1646813,"TERMINAL",0,0,"g",,terminal_output +570,1646886,"TERMINAL",0,0,"i",,terminal_output +571,1646952,"TERMINAL",0,0,"t",,terminal_output +572,1647017,"TERMINAL",0,0," ",,terminal_output +573,1647208,"TERMINAL",0,0,"p",,terminal_output +574,1647698,"TERMINAL",0,0,"c",,terminal_output +575,1647831,"TERMINAL",0,0,"h",,terminal_output +576,1647898,"TERMINAL",0,0,"e",,terminal_output +577,1648033,"TERMINAL",0,0,"c",,terminal_output +578,1648151,"TERMINAL",0,0,"k",,terminal_output +579,1648424,"TERMINAL",0,0,"o",,terminal_output +580,1648578,"TERMINAL",0,0,"u",,terminal_output +581,1648648,"TERMINAL",0,0,"t",,terminal_output +582,1648712,"TERMINAL",0,0," ",,terminal_output +583,1648909,"TERMINAL",0,0,"ma",,terminal_output +584,1648974,"TERMINAL",0,0,"i",,terminal_output +585,1649042,"TERMINAL",0,0,"n",,terminal_output +586,1649208,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +587,1649329,"TERMINAL",0,0,"Switched to branch 'main'\r\nYour branch is up to date with 'origin/main'.\r\n]0;tum_cte0515@hkn0730:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0730 jasmine]$ ",,terminal_output +588,1650557,"",0,0,"Switched from branch 'add-noise-to-combat-exposure-bias' to 'main'",,git_branch_checkout +589,1651979,"TERMINAL",0,0,"git checkout main",,terminal_output +590,1652894,"TERMINAL",0,0,"ommit -am ""added noise level to sample.py""",,terminal_output +591,1653787,"TERMINAL",0,0,"status",,terminal_output +592,1654435,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",,terminal_output +593,1655254,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +594,1655365,"TERMINAL",0,0,"Sampling from checkpoint: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn/train_dyn_default_breakout_longer/3519480\r\n",,terminal_output +595,1655485,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +596,1658176,"jasmine/train_dynamics.py",0,0,"import os\n\n\nos.environ.setdefault(""XLA_PYTHON_CLIENT_MEM_FRACTION"", ""0.98"")\n\nfrom dataclasses import dataclass, field\nimport itertools\nfrom typing import cast, Optional\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.train_utils import (\n get_lr_schedule,\n count_parameters_by_component,\n print_mem_stats,\n print_compiled_memory_stats,\n print_compiled_cost_analysis,\n)\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: 
int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_type: str = ""maskgit"" # supported options: maskgit, causal\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n use_gt_actions: bool = False\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n val_data_dir: str = """"\n val_interval: int = 20_000\n val_steps: int = 50\n eval_full_frame: bool = False\n val_maskgit_steps: int = 25\n val_temperature: float = 1\n val_sample_argmax: bool = False\n wandb_id: str = """"\n\n\ndef build_model(args: Args, rng: jax.Array) -> tuple[Genie, jax.Array]:\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_actions=args.num_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n use_gt_actions=args.use_gt_actions,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n decode=False,\n rngs=rngs,\n )\n if args.use_gt_actions:\n assert (\n not args.lam_checkpoint\n ), ""Cannot use LAM when using ground-truth actions.""\n else:\n assert genie.lam is not None\n del genie.lam.decoder\n return genie, rng\n\n\ndef build_optimizer(genie: Genie, args: Args) -> nnx.ModelAndOptimizer:\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n 
b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.param_dtype, # moments in full precision\n )\n optimizer = nnx.ModelAndOptimizer(genie, tx)\n return optimizer\n\n\ndef build_mesh_and_sharding(\n num_devices: int,\n) -> tuple[Mesh, NamedSharding, NamedSharding, NamedSharding]:\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n actions_sharding = NamedSharding(mesh, PartitionSpec(""data"", None))\n return mesh, replicated_sharding, videos_sharding, actions_sharding\n\n\ndef shard_optimizer_states(\n optimizer: nnx.ModelAndOptimizer, replicated_sharding: NamedSharding\n) -> None:\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n\ndef build_dataloader(args: Args, data_dir: str) -> grain.DataLoaderIterator:\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(data_dir, x)\n for x in os.listdir(data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n return grain_iterator\n\n\ndef build_checkpoint_manager(args: Args) -> Optional[ocp.CheckpointManager]:\n if args.restore_ckpt or args.save_ckpt:\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""train_dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""train_dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n if args.val_data_dir:\n handler_registry.add(\n ""val_dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(\n ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler\n ),\n )\n handler_registry.add(\n ""val_dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(\n ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler\n ),\n )\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n return checkpoint_manager\n else:\n return None\n\n\ndef restore_or_initialize_components(\n args: Args,\n 
checkpoint_manager: Optional[ocp.CheckpointManager],\n optimizer: nnx.ModelAndOptimizer,\n train_iterator: grain.DataLoaderIterator,\n rng: jax.Array,\n replicated_sharding: NamedSharding,\n val_iterator: Optional[grain.DataLoaderIterator],\n restore_step: Optional[int] = None,\n) -> tuple[\n int,\n nnx.ModelAndOptimizer,\n grain.DataLoaderIterator,\n grain.DataLoaderIterator,\n jax.Array,\n]:\n step = 0\n if checkpoint_manager and restore_step is None:\n restore_step = checkpoint_manager.latest_step()\n if args.restore_ckpt:\n assert checkpoint_manager is not None\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n if val_iterator:\n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n val_dataloader_state=grain.checkpoint.CheckpointRestore(val_iterator), # type: ignore\n )\n else:\n restore_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointRestore(train_iterator), # type: ignore\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(), args=restore_args\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n train_iterator = restored[""train_dataloader_state""]\n if val_iterator:\n val_iterator = restored[""val_dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n rng, _rng = jax.random.split(rng)\n optimizer = restore_genie_components(optimizer, replicated_sharding, _rng, args)\n return step, optimizer, train_iterator, val_iterator, rng\n\n\ndef _calculate_step_metrics(\n outputs: dict[str, jax.Array],\n gt: jax.Array,\n num_actions: int,\n num_patch_latents: int,\n) -> tuple[jax.Array, dict]:\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt_val = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt_val, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt_val, recon)).mean()\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]),\n size=num_patch_latents,\n fill_value=0,\n )\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n if ""lam_indices"" in outputs.keys():\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]),\n size=num_actions,\n fill_value=0,\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n metrics[""codebook_usage_lam""] = codebook_usage_lam\n return 
ce_loss, metrics\n\n\ndef main(args: Args) -> None:\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n genie, rng = build_model(args, rng)\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n optimizer = build_optimizer(genie, args)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n _, replicated_sharding, videos_sharding, actions_sharding = build_mesh_and_sharding(\n num_devices\n )\n\n shard_optimizer_states(optimizer, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n checkpoint_manager = build_checkpoint_manager(args)\n\n # --- Create DataLoaderIterator from dataloader ---\n train_iterator = build_dataloader(args, args.data_dir)\n val_iterator = None\n if args.val_data_dir:\n val_iterator = build_dataloader(args, args.val_data_dir)\n\n # --- Restore checkpoint ---\n step, optimizer, train_iterator, val_iterator, rng = (\n restore_or_initialize_components(\n args,\n checkpoint_manager,\n optimizer,\n train_iterator,\n rng,\n replicated_sharding,\n val_iterator,\n )\n )\n\n # --- Define loss and train step (close over args) ---\n def dynamics_loss_fn(\n model: Genie,\n inputs: dict,\n ) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n outputs = model(inputs)\n ce_loss, metrics = _calculate_step_metrics(\n outputs, gt, args.num_actions, args.num_patch_latents\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n @nnx.jit(donate_argnums=0)\n def train_step(\n optimizer: nnx.ModelAndOptimizer, inputs: dict\n ) -> tuple[jax.Array, jax.Array, dict]:\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n model.train()\n return dynamics_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(\n optimizer.model\n )\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n @nnx.jit\n def val_step(genie: Genie, inputs: dict) -> dict:\n """"""Evaluate model and compute metrics""""""\n genie.eval()\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n (loss, (recon, metrics)) = dynamics_loss_fn(genie, inputs)\n val_output = {""loss"": loss, ""recon"": recon, ""metrics"": metrics}\n\n # --- Evaluate full frame prediction (sampling) ---\n if args.eval_full_frame:\n inputs[""videos""] = gt.astype(args.dtype)\n tokenizer_outputs = genie.tokenizer.vq_encode(\n inputs[""videos""], training=False\n )\n 
tokens_full_frame = tokenizer_outputs[""indices""]\n lam_indices_E = None\n if not args.use_gt_actions:\n lam_indices_E = genie.vq_encode(inputs, training=False)\n inputs[""latent_actions""] = lam_indices_E\n inputs[""videos""] = inputs[""videos""][\n :, :-1\n ] # remove last frame for generation\n recon_full_frame, logits_full_frame = genie.sample(\n inputs,\n args.seq_len,\n args.val_temperature,\n args.val_sample_argmax,\n args.val_maskgit_steps,\n )\n # Calculate metrics for the last frame only\n step_outputs = {\n ""recon"": recon_full_frame[:, -1],\n ""token_logits"": logits_full_frame[:, -1],\n ""video_tokens"": tokens_full_frame[:, -1],\n ""mask"": jnp.ones_like(tokens_full_frame[:, -1]),\n }\n if lam_indices_E is not None:\n lam_indices_B = lam_indices_E.reshape((-1, args.seq_len - 1))[:, -1]\n step_outputs[""lam_indices""] = lam_indices_B\n\n loss_full_frame, metrics_full_frame = _calculate_step_metrics(\n step_outputs, gt[:, -1], args.num_actions, args.num_patch_latents\n )\n val_output.update(\n {\n ""loss_full_frame"": loss_full_frame,\n ""recon_full_frame"": recon_full_frame,\n ""metrics_full_frame"": metrics_full_frame,\n }\n )\n return val_output\n\n def calculate_validation_metrics(val_dataloader, genie, rng):\n step = 0\n loss_per_step = []\n metrics_per_step = []\n loss_full_frame_per_step = []\n metrics_full_frame_per_step = []\n batch = None\n recon = None\n recon_full_frame = None\n for batch in val_dataloader:\n rng, _rng_mask = jax.random.split(rng, 2)\n batch[""rng""] = _rng_mask\n val_outputs = val_step(genie, batch)\n loss_per_step.append(val_outputs[""loss""])\n metrics_per_step.append(val_outputs[""metrics""])\n recon = val_outputs[""recon""]\n if args.eval_full_frame:\n loss_full_frame_per_step.append(val_outputs[""loss_full_frame""])\n metrics_full_frame_per_step.append(val_outputs[""metrics_full_frame""])\n recon_full_frame = val_outputs[""recon_full_frame""]\n step += 1\n if step > args.val_steps:\n break\n\n if step < args.val_steps:\n print(\n f""Warning: Your validation dataset is too small to make val_steps many steps. 
Made {step} steps, expected {args.val_steps}""\n )\n\n val_metrics = {\n f""val_{key}"": np.mean([float(m[key]) for m in metrics_per_step])\n for key in metrics_per_step[0].keys()\n }\n val_metrics[""val_loss""] = np.mean(loss_per_step)\n if args.eval_full_frame:\n val_metrics_full_frame = {\n f""val_full_frame_{key}"": np.mean(\n [float(m[key]) for m in metrics_full_frame_per_step]\n )\n for key in metrics_full_frame_per_step[0].keys()\n }\n val_metrics.update(val_metrics_full_frame)\n val_metrics[""val_full_frame_loss""] = np.mean(loss_full_frame_per_step)\n return val_metrics, batch, recon, recon_full_frame\n\n # --- TRAIN LOOP ---\n dataloader_train = (\n {\n ""videos"": jax.make_array_from_process_local_data(\n videos_sharding, local_data=elem[""videos""]\n ),\n ""actions"": (\n jax.make_array_from_process_local_data(\n actions_sharding, elem[""actions""]\n )\n if args.use_gt_actions\n else None\n ),\n }\n for elem in train_iterator\n )\n dataloader_val = None\n if val_iterator:\n dataloader_val = (\n {\n ""videos"": jax.make_array_from_process_local_data(\n videos_sharding, elem[""videos""]\n ),\n ""actions"": (\n jax.make_array_from_process_local_data(\n actions_sharding, elem[""actions""]\n )\n if args.use_gt_actions\n else None\n ),\n }\n for elem in val_iterator\n )\n if jax.process_index() == 0:\n first_batch = next(dataloader_train)\n first_batch[""rng""] = rng # type: ignore\n compiled = train_step.lower(optimizer, first_batch).compile()\n print_compiled_memory_stats(compiled.memory_analysis())\n print_compiled_cost_analysis(compiled.cost_analysis())\n # Do not skip the first batch during training\n dataloader_train = itertools.chain([first_batch], dataloader_train)\n print(f""Starting training from step {step}..."")\n first_step = step\n while step < args.num_steps:\n for batch in dataloader_train:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n batch[""rng""] = _rng_mask\n loss, recon, metrics = train_step(optimizer, batch)\n if step == first_step:\n print_mem_stats(""After params initialized"")\n step += 1\n\n # --- Validation loss ---\n val_results = {}\n if dataloader_val and step % args.val_interval == 0:\n rng, _rng_mask_val = jax.random.split(rng, 2)\n print(""Calculating validation metrics..."")\n val_metrics, val_gt_batch, val_recon, val_recon_full_frame = (\n calculate_validation_metrics(\n dataloader_val, optimizer.model, _rng_mask_val\n )\n )\n print(f""Step {step}, validation loss: {val_metrics['val_loss']}"")\n val_results = {\n ""metrics"": val_metrics,\n ""gt_batch"": val_gt_batch,\n ""recon"": val_recon,\n ""full_frame"": val_recon_full_frame,\n }\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n log_dict = {""loss"": loss, ""step"": step, **metrics}\n if val_results:\n log_dict.update(val_results[""metrics""])\n wandb.log(log_dict)\n if step % args.log_image_interval == 0:\n gt_seq = batch[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if val_results:\n val_results[""gt_seq_val""] = (\n val_results[""gt_batch""][""videos""][0].astype(jnp.float32)\n / 255.0\n )\n val_results[""recon_seq_val""] = val_results[""recon""][0].clip(\n 0, 1\n )\n val_comparison_seq = jnp.concatenate(\n (val_results[""gt_seq_val""], val_results[""recon_seq_val""]),\n axis=1,\n )\n val_results[""val_comparison_seq""] = 
einops.rearrange(\n val_comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if args.eval_full_frame:\n val_results[""full_frame_seq_val""] = val_results[\n ""full_frame""\n ][0].clip(0, 1)\n val_results[""val_full_frame_comparison_seq""] = (\n jnp.concatenate(\n (\n val_results[""gt_seq_val""],\n val_results[""full_frame_seq_val""],\n ),\n axis=1,\n )\n )\n val_results[""val_full_frame_comparison_seq""] = (\n einops.rearrange(\n val_results[""val_full_frame_comparison_seq""] * 255,\n ""t h w c -> h (t w) c"",\n )\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n if val_results:\n log_images.update(\n dict(\n val_image=wandb.Image(\n np.asarray(\n val_results[""gt_seq_val""][args.seq_len - 1]\n )\n ),\n val_recon=wandb.Image(\n np.asarray(\n val_results[""recon_seq_val""][\n args.seq_len - 1\n ]\n )\n ),\n val_true_vs_recon=wandb.Image(\n np.asarray(\n val_results[""val_comparison_seq""].astype(\n np.uint8\n )\n )\n ),\n )\n )\n if args.eval_full_frame:\n log_images.update(\n dict(\n val_full_frame=wandb.Image(\n np.asarray(\n val_results[""full_frame_seq_val""][\n args.seq_len - 1\n ]\n )\n ),\n val_true_vs_full_frame=wandb.Image(\n np.asarray(\n val_results[\n ""val_full_frame_comparison_seq""\n ].astype(np.uint8)\n )\n ),\n )\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n assert checkpoint_manager is not None\n optimizer_state = nnx.state(optimizer)\n if val_iterator:\n ckpt_manager_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n ),\n val_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n val_iterator # type: ignore\n ),\n )\n else:\n ckpt_manager_args = ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n train_dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n train_iterator # type: ignore\n ),\n )\n checkpoint_manager.save(step, args=ckpt_manager_args)\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n if checkpoint_manager:\n checkpoint_manager.close()\n\n\nif __name__ == ""__main__"":\n args = tyro.cli(Args)\n main(args)\n",python,tab +597,1658177,"jasmine/train_dynamics.py",3134,0,"",python,selection_mouse +598,1665124,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/lib64/python3.12/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1269: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +599,1704227,"TERMINAL",0,0,"Per-frame SSIM:\r\n [0.74998707 0.7326238 0.7775165 0.70783824 0.76672506 0.77292466\r\n 0.77912945 0.83438206 0.83242536 0.7636453 0.74864435 0.7840261 ]\r\nPer-frame PSNR:\r\n [31.437206 30.637875 30.461605 28.525434 29.322098 30.95715 27.870033\r\n 29.826872 31.810665 29.45117 27.755566 26.30378 ]\r\nSSIM: 0.7708223462104797\r\nPSNR: 29.52995491027832\r\n",,terminal_output +600,1704944,"TERMINAL",0,0,"W0925 11:09:54.632645 2976346 pjrt_client.cc:1469] WatchJobStateAsync failed for task goo.gle/debugproto job_name: ""jax_worker"": CANCELLED: CANCELLED\r\nAdditional GRPC error information from remote target coordination_service while calling /tensorflow.CoordinationService/WatchJobState:\r\n:UNKNOWN:Error received from peer {grpc_message:""CANCELLED"", grpc_status:1} [type.googleapis.com/tensorflow.DerivedStatus='']\r\n",,terminal_output +601,1705674,"TERMINAL",0,0,"]0;tum_cte0515@hkn0730:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0730 jasmine]$ ",,terminal_output +602,1795313,"slurm/jobs/mihir/horeka/breakout/noise_schedule_runs/train_dyn_single_gpu_gt_actions_smaller_lr_50k.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=06:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/breakout/dyn/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/breakout/dyn/%x_%j.log\n#SBATCH --job-name=train_dyn_default_gt_actions_breakout_longer_smaller_lr\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n# source .venv/bin/activate\n\narray_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_breakout/breakout_episodes_perfect_big/train\narray_records_dir_val=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_breakout/breakout_episodes_perfect_big/val\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn-gt-actions/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/tokenizer/interactive/3518963\n\nenv | grep SLURM\n\nsrun python jasmine/train_dynamics.py \\n --save_ckpt \\n --image_height=10 \\n --image_width=10 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=120 \\n --init_lr=0 \\n --max_lr=1e-5 \\n --log_image_interval=250 \\n --log_checkpoint_interval=250 \\n --dyna_type=maskgit \\n --log \\n --name=breakout-dyn-50ksteps-noise-lvl-gt-actions-lr-1e-5-$slurm_job_id \\n --tags dyn breakout 50ksteps noise-lvl 1e-5 \\n --entity instant-uv \\n --project jafar \\n --patch_size 4 \\n --lam_patch_size 4 \\n --warmup_steps 100 \\n --wsd_decay_steps 1000 \\n --num_steps 50000 \\n --use_gt_actions \\n --data_dir $array_records_dir_train \\n --val_data_dir $array_records_dir_val \\n --tokenizer_checkpoint $tokenizer_checkpoint \\n --val_interval 750 \\n --eval_full_frame \\n",shellscript,tab +603,1831379,"slurm/jobs/mihir/horeka/breakout/noise_schedule_runs/train_dyn_single_gpu_gt_actions_smaller_lr_50k.sh",0,0,"",shellscript,tab 
+604,1836471,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=01:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/sampling/maskgit/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/sampling/maskgit/%x_%j.log\n#SBATCH --job-name=coinrun_sample_maskgit\n\n# Unload modules that may interfere\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n\n# Activate virtual environment\n# source .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_breakout/breakout_episodes_perfect/val\n\nCHECKPOINT_PATH=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn/train_dyn_default_breakout_longer/3519480\n\n\necho ""Sampling from checkpoint: $CHECKPOINT_PATH""\n\nsrun python jasmine/sample.py \\n --checkpoint ""$CHECKPOINT_PATH"" \\n --data_dir=$array_records_dir \\n --seq_len=16 \\n --batch_size=12 \\n --start_frame=4 \\n --image_height=10 \\n --image_width=10 \\n --dyna_type=maskgit \\n --lam_patch_size=4 \\n --no-print-action-indices \\n --output_dir ""gifs/50k/lam-actions""",shellscript,tab +605,1838528,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",771,0,"",shellscript,selection_mouse +606,1839413,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,0,"",shellscript,selection_command +607,1840469,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,122,"",shellscript,content +608,1840497,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",769,0,"",shellscript,selection_command +609,1840835,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,0,"",shellscript,selection_command +610,1841231,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn/train_dyn_default_breakout_longer_smaller_lr/3519483",shellscript,content +611,1843474,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",,terminal_output +612,1843605,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +613,1843756,"TERMINAL",0,0,"Sampling from checkpoint: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn/train_dyn_default_breakout_longer_smaller_lr/3519483\r\n",,terminal_output +614,1843996,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +615,1853514,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/lib64/python3.12/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1269: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +616,1891927,"TERMINAL",0,0,"Per-frame SSIM:\r\n [0.75207186 0.81390333 0.99189013 0.8411112 0.8520455 0.92152333\r\n 0.92218053 0.917578 0.81181765 0.75858426 0.7855942 0.83286476]\r\nPer-frame PSNR:\r\n [30.593054 33.48621 43.409725 32.908672 34.882893 36.720604 33.09388\r\n 34.759594 32.54592 31.710812 31.553694 31.374647]\r\nSSIM: 0.850097119808197\r\nPSNR: 33.91997528076172\r\n",,terminal_output +617,1892650,"TERMINAL",0,0,"W0925 11:13:02.293972 2978330 pjrt_client.cc:1469] WatchJobStateAsync failed for task goo.gle/debugproto job_name: ""jax_worker"": CANCELLED: CANCELLED\r\nAdditional GRPC error information from remote target coordination_service while calling /tensorflow.CoordinationService/WatchJobState:\r\n:UNKNOWN:Error received from peer {grpc_status:1, grpc_message:""CANCELLED""} [type.googleapis.com/tensorflow.DerivedStatus='']\r\n",,terminal_output +618,1893293,"TERMINAL",0,0,"]0;tum_cte0515@hkn0730:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0730 jasmine]$ ",,terminal_output +619,1895787,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",0,0,"",shellscript,tab +620,1901817,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",0,0,"",shellscript,tab +621,1977989,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",0,0,"",shellscript,tab +622,1981794,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1283,0,"",shellscript,selection_mouse +623,1981964,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1283,1,"l",shellscript,selection_mouse +624,1982021,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1283,2,"la",shellscript,selection_mouse +625,1982095,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1283,3,"lam",shellscript,selection_mouse +626,1983066,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1283,3,"g",shellscript,content +627,1983067,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1284,0,"",shellscript,selection_keyboard +628,1983185,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1284,0,"t",shellscript,content +629,1983185,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1285,0,"",shellscript,selection_keyboard +630,1995100,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1172,0,"",shellscript,selection_mouse +631,1996030,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",771,0,"",shellscript,selection_mouse +632,1996501,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,0,"",shellscript,selection_command +633,1997649,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,133,"",shellscript,content +634,1997694,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",769,0,"",shellscript,selection_command +635,1998137,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,0,"",shellscript,selection_command +636,1998555,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn-gt-actions/train_dyn_default_gt_actions_breakout_longer/3519529",shellscript,content +637,2000954,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1266,0,"",shellscript,selection_mouse +638,2001449,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1266,0,"\n ",shellscript,content +639,2002167,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1271,0,"-",shellscript,content 
+640,2002167,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1272,0,"",shellscript,selection_keyboard +641,2002295,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1272,0,"-",shellscript,content +642,2002296,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1273,0,"",shellscript,selection_keyboard +643,2002532,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1273,0,"u",shellscript,content +644,2002533,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1274,0,"",shellscript,selection_keyboard +645,2002618,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1274,0,"s",shellscript,content +646,2002619,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1275,0,"",shellscript,selection_keyboard +647,2002736,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1275,0,"e",shellscript,content +648,2002737,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1276,0,"",shellscript,selection_keyboard +649,2002875,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1276,0,"_",shellscript,content +650,2002876,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1277,0,"",shellscript,selection_keyboard +651,2003088,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1277,0,"g",shellscript,content +652,2003088,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1278,0,"",shellscript,selection_keyboard +653,2003219,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1278,0,"t",shellscript,content +654,2003220,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1279,0,"",shellscript,selection_keyboard +655,2003369,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1279,0,"_",shellscript,content +656,2003370,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1280,0,"",shellscript,selection_keyboard +657,2003561,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1280,0,"a",shellscript,content +658,2003562,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1281,0,"",shellscript,selection_keyboard +659,2003677,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1281,0,"c",shellscript,content +660,2003677,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1282,0,"",shellscript,selection_keyboard +661,2003858,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1282,0,"t",shellscript,content +662,2003860,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1283,0,"",shellscript,selection_keyboard +663,2003966,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1283,0,"i",shellscript,content +664,2003967,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1284,0,"",shellscript,selection_keyboard +665,2004031,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1284,0,"o",shellscript,content +666,2004032,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1285,0,"",shellscript,selection_keyboard +667,2004217,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1285,0,"n",shellscript,content +668,2004218,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1286,0,"",shellscript,selection_keyboard +669,2004307,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1286,0,"s",shellscript,content +670,2004308,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1287,0,"",shellscript,selection_keyboard +671,2004477,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1287,0," ",shellscript,content +672,2004478,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1288,0,"",shellscript,selection_keyboard 
+673,2004727,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1288,0,"\",shellscript,content +674,2004728,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1289,0,"",shellscript,selection_keyboard +675,2004831,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",1288,0,"",shellscript,selection_command +676,2007164,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",,terminal_output +677,2008718,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +678,2008828,"TERMINAL",0,0,"Sampling from checkpoint: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn-gt-actions/train_dyn_default_gt_actions_breakout_longer/3519529\r\n",,terminal_output +679,2008981,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +680,2017749,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/lib64/python3.12/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1269: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +681,2047586,"TERMINAL",0,0,"Per-frame SSIM:\r\n [0.7812254 0.7858403 0.76890147 0.6958034 0.62305444 0.65509886\r\n 0.69744813 0.6297102 0.6061019 0.6196751 0.62508017 0.64287454]\r\nPer-frame PSNR:\r\n [26.8233 23.919037 21.94614 20.976141 20.258772 19.888817 19.593525\r\n 19.172722 19.441936 20.006874 19.923233 20.250233]\r\nSSIM: 0.6775677800178528\r\nPSNR: 21.016727447509766\r\n",,terminal_output +682,2048095,"TERMINAL",0,0,"W0925 11:15:37.761846 2980186 pjrt_client.cc:1469] WatchJobStateAsync failed for task goo.gle/debugstr job_name: ""jax_worker"": CANCELLED: CANCELLED\r\nAdditional GRPC error information from remote target coordination_service while calling /tensorflow.CoordinationService/WatchJobState:\r\n:UNKNOWN:Error received from peer {grpc_message:""CANCELLED"", grpc_status:1} [type.googleapis.com/tensorflow.DerivedStatus='']\r\n",,terminal_output +683,2048821,"TERMINAL",0,0,"]0;tum_cte0515@hkn0730:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0730 jasmine]$ ",,terminal_output +684,2080593,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",0,0,"",shellscript,tab +685,2081623,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,0,"",shellscript,selection_mouse +686,2081725,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,2,"/h",shellscript,selection_mouse +687,2081726,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,6,"/hkfs/",shellscript,selection_mouse +688,2081792,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,16,"/hkfs/work/works",shellscript,selection_mouse +689,2081793,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",753,17,"\nCHECKPOINT_PATH=",shellscript,selection_mouse +690,2081793,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",696,74,"fa_ws_shared/data_breakout/breakout_episodes_perfect/val\n\nCHECKPOINT_PATH=",shellscript,selection_mouse +691,2081828,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",718,52,"kout/breakout_episodes_perfect/val\n\nCHECKPOINT_PATH=",shellscript,selection_mouse +692,2081828,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",753,17,"\nCHECKPOINT_PATH=",shellscript,selection_mouse 
+693,2081974,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,94,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn-gt-actions/tr",shellscript,selection_mouse +694,2082033,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,95,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn-gt-actions/tra",shellscript,selection_mouse +695,2082034,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,96,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn-gt-actions/trai",shellscript,selection_mouse +696,2082071,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,98,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn-gt-actions/train_",shellscript,selection_mouse +697,2082071,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,100,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn-gt-actions/train_dy",shellscript,selection_mouse +698,2082113,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,101,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn-gt-actions/train_dyn",shellscript,selection_mouse +699,2082114,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,145,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn-gt-actions/train_dyn_default_gt_actions_breakout_longer/3519529\n",shellscript,selection_mouse +700,2082576,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,144,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn-gt-actions/train_dyn_default_gt_actions_breakout_longer/3519529",shellscript,selection_mouse +701,2083707,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,144,"",shellscript,content +702,2083740,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",769,0,"",shellscript,selection_command +703,2084550,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,0,"",shellscript,selection_command +704,2085162,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",770,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn-gt-actions/train_dyn_default_gt_actions_breakout_longer_smaller_lr/3519530",shellscript,content +705,2089833,"TERMINAL",0,0,"sh slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",,terminal_output +706,2090014,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output +707,2090142,"TERMINAL",0,0,"Sampling from checkpoint: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn-gt-actions/train_dyn_default_gt_actions_breakout_longer_smaller_lr/3519530\r\n",,terminal_output +708,2090333,"TERMINAL",0,0,"GpuFreq=control_disabled\r\n",,terminal_output +709,2099301,"TERMINAL",0,0,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jasmine/.venv/lib64/python3.12/site-packages/orbax/checkpoint/_src/serialization/type_handlers.py:1269: UserWarning: Sharding info not provided when restoring. Populating sharding info from sharding file. Please note restoration time will be slightly increased due to reading from file. 
Note also that this option is unsafe when restoring on a different topology than the checkpoint was saved with.\r\n warnings.warn(\r\n",,terminal_output +710,2129300,"TERMINAL",0,0,"Per-frame SSIM:\r\n [0.67461824 0.70958847 0.8586652 0.783487 0.7213084 0.7598344\r\n 0.80947256 0.75921077 0.6859379 0.6732184 0.66822684 0.70876646]\r\nPer-frame PSNR:\r\n [25.940617 25.2038 29.020214 26.956312 23.654037 24.552372 24.38194\r\n 23.45362 23.095198 23.228605 22.959173 22.007948]\r\nSSIM: 0.7343612313270569\r\nPSNR: 24.537818908691406\r\n",,terminal_output +711,2134846,"TERMINAL",0,0,"W0925 11:17:04.510413 2981575 pjrt_client.cc:1469] WatchJobStateAsync failed for task goo.gle/debugstr job_name: ""jax_worker"": UNAVAILABLE: Cancelling all calls\r\nAdditional GRPC error information from remote target coordination_service while calling /tensorflow.CoordinationService/WatchJobState:\r\n:UNKNOWN:Error received from peer {grpc_status:14, grpc_message:""Cancelling all calls""}\r\n",,terminal_output +712,2135530,"TERMINAL",0,0,"]0;tum_cte0515@hkn0730:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0730 jasmine]$ ",,terminal_output +713,2146632,"slurm/jobs/mihir/horeka/breakout/sample_maskgit.sbatch",0,0,"",shellscript,tab +714,2779395,"TERMINAL",0,0,"\r(jasmine) [tum_cte0515@hkn0730 jasmine]$ \r(jasmine) [tum_cte0515@hkn0730 jasmine]$ ",,terminal_output +715,2781061,"TERMINAL",0,0,"\r(jasmine) [tum_cte0515@hkn0730 jasmine]$ ",,terminal_output +716,2791761,"TERMINAL",0,0,"\r(jasmine) [tum_cte0515@hkn0730 jasmine]$ ",,terminal_output +717,2794975,"slurm/jobs/mihir/horeka/breakout/default_runs/train_dyn_single_gpu.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/breakout/dyn/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/breakout/dyn/%x_%j.log\n#SBATCH --job-name=train_dyn_default_breakout_longer\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n# source .venv/bin/activate\n\narray_records_dir_train=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_breakout/breakout_episodes_perfect/train\narray_records_dir_val=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_breakout/breakout_episodes_perfect/val\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/dyn/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# lam_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/lam/interactive/3512576\n# tokenizer_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/tokenizer/interactive/3512502\n\nlam_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/lam/interactive/3518963\ntokenizer_checkpoint=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/breakout/tokenizer/interactive/3518963\n\nenv | grep SLURM\n\nexport PYTHONUNBUFFERED=1\n\nsrun python jasmine/train_dynamics.py \\n --save_ckpt \\n --image_height=10 \\n --image_width=10 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=120 \\n --init_lr=0 \\n --max_lr=3e-5 \\n --log_image_interval=250 \\n --log_checkpoint_interval=250 \\n 
--dyna_type=maskgit \\n --log \\n --name=breakout-dyn-default-$slurm_job_id \\n --tags dyn breakout default \\n --entity instant-uv \\n --project jafar \\n --patch_size 4 \\n --lam_patch_size 4 \\n --warmup_steps 100 \\n --wsd_decay_steps 1000 \\n --num_steps 5000 \\n --data_dir $array_records_dir_train \\n --val_data_dir $array_records_dir_val \\n --tokenizer_checkpoint $tokenizer_checkpoint \\n --lam_checkpoint $lam_checkpoint \\n --val_interval 500 \\n --eval_full_frame \\n",shellscript,tab +718,2816229,"jasmine/models/dynamics.py",0,0,"from typing import Dict\n\nimport jax\nimport jax.numpy as jnp\nimport flax.nnx as nnx\n\nfrom utils.nn import STTransformer, Transformer\n\n\nclass DynamicsMaskGIT(nnx.Module):\n """"""\n MaskGIT dynamics model\n\n Dimension keys:\n B: batch size\n T: sequence length\n N: number of patches per frame\n L: latent dimension\n V: vocabulary size (number of latents)\n """"""\n\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_latents: int,\n latent_action_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n max_noise_level: float,\n noise_buckets: int,\n mask_limit: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_latents = num_latents\n self.latent_action_dim = latent_action_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.max_noise_level = max_noise_level\n self.noise_buckets = noise_buckets\n self.mask_limit = mask_limit\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.transformer = STTransformer(\n self.model_dim,\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=rngs)\n self.mask_token = nnx.Param(\n nnx.initializers.lecun_uniform()(rngs.params(), (1, 1, 1, self.model_dim))\n )\n self.action_up = nnx.Linear(\n self.latent_action_dim,\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.noise_level_embed = nnx.Embed(\n self.noise_buckets, self.model_dim, rngs=rngs\n )\n\n def __call__(\n self,\n batch: Dict[str, jax.Array],\n ) -> tuple[jax.Array, jax.Array]:\n # --- Mask videos ---\n video_tokens_BTN = batch[""video_tokens""]\n latent_actions_BTm11L = batch[""latent_actions""]\n vid_embed_BTNM = self.patch_embed(video_tokens_BTN)\n\n B, T, N, M = vid_embed_BTNM.shape\n rng, _rng_prob, *_rngs_mask = jax.random.split(batch[""mask_rng""], B + 2)\n mask_prob = jax.random.uniform(_rng_prob, shape=(B,), minval=self.mask_limit)\n per_sample_shape = vid_embed_BTNM.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(jnp.asarray(_rngs_mask), mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed_BTNM = jnp.where(\n jnp.expand_dims(mask, -1), self.mask_token.value, vid_embed_BTNM\n )\n\n # --- Sample noise ---\n rng, _rng_noise = jax.random.split(rng)\n noise_level_B11 = jax.random.uniform(\n _rng_noise, shape=(B,), minval=0.0, maxval=self.max_noise_level\n ).reshape(B, 1, 1)\n noise_bucket_idx_B11 = jnp.floor(\n (noise_level_B11 / self.max_noise_level) * self.noise_buckets\n ).astype(jnp.int32)\n noise_level_embed_B11M = self.noise_level_embed(noise_bucket_idx_B11)\n 
noise_level_embed_BT1M = jnp.tile(noise_level_embed_B11M, (1, T, 1, 1))\n vid_embed_BTNM += jnp.expand_dims(noise_level_B11, -1)\n\n # --- Predict transition ---\n act_embed_BTm11M = self.action_up(latent_actions_BTm11L)\n padded_act_embed_BT1M = jnp.pad(\n act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0))\n )\n vid_embed_BTNp2M = jnp.concatenate(\n [padded_act_embed_BT1M, noise_level_embed_BT1M, vid_embed_BTNM], axis=2\n )\n logits_BTNp2V = self.transformer(vid_embed_BTNp2M)\n logits_BTNV = logits_BTNp2V[:, :, 2:]\n return logits_BTNV, mask\n\n\nclass DynamicsCausal(nnx.Module):\n """"""Causal dynamics model""""""\n\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_latents: int,\n latent_action_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_latents = num_latents\n self.latent_action_dim = latent_action_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.decode = decode\n\n self.transformer = Transformer(\n self.model_dim,\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=self.decode,\n rngs=rngs,\n )\n self.patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=rngs)\n self.action_up = nnx.Linear(\n self.latent_action_dim,\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(\n self,\n batch: Dict[str, jax.Array],\n ) -> tuple[jax.Array, jax.Array]:\n video_tokens_BTN = batch[""video_tokens""]\n latent_actions_BTm11L = batch[""latent_actions""]\n vid_embed_BTNM = self.patch_embed(video_tokens_BTN)\n act_embed_BTm11M = self.action_up(latent_actions_BTm11L)\n padded_act_embed_BT1M = jnp.pad(\n act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0))\n )\n vid_embed_BTNp1M = jnp.concatenate(\n [padded_act_embed_BT1M, vid_embed_BTNM], axis=2\n )\n logits_BTNp1V = self.transformer(vid_embed_BTNp1M)\n logits_BTNV = logits_BTNp1V[:, :, :-1]\n return logits_BTNV, jnp.ones_like(video_tokens_BTN)\n",python,tab +719,2816463,"jasmine/models/dynamics.py",606,3599," mask_limit: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_latents = num_latents\n self.latent_action_dim = latent_action_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.mask_limit = mask_limit\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.transformer = STTransformer(\n self.model_dim,\n self.model_dim,\n self.ffn_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.patch_embed = nnx.Embed(self.num_latents, self.model_dim, rngs=rngs)\n self.mask_token = nnx.Param(\n nnx.initializers.lecun_uniform()(rngs.params(), (1, 1, 1, self.model_dim))\n )\n self.action_up = nnx.Linear(\n self.latent_action_dim,\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(\n self,\n batch: Dict[str, 
jax.Array],\n ) -> tuple[jax.Array, jax.Array]:\n # --- Mask videos ---\n video_tokens_BTN = batch[""video_tokens""]\n latent_actions_BTm11L = batch[""latent_actions""]\n vid_embed_BTNM = self.patch_embed(video_tokens_BTN)\n\n batch_size = vid_embed_BTNM.shape[0]\n _rng_prob, *_rngs_mask = jax.random.split(batch[""mask_rng""], batch_size + 1)\n mask_prob = jax.random.uniform(\n _rng_prob, shape=(batch_size,), minval=self.mask_limit\n )\n per_sample_shape = vid_embed_BTNM.shape[1:-1]\n mask = jax.vmap(\n lambda rng, prob: jax.random.bernoulli(rng, prob, per_sample_shape),\n in_axes=(0, 0),\n )(jnp.asarray(_rngs_mask), mask_prob)\n mask = mask.at[:, 0].set(False)\n vid_embed_BTNM = jnp.where(\n jnp.expand_dims(mask, -1), self.mask_token.value, vid_embed_BTNM\n )\n\n # --- Predict transition ---\n act_embed_BTm11M = self.action_up(latent_actions_BTm11L)\n padded_act_embed_BT1M = jnp.pad(\n act_embed_BTm11M, ((0, 0), (1, 0), (0, 0), (0, 0))\n )\n padded_act_embed_BTNM = jnp.broadcast_to(\n padded_act_embed_BT1M, vid_embed_BTNM.shape\n )\n vid_embed_BTNM += padded_act_embed_BTNM\n logits_BTNV = self.transformer(vid_embed_BTNM)\n",python,content +720,2820522,"TERMINAL",0,0,"q",,terminal_output +721,2820597,"TERMINAL",0,0,"u",,terminal_output +722,2820662,"TERMINAL",0,0,"e",,terminal_output +723,2820729,"TERMINAL",0,0,"u",,terminal_output +724,2820858,"TERMINAL",0,0,"e",,terminal_output +725,2820955,"TERMINAL",0,0,"\r\n[?2004l\r[?1049h(B[?7hEvery 1.0s: squeue --mehkn0730.localdomain: Thu Sep 25 11:28:30 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3520226 accelerat interact tum_cte0 R40:36\t 1 hkn0730",,terminal_output +726,2821919,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn0730:~/Projects/jasmine[?2004h(jasmine) [tum_cte0515@hkn0730 jasmine]$ ",,terminal_output
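The dynamics.py diff recorded above adds noise-level conditioning to DynamicsMaskGIT: a continuous noise level sampled in [0, max_noise_level) is discretized into `noise_buckets` bins, embedded via `nnx.Embed`, tiled across time, and concatenated next to the action token. A minimal standalone sketch of that bucketing step, with illustrative sizes (the real `model_dim`, bucket count, and noise range are not fixed by this log):

```python
# Minimal sketch of the noise-level bucketing from the DynamicsMaskGIT diff.
import jax
import jax.numpy as jnp
import flax.nnx as nnx

max_noise_level, noise_buckets, model_dim = 0.3, 10, 8  # illustrative sizes
rngs = nnx.Rngs(0)
noise_level_embed = nnx.Embed(noise_buckets, model_dim, rngs=rngs)

B, T = 4, 12
noise_level_B11 = jax.random.uniform(
    jax.random.key(1), shape=(B,), minval=0.0, maxval=max_noise_level
).reshape(B, 1, 1)
# Same bucketing as the model: floor(level / max * buckets). Because
# uniform's maxval is exclusive, the index stays in [0, noise_buckets - 1].
noise_bucket_idx_B11 = jnp.floor(
    (noise_level_B11 / max_noise_level) * noise_buckets
).astype(jnp.int32)
noise_level_embed_B11M = noise_level_embed(noise_bucket_idx_B11)
# Tiled across time so each frame carries one conditioning token; the model
# then concatenates [action, noise, patches] along the patch axis.
noise_level_embed_BT1M = jnp.tile(noise_level_embed_B11M, (1, T, 1, 1))
print(noise_level_embed_BT1M.shape)  # (4, 12, 1, 8)
```

Note the concatenation changes the token count per frame from N+1 to N+2, which is why the new `__call__` slices `logits_BTNp2V[:, :, 2:]` where the replaced version added the action embedding directly to the patch embeddings instead.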